Dataset schema:
  repo             string   (length 2 to 99)
  file             string   (length 13 to 225)
  code             string   (length 0 to 18.3M)
  file_length      int64    (0 to 18.3M)
  avg_line_length  float64  (0 to 1.36M)
  max_line_length  int64    (0 to 4.26M)
  extension_type   string   (1 class)
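The schema above is what a Hugging Face-style dataset viewer emits for a code corpus. As a quick orientation, the following minimal sketch shows how rows with these columns could be loaded and filtered with the datasets library; the dataset path "your-org/your-code-dataset" is a hypothetical placeholder, not the real identifier.

# minimal sketch, assuming a Hugging Face dataset with the columns above;
# "your-org/your-code-dataset" is a hypothetical placeholder
from datasets import load_dataset

ds = load_dataset("your-org/your-code-dataset", split="train")
# keep only Python files with reasonable maximum line lengths
ds = ds.filter(lambda row: row["extension_type"] == "py"
               and row["max_line_length"] < 120)
print(ds[0]["repo"], ds[0]["file"], ds[0]["file_length"])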
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-Large1-fix-LLGF/model.py
#!/usr/bin/env python
"""
model.py

Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function

import os
import sys
import numpy as np

import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func

import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"

############################
## FOR pre-trained MODEL
############################
import fairseq

class SSLModel():
    def __init__(self, cp_path, ssl_orig_output_dim):
        """ SSLModel(cp_path, ssl_orig_output_dim)

        Args:
          cp_path: string, path to the pre-trained SSL model
          ssl_orig_output_dim: int, dimension of the SSL model output feature
        """
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [cp_path])
        self.model = model[0]
        self.out_dim = ssl_orig_output_dim
        return

    def extract_feat(self, input_data):
        """ feature = extract_feat(input_data)
        Args:
          input_data: tensor, waveform, (batch, length)
        Return:
          feature: tensor, feature, (batch, frame_num, feat_dim)
        """
        # move the model to the same device/dtype as the input if necessary
        if next(self.model.parameters()).device != input_data.device \
           or next(self.model.parameters()).dtype != input_data.dtype:
            self.model.to(input_data.device, dtype=input_data.dtype)
            self.model.eval()

        with torch.no_grad():
            if input_data.ndim == 3:
                input_tmp = input_data[:, :, 0]
            else:
                input_tmp = input_data
            # [batch, length, dim]
            emb = self.model(input_tmp, mask=False, features_only=True)['x']
        return emb

# not a good way to do it, but the path is fixed
ssl_path = os.path.dirname(__file__) \
           + '/../../../SSL_pretrained/libri960_big.pt'
# This model produces 1024 output feature dimensions (per frame)
ssl_orig_output_dim = 1024
# SSL model is declared as a global var since it is fixed
g_ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)

#################
## Misc functions
#################

# A function to load in/out labels for OOD detection. This is just a
# placeholder in this project
g_attack_map = {}

def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
    """ Parse the protocol of ASVspoof2019 and get bonafide/spoof for each
    trial. The format is:
      SPEAKER  TRIAL_NAME   - SPOOF_TYPE TAG
      LA_0031  LA_E_5932896 - A13        spoof
      LA_0030  LA_E_5849185 - -          bonafide
    ...
    input:
    -----
      protocol_filepaths: string or list of strings, path(s) to the protocol
                          file(s)
      target_row: int, default -1, use line[-1] as the target label
    output:
    -------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
    """
    data_buffer = nii_asvspoof.CustomDict(missing_value=True)
    if type(protocol_filepaths) is str:
        tmp = [protocol_filepaths]
    else:
        tmp = protocol_filepaths
    for protocol_filepath in tmp:
        if len(protocol_filepath) and os.path.isfile(protocol_filepath):
            with open(protocol_filepath, 'r') as file_ptr:
                for line in file_ptr:
                    line = line.rstrip('\n')
                    cols = line.split(sep)
                    if g_map:
                        try:
                            data_buffer[cols[1]] = g_map[cols[target_row]]
                        except KeyError:
                            data_buffer[cols[1]] = False
                    else:
                        data_buffer[cols[1]] = True
    return data_buffer

##############
## FOR MODEL
##############

class Model(torch_nn.Module):
    """ Model definition
    """
    def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
        super(Model, self).__init__()

        ##### required part, no need to change #####
        # mean and std of input and output
        in_m, in_s, out_m, out_s = self.prepare_mean_std(
            in_dim, out_dim, args, prj_conf, mean_std)
        self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
        self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
        self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
        self.output_std = torch_nn.Parameter(out_s, requires_grad=False)

        # a flag for debugging (by default False)
        #self.model_debug = False
        #self.validation = False
        #####

        ####
        # on input waveform and output target
        ####
        # Load protocol and prepare the target data for network training
        protocol_f = prj_conf.optional_argument
        # Load CM protocol (if available)
        self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
        # Load OOD protocol (if available)
        self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
                                                    ' ', -2)

        # Working sampling rate
        #  torchaudio may be used to change the sampling rate
        #self.m_target_sr = 16000

        ####
        # optional configs (not used)
        ####
        # re-sampling (optional)
        #self.m_resampler = torchaudio.transforms.Resample(
        #    prj_conf.wav_samp_rate, self.m_target_sr)
        # vad (optional)
        #self.m_vad = torchaudio.transforms.Vad(sample_rate=self.m_target_sr)
        # flag for balanced class (temporary use)
        #self.v_flag = 1

        ####
        # front-end configuration
        #  multiple front-end configurations may be used
        #  by default, use a single front-end
        ####
        #
        self.v_feat_dim = [128]
        # number of sub-models (by default, a single model)
        self.v_submodels = len(self.v_feat_dim)
        # dimension of embedding vectors
        #  here, the embedding is just the activation before sigmoid()
        self.v_emd_dim = None
        self.v_out_class = 2

        ####
        # create network
        ####
        # 1st part of the classifier
        self.m_transform = []
        #
        self.m_before_pooling = []
        # 2nd part of the classifier
        self.m_output_act = []
        # front-end
        self.m_frontend = []
        # confidence predictor
        self.m_conf = []

        # it can handle models with multiple front-end configurations
        #  by default, only a single front-end
        for idx, v_feat_dim in enumerate(self.v_feat_dim):
            self.m_transform.append(
                torch_nn.Sequential(
                    torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),
                    torch_nn.BatchNorm2d(48, affine=False),

                    torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(48, affine=False),
                    torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(64, affine=False),
                    torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Dropout(0.7)
                )
            )

            self.m_before_pooling.append(
                torch_nn.Sequential(
                    nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
                                      (v_feat_dim//16) * 32),
                    nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
                                      (v_feat_dim//16) * 32)
                )
            )

            if self.v_emd_dim is None:
                self.v_emd_dim = (v_feat_dim // 16) * 32
            else:
                assert self.v_emd_dim == (v_feat_dim//16)*32, "v_emd_dim error"

            self.m_output_act.append(
                torch_nn.Linear((v_feat_dim // 16) * 32, self.v_out_class)
            )

            self.m_frontend.append(
                torch_nn.Linear(g_ssl_model.out_dim, v_feat_dim)
            )

        self.m_frontend = torch_nn.ModuleList(self.m_frontend)
        self.m_transform = torch_nn.ModuleList(self.m_transform)
        self.m_output_act = torch_nn.ModuleList(self.m_output_act)
        self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)

        # output
        self.m_loss = torch_nn.CrossEntropyLoss()
        self.m_temp = 1
        self.m_lambda = 0.
        self.m_e_m_in = -25.0
        self.m_e_m_out = -7.0

        # done
        return

    def prepare_mean_std(self, in_dim, out_dim, args, prj_conf,
                         data_mean_std=None):
        """ prepare mean and std for data processing
        This is required for the Pytorch project, but not relevant to this code
        """
        if data_mean_std is not None:
            in_m = torch.from_numpy(data_mean_std[0])
            in_s = torch.from_numpy(data_mean_std[1])
            out_m = torch.from_numpy(data_mean_std[2])
            out_s = torch.from_numpy(data_mean_std[3])
            if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
                print("Input dim: {:d}".format(in_dim))
                print("Mean dim: {:d}".format(in_m.shape[0]))
                print("Std dim: {:d}".format(in_s.shape[0]))
                print("Input dimension incompatible")
                sys.exit(1)
            if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
                print("Output dim: {:d}".format(out_dim))
                print("Mean dim: {:d}".format(out_m.shape[0]))
                print("Std dim: {:d}".format(out_s.shape[0]))
                print("Output dimension incompatible")
                sys.exit(1)
        else:
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
        return in_m, in_s, out_m, out_s

    def normalize_input(self, x):
        """ normalizing the input data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from the network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, datalength):
        """ simple fixed front-end to extract features
        input:
        ------
          wav: waveform
          idx: index of the trial in the mini-batch
          datalength: list of data lengths in the mini-batch
        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_ssl_feat = g_ssl_model.extract_feat(wav.squeeze(-1))
        # return
        return self.m_frontend[idx](x_ssl_feat)

    def _compute_embedding(self, x, datalength):
        """ definition of the forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # number of sub-models
        batch_size = x.shape[0]

        # buffer to store output embeddings from sub-models
        output_emb = torch.zeros(
            [batch_size * self.v_submodels, self.v_emd_dim],
            device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, (m_trans, m_be_pool, m_output) in \
            enumerate(zip(self.m_transform, self.m_before_pooling,
                          self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pass through the LSTM, then add the residual input
            hidden_features_lstm = m_be_pool(hidden_features)

            #  5. average pooling over frames, then store the embedding
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb

        return output_emb

    def _compute_logit(self, feature_vec, inference=False):
        """
        """
        # number of sub-models
        batch_size = feature_vec.shape[0]

        # buffer to store output scores from sub-models
        output_act = torch.zeros(
            [batch_size * self.v_submodels, self.v_out_class],
            device=feature_vec.device, dtype=feature_vec.dtype)

        # compute scores for each sub-model
        for idx, m_output in enumerate(self.m_output_act):
            tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
            output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)

        # output_act is [batch * submodel, output-class]
        return output_act

    def _compute_score(self, logits):
        """
        """
        # logits is [batch * submodel, output-class]
        #  [:, 1] denotes being bonafide
        if logits.shape[1] == 2:
            return logits[:, 1] - logits[:, 0]
        else:
            return logits[:, -1]

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def _clamp_prob(self, input_prob, clamp_val=1e-12):
        return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)

    def _get_in_out_indx(self, filenames):
        in_indx = []
        out_indx = []
        for x, y in enumerate(filenames):
            if self.in_out_parser[y]:
                in_indx.append(x)
            else:
                out_indx.append(x)
        return np.array(in_indx), np.array(out_indx)

    def _energy(self, logits):
        """
        """
        # - T \log \sum_y \exp (logits[x, y] / T)
        eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
        return eng

    def _loss(self, logits, targets, energy, in_indx, out_indx):
        """
        """
        # cross-entropy loss on in-dist. data
        if len(in_indx):
            loss = self.m_loss(logits[in_indx], targets[in_indx])
        else:
            loss = 0

        # loss on energy of in-dist. data
        if len(in_indx):
            loss += self.m_lambda * torch.pow(
                torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()

        # loss on energy of out-dist. data
        if len(out_indx):
            loss += self.m_lambda * torch.pow(
                torch_nn_func.relu(self.m_e_m_out - energy[out_indx]),
                2).mean()

        return loss

    def forward(self, x, fileinfo):
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        # skip sentences that are too short
        if not self.training and x.shape[1] < 3000:
            targets = self._get_target(filenames)
            for filename, target in zip(filenames, targets):
                print("Output, %s, %d, %f, %f" % (
                    filename, target, 0.0, 0.0))
            return None

        feature_vec = self._compute_embedding(x, datalength)
        logits = self._compute_logit(feature_vec)
        energy = self._energy(logits)
        in_indx, out_indx = self._get_in_out_indx(filenames)

        if self.training:
            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=torch.long)
            target_vec = target_vec.repeat(self.v_submodels)

            # loss
            loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
            return loss
        else:
            scores = self._compute_score(logits)
            targets = self._get_target(filenames)
            for filename, target, score, energytmp in \
                zip(filenames, targets, scores, energy):
                print("Output, %s, %d, %f, %f" % (
                    filename, target, score.item(), -energytmp.item()))
            # don't write the output scores to a single file
            return None

    def get_embedding(self, x, fileinfo):
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        feature_vec = self._compute_embedding(x, datalength)
        return feature_vec


class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        """
        """

    def compute(self, outputs, target):
        """
        """
        return outputs


if __name__ == "__main__":
    print("Definition of model")
file_length: 19,250 | avg_line_length: 33.749097 | max_line_length: 86 | extension_type: py
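The _energy and _loss methods in the model above combine cross-entropy with squared hinge penalties on the free energy E(x) = -T * logsumexp(logits / T), pushing in-distribution energies below the margin m_in and out-of-distribution energies above m_out (note the model sets m_lambda to 0 by default, disabling the energy terms). A standalone sketch of that computation, using the same margins as the model (T=1, m_in=-25, m_out=-7):

# standalone sketch of the energy-margin loss used in Model._loss above
import torch
import torch.nn.functional as F

def energy_margin_loss(logits, targets, is_in_dist, T=1.0,
                       m_in=-25.0, m_out=-7.0, lam=0.0):
    # free energy: E(x) = -T * logsumexp(logits / T)
    energy = -T * torch.logsumexp(logits / T, dim=1)
    in_idx, out_idx = is_in_dist, ~is_in_dist
    # cross-entropy on in-distribution data
    loss = F.cross_entropy(logits[in_idx], targets[in_idx])
    # in-dist. energy should stay below m_in, out-dist. above m_out
    loss += lam * F.relu(energy[in_idx] - m_in).pow(2).mean()
    if out_idx.any():
        loss += lam * F.relu(m_out - energy[out_idx]).pow(2).mean()
    return loss

logits = torch.randn(8, 2)
targets = torch.randint(0, 2, (8,))
is_in = torch.ones(8, dtype=torch.bool)
print(energy_margin_loss(logits, targets, is_in, lam=0.1))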
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/main.py
#!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects

The training/inference process wrapper for active learning.
The base is main_mergedataset.py

Requires model.py and config.py (config_merge_datasets.py)

Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import copy
import torch
import importlib

import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager_AL as nii_nn_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper_base
import core_scripts.startup_config as nii_startup

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"


def main():
    """ main(): the default wrapper for the training and inference process
    Please prepare config.py and model.py
    """
    # argument initialization
    args = nii_arg_parse.f_args_parsed()

    #
    nii_warn.f_print_w_date("Start program", level='h')
    nii_warn.f_print("Load module: %s" % (args.module_config))
    nii_warn.f_print("Load module: %s" % (args.module_model))
    prj_conf = importlib.import_module(args.module_config)
    prj_model = importlib.import_module(args.module_model)

    # initialization
    nii_startup.set_random_seed(args.seed, args)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # prepare data io
    if not args.inference:
        params = {'batch_size': args.batch_size,
                  'shuffle': args.shuffle,
                  'num_workers': args.num_workers,
                  'sampler': args.sampler}

        in_trans_fns = prj_conf.input_trans_fns \
            if hasattr(prj_conf, 'input_trans_fns') else None
        out_trans_fns = prj_conf.output_trans_fns \
            if hasattr(prj_conf, 'output_trans_fns') else None

        # Load file list and create data loader
        trn_lst = prj_conf.trn_list
        trn_set = nii_dset.NII_MergeDataSetLoader(
            prj_conf.trn_set_name,
            trn_lst,
            prj_conf.input_dirs,
            prj_conf.input_exts,
            prj_conf.input_dims,
            prj_conf.input_reso,
            prj_conf.input_norm,
            prj_conf.output_dirs,
            prj_conf.output_exts,
            prj_conf.output_dims,
            prj_conf.output_reso,
            prj_conf.output_norm,
            './',
            params=params,
            truncate_seq=prj_conf.truncate_seq,
            min_seq_len=prj_conf.minimum_len,
            save_mean_std=True,
            wav_samp_rate=prj_conf.wav_samp_rate,
            way_to_merge=args.way_to_merge_datasets,
            global_arg=args,
            dset_config=prj_conf,
            input_augment_funcs=in_trans_fns,
            output_augment_funcs=out_trans_fns)

        # Load data pool and create data loader
        pool_lst = prj_conf.al_pool_list
        pool_set = nii_dset.NII_MergeDataSetLoader(
            prj_conf.al_pool_set_name,
            pool_lst,
            prj_conf.al_pool_in_dirs,
            prj_conf.input_exts,
            prj_conf.input_dims,
            prj_conf.input_reso,
            prj_conf.input_norm,
            prj_conf.al_pool_out_dirs,
            prj_conf.output_exts,
            prj_conf.output_dims,
            prj_conf.output_reso,
            prj_conf.output_norm,
            './',
            params=params,
            truncate_seq=prj_conf.truncate_seq,
            min_seq_len=prj_conf.minimum_len,
            save_mean_std=True,
            wav_samp_rate=prj_conf.wav_samp_rate,
            way_to_merge=args.way_to_merge_datasets,
            global_arg=args,
            dset_config=prj_conf,
            input_augment_funcs=in_trans_fns,
            output_augment_funcs=out_trans_fns)

        if hasattr(prj_conf, 'val_input_dirs'):
            val_input_dirs = prj_conf.val_input_dirs
        else:
            val_input_dirs = prj_conf.input_dirs

        if hasattr(prj_conf, 'val_output_dirs'):
            val_output_dirs = prj_conf.val_output_dirs
        else:
            val_output_dirs = prj_conf.output_dirs

        if prj_conf.val_list is not None:
            val_lst = prj_conf.val_list
            val_set = nii_dset.NII_MergeDataSetLoader(
                prj_conf.val_set_name,
                val_lst,
                val_input_dirs,
                prj_conf.input_exts,
                prj_conf.input_dims,
                prj_conf.input_reso,
                prj_conf.input_norm,
                val_output_dirs,
                prj_conf.output_exts,
                prj_conf.output_dims,
                prj_conf.output_reso,
                prj_conf.output_norm,
                './',
                params=params,
                truncate_seq=prj_conf.truncate_seq,
                min_seq_len=prj_conf.minimum_len,
                save_mean_std=False,
                wav_samp_rate=prj_conf.wav_samp_rate,
                way_to_merge=args.way_to_merge_datasets,
                global_arg=args,
                dset_config=prj_conf,
                input_augment_funcs=in_trans_fns,
                output_augment_funcs=out_trans_fns)
        else:
            val_set = None

        # initialize the model and loss function
        model = prj_model.Model(trn_set.get_in_dim(),
                                trn_set.get_out_dim(),
                                args, prj_conf, trn_set.get_data_mean_std())
        loss_wrapper = prj_model.Loss(args)

        # initialize the optimizer
        optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)

        # if necessary, resume training
        if args.trained_model == "":
            checkpoint = None
        else:
            checkpoint = torch.load(args.trained_model)

        # pre-training using the standard procedure
        #  change args
        args_tmp = copy.deepcopy(args)
        args_tmp.epochs = args.active_learning_pre_train_epoch_num
        args_tmp.not_save_each_epoch = True
        args_tmp.save_trained_name += '_pretrained'
        args_tmp.active_learning_cycle_num = 0
        pretrained_name = args_tmp.save_trained_name + args_tmp.save_model_ext

        if args.active_learning_pre_train_epoch_num:
            nii_warn.f_print_w_date("Normal training (warm-up) phase",
                                    level='h')
            nii_warn.f_print("Normal training for {:d} epochs".format(
                args.active_learning_pre_train_epoch_num))
            op_wrapper_tmp = nii_op_wrapper.OptimizerWrapper(model, args_tmp)
            loss_wrapper_tmp = prj_model.Loss(args_tmp)

            # note: pass the temporary loss wrapper for the warm-up run
            nii_nn_wrapper_base.f_train_wrapper(
                args_tmp, model, loss_wrapper_tmp, device, op_wrapper_tmp,
                trn_set, val_set, checkpoint)
            checkpoint = torch.load(pretrained_name)

        elif checkpoint is None:
            if os.path.isfile(pretrained_name):
                checkpoint = torch.load(pretrained_name)
                nii_warn.f_print("Use pretrained model before active learning")
            else:
                nii_warn.f_print("Use seed model to initialize")

        nii_warn.f_print_w_date("Active learning phase", level='h')
        # start training
        nii_nn_wrapper.f_train_wrapper(
            args, model, loss_wrapper, device, optimizer_wrapper,
            trn_set, pool_set, val_set, checkpoint)
        # done for training

    else:
        # for inference
        #  by default, no truncating, no shuffling
        params = {'batch_size': args.batch_size,
                  'shuffle': False,
                  'num_workers': args.num_workers,
                  'sampler': args.sampler}

        in_trans_fns = prj_conf.test_input_trans_fns \
            if hasattr(prj_conf, 'test_input_trans_fns') else None
        out_trans_fns = prj_conf.test_output_trans_fns \
            if hasattr(prj_conf, 'test_output_trans_fns') else None

        if type(prj_conf.test_list) is list:
            t_lst = prj_conf.test_list
        else:
            t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)

        test_set = nii_dset.NII_MergeDataSetLoader(
            prj_conf.test_set_name,
            t_lst,
            prj_conf.test_input_dirs,
            prj_conf.input_exts,
            prj_conf.input_dims,
            prj_conf.input_reso,
            prj_conf.input_norm,
            prj_conf.test_output_dirs,
            prj_conf.output_exts,
            prj_conf.output_dims,
            prj_conf.output_reso,
            prj_conf.output_norm,
            './',
            params=params,
            truncate_seq=None,
            min_seq_len=None,
            save_mean_std=False,
            wav_samp_rate=prj_conf.wav_samp_rate,
            way_to_merge=args.way_to_merge_datasets,
            global_arg=args,
            dset_config=prj_conf,
            input_augment_funcs=in_trans_fns,
            output_augment_funcs=out_trans_fns)

        # initialize model
        model = prj_model.Model(test_set.get_in_dim(),
                                test_set.get_out_dim(),
                                args, prj_conf)

        if args.trained_model == "":
            print("No model is loaded by --trained-model for inference")
            print("By default, load %s%s" % (args.save_trained_name,
                                             args.save_model_ext))
            checkpoint = torch.load("%s%s" % (args.save_trained_name,
                                              args.save_model_ext))
        else:
            checkpoint = torch.load(args.trained_model)

        # do inference and output data
        nii_nn_wrapper_base.f_inference_wrapper(
            args, model, device, test_set, checkpoint)
    # done
    return


if __name__ == "__main__":
    main()
file_length: 10,498 | avg_line_length: 36.766187 | max_line_length: 80 | extension_type: py
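main() resolves the config and model modules at run time via importlib, so the wrapper, configuration, and model definition stay decoupled. The minimal sketch below isolates that pattern; the module names mirror files in this project, and the interface comment lists only attributes the configs above actually define.

# minimal sketch of the dynamic module loading used by main();
# module names correspond to files shown in this section
import importlib

module_config = "config_AL_train_toyset"   # value normally taken from the CLI
module_model = "model"                     # value normally taken from the CLI

prj_conf = importlib.import_module(module_config)
prj_model = importlib.import_module(module_model)

# the wrapper only relies on an agreed interface, e.g.:
#   prj_conf.trn_list, prj_conf.input_dirs, prj_model.Model, prj_model.Loss
print(prj_conf.trn_set_name, prj_model.Model.__name__)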
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/config_AL_train_toyset.py
#!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects

Usage:
 For training, change Configuration for training stage
 For inference, change Configuration for inference stage
"""
import os

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"

#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
#  this will be used as the name of cache files created for each set
#
#  Name for the seed training set; in case you merge multiple data sets as
#  a single training set, just specify the name for each subset.
#  Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_trn']
# Name for the development set
val_set_name = ['asvspoof2019_toyset_val']

# For convenience, specify a path to the toy data set
#  because config*.py will be copied into model-*/config_AL_train_toyset/NN
#  we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example'

# File lists for training and development sets
#  (text file, one file name per line, without name extension)
#  we need to provide one lst for each subset
#  trn_list[n] will correspond to trn_set_name[n]
# for the training set
trn_list = [tmp + '/scp/train.lst']
# for the development set
val_list = [tmp + '/scp/val.lst']

# Directories for input data
#  We need to provide the path to the directory that saves the input data.
#  We assume waveforms for training and development of one subset
#  are stored in the same directory.
#  Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
#  If you need to specify a separate val_input_dirs:
#  val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
#  Each input_dirs[n] is a list,
#  for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
#  Here, the input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]

# Dimensions of input features
#  What is the dimension of the input feature
#  len(input_dims) should be equal to len(input_dirs[n])
#
#  Here, the input for each file is a single waveform, so the dimension is 1
input_dims = [1]

# File name extension for input features
#  input_exts = [name_extension_of_feature_1, ...]
#  len(input_exts) should be equal to len(input_dirs[n])
#
#  Here, the input file extension is .wav
#  We use .wav, not .flac
input_exts = ['.wav']

# Temporal resolution for input features
#  This is not relevant for CM but for other projects
#  len(input_reso) should be equal to len(input_dirs[n])
#  Here, it is 1 for waveform
input_reso = [1]

# Whether input features should be z-normalized
#  This is not relevant for CM but for other projects
#  len(input_norm) should be equal to len(input_dirs[n])
#  Here, it is False for waveform
#  We don't normalize the waveform
input_norm = [False]

# Similar configurations for output features
#  Here, we set the output to empty because we will load
#  the target labels from the protocol rather than output features
#  '.bin' is also a placeholder
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]

# ===
# For active learning pool data
# ===
# Similar configurations as above
#
# This is for demonstration; we still use the toy set as the pool set,
#  and we will merge the training and development sets as the pool set
#
# Name of the pool subsets
al_pool_set_name = ['pool_toyset_trn', 'pool_toyset_val']
# list of files for each pool subset
al_pool_list = [tmp + '/scp/train.lst', tmp + '/scp/val.lst']
# list of input data directories
al_pool_in_dirs = [[tmp + '/train_dev'], [tmp + '/train_dev']]
al_pool_out_dirs = [[] for x in al_pool_in_dirs]

# ===
# Waveform configuration
# ===
# Waveform sampling rate
#  wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000

# Truncate input sequences so that the maximum length = truncate_seq
#  When truncate_seq is larger, more GPU memory is required
#  If you don't want truncation, set truncate_seq = None
truncate_seq = 64000

# Minimum sequence length
#  If sequence length < minimum_len, this sequence is not used for training
#  minimum_len can be None
minimum_len = 8000

# Optional argument
#  This is used to load protocol(s)
#  Multiple protocol files can be specified in the list
#
#  Note that these protocols should cover all the
#  training, development, and pool set data.
#  Otherwise, the code will raise an error
#
#  Here, this protocol covers all the data in the toy set
optional_argument = [tmp + '/protocol.txt']

# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
                     + '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
#  this must be provided.
ssl_front_end_out_dim = 1024

#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
#  These are placeholders
test_set_name = trn_set_name + val_set_name

# List of test set data
#  for convenience, you may directly load the test_set list here
test_list = trn_list + val_list

# Directories for input features
#  test_input_dirs = [path_of_feature_1, path_of_feature_2, ...]
#  we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2

# Directories for output features, which are []
test_output_dirs = [[]] * 2
file_length: 5,691 | avg_line_length: 30.274725 | max_line_length: 75 | extension_type: py
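The comments in this config repeatedly state that the per-feature lists must align with input_dirs[n]. A small self-check along these lines can catch mismatches before the data loader does; check_config is a hypothetical helper, not part of the project.

# hypothetical sanity check for the list-length invariants stated above
def check_config(conf):
    n_feat = len(conf.input_dirs[0])
    for name in ('input_dims', 'input_exts', 'input_reso', 'input_norm'):
        assert len(getattr(conf, name)) == n_feat, \
            "%s must have %d entries" % (name, n_feat)
    # one file list per training subset
    assert len(conf.trn_list) == len(conf.trn_set_name)

import config_AL_train_toyset as conf
check_config(conf)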
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/config_auto.py
#!/usr/bin/env python
"""
config.py

This configuration file reads environment variables for configuration.
It is used for scoring.

It assumes that the input data will be waveform files (*.wav)

No need to change settings here
"""
import os

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"

#########################################################
## Configuration for training stage
#########################################################
trn_set_name = ['']
val_set_name = ['']
trn_list = ['']
val_list = ['']
input_dirs = [['']]
input_dims = [1]
input_exts = ['.wav']
input_reso = [1]
input_norm = [False]
output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]

# Waveform sampling rate
#  wav_samp_rate can be None if no waveform data is used
#  ASVspoof uses 16000 Hz
wav_samp_rate = 16000

# Truncate input sequences so that the maximum length = truncate_seq
#  When truncate_seq is larger, more GPU memory is required
#  If you don't want truncation, set truncate_seq = None
#  For ASVspoof, we don't truncate here
truncate_seq = None

# Minimum sequence length
#  If sequence length < minimum_len, this sequence is not used for training
#  minimum_len can be None
#  For ASVspoof, we don't set a minimum length for the input trial
minimum_len = None

# Optional argument
#  We will use this optional_argument to read the protocol file
#  When evaluating on an eval set without a protocol file, set this to ['']
optional_argument = ['']

# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#  We need this because the model definition is written in this file.
#  Its weights will be overwritten by the trained CM.
#
# path to the SSL model. It will be loaded as the front-end
ssl_front_end_path = os.path.dirname(__file__) \
                     + '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
ssl_front_end_out_dim = 1024

#########################################################
## Configuration for inference stage
#########################################################
# We use environment variables
# No need to change
test_set_name = [os.getenv('TEMP_DATA_NAME')]

# List of test set data
#  for convenience, you may directly load the test_set list here
test_list = [test_set_name[0] + '.lst']

# Directories for input features
#  test_input_dirs = [[path_of_feature_1, path_of_feature_2, ...]]
#  directory of the evaluation set waveforms
test_input_dirs = [[os.getenv('TEMP_DATA_DIR')]]

# Directories for output features, which are [[]]
test_output_dirs = [[]]
file_length: 2,644 | avg_line_length: 26.268041 | max_line_length: 75 | extension_type: py
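config_auto.py reads TEMP_DATA_NAME and TEMP_DATA_DIR at import time, so a scoring script has to export them before the module is imported. A minimal sketch; the set name and directory below are hypothetical examples.

# minimal sketch: set the environment before config_auto is imported
import os
os.environ['TEMP_DATA_NAME'] = 'eval_set_example'    # hypothetical set name
os.environ['TEMP_DATA_DIR'] = '/path/to/eval/wav'    # hypothetical directory

import config_auto
print(config_auto.test_set_name, config_auto.test_input_dirs)
# expects /path/to/eval/wav to contain the waveforms listed in
# eval_set_example.lst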
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-Adv/model.py
#!/usr/bin/env python
"""
model.py for the Active learning model

This model.py consists of two parts:
1. A CM with an SSL-based front-end and a linear back-end.
   The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
   but the code is revised and simplified.
2. A function al_retrieve_data_knowing_train to select data for training

The above function is called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function

import os
import sys
import numpy as np

import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func

import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
# needed for nii_display.f_die below (missing in the original file)
import core_scripts.other_tools.display as nii_display
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"

############################
## FOR pre-trained MODEL
############################
import fairseq as fq

class SSLModel(torch_nn.Module):
    def __init__(self, mpath, ssl_orig_output_dim):
        """ SSLModel(cp_path, ssl_orig_output_dim)

        Args
        ----
          mpath: string, path to the pre-trained SSL model
          ssl_orig_output_dim: int, dimension of the SSL model output feature
        """
        super(SSLModel, self).__init__()
        md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
        self.model = md[0]
        # this should be loaded from md
        self.out_dim = ssl_orig_output_dim
        return

    def extract_feat(self, input_data):
        """ output = extract_feat(input_data)

        input:
        ------
          input_data: tensor, (batch, length, 1) or (batch, length)
          datalength: list of int, length of wav in the mini-batch

        output:
        -------
          output: tensor, (batch, frame_num, frame_feat_dim)
        """
        # put the model on the GPU if it is not there yet
        if next(self.model.parameters()).device != input_data.device \
           or next(self.model.parameters()).dtype != input_data.dtype:
            self.model.to(input_data.device, dtype=input_data.dtype)

        # input should be in shape (batch, length)
        if input_data.ndim == 3:
            input_tmp = input_data[:, :, 0]
        else:
            input_tmp = input_data

        # emb has shape [batch, length, dim]
        emb = self.model(input_tmp, mask=False, features_only=True)['x']
        return emb

##############
## FOR MODEL
##############

class FrontEnd(torch_nn.Module):
    """ Front end wrapper
    """
    def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
        super(FrontEnd, self).__init__()

        # dimension of output feature
        self.out_dim = output_dim
        # whether to fix the SSL or not
        self.flag_fix_ssl = fix_ssl
        # ssl part
        self.ssl_model = SSLModel(mpath, ssl_out_dim)
        # post-transformation part
        self.m_front_end_process = torch_nn.Linear(
            self.ssl_model.out_dim, self.out_dim)
        return

    def set_flag_fix_ssl(self, fix_ssl):
        self.flag_fix_ssl = fix_ssl
        return

    def forward(self, wav):
        """ output = front_end(wav)

        input:
        ------
          wav: tensor, (batch, length, 1)

        output:
        -------
          output: tensor, (batch, frame_num, frame_feat_dim)
        """
        if self.flag_fix_ssl:
            self.ssl_model.eval()
            with torch.no_grad():
                x_ssl_feat = self.ssl_model.extract_feat(wav)
        else:
            x_ssl_feat = self.ssl_model.extract_feat(wav)
        output = self.m_front_end_process(x_ssl_feat)
        return output


class BackEnd(torch_nn.Module):
    """Back End Wrapper
    """
    def __init__(self, input_dim, out_dim, num_classes,
                 dropout_rate, dropout_flag=True, dropout_trials=[1]):
        super(BackEnd, self).__init__()

        # input feature dimension
        self.in_dim = input_dim
        # output embedding dimension
        self.out_dim = out_dim
        # number of output classes
        self.num_class = num_classes

        # dropout rate
        self.m_mcdp_rate = dropout_rate
        self.m_mcdp_flag = dropout_flag
        self.m_mcdp_num = dropout_trials

        # linear layer to produce output logits
        self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
        return

    def forward(self, feat):
        """ logits, emb_vec = back_end_emb(feat)

        input:
        ------
          feat: tensor, (batch, frame_num, feat_feat_dim)

        output:
        -------
          logits: tensor, (batch, num_output_class)
          emb_vec: tensor, (batch, emb_dim)
        """
        # through the frame-level network
        #  (batch, frame_num, self.out_dim)
        #  average pooling -> (batch, self.out_dim)
        feat_utt = feat.mean(1)

        # output linear
        logits = self.m_utt_level(feat_utt)
        return logits, feat_utt

    def inference(self, feat):
        """scores, emb_vec, energy = inference(feat)

        This is used for inference; the output includes the scores
        and confidence scores.

        input:
        ------
          feat: tensor, (batch, frame_num, feat_feat_dim)

        output:
        -------
          scores: tensor, (batch, 1)
          emb_vec: tensor, (batch, emb_dim)
          energy: tensor, (batch, 1)
        """
        # logits
        logits, feat_utt = self.forward(feat)

        # logits -> score
        scores = logits[:, 1] - logits[:, 0]

        # compute confidence using negative energy
        energy = nii_loss_util.neg_energy(logits)

        return scores, feat_utt, energy


class MainLossModule(torch_nn.Module):
    """ Loss wrapper
    """
    def __init__(self):
        super(MainLossModule, self).__init__()
        self.m_loss = torch_nn.CrossEntropyLoss()
        return

    def forward(self, logits, target):
        return self.m_loss(logits, target)


class FeatLossModule(torch_nn.Module):
    """ Loss wrapper over features
    Not used here
    """
    def __init__(self):
        super(FeatLossModule, self).__init__()
        return

    def forward(self, data, target):
        """
        """
        return 0


class Model(torch_nn.Module):
    """ Model definition
    """
    def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
        super(Model, self).__init__()

        ##### required part, no need to change #####
        # mean and std of input and output
        in_m, in_s, out_m, out_s = self.prepare_mean_std(
            in_dim, out_dim, args, prj_conf, mean_std)
        self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
        self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
        self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
        self.output_std = torch_nn.Parameter(out_s, requires_grad=False)

        # a flag for debugging (by default False)
        #self.model_debug = False
        #self.validation = False
        ############################################

        ####
        # auxiliary
        ####
        # flag of the current training stage
        #  this variable will be overwritten
        self.temp_flag = args.temp_flag

        ####
        # Load protocol and prepare the target data for network training
        ####
        protocol_f = prj_conf.optional_argument
        self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)

        ####
        # Bayesian parameter
        ####
        self.m_mcdp_rate = None
        self.m_mcdp_flag = True
        # if [1], we will only do one inference
        self.m_mcdropout_num = [1]

        ####
        # Model definition
        ####
        # front-end
        #  dimension of compressed front-end feature
        self.v_feat_dim = 128
        self.m_front_end = FrontEnd(self.v_feat_dim,
                                    prj_conf.ssl_front_end_path,
                                    prj_conf.ssl_front_end_out_dim)

        # back-end
        #  dimension of utterance-level embedding vectors
        self.v_emd_dim = self.v_feat_dim
        #  number of output classes
        self.v_out_class = 2
        self.m_back_end = BackEnd(self.v_feat_dim,
                                  self.v_emd_dim,
                                  self.v_out_class,
                                  self.m_mcdp_rate,
                                  self.m_mcdp_flag,
                                  self.m_mcdropout_num)

        #####
        # Loss function
        #####
        self.m_ce_loss = MainLossModule()
        self.m_cr_loss = FeatLossModule()
        # weight for the feature loss
        self.m_feat = 0.0

        # done
        return

    def prepare_mean_std(self, in_dim, out_dim, args, prj_conf,
                         data_mean_std=None):
        """ prepare mean and std for data processing
        This is required for the Pytorch project, but irrelevant to this code
        """
        if data_mean_std is not None:
            in_m = torch.from_numpy(data_mean_std[0])
            in_s = torch.from_numpy(data_mean_std[1])
            out_m = torch.from_numpy(data_mean_std[2])
            out_s = torch.from_numpy(data_mean_std[3])
            if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
                print("Input dim: {:d}".format(in_dim))
                print("Mean dim: {:d}".format(in_m.shape[0]))
                print("Std dim: {:d}".format(in_s.shape[0]))
                print("Input dimension incompatible")
                sys.exit(1)
            if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
                print("Output dim: {:d}".format(out_dim))
                print("Mean dim: {:d}".format(out_m.shape[0]))
                print("Std dim: {:d}".format(out_s.shape[0]))
                print("Output dimension incompatible")
                sys.exit(1)
        else:
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
        return in_m, in_s, out_m, out_s

    def normalize_input(self, x):
        """ normalizing the input data
        This is required for the Pytorch project, but irrelevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but irrelevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from the network
        This is required for the Pytorch project, but irrelevant to this code
        """
        return y * self.output_std + self.output_mean

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
        target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
        target = np.tile(target, bs)
        target = torch.tensor(target, device=device, dtype=dtype)
        return target

    def __inference(self, x, fileinfo):
        """
        """
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        # skip sentences that are too short
        if not self.training and x.shape[1] < 3000:
            targets = self._get_target(filenames)
            for filename, target in zip(filenames, targets):
                print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
                    filename, target, 0.0, 0.0, 0.0))
            return None

        # front-end
        feat_vec = self.m_front_end(x)
        # back-end
        scores, _, energy = self.m_back_end.inference(feat_vec)

        # print output
        targets = self._get_target(filenames)
        for filename, target, score, eps in \
            zip(filenames, targets, scores, energy):
            print("Output, {:s}, {:d}, {:f}, {:f}".format(
                filename, target, score.item(), eps.item()))

        # don't write the output scores to a single file
        return None

    def __forward_single_view(self, x, fileinfo):
        """
        """
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        # front-end & back-end
        feat_vec = self.m_front_end(x)
        logits, emb_vec = self.m_back_end(feat_vec)

        target = self._get_target(filenames)
        target_ = torch.tensor(target, device=x.device, dtype=torch.long)

        # loss
        loss = self.m_ce_loss(logits, target_)
        return loss

    def __forward_multi_view(self, x, fileinfo):
        """
        """
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        # input will be (batchsize, length, 1+num_spoofed, num_aug)
        bat_siz = x.shape[0]
        pad_len = x.shape[1]
        num_sys = x.shape[2]
        num_aug = x.shape[3]

        # to (batchsize * (1+num_spoofed) * num_aug, length)
        x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
        datalen_tmp = np.repeat(datalength, num_sys * num_aug)

        # target vector
        #  this is for the CE loss
        #  [1, 0, 0, ..., 1, 0, 0, ...]
        target = self._get_target_vec(num_sys, num_aug, bat_siz,
                                      x.device, torch.long)
        #  this is for the contrastive loss (ignore the augmentation)
        target_feat = self._get_target_vec(num_sys, 1, bat_siz,
                                           x.device, torch.long)

        # front-end & back-end
        feat_vec = self.m_front_end(x_new)
        logits, emb_vec = self.m_back_end(feat_vec)

        # CE loss
        loss_ce = self.m_ce_loss(logits, target)

        if self.m_feat:
            # feat loss
            loss_cr_1 = 0
            loss_cr_2 = 0

            # reshape to the multi-view format
            #  (batch, (1+num_spoof), nview, dimension...)
            feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
                                      feat_vec.shape[-1])
            emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)

            for bat_idx in range(bat_siz):
                loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
                    feat_vec_[bat_idx],
                    target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
                loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
                    emb_vec_[bat_idx],
                    target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])

            return [[loss_ce, loss_cr_1, loss_cr_2], [True, True, True]]
        else:
            return loss_ce

    def forward(self, x, fileinfo):
        """
        """
        if self.training and x.shape[2] > 1:
            # if training with multi-view data
            return self.__forward_multi_view(x, fileinfo)
        elif self.training:
            return self.__forward_single_view(x, fileinfo)
        else:
            return self.__inference(x, fileinfo)

    def get_embedding(self, x, fileinfo):
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # compute the utterance-level embedding via front-end and back-end
        #  (the original called an undefined self._compute_embedding)
        feat_vec = self.m_front_end(x)
        _, feature_vec = self.m_back_end(feat_vec)
        return feature_vec

    def al_retrieve_data_knowing_train(self,
                                       train_data_loader,
                                       pool_data_loader,
                                       num_sample):
        """idx = al_retrieve_data_knowing_train(
               train_data_loader, pool_data_loader, num_sample)

        Data retrieval function for active learning

        Args:
        -----
          train_data_loader: Pytorch DataLoader, for train data
          pool_data_loader: Pytorch DataLoader, for pool data
          num_sample: int, number of samples to be selected

        Return
        ------
          idx: list of index
        """

        def _adv_attack(data, data_grad, epsilon=0.3):
            return data + data_grad * epsilon

        def _feat_dis(feat1, feat2):
            # feat1 (batch, feat)
            # feat2 (batch, feat)
            edis = torch.cdist(feat1.unsqueeze(0), feat2.unsqueeze(0))[0]
            return torch.min(edis, dim=0)[0]

        # buffers
        #  note that data_loader.dataset.__len__() returns the number of
        #  individual samples, not the number of mini-batches
        idx_list = np.zeros([pool_data_loader.dataset.__len__()])
        conf_list = np.zeros([pool_data_loader.dataset.__len__()])
        #
        counter = 0

        # get gradients on one mini-batch of training data
        for data_idx, (x, y, data_info, idx_orig) in \
            enumerate(train_data_loader):

            filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
            datalength = [nii_seq_tk.parse_length(y) for y in data_info]

            if isinstance(x, torch.Tensor):
                x = x.to(self.input_std.device, dtype=self.input_std.dtype)
            else:
                nii_display.f_die("data input is not a tensor")

            # To collect gradient
            x.requires_grad = True

            # Forward pass (copied from forward())
            #  We cannot directly use forward() because that function requires
            #  self.training, and the mini-batch will be made balanced
            feat_vec = self.m_front_end(x)
            logits, _ = self.m_back_end(feat_vec)
            target = self._get_target(filenames)
            target_ = torch.tensor(target, device=x.device, dtype=torch.long)
            loss = self.m_ce_loss(logits, target_)

            # Backward pass
            self.zero_grad()
            loss.backward()

            # get gradient
            data_grad = x.grad.data
            break

        # create the adversarial example
        perturbed_data = _adv_attack(x, data_grad)

        # loop over the pool and find the nearest pool data
        with torch.no_grad():
            # feature vector for the adversarial example
            ad_feature_vec = self.m_front_end(perturbed_data)
            _, ad_feature_vec = self.m_back_end(ad_feature_vec)

            for data_idx, (x, y, data_info, idx_orig) in \
                enumerate(pool_data_loader):

                filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
                datalength = [nii_seq_tk.parse_length(y) for y in data_info]

                if isinstance(x, torch.Tensor):
                    x = x.to(self.input_std.device,
                             dtype=self.input_std.dtype)
                else:
                    nii_display.f_die("data input is not a tensor")

                or_feature_vec = self.m_front_end(x)
                _, or_feature_vec = self.m_back_end(or_feature_vec)

                scores = _feat_dis(ad_feature_vec, or_feature_vec)

                # add the distance scores and data indices to the buffers
                conf_list[counter:counter+x.shape[0]] = np.array(
                    [x.item() for x in scores])
                idx_list[counter:counter+x.shape[0]] = np.array(
                    idx_orig)
                counter += x.shape[0]

        # select the pool data nearest to the adversarial examples
        sorted_idx = np.argsort(conf_list)
        return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]

        return return_idx


class Loss():
    """ Wrapper for scripts; ignore it
    """
    def __init__(self, args):
        """
        """

    def compute(self, outputs, target):
        """
        """
        return outputs


if __name__ == "__main__":
    print("Definition of model")
file_length: 20,901 | avg_line_length: 32.125198 | max_line_length: 79 | extension_type: py
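al_retrieve_data_knowing_train above perturbs one training batch in the direction of the loss gradient (an FGSM-style step with epsilon=0.3) and then ranks pool samples by their embedding distance to the perturbed batch. The core geometry can be sketched standalone; emb_adv and emb_pool are stand-ins for the back-end embeddings of the perturbed train batch and the pool.

# standalone sketch of the distance-based ranking in
# al_retrieve_data_knowing_train; emb_adv / emb_pool stand in for the
# back-end embeddings of the perturbed train batch and the pool
import numpy as np
import torch

def rank_pool_by_distance(emb_adv, emb_pool, num_sample):
    # pairwise distances: (num_adv, num_pool)
    edis = torch.cdist(emb_adv.unsqueeze(0), emb_pool.unsqueeze(0))[0]
    # for each pool sample, distance to its closest adversarial example
    min_dis = torch.min(edis, dim=0)[0]
    # select the pool samples nearest to the decision-boundary region
    order = np.argsort(min_dis.numpy())
    return order[:num_sample]

emb_adv = torch.randn(4, 128)
emb_pool = torch.randn(100, 128)
print(rank_pool_by_distance(emb_adv, emb_pool, 5))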
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-Rem/model.py
#!/usr/bin/env python
"""
model.py for the Active learning model

This model.py consists of two parts:
1. A CM with an SSL-based front-end and a linear back-end.
   The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
   but the code is revised and simplified.
2. A function al_exclude_data to select data to be excluded from the pool,
   and then a function al_retrieve_data to select data from the pool
   (for training)

Both functions are called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function

import os
import sys
import numpy as np

import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func

import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
# needed for nii_display.f_die below (missing in the original file)
import core_scripts.other_tools.display as nii_display
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"

############################
## FOR pre-trained MODEL
############################
import fairseq as fq

class SSLModel(torch_nn.Module):
    def __init__(self, mpath, ssl_orig_output_dim):
        """ SSLModel(cp_path, ssl_orig_output_dim)

        Args
        ----
          mpath: string, path to the pre-trained SSL model
          ssl_orig_output_dim: int, dimension of the SSL model output feature
        """
        super(SSLModel, self).__init__()
        md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
        self.model = md[0]
        # this should be loaded from md
        self.out_dim = ssl_orig_output_dim
        return

    def extract_feat(self, input_data):
        """ output = extract_feat(input_data)

        input:
        ------
          input_data: tensor, (batch, length, 1) or (batch, length)
          datalength: list of int, length of wav in the mini-batch

        output:
        -------
          output: tensor, (batch, frame_num, frame_feat_dim)
        """
        # put the model on the GPU if it is not there yet
        if next(self.model.parameters()).device != input_data.device \
           or next(self.model.parameters()).dtype != input_data.dtype:
            self.model.to(input_data.device, dtype=input_data.dtype)

        # input should be in shape (batch, length)
        if input_data.ndim == 3:
            input_tmp = input_data[:, :, 0]
        else:
            input_tmp = input_data

        # emb has shape [batch, length, dim]
        emb = self.model(input_tmp, mask=False, features_only=True)['x']
        return emb

##############
## FOR MODEL
##############

class FrontEnd(torch_nn.Module):
    """ Front end wrapper
    """
    def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
        super(FrontEnd, self).__init__()

        # dimension of output feature
        self.out_dim = output_dim
        # whether to fix the SSL or not
        self.flag_fix_ssl = fix_ssl
        # ssl part
        self.ssl_model = SSLModel(mpath, ssl_out_dim)
        # post-transformation part
        self.m_front_end_process = torch_nn.Linear(
            self.ssl_model.out_dim, self.out_dim)
        return

    def set_flag_fix_ssl(self, fix_ssl):
        self.flag_fix_ssl = fix_ssl
        return

    def forward(self, wav):
        """ output = front_end(wav)

        input:
        ------
          wav: tensor, (batch, length, 1)

        output:
        -------
          output: tensor, (batch, frame_num, frame_feat_dim)
        """
        if self.flag_fix_ssl:
            self.ssl_model.eval()
            with torch.no_grad():
                x_ssl_feat = self.ssl_model.extract_feat(wav)
        else:
            x_ssl_feat = self.ssl_model.extract_feat(wav)
        output = self.m_front_end_process(x_ssl_feat)
        return output


class BackEnd(torch_nn.Module):
    """Back End Wrapper
    """
    def __init__(self, input_dim, out_dim, num_classes,
                 dropout_rate, dropout_flag=True, dropout_trials=[1]):
        super(BackEnd, self).__init__()

        # input feature dimension
        self.in_dim = input_dim
        # output embedding dimension
        self.out_dim = out_dim
        # number of output classes
        self.num_class = num_classes

        # dropout rate
        self.m_mcdp_rate = dropout_rate
        self.m_mcdp_flag = dropout_flag
        self.m_mcdp_num = dropout_trials

        # linear layer to produce output logits
        self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
        return

    def forward(self, feat):
        """ logits, emb_vec = back_end_emb(feat)

        input:
        ------
          feat: tensor, (batch, frame_num, feat_feat_dim)

        output:
        -------
          logits: tensor, (batch, num_output_class)
          emb_vec: tensor, (batch, emb_dim)
        """
        # through the frame-level network
        #  (batch, frame_num, self.out_dim)
        #  average pooling -> (batch, self.out_dim)
        feat_utt = feat.mean(1)

        # output linear
        logits = self.m_utt_level(feat_utt)
        return logits, feat_utt

    def inference(self, feat):
        """scores, emb_vec, energy = inference(feat)

        This is used for inference; the output includes the scores
        and confidence scores.

        input:
        ------
          feat: tensor, (batch, frame_num, feat_feat_dim)

        output:
        -------
          scores: tensor, (batch, 1)
          emb_vec: tensor, (batch, emb_dim)
          energy: tensor, (batch, 1)
        """
        # logits
        logits, feat_utt = self.forward(feat)

        # logits -> score
        scores = logits[:, 1] - logits[:, 0]

        # compute confidence using negative energy
        energy = nii_loss_util.neg_energy(logits)

        return scores, feat_utt, energy


class MainLossModule(torch_nn.Module):
    """ Loss wrapper
    """
    def __init__(self):
        super(MainLossModule, self).__init__()
        self.m_loss = torch_nn.CrossEntropyLoss()
        return

    def forward(self, logits, target):
        return self.m_loss(logits, target)


class FeatLossModule(torch_nn.Module):
    """ Loss wrapper over features
    Not used here
    """
    def __init__(self):
        super(FeatLossModule, self).__init__()
        return

    def forward(self, data, target):
        """
        """
        return 0


class Model(torch_nn.Module):
    """ Model definition
    """
    def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
        super(Model, self).__init__()

        ##### required part, no need to change #####
        # mean and std of input and output
        in_m, in_s, out_m, out_s = self.prepare_mean_std(
            in_dim, out_dim, args, prj_conf, mean_std)
        self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
        self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
        self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
        self.output_std = torch_nn.Parameter(out_s, requires_grad=False)

        # a flag for debugging (by default False)
        #self.model_debug = False
        #self.validation = False
        ############################################

        ####
        # auxiliary
        ####
        # flag of the current training stage
        #  this variable will be overwritten
        self.temp_flag = args.temp_flag

        ####
        # Load protocol and prepare the target data for network training
        ####
        protocol_f = prj_conf.optional_argument
        self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)

        ####
        # Bayesian parameter
        ####
        self.m_mcdp_rate = None
        self.m_mcdp_flag = True
        # if [1], we will only do one inference
        self.m_mcdropout_num = [1]

        ####
        # Model definition
        ####
        # front-end
        #  dimension of compressed front-end feature
        self.v_feat_dim = 128
        self.m_front_end = FrontEnd(self.v_feat_dim,
                                    prj_conf.ssl_front_end_path,
                                    prj_conf.ssl_front_end_out_dim)

        # back-end
        #  dimension of utterance-level embedding vectors
        self.v_emd_dim = self.v_feat_dim
        #  number of output classes
        self.v_out_class = 2
        self.m_back_end = BackEnd(self.v_feat_dim,
                                  self.v_emd_dim,
                                  self.v_out_class,
                                  self.m_mcdp_rate,
                                  self.m_mcdp_flag,
                                  self.m_mcdropout_num)

        #####
        # Loss function
        #####
        self.m_ce_loss = MainLossModule()
        self.m_cr_loss = FeatLossModule()
        # weight for the feature loss
        self.m_feat = 0.0

        # done
        return

    def prepare_mean_std(self, in_dim, out_dim, args, prj_conf,
                         data_mean_std=None):
        """ prepare mean and std for data processing
        This is required for the Pytorch project, but irrelevant to this code
        """
        if data_mean_std is not None:
            in_m = torch.from_numpy(data_mean_std[0])
            in_s = torch.from_numpy(data_mean_std[1])
            out_m = torch.from_numpy(data_mean_std[2])
            out_s = torch.from_numpy(data_mean_std[3])
            if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
                print("Input dim: {:d}".format(in_dim))
                print("Mean dim: {:d}".format(in_m.shape[0]))
                print("Std dim: {:d}".format(in_s.shape[0]))
                print("Input dimension incompatible")
                sys.exit(1)
            if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
                print("Output dim: {:d}".format(out_dim))
                print("Mean dim: {:d}".format(out_m.shape[0]))
                print("Std dim: {:d}".format(out_s.shape[0]))
                print("Output dimension incompatible")
                sys.exit(1)
        else:
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
        return in_m, in_s, out_m, out_s

    def normalize_input(self, x):
        """ normalizing the input data
        This is required for the Pytorch project, but irrelevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but irrelevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from the network
        This is required for the Pytorch project, but irrelevant to this code
        """
        return y * self.output_std + self.output_mean

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
        target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
        target = np.tile(target, bs)
        target = torch.tensor(target, device=device, dtype=dtype)
        return target

    def __inference(self, x, fileinfo):
        """
        """
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        # skip sentences that are too short
        if not self.training and x.shape[1] < 3000:
            targets = self._get_target(filenames)
            for filename, target in zip(filenames, targets):
                print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
                    filename, target, 0.0, 0.0, 0.0))
            return None

        # front-end
        feat_vec = self.m_front_end(x)
        # back-end
        scores, _, energy = self.m_back_end.inference(feat_vec)

        # print output
        targets = self._get_target(filenames)
        for filename, target, score, eps in \
            zip(filenames, targets, scores, energy):
            print("Output, {:s}, {:d}, {:f}, {:f}".format(
                filename, target, score.item(), eps.item()))

        # don't write the output scores to a single file
        return None

    def __forward_single_view(self, x, fileinfo):
        """
        """
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        # front-end & back-end
        feat_vec = self.m_front_end(x)
        logits, emb_vec = self.m_back_end(feat_vec)

        target = self._get_target(filenames)
        target_ = torch.tensor(target, device=x.device, dtype=torch.long)

        # loss
        loss = self.m_ce_loss(logits, target_)
        return loss

    def __forward_multi_view(self, x, fileinfo):
        """
        """
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        # input will be (batchsize, length, 1+num_spoofed, num_aug)
        bat_siz = x.shape[0]
        pad_len = x.shape[1]
        num_sys = x.shape[2]
        num_aug = x.shape[3]

        # to (batchsize * (1+num_spoofed) * num_aug, length)
        x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
        datalen_tmp = np.repeat(datalength, num_sys * num_aug)

        # target vector
        #  this is for the CE loss
        #  [1, 0, 0, ..., 1, 0, 0, ...]
        target = self._get_target_vec(num_sys, num_aug, bat_siz,
                                      x.device, torch.long)
        #  this is for the contrastive loss (ignore the augmentation)
        target_feat = self._get_target_vec(num_sys, 1, bat_siz,
                                           x.device, torch.long)

        # front-end & back-end
        feat_vec = self.m_front_end(x_new)
        logits, emb_vec = self.m_back_end(feat_vec)

        # CE loss
        loss_ce = self.m_ce_loss(logits, target)

        if self.m_feat:
            # feat loss
            loss_cr_1 = 0
            loss_cr_2 = 0

            # reshape to the multi-view format
            #  (batch, (1+num_spoof), nview, dimension...)
            feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
                                      feat_vec.shape[-1])
            emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)

            for bat_idx in range(bat_siz):
                loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
                    feat_vec_[bat_idx],
                    target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
                loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
                    emb_vec_[bat_idx],
                    target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])

            return [[loss_ce, loss_cr_1, loss_cr_2], [True, True, True]]
        else:
            return loss_ce

    def forward(self, x, fileinfo):
        """
        """
        if self.training and x.shape[2] > 1:
            # if training with multi-view data
            return self.__forward_multi_view(x, fileinfo)
        elif self.training:
            return self.__forward_single_view(x, fileinfo)
        else:
            return self.__inference(x, fileinfo)

    def get_embedding(self, x, fileinfo):
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # compute the utterance-level embedding via front-end and back-end
        #  (the original called an undefined self._compute_embedding)
        feat_vec = self.m_front_end(x)
        _, feature_vec = self.m_back_end(feat_vec)
        return feature_vec

    def al_retrieve_data(self, data_loader, num_sample):
        """idx = al_retrieve_data(data_loader, num_sample)

        Data retrieval function for active learning

        Args:
        -----
          data_loader: Pytorch DataLoader for the pool data set
          num_sample: int, number of samples to be selected

        Return
        ------
          idx: list of index
        """
        # randomly select data indices
        sorted_idx = np.arange(data_loader.dataset.__len__())
        np.random.shuffle(sorted_idx)
        return_idx = sorted_idx[0:num_sample]

        # return the data indices;
        #  the corresponding samples will be added to the training set
        return return_idx

    def al_exclude_data(self, data_loader, num_sample):
        """idx = al_exclude_data(data_loader, num_sample)

        Function to select useless data from the pool and remove them

        Args:
        -----
          data_loader: Pytorch DataLoader for the pool data set
          num_sample: int, number of samples to be selected

        Return
        ------
          idx: list of index
        """
        # buffers
        #  note that data_loader.dataset.__len__() returns the number of
        #  individual samples, not the number of mini-batches
        idx_list = np.zeros([data_loader.dataset.__len__()])
        conf_list = np.zeros([data_loader.dataset.__len__()])
        #
        counter = 0

        # loop over the pool set
        with torch.no_grad():
            for data_idx, (x, y, data_info, idx_orig) in \
                enumerate(data_loader):

                filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
                datalength = [nii_seq_tk.parse_length(y) for y in data_info]

                if isinstance(x, torch.Tensor):
                    x = x.to(self.input_std.device,
                             dtype=self.input_std.dtype)
                else:
                    nii_display.f_die("data input is not a tensor")

                # front-end
                feat_vec = self.m_front_end(x)
                # back-end
                scores, _, energy = self.m_back_end.inference(feat_vec)

                # add the energy (confidence score) and data index
to the buffer
                conf_list[counter:counter+x.shape[0]] = np.array(
                    [x.item() for x in energy])
                idx_list[counter:counter+x.shape[0]] = np.array(
                    idx_orig)
                counter += x.shape[0]

        # select data with low energy (i.e., high confidence; the model has
        # already seen this kind of data, so the data is the least useful)
        sorted_idx = np.argsort(conf_list)
        # retrieve the data index
        return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]

        # return the data index,
        # the corresponding samples will be removed from the pool set
        return return_idx

class Loss():
    """ Wrapper for scripts, ignore it
    """
    def __init__(self, args):
        """
        """
    def compute(self, outputs, target):
        """
        """
        return outputs

if __name__ == "__main__":
    print("Definition of model")
19,868
31.518822
80
py
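The `inference` methods above derive a confidence score via `nii_loss_util.neg_energy(logits)`. A minimal sketch of that quantity, assuming the usual energy score E(x) = -T * logsumexp(z / T) from energy-based out-of-distribution detection, where lower energy means higher model confidence (consistent with the comments in the active-learning code below); the actual `sandbox.util_loss_metric` implementation may differ:

import torch

def energy_score_sketch(logits, temperature=1.0):
    # E(x) = -T * log sum_k exp(z_k / T)
    # lower energy <-> larger logsumexp <-> higher model confidence
    return -temperature * torch.logsumexp(logits / temperature, dim=1)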
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-PosE/model.py
#!/usr/bin/env python """ model.py for Active learning model This model.py consists of two parts: 1. A CM with SSL-based front-end and linear back-end. The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py, but the code is revised and simplified. 2. A function al_retrieve_data to scoring the pool set data. al_retrieve_data scores the pool set data and returns a list of data index. The returned data index will be used to retrieve the data from pool. al_retrieve_data is called in core_scripts/nn_manager/nn_manager_AL.py. Please check the training algorithm there. """ from __future__ import absolute_import from __future__ import print_function import os import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import sandbox.eval_asvspoof as nii_asvspoof import sandbox.util_bayesian as nii_bayesian import sandbox.util_loss_metric as nii_loss_util __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2022, Xin Wang" ############################ ## FOR pre-trained MODEL ############################ import fairseq as fq class SSLModel(torch_nn.Module): def __init__(self, mpath, ssl_orig_output_dim): """ SSLModel(cp_path, ssl_orig_output_dim) Args ---- mpath: string, path to the pre-trained SSL model ssl_orig_output_dim: int, dimension of the SSL model output feature """ super(SSLModel, self).__init__() md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath]) self.model = md[0] # this should be loaded from md self.out_dim = ssl_orig_output_dim return def extract_feat(self, input_data): """ output = extract_feat(input_data) input: ------ input_data,tensor, (batch, length, 1) or (batch, length) datalength: list of int, length of wav in the mini-batch output: ------- output: tensor, (batch, frame_num, frame_feat_dim) """ # put the model to GPU if it not there if next(self.model.parameters()).device != input_data.device \ or next(self.model.parameters()).dtype != input_data.dtype: self.model.to(input_data.device, dtype=input_data.dtype) # input should be in shape (batch, length) if input_data.ndim == 3: input_tmp = input_data[:, :, 0] else: input_tmp = input_data # emb has shape [batch, length, dim] emb = self.model(input_tmp, mask=False, features_only=True)['x'] return emb ############## ## FOR MODEL ############## class FrontEnd(torch_nn.Module): """ Front end wrapper """ def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False): super(FrontEnd, self).__init__() # dimension of output feature self.out_dim = output_dim # whether fix SSL or not self.flag_fix_ssl = fix_ssl # ssl part self.ssl_model = SSLModel(mpath, ssl_out_dim) # post transformation part self.m_front_end_process = torch_nn.Linear( self.ssl_model.out_dim, self.out_dim) return def set_flag_fix_ssl(self, fix_ssl): self.flag_fix_ssl = fix_ssl return def forward(self, wav): """ output = front_end(wav) input: ------ wav: tensor, (batch, length, 1) output: ------- output: tensor, (batch, frame_num, frame_feat_dim) """ if self.flag_fix_ssl: self.ssl_model.eval() with torch.no_grad(): x_ssl_feat = self.ssl_model.extract_feat(wav) else: x_ssl_feat = self.ssl_model.extract_feat(wav) output = self.m_front_end_process(x_ssl_feat) return output class BackEnd(torch_nn.Module): """Back End Wrapper """ def __init__(self, input_dim, out_dim, num_classes, 
dropout_rate, dropout_flag=True, dropout_trials=[1]): super(BackEnd, self).__init__() # input feature dimension self.in_dim = input_dim # output embedding dimension self.out_dim = out_dim # number of output classes self.num_class = num_classes # dropout rate self.m_mcdp_rate = dropout_rate self.m_mcdp_flag = dropout_flag self.m_mcdp_num = dropout_trials # linear linear to produce output logits self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class) return def forward(self, feat): """ logits, emb_vec = back_end_emb(feat) input: ------ feat: tensor, (batch, frame_num, feat_feat_dim) output: ------- logits: tensor, (batch, num_output_class) emb_vec: tensor, (batch, emb_dim) """ # through the frame-level network # (batch, frame_num, self.out_dim) # average pooling -> (batch, self.out_dim) feat_utt = feat.mean(1) # output linear logits = self.m_utt_level(feat_utt) return logits, feat_utt def inference(self, feat): """scores, emb_vec, energy = inference(feat) This is used for inference, output includes the logits and confidence scores. input: ------ feat: tensor, (batch, frame_num, feat_feat_dim) output: ------- scores: tensor, (batch, 1) emb_vec: tensor, (batch, emb_dim) energy: tensor, (batch, 1) """ # logits logits, feat_utt = self.forward(feat) # logits -> score scores = logits[:, 1] - logits[:, 0] # compute confidence using negative energy energy = nii_loss_util.neg_energy(logits) return scores, feat_utt, energy class MainLossModule(torch_nn.Module): """ Loss wrapper """ def __init__(self): super(MainLossModule, self).__init__() self.m_loss = torch_nn.CrossEntropyLoss() return def forward(self, logits, target): return self.m_loss(logits, target) class FeatLossModule(torch_nn.Module): """ Loss wrapper over features Not used here """ def __init__(self): super(FeatLossModule, self).__init__() return def forward(self, data, target): """ """ return 0 class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std( in_dim,out_dim, args, prj_conf, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ############################################ #### # auxililary #### # flag of current training stage # this variable will be overwritten self.temp_flag = args.temp_flag #### # Load protocol and prepare the target data for network training #### protocol_f = prj_conf.optional_argument self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f) #### # Bayesian parameter #### self.m_mcdp_rate = None self.m_mcdp_flag = True # if [1], we will only do one inference self.m_mcdropout_num = [1] #### # Model definition #### # front-end # dimension of compressed front-end feature self.v_feat_dim = 128 self.m_front_end = FrontEnd(self.v_feat_dim, prj_conf.ssl_front_end_path, prj_conf.ssl_front_end_out_dim) # back-end # dimension of utterance-level embedding vectors self.v_emd_dim = self.v_feat_dim # number of output classes self.v_out_class = 2 self.m_back_end = BackEnd(self.v_feat_dim, self.v_emd_dim, self.v_out_class, self.m_mcdp_rate, self.m_mcdp_flag, self.m_mcdropout_num) ##### 
# Loss function ##### self.m_ce_loss = MainLossModule() self.m_cr_loss = FeatLossModule() # weight for the feature loss self.m_feat = 0.0 # done return def prepare_mean_std(self, in_dim, out_dim, args, prj_conf, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but irrelevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but irrelevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but irrelevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but irrelevant to this code """ return y * self.output_std + self.output_mean def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def _get_target_vec(self, num_sys, num_aug, bs, device, dtype): target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)] target = np.tile(target, bs) target = torch.tensor(target, device=device, dtype=dtype) return target def __inference(self, x, fileinfo): """ """ filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] # too short sentences, skip it if not self.training and x.shape[1] < 3000: targets = self._get_target(filenames) for filename, target in zip(filenames, targets): print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format( filename, target, 0.0, 0.0, 0.0)) return None # front-end feat_vec = self.m_front_end(x) # back-end scores, _, energy = self.m_back_end.inference(feat_vec) # print output targets = self._get_target(filenames) for filename, target, score, eps in \ zip(filenames, targets, scores, energy): print("Output, {:s}, {:d}, {:f}, {:f}".format( filename, target, score.item(), eps.item())) # don't write output score as a single file return None def __forward_single_view(self, x, fileinfo): """ """ filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] # front-end & back-end feat_vec = self.m_front_end(x) logits, emb_vec = self.m_back_end(feat_vec) target = self._get_target(filenames) target_ = torch.tensor(target, device=x.device, dtype=torch.long) # loss loss = self.m_ce_loss(logits, target_) return loss def __forward_multi_view(self, x, fileinfo): """ """ filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = 
[nii_seq_tk.parse_length(y) for y in fileinfo] # input will be (batchsize, length, 1+num_spoofed, num_aug) bat_siz = x.shape[0] pad_len = x.shape[1] num_sys = x.shape[2] num_aug = x.shape[3] # to (batchsize * (1+num_spoofed) * num_aug, length) x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len) datalen_tmp = np.repeat(datalength, num_sys * num_aug) # target vector # this is for CE loss # [1, 0, 0, ..., 1, 0, 0 ...] target = self._get_target_vec(num_sys, num_aug, bat_siz, x.device, torch.long) # this is for contrasitive loss (ignore the augmentation) target_feat = self._get_target_vec(num_sys, 1, bat_siz, x.device, torch.long) # front-end & back-end feat_vec = self.m_front_end(x_new) logits, emb_vec = self.m_back_end(feat_vec) # CE loss loss_ce = self.m_ce_loss(logits, target) if self.m_feat: # feat loss loss_cr_1 = 0 loss_cr_2 = 0 # reshape to multi-view format # (batch, (1+num_spoof), nview, dimension...) feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1, feat_vec.shape[-1]) emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1) for bat_idx in range(bat_siz): loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss( feat_vec_[bat_idx], target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys]) loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss( emb_vec_[bat_idx], target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys]) return [[loss_ce, loss_cr_1, loss_cr_2], [True, True, True]] else: return loss_ce def forward(self, x, fileinfo): """ """ if self.training and x.shape[2] > 1: # if training with multi-view data return self.__forward_multi_view(x, fileinfo) elif self.training: return self.__forward_single_view(x, fileinfo) else: return self.__inference(x, fileinfo) def get_embedding(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] feature_vec = self._compute_embedding(x, datalength) return feature_vec def al_retrieve_data(self, data_loader, num_sample): """idx = al_retrieve_data(data_loader, num_sample) Data retrival function for active learning Args: ----- data_loader: Pytorch DataLoader for the pool data set num_sample: int, number of samples to be selected Return ------ idx: list of index """ # buffer # note that data_loader.dataset.__len__() returns the number of # individual samples, not the number of mini-batches idx_list = np.zeros([data_loader.dataset.__len__()]) conf_list = np.zeros([data_loader.dataset.__len__()]) # counter = 0 # loop over the pool set with torch.no_grad(): for data_idx, (x, y, data_info, idx_orig) in \ enumerate(data_loader): # feedforward pass filenames = [nii_seq_tk.parse_filename(y) for y in data_info] datalength = [nii_seq_tk.parse_length(y) for y in data_info] if isinstance(x, torch.Tensor): x = x.to(self.input_std.device, dtype=self.input_std.dtype) else: nii_display.f_die("data input is not a tensor") # front-end feat_vec = self.m_front_end(x) # back-end scores, _, energy = self.m_back_end.inference(feat_vec) # add the energy (confidence score) and data index to the buffer conf_list[counter:counter+x.shape[0]] = np.array( [x.item() for x in energy]) idx_list[counter:counter+x.shape[0]] = np.array( idx_orig) counter += x.shape[0] # select the least useful data (those with low enerngy, high-confidence) sorted_idx = np.argsort(conf_list) # retrieve the data index return_idx = [idx_list[x] for x in sorted_idx[:num_sample]] # return the data index, # the corresponding samples will be added to training set return return_idx class Loss(): """ Wrapper for scripts, 
ignore it """ def __init__(self, args): """ """ def compute(self, outputs, target): """ """ return outputs if __name__ == "__main__": print("Definition of model")
19,133
31.651877
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-NegE/model.py
#!/usr/bin/env python """ model.py for Active learning model This model.py consists of two parts: 1. A CM with SSL-based front-end and linear back-end. The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py, but the code is revised and simplified. 2. A function al_retrieve_data to scoring the pool set data. al_retrieve_data scores the pool set data and returns a list of data index. The returned data index will be used to retrieve the data from pool. al_retrieve_data is called in core_scripts/nn_manager/nn_manager_AL.py. Please check the training algorithm there. """ from __future__ import absolute_import from __future__ import print_function import os import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import sandbox.eval_asvspoof as nii_asvspoof import sandbox.util_bayesian as nii_bayesian import sandbox.util_loss_metric as nii_loss_util __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2022, Xin Wang" ############################ ## FOR pre-trained MODEL ############################ import fairseq as fq class SSLModel(torch_nn.Module): def __init__(self, mpath, ssl_orig_output_dim): """ SSLModel(cp_path, ssl_orig_output_dim) Args ---- mpath: string, path to the pre-trained SSL model ssl_orig_output_dim: int, dimension of the SSL model output feature """ super(SSLModel, self).__init__() md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath]) self.model = md[0] # this should be loaded from md self.out_dim = ssl_orig_output_dim return def extract_feat(self, input_data): """ output = extract_feat(input_data) input: ------ input_data,tensor, (batch, length, 1) or (batch, length) datalength: list of int, length of wav in the mini-batch output: ------- output: tensor, (batch, frame_num, frame_feat_dim) """ # put the model to GPU if it not there if next(self.model.parameters()).device != input_data.device \ or next(self.model.parameters()).dtype != input_data.dtype: self.model.to(input_data.device, dtype=input_data.dtype) # input should be in shape (batch, length) if input_data.ndim == 3: input_tmp = input_data[:, :, 0] else: input_tmp = input_data # emb has shape [batch, length, dim] emb = self.model(input_tmp, mask=False, features_only=True)['x'] return emb ############## ## FOR MODEL ############## class FrontEnd(torch_nn.Module): """ Front end wrapper """ def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False): super(FrontEnd, self).__init__() # dimension of output feature self.out_dim = output_dim # whether fix SSL or not self.flag_fix_ssl = fix_ssl # ssl part self.ssl_model = SSLModel(mpath, ssl_out_dim) # post transformation part self.m_front_end_process = torch_nn.Linear( self.ssl_model.out_dim, self.out_dim) return def set_flag_fix_ssl(self, fix_ssl): self.flag_fix_ssl = fix_ssl return def forward(self, wav): """ output = front_end(wav) input: ------ wav: tensor, (batch, length, 1) output: ------- output: tensor, (batch, frame_num, frame_feat_dim) """ if self.flag_fix_ssl: self.ssl_model.eval() with torch.no_grad(): x_ssl_feat = self.ssl_model.extract_feat(wav) else: x_ssl_feat = self.ssl_model.extract_feat(wav) output = self.m_front_end_process(x_ssl_feat) return output class BackEnd(torch_nn.Module): """Back End Wrapper """ def __init__(self, input_dim, out_dim, num_classes, 
dropout_rate, dropout_flag=True, dropout_trials=[1]): super(BackEnd, self).__init__() # input feature dimension self.in_dim = input_dim # output embedding dimension self.out_dim = out_dim # number of output classes self.num_class = num_classes # dropout rate self.m_mcdp_rate = dropout_rate self.m_mcdp_flag = dropout_flag self.m_mcdp_num = dropout_trials # linear linear to produce output logits self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class) return def forward(self, feat): """ logits, emb_vec = back_end_emb(feat) input: ------ feat: tensor, (batch, frame_num, feat_feat_dim) output: ------- logits: tensor, (batch, num_output_class) emb_vec: tensor, (batch, emb_dim) """ # through the frame-level network # (batch, frame_num, self.out_dim) # average pooling -> (batch, self.out_dim) feat_utt = feat.mean(1) # output linear logits = self.m_utt_level(feat_utt) return logits, feat_utt def inference(self, feat): """scores, emb_vec, energy = inference(feat) This is used for inference, output includes the logits and confidence scores. input: ------ feat: tensor, (batch, frame_num, feat_feat_dim) output: ------- scores: tensor, (batch, 1) emb_vec: tensor, (batch, emb_dim) energy: tensor, (batch, 1) """ # logits logits, feat_utt = self.forward(feat) # logits -> score scores = logits[:, 1] - logits[:, 0] # compute confidence using negative energy energy = nii_loss_util.neg_energy(logits) return scores, feat_utt, energy class MainLossModule(torch_nn.Module): """ Loss wrapper """ def __init__(self): super(MainLossModule, self).__init__() self.m_loss = torch_nn.CrossEntropyLoss() return def forward(self, logits, target): return self.m_loss(logits, target) class FeatLossModule(torch_nn.Module): """ Loss wrapper over features Not used here """ def __init__(self): super(FeatLossModule, self).__init__() return def forward(self, data, target): """ """ return 0 class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std( in_dim,out_dim, args, prj_conf, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ############################################ #### # auxililary #### # flag of current training stage # this variable will be overwritten self.temp_flag = args.temp_flag #### # Load protocol and prepare the target data for network training #### protocol_f = prj_conf.optional_argument self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f) #### # Bayesian parameter #### self.m_mcdp_rate = None self.m_mcdp_flag = True # if [1], we will only do one inference self.m_mcdropout_num = [1] #### # Model definition #### # front-end # dimension of compressed front-end feature self.v_feat_dim = 128 self.m_front_end = FrontEnd(self.v_feat_dim, prj_conf.ssl_front_end_path, prj_conf.ssl_front_end_out_dim) # back-end # dimension of utterance-level embedding vectors self.v_emd_dim = self.v_feat_dim # number of output classes self.v_out_class = 2 self.m_back_end = BackEnd(self.v_feat_dim, self.v_emd_dim, self.v_out_class, self.m_mcdp_rate, self.m_mcdp_flag, self.m_mcdropout_num) ##### 
# Loss function ##### self.m_ce_loss = MainLossModule() self.m_cr_loss = FeatLossModule() # weight for the feature loss self.m_feat = 0.0 # done return def prepare_mean_std(self, in_dim, out_dim, args, prj_conf, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but irrelevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but irrelevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but irrelevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but irrelevant to this code """ return y * self.output_std + self.output_mean def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def _get_target_vec(self, num_sys, num_aug, bs, device, dtype): target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)] target = np.tile(target, bs) target = torch.tensor(target, device=device, dtype=dtype) return target def __inference(self, x, fileinfo): """ """ filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] # too short sentences, skip it if not self.training and x.shape[1] < 3000: targets = self._get_target(filenames) for filename, target in zip(filenames, targets): print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format( filename, target, 0.0, 0.0, 0.0)) return None # front-end feat_vec = self.m_front_end(x) # back-end scores, _, energy = self.m_back_end.inference(feat_vec) # print output targets = self._get_target(filenames) for filename, target, score, eps in \ zip(filenames, targets, scores, energy): print("Output, {:s}, {:d}, {:f}, {:f}".format( filename, target, score.item(), eps.item())) # don't write output score as a single file return None def __forward_single_view(self, x, fileinfo): """ """ filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] # front-end & back-end feat_vec = self.m_front_end(x) logits, emb_vec = self.m_back_end(feat_vec) target = self._get_target(filenames) target_ = torch.tensor(target, device=x.device, dtype=torch.long) # loss loss = self.m_ce_loss(logits, target_) return loss def __forward_multi_view(self, x, fileinfo): """ """ filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = 
[nii_seq_tk.parse_length(y) for y in fileinfo] # input will be (batchsize, length, 1+num_spoofed, num_aug) bat_siz = x.shape[0] pad_len = x.shape[1] num_sys = x.shape[2] num_aug = x.shape[3] # to (batchsize * (1+num_spoofed) * num_aug, length) x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len) datalen_tmp = np.repeat(datalength, num_sys * num_aug) # target vector # this is for CE loss # [1, 0, 0, ..., 1, 0, 0 ...] target = self._get_target_vec(num_sys, num_aug, bat_siz, x.device, torch.long) # this is for contrasitive loss (ignore the augmentation) target_feat = self._get_target_vec(num_sys, 1, bat_siz, x.device, torch.long) # front-end & back-end feat_vec = self.m_front_end(x_new) logits, emb_vec = self.m_back_end(feat_vec) # CE loss loss_ce = self.m_ce_loss(logits, target) if self.m_feat: # feat loss loss_cr_1 = 0 loss_cr_2 = 0 # reshape to multi-view format # (batch, (1+num_spoof), nview, dimension...) feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1, feat_vec.shape[-1]) emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1) for bat_idx in range(bat_siz): loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss( feat_vec_[bat_idx], target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys]) loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss( emb_vec_[bat_idx], target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys]) return [[loss_ce, loss_cr_1, loss_cr_2], [True, True, True]] else: return loss_ce def forward(self, x, fileinfo): """ """ if self.training and x.shape[2] > 1: # if training with multi-view data return self.__forward_multi_view(x, fileinfo) elif self.training: return self.__forward_single_view(x, fileinfo) else: return self.__inference(x, fileinfo) def get_embedding(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] feature_vec = self._compute_embedding(x, datalength) return feature_vec def al_retrieve_data(self, data_loader, num_sample): """idx = al_retrieve_data(data_loader, num_sample) Data retrival function for active learning Args: ----- data_loader: Pytorch DataLoader for the pool data set num_sample: int, number of samples to be selected Return ------ idx: list of index """ # buffer # note that data_loader.dataset.__len__() returns the number of # individual samples, not the number of mini-batches idx_list = np.zeros([data_loader.dataset.__len__()]) conf_list = np.zeros([data_loader.dataset.__len__()]) # counter = 0 # loop over the pool set with torch.no_grad(): for data_idx, (x, y, data_info, idx_orig) in \ enumerate(data_loader): # feedforward pass filenames = [nii_seq_tk.parse_filename(y) for y in data_info] datalength = [nii_seq_tk.parse_length(y) for y in data_info] if isinstance(x, torch.Tensor): x = x.to(self.input_std.device, dtype=self.input_std.dtype) else: nii_display.f_die("data input is not a tensor") # front-end feat_vec = self.m_front_end(x) # back-end scores, _, energy = self.m_back_end.inference(feat_vec) # add the energy (confidence score) and data index to the buffer conf_list[counter:counter+x.shape[0]] = np.array( [x.item() for x in energy]) idx_list[counter:counter+x.shape[0]] = np.array( idx_orig) counter += x.shape[0] # select the most useful data (those with high enerngy, low-confidence) sorted_idx = np.argsort(conf_list)[::-1] # retrieve the data index return_idx = [idx_list[x] for x in sorted_idx[:num_sample]] # return the data index, # the corresponding samples will be added to training set return return_idx class Loss(): """ Wrapper for scripts, 
ignore it """ def __init__(self, args): """ """ def compute(self, outputs, target): """ """ return outputs if __name__ == "__main__": print("Definition of model")
19,138
31.66041
80
py
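The active-learning variants in this directory differ only in how `al_retrieve_data` ranks the pool by the energy score: model-AL-NegE takes high-energy (low-confidence) samples, model-AL-PosE takes low-energy (high-confidence) samples, and model-AL-Pas (next record) draws at random. A sketch of the shared ranking step, factored into a hypothetical helper for illustration (not part of the repository):

import numpy as np

def rank_pool_by_energy(conf_list, num_sample, high_energy_first):
    # high_energy_first=True  -> NegE rule: least confident, most informative
    # high_energy_first=False -> PosE rule: most confident samples
    order = np.argsort(conf_list)
    if high_energy_first:
        order = order[::-1]
    return order[:num_sample]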
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-Pas/model.py
#!/usr/bin/env python """ model.py for Active learning model This model.py consists of two parts: 1. A CM with SSL-based front-end and linear back-end. The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py, but the code is revised and simplified. 2. A function al_retrieve_data to scoring the pool set data. al_retrieve_data scores the pool set data and returns a list of data index. The returned data index will be used to retrieve the data from pool. al_retrieve_data is called in core_scripts/nn_manager/nn_manager_AL.py. Please check the training algorithm there. """ from __future__ import absolute_import from __future__ import print_function import os import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import sandbox.eval_asvspoof as nii_asvspoof import sandbox.util_bayesian as nii_bayesian import sandbox.util_loss_metric as nii_loss_util __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2022, Xin Wang" ############################ ## FOR pre-trained MODEL ############################ import fairseq as fq class SSLModel(torch_nn.Module): def __init__(self, mpath, ssl_orig_output_dim): """ SSLModel(cp_path, ssl_orig_output_dim) Args ---- mpath: string, path to the pre-trained SSL model ssl_orig_output_dim: int, dimension of the SSL model output feature """ super(SSLModel, self).__init__() md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath]) self.model = md[0] # this should be loaded from md self.out_dim = ssl_orig_output_dim return def extract_feat(self, input_data): """ output = extract_feat(input_data) input: ------ input_data,tensor, (batch, length, 1) or (batch, length) datalength: list of int, length of wav in the mini-batch output: ------- output: tensor, (batch, frame_num, frame_feat_dim) """ # put the model to GPU if it not there if next(self.model.parameters()).device != input_data.device \ or next(self.model.parameters()).dtype != input_data.dtype: self.model.to(input_data.device, dtype=input_data.dtype) # input should be in shape (batch, length) if input_data.ndim == 3: input_tmp = input_data[:, :, 0] else: input_tmp = input_data # emb has shape [batch, length, dim] emb = self.model(input_tmp, mask=False, features_only=True)['x'] return emb ############## ## FOR MODEL ############## class FrontEnd(torch_nn.Module): """ Front end wrapper """ def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False): super(FrontEnd, self).__init__() # dimension of output feature self.out_dim = output_dim # whether fix SSL or not self.flag_fix_ssl = fix_ssl # ssl part self.ssl_model = SSLModel(mpath, ssl_out_dim) # post transformation part self.m_front_end_process = torch_nn.Linear( self.ssl_model.out_dim, self.out_dim) return def set_flag_fix_ssl(self, fix_ssl): self.flag_fix_ssl = fix_ssl return def forward(self, wav): """ output = front_end(wav) input: ------ wav: tensor, (batch, length, 1) output: ------- output: tensor, (batch, frame_num, frame_feat_dim) """ if self.flag_fix_ssl: self.ssl_model.eval() with torch.no_grad(): x_ssl_feat = self.ssl_model.extract_feat(wav) else: x_ssl_feat = self.ssl_model.extract_feat(wav) output = self.m_front_end_process(x_ssl_feat) return output class BackEnd(torch_nn.Module): """Back End Wrapper """ def __init__(self, input_dim, out_dim, num_classes, 
dropout_rate, dropout_flag=True, dropout_trials=[1]): super(BackEnd, self).__init__() # input feature dimension self.in_dim = input_dim # output embedding dimension self.out_dim = out_dim # number of output classes self.num_class = num_classes # dropout rate self.m_mcdp_rate = dropout_rate self.m_mcdp_flag = dropout_flag self.m_mcdp_num = dropout_trials # linear linear to produce output logits self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class) return def forward(self, feat): """ logits, emb_vec = back_end_emb(feat) input: ------ feat: tensor, (batch, frame_num, feat_feat_dim) output: ------- logits: tensor, (batch, num_output_class) emb_vec: tensor, (batch, emb_dim) """ # through the frame-level network # (batch, frame_num, self.out_dim) # average pooling -> (batch, self.out_dim) feat_utt = feat.mean(1) # output linear logits = self.m_utt_level(feat_utt) return logits, feat_utt def inference(self, feat): """scores, emb_vec, energy = inference(feat) This is used for inference, output includes the logits and confidence scores. input: ------ feat: tensor, (batch, frame_num, feat_feat_dim) output: ------- scores: tensor, (batch, 1) emb_vec: tensor, (batch, emb_dim) energy: tensor, (batch, 1) """ # logits logits, feat_utt = self.forward(feat) # logits -> score scores = logits[:, 1] - logits[:, 0] # compute confidence using negative energy energy = nii_loss_util.neg_energy(logits) return scores, feat_utt, energy class MainLossModule(torch_nn.Module): """ Loss wrapper """ def __init__(self): super(MainLossModule, self).__init__() self.m_loss = torch_nn.CrossEntropyLoss() return def forward(self, logits, target): return self.m_loss(logits, target) class FeatLossModule(torch_nn.Module): """ Loss wrapper over features Not used here """ def __init__(self): super(FeatLossModule, self).__init__() return def forward(self, data, target): """ """ return 0 class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std( in_dim,out_dim, args, prj_conf, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ############################################ #### # auxililary #### # flag of current training stage # this variable will be overwritten self.temp_flag = args.temp_flag #### # Load protocol and prepare the target data for network training #### protocol_f = prj_conf.optional_argument self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f) #### # Bayesian parameter #### self.m_mcdp_rate = None self.m_mcdp_flag = True # if [1], we will only do one inference self.m_mcdropout_num = [1] #### # Model definition #### # front-end # dimension of compressed front-end feature self.v_feat_dim = 128 self.m_front_end = FrontEnd(self.v_feat_dim, prj_conf.ssl_front_end_path, prj_conf.ssl_front_end_out_dim) # back-end # dimension of utterance-level embedding vectors self.v_emd_dim = self.v_feat_dim # number of output classes self.v_out_class = 2 self.m_back_end = BackEnd(self.v_feat_dim, self.v_emd_dim, self.v_out_class, self.m_mcdp_rate, self.m_mcdp_flag, self.m_mcdropout_num) ##### 
# Loss function ##### self.m_ce_loss = MainLossModule() self.m_cr_loss = FeatLossModule() # weight for the feature loss self.m_feat = 0.0 # done return def prepare_mean_std(self, in_dim, out_dim, args, prj_conf, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but irrelevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but irrelevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but irrelevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but irrelevant to this code """ return y * self.output_std + self.output_mean def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def _get_target_vec(self, num_sys, num_aug, bs, device, dtype): target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)] target = np.tile(target, bs) target = torch.tensor(target, device=device, dtype=dtype) return target def __inference(self, x, fileinfo): """ """ filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] # too short sentences, skip it if not self.training and x.shape[1] < 3000: targets = self._get_target(filenames) for filename, target in zip(filenames, targets): print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format( filename, target, 0.0, 0.0, 0.0)) return None # front-end feat_vec = self.m_front_end(x) # back-end scores, _, energy = self.m_back_end.inference(feat_vec) # print output targets = self._get_target(filenames) for filename, target, score, eps in \ zip(filenames, targets, scores, energy): print("Output, {:s}, {:d}, {:f}, {:f}".format( filename, target, score.item(), eps.item())) # don't write output score as a single file return None def __forward_single_view(self, x, fileinfo): """ """ filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] # front-end & back-end feat_vec = self.m_front_end(x) logits, emb_vec = self.m_back_end(feat_vec) target = self._get_target(filenames) target_ = torch.tensor(target, device=x.device, dtype=torch.long) # loss loss = self.m_ce_loss(logits, target_) return loss def __forward_multi_view(self, x, fileinfo): """ """ filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = 
[nii_seq_tk.parse_length(y) for y in fileinfo] # input will be (batchsize, length, 1+num_spoofed, num_aug) bat_siz = x.shape[0] pad_len = x.shape[1] num_sys = x.shape[2] num_aug = x.shape[3] # to (batchsize * (1+num_spoofed) * num_aug, length) x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len) datalen_tmp = np.repeat(datalength, num_sys * num_aug) # target vector # this is for CE loss # [1, 0, 0, ..., 1, 0, 0 ...] target = self._get_target_vec(num_sys, num_aug, bat_siz, x.device, torch.long) # this is for contrasitive loss (ignore the augmentation) target_feat = self._get_target_vec(num_sys, 1, bat_siz, x.device, torch.long) # front-end & back-end feat_vec = self.m_front_end(x_new) logits, emb_vec = self.m_back_end(feat_vec) # CE loss loss_ce = self.m_ce_loss(logits, target) if self.m_feat: # feat loss loss_cr_1 = 0 loss_cr_2 = 0 # reshape to multi-view format # (batch, (1+num_spoof), nview, dimension...) feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1, feat_vec.shape[-1]) emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1) for bat_idx in range(bat_siz): loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss( feat_vec_[bat_idx], target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys]) loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss( emb_vec_[bat_idx], target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys]) return [[loss_ce, loss_cr_1, loss_cr_2], [True, True, True]] else: return loss_ce def forward(self, x, fileinfo): """ """ if self.training and x.shape[2] > 1: # if training with multi-view data return self.__forward_multi_view(x, fileinfo) elif self.training: return self.__forward_single_view(x, fileinfo) else: return self.__inference(x, fileinfo) def get_embedding(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] feature_vec = self._compute_embedding(x, datalength) return feature_vec def al_retrieve_data(self, data_loader, num_sample): """idx = al_retrieve_data(data_loader, num_sample) Data retrival function for active learning Args: ----- data_loader: Pytorch DataLoader for the pool data set num_sample: int, number of samples to be selected Return ------ idx: list of index """ # randomly select data index sorted_idx = np.arange(data_loader.dataset.__len__()) np.random.shuffle(sorted_idx) return_idx = sorted_idx[0:num_sample] # return the data index, # the corresponding samples will be added to training set return return_idx class Loss(): """ Wrapper for scripts, ignore it """ def __init__(self, args): """ """ def compute(self, outputs, target): """ """ return outputs if __name__ == "__main__": print("Definition of model")
17,572
31.009107
78
py
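The `m_mcdp_rate` / `dropout_flag` / `dropout_trials` arguments threaded through `BackEnd` are placeholders for Bayesian MC-dropout inference (`sandbox.util_bayesian` is imported but unused, and this simplified back-end contains no dropout layers). Purely as an assumption about the intended use, a sketch of multi-trial MC-dropout scoring for a back-end that does apply dropout:

import torch

def mc_dropout_scores(back_end, feat, num_trials=10):
    # keep dropout active at inference time and average logits over trials
    back_end.train()
    with torch.no_grad():
        logits = torch.stack(
            [back_end(feat)[0] for _ in range(num_trials)])
    mean_logits = logits.mean(dim=0)
    # same score convention as BackEnd.inference: bonafide minus spoof logit
    return mean_logits[:, 1] - mean_logits[:, 0]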
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/00_evaluate.py
#!/usr/bin/python """ Wrapper to parse the score file and compute EER and min tDCF Usage: python 00_evaluate.py log_file """ import os import sys import numpy as np from sandbox import eval_asvspoof def parse_txt(file_path): bonafide = [] spoofed = [] with open(file_path, 'r') as file_ptr: for line in file_ptr: if line.startswith('Output,'): temp = line.split(',') flag = int(temp[2]) if flag: bonafide.append(float(temp[-1])) else: spoofed.append(float(temp[-1])) bonafide = np.array(bonafide) spoofed = np.array(spoofed) return bonafide, spoofed if __name__ == "__main__": data_path = sys.argv[1] bonafide, spoofed = parse_txt(data_path) mintDCF, eer, threshold = eval_asvspoof.tDCF_wrapper(bonafide, spoofed) print("mintDCF: %f\tEER: %2.3f %%\tThreshold: %f" % (mintDCF, eer * 100, threshold))
1,038
24.975
77
py
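`eval_asvspoof.tDCF_wrapper` returns the min t-DCF, the EER, and the threshold at the EER point. For readers without the sandbox module, a self-contained sketch of the EER computation from the two score arrays (illustrative only; the wrapper's exact implementation may differ):

import numpy as np

def compute_eer_sketch(bonafide, spoofed):
    # sweep candidate thresholds over all observed scores
    thresholds = np.sort(np.concatenate([bonafide, spoofed]))
    frr = np.array([np.mean(bonafide < t) for t in thresholds])   # miss rate
    far = np.array([np.mean(spoofed >= t) for t in thresholds])   # false alarm
    idx = np.argmin(np.abs(frr - far))
    return (frr[idx] + far[idx]) / 2.0, thresholds[idx]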
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-ocsoftmax/main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The default training/inference process wrapper Requires model.py and config.py Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.default_data_io as nii_dset import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers} # Load file list and create data loader trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list) trn_set = nii_dset.NIIDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate) if prj_conf.val_list is not None: val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list) val_set = nii_dset.NIIDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate) else: val_set = None # initialize the model and loss function model = prj_model.Model(trn_set.get_in_dim(), \ trn_set.get_out_dim(), \ args, trn_set.get_data_mean_std()) loss_wrapper = prj_model.Loss(args) # initialize the optimizer optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args) # if necessary, resume training if args.trained_model == "": checkpoint = None else: checkpoint = torch.load(args.trained_model) # start training nii_nn_wrapper.f_train_wrapper(args, model, loss_wrapper, device, optimizer_wrapper, trn_set, val_set, checkpoint) # done for traing else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': args.num_workers} if type(prj_conf.test_list) is list: 
t_lst = prj_conf.test_list
        else:
            t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
        test_set = nii_dset.NIIDataSetLoader(
            prj_conf.test_set_name, \
            t_lst, \
            prj_conf.test_input_dirs,
            prj_conf.input_exts,
            prj_conf.input_dims,
            prj_conf.input_reso,
            prj_conf.input_norm,
            prj_conf.test_output_dirs,
            prj_conf.output_exts,
            prj_conf.output_dims,
            prj_conf.output_reso,
            prj_conf.output_norm,
            './',
            params = params,
            truncate_seq = None,
            min_seq_len = None,
            save_mean_std = False,
            wav_samp_rate = prj_conf.wav_samp_rate)

        # initialize model
        model = prj_model.Model(test_set.get_in_dim(), \
                                test_set.get_out_dim(), \
                                args)
        if args.trained_model == "":
            print("No model is loaded by --trained-model for inference")
            print("By default, load %s%s" % (args.save_trained_name,
                                             args.save_model_ext))
            checkpoint = torch.load("%s%s" % (args.save_trained_name,
                                              args.save_model_ext))
        else:
            checkpoint = torch.load(args.trained_model)
        # do inference and output data
        nii_nn_wrapper.f_inference_wrapper(args, model, device, \
                                           test_set, checkpoint)
    # done
    return

if __name__ == "__main__":
    main()
6,366
34.569832
74
py
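main.py loads a `config.py` through `--module-config`. A hypothetical minimal config covering the attributes the wrapper reads above; every path and value here is a placeholder, not the project's shipped configuration:

# config.py (sketch)
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
trn_list = 'scp/train.lst'        # list of training utterance names
val_list = 'scp/val.lst'          # or None to skip validation
input_dirs = ['data/train_wav']   # waveform input only
input_exts = ['.wav']
input_dims = [1]
input_reso = [1]
input_norm = [False]
output_dirs, output_exts, output_dims = [], [], []
output_reso, output_norm = [], []
wav_samp_rate = 16000
truncate_seq = None
minimum_len = None
test_set_name = 'asvspoof2019_eval'
test_list = 'scp/test.lst'
test_input_dirs = ['data/eval_wav']
test_output_dirs = []
optional_argument = ['protocol/protocol.txt']  # CM protocol file(s)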
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-ocsoftmax/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.oc_softmax as nii_oc_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) self.model_debug = False self.flag_validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors, which is input to oc-softmax layer self.v_emd_dim = 256 # output class (1 for one-class softmax) self.v_out_class = 1 #### # create network #### # 1st part of the classifier self.m_transform = [] # 2nd part of the classifier self.m_output_act = [] # front-end 
self.m_frontend = [] # softmax self.m_a_softmax = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfcc_dim // 16) * 32, 512), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(256, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_a_softmax.append( nii_oc_softmax.OCAngleLayer(self.v_emd_dim) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / 
self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, frame_feat_dim, frame_num) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, pad or trim each trial independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly, this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_num, frame_feat_dim) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. 
flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # compute softmax output for each feature configuration batch_size = feature_vec.shape[0] // self.v_submodels # negative class scores x_cos_val = torch.zeros( [feature_vec.shape[0], self.v_out_class], dtype=feature_vec.dtype, device=feature_vec.device) # positive class scores x_phi_val = torch.zeros_like(x_cos_val) # get scores for idx in range(self.v_submodels): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], inference) x_cos_val[s_idx:e_idx] = tmp1 x_phi_val[s_idx:e_idx] = tmp2 if inference: return x_cos_val else: return [x_cos_val, x_phi_val] def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) a_softmax_act = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device).long() target_vec = target_vec.repeat(self.v_submodels) return [a_softmax_act, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) score = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], score.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
16,219
34.884956
80
py
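A note on the shapes in the model.py above: the input size of the first Linear layer in m_output_act must match the LCNN's flattened output, since the four [2, 2] max-pooling layers shrink the time and feature axes by a factor of 16 while MaxFeatureMap2D halves the final channel count to 32. A minimal sketch of that arithmetic (illustration only, not repository code; it assumes the default single front-end configuration):

# shape arithmetic behind Linear((trunc_len // 16) * (lfcc_dim // 16) * 32, 512)
frame_hop = 160                          # waveform points per frame shift
trunc_len = 10 * 16 * 750 // frame_hop   # = 750 frames kept per trial
lfcc_dim = 20 * 3                        # LFCC + delta + delta-delta = 60
flat_dim = (trunc_len // 16) * (lfcc_dim // 16) * 32
print(flat_dim)                          # 4416 flattened features per trial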
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-ocsoftmax/config.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'asvspoof2019_trn' val_set_name = 'asvspoof2019_val' # for convenience tmp = '../DATA/asvspoof2019_LA/' # File lists (text file, one data name per line, without name extension) # trn_list: list of files for the training set trn_list = tmp + '/scp/train.lst' # val_list: list of files for the validation set. It can be None val_list = tmp + '/scp/val.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [tmp + '/train_dev'] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # Please put ".f0" as the last feature input_exts = ['.wav'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [1] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [False] # Similar configurations for output features output_dirs = [] output_dims = [1] output_exts = ['.bin'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU mem required # If you don't want truncation, set truncate_seq = None truncate_seq = None # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = None # Optional argument # Just a buffer for convenience # It can contain anything optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt'] ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = 'asvspoof2019_test' # List of test set data # for convenience, you may directly load test_set list here test_list = tmp + '/scp/test.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory test_input_dirs = ['../DATA/asvspoof2019_LA/eval'] # Directories for output features, empty for this project test_output_dirs = []
3,226
29.733333
75
py
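A worked example of the input_reso arithmetic described in the comments of the config.py above (illustration only; this project feeds raw waveforms, whose resolution is simply 1):

# resolution of a frame-level acoustic feature, per the config comment
wav_samp_rate = 16000                       # waveform sampling rate in Hz
frame_shift = 0.005                         # 5 ms frame shift
print(int(wav_samp_rate * frame_shift))     # 80 waveform points per frame
# raw waveform input, as used here, has one value per sample: input_reso = 1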
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-restnet-ocsoftmax/main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The default training/inference process wrapper Requires model.py and config.py Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.default_data_io as nii_dset import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers} # Load file list and create data loader trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list) trn_set = nii_dset.NIIDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate) if prj_conf.val_list is not None: val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list) val_set = nii_dset.NIIDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate) else: val_set = None # initialize the model and loss function model = prj_model.Model(trn_set.get_in_dim(), \ trn_set.get_out_dim(), \ args, trn_set.get_data_mean_std()) loss_wrapper = prj_model.Loss(args) # initialize the optimizer optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args) # if necessary, resume training if args.trained_model == "": checkpoint = None else: checkpoint = torch.load(args.trained_model) # start training nii_nn_wrapper.f_train_wrapper(args, model, loss_wrapper, device, optimizer_wrapper, trn_set, val_set, checkpoint) # done for training else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': args.num_workers} if type(prj_conf.test_list) is list: 
t_lst = prj_conf.test_list else: t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list) test_set = nii_dset.NIIDataSetLoader( prj_conf.test_set_name, \ t_lst, \ prj_conf.test_input_dirs, prj_conf.input_exts, prj_conf.input_dims, prj_conf.input_reso, prj_conf.input_norm, prj_conf.test_output_dirs, prj_conf.output_exts, prj_conf.output_dims, prj_conf.output_reso, prj_conf.output_norm, './', params = params, truncate_seq= None, min_seq_len = None, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate) # initialize model model = prj_model.Model(test_set.get_in_dim(), \ test_set.get_out_dim(), \ args) if args.trained_model == "": print("No model is loaded by --trained-model for inference") print("By default, load %s%s" % (args.save_trained_name, args.save_model_ext)) checkpoint = torch.load("%s%s" % (args.save_trained_name, args.save_model_ext)) else: checkpoint = torch.load(args.trained_model) # do inference and output data nii_nn_wrapper.f_inference_wrapper(args, model, device, \ test_set, checkpoint) # done return if __name__ == "__main__": main()
6,366
34.569832
74
py
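The wrapper above resolves config.py and model.py at run time through importlib instead of hard-coded imports. A minimal sketch of that pattern (illustration only, not repository code; it assumes modules named config and model are importable, mirroring the args.module_config and args.module_model arguments):

import importlib

# load the project-specific configuration and model definition by name
prj_conf = importlib.import_module('config')
prj_model = importlib.import_module('model')

# main.py then relies only on the agreed interface:
# prj_model.Model(in_dim, out_dim, args, mean_std), prj_model.Loss(args),
# and the prj_conf.* options defined in config.py.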
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-restnet-ocsoftmax/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import sandbox.block_resnet as nii_resnet import core_scripts.other_tools.debug as nii_debug import core_modules.oc_softmax as nii_oc_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) self.model_debug = False self.flag_validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors, which will be input to the oc-softmax layer self.v_emd_dim = 256 # output class (1 for one-class softmax) self.v_out_class = 1 #### # create network #### # backend self.m_model = [] # front-end self.m_frontend = [] # softmax 
layer for backend self.m_a_softmax = [] for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_model.append( nii_resnet.ResNet(self.v_emd_dim) ) self.m_frontend.append( nii_front_end.LFCC( self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_a_softmax.append( nii_oc_softmax.OCAngleLayer(self.v_emd_dim) ) self.m_model = torch_nn.ModuleList(self.m_model) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_feat_dim, frame_num) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, frame_feat_dim, frame_num) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, pad or trim each trial independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly, this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] 
# input to resnet should be (batch, frame_feat_dim, frame_num) x_sp_amp = x_sp_amp_buff # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_model) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_model)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, fft_bin, frame_length) # 2. compute hidden features features, final_output = m_model(x_sp_amp.unsqueeze(1)) output_emb[idx * batch_size : (idx+1) * batch_size] = features return output_emb def _compute_score(self, feature_vec, angle=False): """ """ # compute one-class softmax output for each feature configuration batch_size = feature_vec.shape[0] // self.v_submodels # negative class scores x_cos_val = torch.zeros( [feature_vec.shape[0], self.v_out_class], dtype=feature_vec.dtype, device=feature_vec.device) # positive class scores x_phi_val = torch.zeros_like(x_cos_val) for idx in range(self.v_submodels): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], angle) x_cos_val[s_idx:e_idx] = tmp1 x_phi_val[s_idx:e_idx] = tmp2 return [x_cos_val, x_phi_val] def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) a_softmax_act = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device).long() target_vec = target_vec.repeat(self.v_submodels) return [a_softmax_act, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) score = self._compute_score(feature_vec, True)[0] target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], score.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
13,524
33.414758
80
py
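The per-trial truncation logic inside _front_end above is easy to miss in the flattened code. A self-contained sketch of the same idea (illustration only; pad_or_trim is a hypothetical helper, not a function in the repository):

import numpy as np
import torch

def pad_or_trim(feat, trunc_len):
    # feat: (feat_dim, frame_num) front-end feature of one trial.
    # Long trials are trimmed starting from a random frame; short trials
    # are tiled until trunc_len frames are available, then cut.
    frame_num = feat.shape[1]
    if frame_num > trunc_len:
        pos = torch.randint(0, frame_num - trunc_len, (1,)).item()
        return feat[:, pos:pos + trunc_len]
    rep = int(np.ceil(trunc_len / frame_num))
    return feat.repeat(1, rep)[:, :trunc_len]

# toy usage: a 100-frame trial padded to the 750-frame buffer
print(pad_or_trim(torch.ones(60, 100), 750).shape)   # torch.Size([60, 750])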
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-restnet-ocsoftmax/config.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'asvspoof2019_trn' val_set_name = 'asvspoof2019_val' # for convenience tmp = '../DATA/asvspoof2019_LA/' # File lists (text file, one data name per line, without name extension) # trn_list: list of files for the training set trn_list = tmp + '/scp/train.lst' # val_list: list of files for the validation set. It can be None val_list = tmp + '/scp/val.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [tmp + '/train_dev'] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # Please put ".f0" as the last feature input_exts = ['.wav'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [1] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [False] # Similar configurations for output features output_dirs = [] output_dims = [1] output_exts = ['.bin'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU mem required # If you don't want truncation, set truncate_seq = None truncate_seq = None # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = None # Optional argument # Just a buffer for convenience # It can contain anything optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt'] ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = 'asvspoof2019_test' # List of test set data # for convenience, you may directly load test_set list here test_list = tmp + '/scp/test.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory test_input_dirs = ['../DATA/asvspoof2019_LA/eval'] # Directories for output features, empty for this project test_output_dirs = []
3,226
29.733333
75
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-a-softmax/main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The default training/inference process wrapper Requires model.py and config.py Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.default_data_io as nii_dset import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers} # Load file list and create data loader trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list) trn_set = nii_dset.NIIDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate) if prj_conf.val_list is not None: val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list) val_set = nii_dset.NIIDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate) else: val_set = None # initialize the model and loss function model = prj_model.Model(trn_set.get_in_dim(), \ trn_set.get_out_dim(), \ args, trn_set.get_data_mean_std()) loss_wrapper = prj_model.Loss(args) # initialize the optimizer optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args) # if necessary, resume training if args.trained_model == "": checkpoint = None else: checkpoint = torch.load(args.trained_model) # start training nii_nn_wrapper.f_train_wrapper(args, model, loss_wrapper, device, optimizer_wrapper, trn_set, val_set, checkpoint) # done for training else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': args.num_workers} if type(prj_conf.test_list) is list: 
t_lst = prj_conf.test_list else: t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list) test_set = nii_dset.NIIDataSetLoader( prj_conf.test_set_name, \ t_lst, \ prj_conf.test_input_dirs, prj_conf.input_exts, prj_conf.input_dims, prj_conf.input_reso, prj_conf.input_norm, prj_conf.test_output_dirs, prj_conf.output_exts, prj_conf.output_dims, prj_conf.output_reso, prj_conf.output_norm, './', params = params, truncate_seq= None, min_seq_len = None, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate) # initialize model model = prj_model.Model(test_set.get_in_dim(), \ test_set.get_out_dim(), \ args) if args.trained_model == "": print("No model is loaded by --trained-model for inference") print("By default, load %s%s" % (args.save_trained_name, args.save_model_ext)) checkpoint = torch.load("%s%s" % (args.save_trained_name, args.save_model_ext)) else: checkpoint = torch.load(args.trained_model) # do inference and output data nii_nn_wrapper.f_inference_wrapper(args, model, device, \ test_set, checkpoint) # done return if __name__ == "__main__": main()
6,366
34.569832
74
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-a-softmax/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.a_softmax as nii_a_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### torch.manual_seed(1) # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) self.model_debug = False self.flag_validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors, which will be input to the a-softmax layer self.v_emd_dim = 2 # output class (2 for a-softmax layer) self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # 2nd part of the classifier self.m_output_act = 
[] # front-end self.m_frontend = [] # softmax self.m_a_softmax = [] for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfcc_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_a_softmax.append( nii_a_softmax.AngleLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated 
output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, frame_feat_dim, frame_num) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, pad or trim each trial independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly, this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_num, frame_feat_dim) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. 
flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # compute a-softmax output for each feature configuration batch_size = feature_vec.shape[0] // self.v_submodels # negative class scores x_cos_val = torch.zeros( [feature_vec.shape[0], self.v_out_class], dtype=feature_vec.dtype, device=feature_vec.device) # positive class scores x_phi_val = torch.zeros_like(x_cos_val) for idx in range(self.v_submodels): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], inference) x_cos_val[s_idx:e_idx] = tmp1 x_phi_val[s_idx:e_idx] = tmp2 if inference: return torch_nn_func.softmax(x_cos_val, dim=1)[:, 1] else: return [x_cos_val, x_phi_val] def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) a_softmax_act = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device).long() target_vec = target_vec.repeat(self.v_submodels) return [a_softmax_act, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) score = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], score.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_a_softmax.AngularSoftmaxWithLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
16,175
34.946667
80
py
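At inference time, the a-softmax model above turns the two outputs of the angular layer into one detection score by taking the softmax posterior of the bona fide class (index 1), as in its _compute_score. A toy illustration of just that step (the score values are made up):

import torch
import torch.nn.functional as F

# toy (batch, 2) cos-similarity scores from the angular layer
x_cos_val = torch.tensor([[0.3, 0.9]])
score = F.softmax(x_cos_val, dim=1)[:, 1]   # posterior of class 1 (bona fide)
print(score)                                # tensor([0.6457])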
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-a-softmax/config.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'asvspoof2019_trn' val_set_name = 'asvspoof2019_val' # for convenience tmp = '../DATA/asvspoof2019_LA/' # File lists (text file, one data name per line, without name extension) # trn_list: list of files for the training set trn_list = tmp + '/scp/train.lst' # val_list: list of files for the validation set. It can be None val_list = tmp + '/scp/val.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [tmp + '/train_dev'] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # Please put ".f0" as the last feature input_exts = ['.wav'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [1] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [False] # Similar configurations for output features output_dirs = [] output_dims = [1] output_exts = ['.bin'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU mem required # If you don't want truncation, set truncate_seq = None truncate_seq = None # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = None # Optional argument # Just a buffer for convenience # It can contain anything optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt'] ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = 'asvspoof2019_test' # List of test set data # for convenience, you may directly load test_set list here test_list = tmp + '/scp/test.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory test_input_dirs = ['../DATA/asvspoof2019_LA/eval'] # Directories for output features, empty for this project test_output_dirs = []
3,226
29.733333
75
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-sigmoid/main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The default training/inference process wrapper Requires model.py and config.py Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.default_data_io as nii_dset import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers} # Load file list and create data loader trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list) trn_set = nii_dset.NIIDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate) if prj_conf.val_list is not None: val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list) val_set = nii_dset.NIIDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate) else: val_set = None # initialize the model and loss function model = prj_model.Model(trn_set.get_in_dim(), \ trn_set.get_out_dim(), \ args, trn_set.get_data_mean_std()) loss_wrapper = prj_model.Loss(args) # initialize the optimizer optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args) # if necessary, resume training if args.trained_model == "": checkpoint = None else: checkpoint = torch.load(args.trained_model) # start training nii_nn_wrapper.f_train_wrapper(args, model, loss_wrapper, device, optimizer_wrapper, trn_set, val_set, checkpoint) # done for training else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': args.num_workers} if type(prj_conf.test_list) is list: 
t_lst = prj_conf.test_list else: t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list) test_set = nii_dset.NIIDataSetLoader( prj_conf.test_set_name, \ t_lst, \ prj_conf.test_input_dirs, prj_conf.input_exts, prj_conf.input_dims, prj_conf.input_reso, prj_conf.input_norm, prj_conf.test_output_dirs, prj_conf.output_exts, prj_conf.output_dims, prj_conf.output_reso, prj_conf.output_norm, './', params = params, truncate_seq= None, min_seq_len = None, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate) # initialize model model = prj_model.Model(test_set.get_in_dim(), \ test_set.get_out_dim(), \ args) if args.trained_model == "": print("No model is loaded by --trained-model for inference") print("By default, load %s%s" % (args.save_trained_name, args.save_model_ext)) checkpoint = torch.load("%s%s" % (args.save_trained_name, args.save_model_ext)) else: checkpoint = torch.load(args.trained_model) # do inference and output data nii_nn_wrapper.f_inference_wrapper(args, model, device, \ test_set, checkpoint) # done return if __name__ == "__main__": main()
6,366
34.569832
74
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-sigmoid/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) self.model_debug = False self.flag_validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors # here, the embedding is just the activation before sigmoid() self.v_emd_dim = 1 #### # create network #### # 1st part of the classifier self.m_transform = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # it can handle models with multiple front-end configuration # by 
default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfcc_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, 
trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, frame_feat_dim, frame_num) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, pad or trim each trial independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_num, frame_feat_dim) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch size batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3.
flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # feature_vec is [batch * submodel, 1] if inference: return feature_vec.squeeze(1) else: return torch.sigmoid(feature_vec).squeeze(1) def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = torch_nn.BCELoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
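# --------------------------------------------------------------------
# A minimal sketch (not taken from the repository; the dummy values are
# assumptions) of the score/loss convention used by Model and Loss
# above: sigmoid scores in (0, 1) trained with BCELoss against
# 1 = bonafide, 0 = spoof targets read from the protocol file.
import torch
_sketch_scores = torch.sigmoid(torch.randn(4))   # (batch * submodels, )
_sketch_target = torch.tensor([1., 0., 1., 0.])  # protocol labels
_sketch_loss = torch.nn.BCELoss()(_sketch_scores, _sketch_target)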
15,225
34.741784
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-sigmoid/config.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'asvspoof2019_trn' val_set_name = 'asvspoof2019_val' # for convenience tmp = '../DATA/asvspoof2019_LA/' # File lists (text file, one data name per line, without name extension) # trn_list: list of files for the training set trn_list = tmp + '/scp/train.lst' # val_list: list of files for the validation set. It can be None val_list = tmp + '/scp/val.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [tmp + '/train_dev'] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # Please put ".f0" as the last feature input_exts = ['.wav'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [1] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [False] # Similar configurations for output features output_dirs = [] output_dims = [1] output_exts = ['.bin'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU memory is required # If you don't want truncating, please set truncate_seq = None truncate_seq = None # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = None # Optional argument # Just a buffer for convenience # It can contain anything optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt'] ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = 'asvspoof2019_test' # List of test set data # for convenience, you may directly load test_set list here test_list = tmp + '/scp/test.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume test data are put in the same sub-directory test_input_dirs = ['../DATA/asvspoof2019_LA/eval'] # Directories for output features; [] since no output features are used test_output_dirs = []
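# --------------------------------------------------------------------
# A small sketch of the temporal-resolution rule documented above,
# under the assumed 5 ms frame shift at 16 kHz: one frame-level feature
# vector spans 80 waveform points, hence input_reso = 80 for frame-level
# features, while raw '.wav' input (as in this config) uses 1.
_assumed_samp_rate = 16000            # Hz
_assumed_frame_shift_ms = 5           # ms, assumption for illustration
print(_assumed_samp_rate * _assumed_frame_shift_ms // 1000)  # -> 80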
3,226
29.733333
75
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-10/main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The training/inference process wrapper. Dataset API is replaced with NII_MergeDataSetLoader. It is more convenient to train model on corpora stored in different directories. Requires model.py and config.py (config_merge_datasets.py) Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.default_data_io as nii_default_dset import core_scripts.data_io.customize_dataset as nii_dset import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed, args) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers, 'sampler': args.sampler} in_trans_fns = prj_conf.input_trans_fns \ if hasattr(prj_conf, 'input_trans_fns') else None out_trans_fns = prj_conf.output_trans_fns \ if hasattr(prj_conf, 'output_trans_fns') else None # Load file list and create data loader trn_lst = prj_conf.trn_list trn_set = nii_dset.NII_MergeDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) if prj_conf.val_list is not None: val_lst = prj_conf.val_list val_set = nii_dset.NII_MergeDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) else: val_set = None # initialize the model and loss function model 
= prj_model.Model(trn_set.get_in_dim(), \ trn_set.get_out_dim(), \ args, prj_conf, trn_set.get_data_mean_std()) loss_wrapper = prj_model.Loss(args) # initialize the optimizer optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args) # if necessary, resume training if args.trained_model == "": checkpoint = None else: checkpoint = torch.load(args.trained_model) # start training nii_nn_wrapper.f_train_wrapper(args, model, loss_wrapper, device, optimizer_wrapper, trn_set, val_set, checkpoint) # done for training else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': args.num_workers} in_trans_fns = prj_conf.test_input_trans_fns \ if hasattr(prj_conf, 'test_input_trans_fns') else None out_trans_fns = prj_conf.test_output_trans_fns \ if hasattr(prj_conf, 'test_output_trans_fns') else None if type(prj_conf.test_list) is list: t_lst = prj_conf.test_list else: t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list) test_set = nii_dset.NII_MergeDataSetLoader( prj_conf.test_set_name, \ t_lst, \ prj_conf.test_input_dirs, prj_conf.input_exts, prj_conf.input_dims, prj_conf.input_reso, prj_conf.input_norm, prj_conf.test_output_dirs, prj_conf.output_exts, prj_conf.output_dims, prj_conf.output_reso, prj_conf.output_norm, './', params = params, truncate_seq= None, min_seq_len = None, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) # initialize model model = prj_model.Model(test_set.get_in_dim(), \ test_set.get_out_dim(), \ args, prj_conf) if args.trained_model == "": print("No model is loaded by --trained-model for inference") print("By default, load %s%s" % (args.save_trained_name, args.save_model_ext)) checkpoint = torch.load("%s%s" % (args.save_trained_name, args.save_model_ext)) else: checkpoint = torch.load(args.trained_model) # do inference and output data nii_nn_wrapper.f_inference_wrapper(args, model, device, \ test_set, checkpoint) # done return if __name__ == "__main__": main()
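# --------------------------------------------------------------------
# A hedged usage sketch: given the attributes read from args above
# (module_config, module_model, inference, trained_model), invoking the
# wrapper presumably looks like the lines below; the exact flag
# spellings are assumptions, not verified against the argument parser.
#
#   $ python main.py --module-config config --module-model model
#   $ python main.py --inference --trained-model trained_network.pt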
7,819
35.886792
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-10/model.py
#!/usr/bin/env python """ model.py for harmonic-plus-noise NSF with trainable sinc filter version: 9 """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import core_scripts.other_tools.debug as nii_debug __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## # Building blocks (torch.nn modules + dimension operation) # # For blstm class BLSTMLayer(torch_nn.Module): """ Wrapper over dilated BLSTM Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) Recurrency is conducted along "length" """ def __init__(self, input_dim, output_dim): super(BLSTMLayer, self).__init__() if output_dim % 2 != 0: print("Output_dim of BLSTMLayer is {:d}".format(output_dim)) print("BLSTMLayer expects a layer size of even number") sys.exit(1) # bi-directional LSTM self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \ bidirectional=True) def forward(self, x): # permute to (length, batchsize=1, dim) blstm_data, _ = self.l_blstm(x.permute(1, 0, 2)) # permute it backt to (batchsize=1, length, dim) return blstm_data.permute(1, 0, 2) # # 1D dilated convolution that keep the input/output length class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal = False, stride = 1, groups=1, bias=True, \ tanh = True, pad_mode='constant'): super(Conv1dKeepLength, self).__init__( input_dim, output_dim, kernel_s, stride=stride, padding = 0, dilation = dilation_s, groups=groups, bias=bias) self.pad_mode = pad_mode self.causal = causal # input & output length will be the same if self.causal: # left pad to make the convolution causal self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: # pad on both sizes self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): # permute to (batchsize=1, dim, length) # add one dimension (batchsize=1, dim, ADDED_DIM, length) # pad to ADDED_DIM # squeeze and return to (batchsize=1, dim, length) # https://github.com/pytorch/pytorch/issues/1333 x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \ (self.pad_le, self.pad_ri, 0, 0), mode = self.pad_mode).squeeze(2) # tanh(conv1()) # permmute back to (batchsize=1, length, dim) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) # # Moving average class MovingAverage(Conv1dKeepLength): """ Wrapper to define a moving average smoothing layer Note: MovingAverage can be implemented using TimeInvFIRFilter too. 
Here we define another Module dicrectly on Conv1DKeepLength """ def __init__(self, feature_dim, window_len, causal=False, \ pad_mode='replicate'): super(MovingAverage, self).__init__( feature_dim, feature_dim, 1, window_len, causal, groups=feature_dim, bias=False, tanh=False, \ pad_mode=pad_mode) # set the weighting coefficients torch_nn.init.constant_(self.weight, 1/window_len) # turn off grad for this layer for p in self.parameters(): p.requires_grad = False def forward(self, data): return super(MovingAverage, self).forward(data) # # FIR filter layer class TimeInvFIRFilter(Conv1dKeepLength): """ Wrapper to define a FIR filter over Conv1d Note: FIR Filtering is conducted on each dimension (channel) independently: groups=channel_num in conv1d """ def __init__(self, feature_dim, filter_coef, causal=True, flag_train=False): """ __init__(self, feature_dim, filter_coef, causal=True, flag_train=False) feature_dim: dimension of input data filter_coef: 1-D tensor of filter coefficients causal: FIR is causal or not (default: true) flag_train: whether train the filter coefficients (default false) Input data: (batchsize=1, length, feature_dim) Output data: (batchsize=1, length, feature_dim) """ super(TimeInvFIRFilter, self).__init__( feature_dim, feature_dim, 1, filter_coef.shape[0], causal, groups=feature_dim, bias=False, tanh=False) if filter_coef.ndim == 1: # initialize weight using provided filter_coef with torch.no_grad(): tmp_coef = torch.zeros([feature_dim, 1, filter_coef.shape[0]]) tmp_coef[:, 0, :] = filter_coef tmp_coef = torch.flip(tmp_coef, dims=[2]) self.weight = torch.nn.Parameter(tmp_coef, requires_grad=flag_train) else: print("TimeInvFIRFilter expects filter_coef to be 1-D tensor") print("Please implement the code in __init__ if necessary") sys.exit(1) def forward(self, data): return super(TimeInvFIRFilter, self).forward(data) class TimeVarFIRFilter(torch_nn.Module): """ TimeVarFIRFilter Given sequences of filter coefficients and a signal, do filtering Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) For batch 0: For n in [1, sequence_length): output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k) Note: filter coef (0, n, :) is only used to compute the output at (0, n, 1) """ def __init__(self): super(TimeVarFIRFilter, self).__init__() def forward(self, signal, f_coef): """ Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) Output: (batchsize=1, signal_length, 1) For n in [1, sequence_length): output(0, n, 1)= \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k) This method may be not efficient: Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K] output [y_1, y_2, y_3, ..., y_N, *, * ... *] = a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] """ signal_l = signal.shape[1] order_k = f_coef.shape[-1] # pad to (batchsize=1, signal_length + filter_order-1, dim) padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1)) y = torch.zeros_like(signal) # roll and weighted sum, only take [0:signal_length] for k in range(order_k): y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \ * f_coef[:, :, k:k+1] # done return y # Sinc filter generator class SincFilter(torch_nn.Module): """ SincFilter Given the cut-off-frequency, produce the low-pass and high-pass windowed-sinc-filters. 
If input cut-off-frequency is (batchsize=1, signal_length, 1), output filter coef is (batchsize=1, signal_length, filter_order). For each time step in [1, signal_length), we calculate one filter for low-pass sinc filter and another for high-pass filter. Example: import scipy import scipy.signal import numpy as np filter_order = 31 cut_f = 0.2 sinc_layer = SincFilter(filter_order) lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f) w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1]) w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1]) plt.plot(w, 20*np.log10(np.abs(h1))) plt.plot(w, 20*np.log10(np.abs(h2))) plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0]) """ def __init__(self, filter_order): super(SincFilter, self).__init__() # Make the filter oder an odd number # [-(M-1)/2, ... 0, (M-1)/2] # self.half_k = (filter_order - 1) // 2 self.order = self.half_k * 2 +1 def hamming_w(self, n_index): """ prepare hamming window for each time step n_index (batchsize=1, signal_length, filter_order) For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2] ... output (batchsize=1, signal_length, filter_order) output[0, 0, :] = hamming_window output[0, 1, :] = hamming_window ... """ # Hamming window return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order) def sinc(self, x): """ Normalized sinc-filter sin( pi * x) / pi * x https://en.wikipedia.org/wiki/Sinc_function Assume x (batchsize, signal_length, filter_order) and x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order] x[:, :, self.half_order] -> time index = 0, sinc(0)=1 """ y = torch.zeros_like(x) y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \ / (np.pi * x[:, :, 0:self.half_k]) y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:]) \ / (np.pi * x[:, :, self.half_k+1:]) y[:,:,self.half_k] = 1 return y def forward(self, cut_f): """ lp_coef, hp_coef = forward(self, cut_f) cut-off frequency cut_f (batchsize=1, length, dim = 1) lp_coef: low-pass filter coefs (batchsize, length, filter_order) hp_coef: high-pass filter coefs (batchsize, length, filter_order) """ # create the filter order index with torch.no_grad(): # [- (M-1) / 2, ..., 0, ..., (M-1)/2] lp_coef = torch.arange(-self.half_k, self.half_k + 1, device=cut_f.device) # [[[- (M-1) / 2, ..., 0, ..., (M-1)/2], # [- (M-1) / 2, ..., 0, ..., (M-1)/2], # ... # ], # [[- (M-1) / 2, ..., 0, ..., (M-1)/2], # [- (M-1) / 2, ..., 0, ..., (M-1)/2], # ... 
# ]] lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) hp_coef = torch.arange(-self.half_k, self.half_k + 1, device=cut_f.device) hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) # temporary buffer of [-1^n] for gain norm in hp_coef tmp_one = torch.pow(-1, hp_coef) # unnormalized filter coefs with hamming window lp_coef = cut_f * self.sinc(cut_f * lp_coef) \ * self.hamming_w(lp_coef) hp_coef = (self.sinc(hp_coef) \ - cut_f * self.sinc(cut_f * hp_coef)) \ * self.hamming_w(hp_coef) # normalize the coef to make gain at 0/pi is 0 dB # sum_n lp_coef[n] lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1) # sum_n hp_coef[n] * -1^n hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1) lp_coef = lp_coef / lp_coef_norm hp_coef = hp_coef / hp_coef_norm # return normed coef return lp_coef, hp_coef # # Up sampling class UpSampleLayer(torch_nn.Module): """ Wrapper over up-sampling Input tensor: (batchsize=1, length, dim) Ouput tensor: (batchsize=1, length * up-sampling_factor, dim) """ def __init__(self, feature_dim, up_sampling_factor, smoothing=False): super(UpSampleLayer, self).__init__() # wrap a up_sampling layer self.scale_factor = up_sampling_factor self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor) if smoothing: self.l_ave1 = MovingAverage(feature_dim, self.scale_factor) self.l_ave2 = MovingAverage(feature_dim, self.scale_factor) else: self.l_ave1 = torch_nn.Identity() self.l_ave2 = torch_nn.Identity() return def forward(self, x): # permute to (batchsize=1, dim, length) up_sampled_data = self.l_upsamp(x.permute(0, 2, 1)) # permute it backt to (batchsize=1, length, dim) # and do two moving average return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1))) # Neural filter block (1 block) class NeuralFilterBlock(torch_nn.Module): """ Wrapper over a single filter block """ def __init__(self, signal_size, hidden_size,\ kernel_size=3, conv_num=10): super(NeuralFilterBlock, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.conv_num = conv_num self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)] # ff layer to expand dimension self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \ bias=False) self.l_ff_1_tanh = torch_nn.Tanh() # dilated conv layers tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \ kernel_size, causal=True, bias=False) \ for x in self.dilation_s] self.l_convs = torch_nn.ModuleList(tmp) # ff layer to de-expand dimension self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4, \ bias=False) self.l_ff_2_tanh = torch_nn.Tanh() self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size, \ bias=False) self.l_ff_3_tanh = torch_nn.Tanh() # a simple scale self.scale = torch_nn.Parameter(torch.tensor([0.1]), requires_grad=False) return def forward(self, signal, context): """ Assume: signal (batchsize=1, length, signal_size) context (batchsize=1, length, hidden_size) Output: (batchsize=1, length, signal_size) """ # expand dimension tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal)) # loop over dilated convs # output of a d-conv is input + context + d-conv(input) for l_conv in self.l_convs: tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context # to be consistent with legacy configuration in CURRENNT tmp_hidden = tmp_hidden * self.scale # compress the dimesion and skip-add tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden)) tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden)) output_signal = tmp_hidden + signal return output_signal # # Sine waveform 
generator # # Sine waveform generator class SineGen(torch_nn.Module): """ Definition of sine generator SineGen(samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False) samp_rate: sampling rate in Hz harmonic_num: number of harmonic overtones (default 0) sine_amp: amplitude of sine-wavefrom (default 0.1) noise_std: std of Gaussian noise (default 0.003) voiced_thoreshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SinGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced segment is always sin(np.pi) or cos(0) """ def __init__(self, samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False): super(SineGen, self).__init__() self.sine_amp = sine_amp self.noise_std = noise_std self.harmonic_num = harmonic_num self.dim = self.harmonic_num + 1 self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.flag_for_pulse = flag_for_pulse def _f02uv(self, f0): # generate uv signal uv = torch.ones_like(f0) uv = uv * (f0 > self.voiced_threshold) return uv def _f02sine(self, f0_values): """ f0_values: (batchsize, length, dim) where dim indicates fundamental tone and overtones """ # convert to F0 in rad. The interger part n can be ignored # because 2 * np.pi * n doesn't affect phase rad_values = (f0_values / self.sampling_rate) % 1 # initial phase noise (no noise for fundamental component) rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\ device = f0_values.device) rand_ini[:, 0] = 0 rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) if not self.flag_for_pulse: # for normal case # To prevent torch.cumsum numerical overflow, # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. # Buffer tmp_over_one_idx indicates the time step to add -1. # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi tmp_over_one = torch.cumsum(rad_values, 1) % 1 tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) \ * 2 * np.pi) else: # If necessary, make sure that the first time step of every # voiced segments is sin(pi) or cos(0) # This is used for pulse-train generation # identify the last time step in unvoiced segments uv = self._f02uv(f0_values) uv_1 = torch.roll(uv, shifts=-1, dims=1) uv_1[:, -1, :] = 1 u_loc = (uv < 1) * (uv_1 > 0) # get the instantanouse phase tmp_cumsum = torch.cumsum(rad_values, dim=1) # different batch needs to be processed differently for idx in range(f0_values.shape[0]): temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] # stores the accumulation of i.phase within # each voiced segments tmp_cumsum[idx, :, :] = 0 tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum # rad_values - tmp_cumsum: remove the accumulation of i.phase # within the previous voiced segment. 
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) # get the sines sines = torch.cos(i_phase * 2 * np.pi) return sines def forward(self, f0): """ sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 output sine_tensor: tensor(batchsize=1, length, dim) output uv: tensor(batchsize=1, length, 1) """ with torch.no_grad(): f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \ device=f0.device) # fundamental component f0_buf[:, :, 0] = f0[:, :, 0] for idx in np.arange(self.harmonic_num): # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2) # generate sine waveforms sine_waves = self._f02sine(f0_buf) * self.sine_amp # generate uv signal #uv = torch.ones(f0.shape) #uv = uv * (f0 > self.voiced_threshold) uv = self._f02uv(f0) # noise: for unvoiced should be similar to sine_amp # std = self.sine_amp/3 -> max value ~ self.sine_amp #. for voiced regions is self.noise_std noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) # first: set the unvoiced part to 0 by uv # then: additive noise sine_waves = sine_waves * uv + noise return sine_waves, uv, noise ##### ## Model definition ## ## For condition module only provide Spectral feature to Filter block class CondModuleHnSincNSF(torch_nn.Module): """ Condition module for hn-sinc-NSF Upsample and transform input features CondModuleHnSincNSF(input_dimension, output_dimension, up_sample_rate, blstm_dimension = 64, cnn_kernel_size = 3) Spec, F0, cut_off_freq = CondModuleHnSincNSF(features, F0) Both input features should be frame-level features If x doesn't contain F0, just ignore the returned F0 CondModuleHnSincNSF(input_dim, output_dim, up_sample, blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0): input_dim: sum of dimensions of input features output_dim: dim of the feature Spec to be used by neural filter-block up_sample: up sampling rate of input features blstm_s: dimension of the features from blstm (default 64) cnn_kernel_s: kernel size of CNN in condition module (default 3) voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced """ def __init__(self, input_dim, output_dim, up_sample, \ blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0): super(CondModuleHnSincNSF, self).__init__() # input feature dimension self.input_dim = input_dim self.output_dim = output_dim self.up_sample = up_sample #self.blstm_s = blstm_s self.cnn_kernel_s = cnn_kernel_s self.cut_f_smooth = up_sample * 4 self.voiced_threshold = voiced_threshold # the blstm layer tmp_input_size = [input_dim, output_dim, output_dim] tmp_output_size = [output_dim, output_dim, output_dim] tmp = [Conv1dKeepLength(x, y, dilation_s = 1, kernel_s = self.cnn_kernel_s) for x, y in zip(tmp_input_size, tmp_output_size)] self.l_conv1ds = torch_nn.ModuleList(tmp) #self.l_conv1ds = BLSTMLayer(input_dim, self.blstm_s) # the CNN layer (+1 dim for cut_off_frequence of sinc filter) #self.l_conv1d = Conv1dKeepLength(self.blstm_s, \ # self.output_dim, \ # dilation_s = 1, \ # kernel_s = self.cnn_kernel_s) # Upsampling layer for hidden features self.l_upsamp = UpSampleLayer(self.output_dim, \ self.up_sample, True) # separate layer for up-sampling normalized F0 values self.l_upsamp_f0_hi = UpSampleLayer(1, self.up_sample, True) # Upsampling for F0: don't smooth up-sampled F0 self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False) # Another smoothing layer to smooth the cut-off frequency # for sinc filters. 
Use a larger window to smooth self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth) def get_cut_f(self, hidden_feat, f0): """ cut_f = get_cut_f(self, feature, f0) feature: (batchsize, length, dim=1) f0: (batchsize, length, dim=1) """ # generate uv signal uv = torch.ones_like(f0) * (f0 > self.voiced_threshold) # hidden_feat is between (-1, 1) after conv1d with tanh # (-0.2, 0.2) + 0.3 = (0.1, 0.5) # voiced: (0.1, 0.5) + 0.4 = (0.5, 0.9) # unvoiced: (0.1, 0.5) = (0.1, 0.5) return hidden_feat * 0.2 + uv * 0.4 + 0.3 def forward(self, feature, f0): """ spec, f0 = forward(self, feature, f0) feature: (batchsize, length, dim) f0: (batchsize, length, dim=1), which should be F0 at frame-level spec: (batchsize, length, self.output_dim), at wave-level f0: (batchsize, length, 1), at wave-level """ tmp = feature for l_conv in self.l_conv1ds: tmp = l_conv(tmp) tmp = self.l_upsamp(tmp) # concatenat normed F0 with hidden spectral features context = torch.cat((tmp[:, :, 0:self.output_dim-1], \ self.l_upsamp_f0_hi(feature[:, :, -1:])), \ dim=2) # hidden feature for cut-off frequency hidden_cut_f = tmp[:, :, self.output_dim-1:] # directly up-sample F0 without smoothing f0_upsamp = self.l_upsamp_F0(f0) # get the cut-off-frequency from output of CNN cut_f = self.get_cut_f(hidden_cut_f, f0_upsamp) # smooth the cut-off-frequency using fixed average smoothing cut_f_smoothed = self.l_cut_f_smooth(cut_f) # return return context, f0_upsamp, cut_f_smoothed, hidden_cut_f # For source module class SourceModuleHnNSF(torch_nn.Module): """ SourceModule for hn-nsf SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0) sampling_rate: sampling_rate in Hz harmonic_num: number of harmonic above F0 (default: 0) sine_amp: amplitude of sine source signal (default: 0.1) add_noise_std: std of additive Gaussian noise (default: 0.003) note that amplitude of noise in unvoiced is decided by sine_amp voiced_threshold: threhold to set U/V given F0 (default: 0) Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length 1) uv (batchsize, length, 1) """ def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0): super(SourceModuleHnNSF, self).__init__() self.sine_amp = sine_amp self.noise_std = add_noise_std # to produce sine waveforms self.l_sin_gen = SineGen(sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod) # to merge source harmonics into a single excitation self.l_linear = torch_nn.Linear(harmonic_num+1, 1) self.l_tanh = torch_nn.Tanh() def forward(self, x): """ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length 1) """ # source for harmonic branch sine_wavs, uv, _ = self.l_sin_gen(x) sine_merge = self.l_tanh(self.l_linear(sine_wavs)) # source for noise branch, in the same shape as uv noise = torch.randn_like(uv) * self.sine_amp / 3 return sine_merge, noise, uv # For Filter module class FilterModuleHnSincNSF(torch_nn.Module): """ Filter for Hn-sinc-NSF FilterModuleHnSincNSF(signal_size, hidden_size, sinc_order = 31, block_num = 5, kernel_size = 3, conv_num_in_block = 10) signal_size: signal dimension (should be 1) hidden_size: dimension of hidden features inside neural filter block sinc_order: order of the sinc filter block_num: number of neural filter blocks in harmonic branch kernel_size: kernel size in dilated CNN 
conv_num_in_block: number of d-conv1d in one neural filter block Usage: output = FilterModuleHnSincNSF(har_source, noi_source, cut_f, context) har_source: source for harmonic branch (batchsize, length, dim=1) noi_source: source for noise branch (batchsize, length, dim=1) cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1) context: hidden features to be added (batchsize, length, dim) output: (batchsize, length, dim=1) """ def __init__(self, signal_size, hidden_size, sinc_order = 31, \ block_num = 5, kernel_size = 3, conv_num_in_block = 10): super(FilterModuleHnSincNSF, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.block_num = block_num self.conv_num_in_block = conv_num_in_block self.sinc_order = sinc_order # filter blocks for harmonic branch tmp = [NeuralFilterBlock(signal_size, hidden_size, \ kernel_size, conv_num_in_block) \ for x in range(self.block_num)] self.l_har_blocks = torch_nn.ModuleList(tmp) # filter blocks for noise branch (only one block, 5 sub-blocks) tmp = [NeuralFilterBlock(signal_size, hidden_size, \ kernel_size, conv_num_in_block // 2) \ for x in range(1)] self.l_noi_blocks = torch_nn.ModuleList(tmp) # sinc filter generators and time-variant filtering layer self.l_sinc_coef = SincFilter(self.sinc_order) self.l_tv_filtering = TimeVarFIRFilter() # done def forward(self, har_component, noi_component, cond_feat, cut_f): """ """ # harmonic component for l_har_block in self.l_har_blocks: har_component = l_har_block(har_component, cond_feat) # noise componebt for l_noi_block in self.l_noi_blocks: noi_component = l_noi_block(noi_component, cond_feat) # get sinc filter coefficients lp_coef, hp_coef = self.l_sinc_coef(cut_f) # time-variant filtering har_signal = self.l_tv_filtering(har_component, lp_coef) noi_signal = self.l_tv_filtering(noi_component, hp_coef) # get output return har_signal + noi_signal ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None): super(Model, self).__init__() # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) self.input_dim = in_dim self.output_dim = out_dim # configurations # amplitude of sine waveform (for each harmonic) self.sine_amp = 0.1 # standard deviation of Gaussian noise for additive noise self.noise_std = 0.003 # dimension of hidden features in filter blocks self.hidden_dim = 64 # upsampling rate on input acoustic features (16kHz * 5ms = 80) # assume input_reso has the same value self.upsamp_rate = prj_conf.input_reso[0] # sampling rate (Hz) self.sampling_rate = prj_conf.wav_samp_rate # CNN kernel size in filter blocks self.cnn_kernel_s = 3 # number of filter blocks (for harmonic branch) # noise branch only uses 1 block self.filter_block_num = 5 # number of dilated CNN in each filter block self.cnn_num_in_block = 10 # number of harmonic overtones in source self.harmonic_num = 7 # order of sinc-windowed-FIR-filter self.sinc_order = 31 # the three modules self.m_cond = CondModuleHnSincNSF(self.input_dim, \ self.hidden_dim, \ self.upsamp_rate, \ cnn_kernel_s=self.cnn_kernel_s) self.m_source = SourceModuleHnNSF(self.sampling_rate, self.harmonic_num, self.sine_amp, 
self.noise_std) self.m_filter = FilterModuleHnSincNSF(self.output_dim, \ self.hidden_dim, \ self.sinc_order, \ self.filter_block_num, \ self.cnn_kernel_s, \ self.cnn_num_in_block) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def forward(self, x): """ definition of forward method Assume x (batchsize=1, length, dim) Return output(batchsize=1, length) """ # assume x[:, :, -1] is F0, denormalize F0 f0 = x[:, :, -1:] # normalize the input features data feat = self.normalize_input(x) # condition module # feature-to-filter-block, f0-up-sampled, cut-off-f-for-sinc, # hidden-feature-for-cut-off-f cond_feat, f0_upsamped, cut_f, hid_cut_f = self.m_cond(feat, f0) # source module # harmonic-source, noise-source (for noise branch), uv har_source, noi_source, uv = self.m_source(f0_upsamped) # neural filter module (including sinc-based FIR filtering) # output output = self.m_filter(har_source, noi_source, cond_feat, cut_f) if self.training: # just in case we need to penalize the hidden feauture for # cut-off-freq. 
return [output.squeeze(-1), hid_cut_f] else: return output.squeeze(-1) class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ # frame shift (number of points) self.frame_hops = [80, 40, 640] # frame length self.frame_lens = [320, 80, 1920] # fft length self.fft_n = [512, 128, 2048] # window type in stft self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # loss function self.loss = torch_nn.MSELoss() # weight to penalize hidden features for cut-off-frequency # for experiments on CMU-arctic, ATR-F009, VCTK, cutoff_w = 0.0 self.cutoff_w = 0.0 return def _stft(self, signal, fft_p, frame_shift, frame_len): """ wrapper of torch.stft Remember to use onesided=True, pad_mode="constant" Signal (batchsize, length) Output (batchsize, fft_p/2+1, frame_num, 2) """ # to be compatible with different torch versions if torch.__version__.split('.')[1].isnumeric() and \ int(torch.__version__.split('.')[1]) < 7: return torch.stft( signal, fft_p, frame_shift, frame_len, window=self.win(frame_len, dtype=signal.dtype, device=signal.device), onesided=True, pad_mode="constant") else: return torch.stft( signal, fft_p, frame_shift, frame_len, window=self.win(frame_len, dtype=signal.dtype, device=signal.device), onesided=True, pad_mode="constant", return_complex=False) def _amp(self, x): """ _amp(stft) x_stft: (batchsize, fft_p/2+1, frame_num, 2) output: (batchsize, fft_p/2+1, frame_num) output[x, y, z] = log(x_stft[x, y, z, 1]^2 + x_stft[x, y, z, 2]^2 + floor) """ return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor) def compute(self, outputs, target): """ Loss().compute(outputs, target) should return the Loss in torch.tensor format Assume output and target as (batchsize=1, length) """ # hidden-feature for cut-off-frequency cut_f = outputs[1] # generated signal output = outputs[0] # convert from (batchsize=1, length, dim=1) to (1, length) if target.ndim == 3: target.squeeze_(-1) # compute loss loss = 0 for frame_shift, frame_len, fft_p in \ zip(self.frame_hops, self.frame_lens, self.fft_n): x_stft = self._stft(output, fft_p, frame_shift, frame_len) y_stft = self._stft(target, fft_p, frame_shift, frame_len) x_sp_amp = self._amp(x_stft) y_sp_amp = self._amp(y_stft) loss += self.loss(x_sp_amp, y_sp_amp) # A norm on cut_f, which forces sinc-cut-off-frequency # to be close to the U/V-decided value # Experiments on CMU-arctic, ATR-F009, and VCTK don't use it # by setting self.cutoff_w = 0.0 # However, just in case loss += self.cutoff_w * self.loss(cut_f, torch.zeros_like(cut_f)) return loss if __name__ == "__main__": print("Definition of model")
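# --------------------------------------------------------------------
# A stand-alone sketch (the random waveforms are assumptions) of the
# multi-resolution spectral loss defined in Loss above, reduced to its
# first resolution: fft_n=512, frame_hop=80, frame_len=320, with the
# same amplitude floor of 0.00001 before the log.
import torch
_x = torch.randn(2, 16000)            # generated waveform (made up)
_y = torch.randn(2, 16000)            # natural waveform (made up)
_win = torch.hann_window(320)
def _log_spec_amp(sig):
    # log spectral amplitude: log(|STFT|^2 + floor)
    spec = torch.stft(sig, 512, 80, 320, window=_win, onesided=True,
                      pad_mode="constant", return_complex=True)
    return torch.log(spec.abs().pow(2) + 0.00001)
_spec_loss = torch.nn.MSELoss()(_log_spec_amp(_x), _log_spec_amp(_y))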
40,407
38.810837
78
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-10/config.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'cmu_all_trn' val_set_name = 'cmu_all_val' # for convenience tmp = '../DATA/cmu-arctic-data-set' # File lists (text file, one data name per line, without name extension) # trn_list: list of files for the training set trn_list = [tmp + '/scp/train.lst'] # val_list: list of files for the validation set. It can be None val_list = [tmp + '/scp/val.lst'] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [80, 1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # Please put ".f0" as the last feature input_exts = ['.mfbsp', '.f0'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [80, 80] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [True, True] # Similar configurations for output features output_dirs = [[tmp + '/wav_16k_norm']] output_dims = [1] output_exts = ['.wav'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU memory is required # If you don't want truncating, please set truncate_seq = None truncate_seq = 16000 * 3 # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = 80 * 50 ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = ['cmu_all_test_tiny'] # List of test set data # for convenience, you may directly load test_set list here test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476', 'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476', 'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476', 'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume test data are put in the same sub-directory test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']] # Directories for output features; [[]] since no output features are used test_output_dirs = [[]]
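# --------------------------------------------------------------------
# A small arithmetic sketch of what the lengths above imply under the
# 5 ms / 16 kHz setup encoded by input_reso = 80: truncate_seq keeps at
# most 3 s of waveform per training segment, and minimum_len demands at
# least 50 acoustic frames (0.25 s) per utterance.
print((16000 * 3) // 80)   # -> 600 frames per truncated segment
print((80 * 50) // 80)     # -> 50 frames minimum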
3,430
32.31068
75
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-nsf/main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The training/inference process wrapper. Dataset API is replaced with NII_MergeDataSetLoader. It is more convenient to train model on corpora stored in different directories. Requires model.py and config.py (config_merge_datasets.py) Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.default_data_io as nii_default_dset import core_scripts.data_io.customize_dataset as nii_dset import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed, args) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers, 'sampler': args.sampler} in_trans_fns = prj_conf.input_trans_fns \ if hasattr(prj_conf, 'input_trans_fns') else None out_trans_fns = prj_conf.output_trans_fns \ if hasattr(prj_conf, 'output_trans_fns') else None # Load file list and create data loader trn_lst = prj_conf.trn_list trn_set = nii_dset.NII_MergeDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) if prj_conf.val_list is not None: val_lst = prj_conf.val_list val_set = nii_dset.NII_MergeDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) else: val_set = None # initialize the model and loss function model 
= prj_model.Model(trn_set.get_in_dim(), \ trn_set.get_out_dim(), \ args, prj_conf, trn_set.get_data_mean_std()) loss_wrapper = prj_model.Loss(args) # initialize the optimizer optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args) # if necessary, resume training if args.trained_model == "": checkpoint = None else: checkpoint = torch.load(args.trained_model) # start training nii_nn_wrapper.f_train_wrapper(args, model, loss_wrapper, device, optimizer_wrapper, trn_set, val_set, checkpoint) # done for training else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': args.num_workers} in_trans_fns = prj_conf.test_input_trans_fns \ if hasattr(prj_conf, 'test_input_trans_fns') else None out_trans_fns = prj_conf.test_output_trans_fns \ if hasattr(prj_conf, 'test_output_trans_fns') else None if type(prj_conf.test_list) is list: t_lst = prj_conf.test_list else: t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list) test_set = nii_dset.NII_MergeDataSetLoader( prj_conf.test_set_name, \ t_lst, \ prj_conf.test_input_dirs, prj_conf.input_exts, prj_conf.input_dims, prj_conf.input_reso, prj_conf.input_norm, prj_conf.test_output_dirs, prj_conf.output_exts, prj_conf.output_dims, prj_conf.output_reso, prj_conf.output_norm, './', params = params, truncate_seq= None, min_seq_len = None, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) # initialize model model = prj_model.Model(test_set.get_in_dim(), \ test_set.get_out_dim(), \ args, prj_conf) if args.trained_model == "": print("No model is loaded by --trained-model for inference") print("By default, load %s%s" % (args.save_trained_name, args.save_model_ext)) checkpoint = torch.load("%s%s" % (args.save_trained_name, args.save_model_ext)) else: checkpoint = torch.load(args.trained_model) # do inference and output data nii_nn_wrapper.f_inference_wrapper(args, model, device, \ test_set, checkpoint) # done return if __name__ == "__main__": main()
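# --------------------------------------------------------------------
# A minimal sketch of the checkpoint handling above: both the resume
# and inference paths simply torch.load() a file path. The file name
# below is a hypothetical placeholder, not a file shipped with the
# project.
import torch
torch.save({'dummy_step': 0}, 'example_checkpoint.pt')  # hypothetical
_sketch_checkpoint = torch.load('example_checkpoint.pt')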
7,819
35.886792
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-nsf/model.py
#!/usr/bin/env python """ model.py for harmonic-plus-noise NSF version: 1 """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import core_scripts.other_tools.debug as nii_debug __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## # Building blocks (torch.nn modules + dimension operation) # # For blstm class BLSTMLayer(torch_nn.Module): """ Wrapper over BLSTM Input tensor: (batchsize, length, dim_in) Output tensor: (batchsize, length, dim_out) We want to keep the length the same """ def __init__(self, input_dim, output_dim): super(BLSTMLayer, self).__init__() if output_dim % 2 != 0: print("Output_dim of BLSTMLayer is {:d}".format(output_dim)) print("BLSTMLayer expects a layer size of even number") sys.exit(1) # bi-directional LSTM self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \ bidirectional=True) def forward(self, x): # permute to (length, batchsize=1, dim) blstm_data, _ = self.l_blstm(x.permute(1, 0, 2)) # permute it backt to (batchsize=1, length, dim) return blstm_data.permute(1, 0, 2) # # 1D dilated convolution that keep the input/output length class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize, length, dim_in) Output tensor: (batchsize, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is applied """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal = False, stride = 1, groups=1, bias=True, \ tanh = True): super(Conv1dKeepLength, self).__init__( input_dim, output_dim, kernel_s, stride=stride, padding = 0, dilation = dilation_s, groups=groups, bias=bias) self.causal = causal # input & output length will be the same if self.causal: # left pad to make the convolution causal self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: # pad on both sizes self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): # permute to (batchsize=1, dim, length) # add one dimension (batchsize=1, dim, ADDED_DIM, length) # pad to ADDED_DIM # squeeze and return to (batchsize=1, dim, length) # https://github.com/pytorch/pytorch/issues/1333 x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \ (self.pad_le, self.pad_ri, 0, 0)).squeeze(2) # tanh(conv1()) # permmute back to (batchsize=1, length, dim) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) # # Moving average class MovingAverage(Conv1dKeepLength): """ Wrapper to define a moving average smoothing layer Note: MovingAverage can be implemented using TimeInvFIRFilter too. 
Here we define another Module dicrectly on Conv1DKeepLength """ def __init__(self, feature_dim, window_len, causal=False): super(MovingAverage, self).__init__( feature_dim, feature_dim, 1, window_len, causal, groups=feature_dim, bias=False, tanh=False) # set the weighting coefficients torch_nn.init.constant_(self.weight, 1/window_len) # turn off grad for this layer for p in self.parameters(): p.requires_grad = False def forward(self, data): return super(MovingAverage, self).forward(data) # # FIR filter layer class TimeInvFIRFilter(Conv1dKeepLength): """ Wrapper to define a FIR filter over Conv1d Note: FIR Filtering is conducted on each dimension (channel) independently: groups=channel_num in conv1d """ def __init__(self, feature_dim, filter_coef, causal=True, flag_train=False): """ __init__(self, feature_dim, filter_coef, causal=True, flag_train=False) feature_dim: dimension of input data filter_coef: 1-D tensor of filter coefficients causal: FIR is causal or not (default: true) flag_train: whether train the filter coefficients (default: false) Input data: (batchsize, length, feature_dim) Output data: (batchsize, length, feature_dim) """ super(TimeInvFIRFilter, self).__init__( feature_dim, feature_dim, 1, filter_coef.shape[0], causal, groups=feature_dim, bias=False, tanh=False) if filter_coef.ndim == 1: # initialize weight using provided filter_coef with torch.no_grad(): tmp_coef = torch.zeros([feature_dim, 1, filter_coef.shape[0]]) tmp_coef[:, 0, :] = filter_coef tmp_coef = torch.flip(tmp_coef, dims=[2]) self.weight = torch.nn.Parameter(tmp_coef, requires_grad=flag_train) else: print("TimeInvFIRFilter expects filter_coef to be 1-D tensor") print("Please implement the code in __init__ if necessary") sys.exit(1) def forward(self, data): return super(TimeInvFIRFilter, self).forward(data) # # Up sampling class UpSampleLayer(torch_nn.Module): """ Wrapper over up-sampling Input tensor: (batchsize, length, dim) Ouput tensor: (batchsize, length * up-sampling_factor, dim) """ def __init__(self, feature_dim, up_sampling_factor, smoothing=False): super(UpSampleLayer, self).__init__() # wrap a up_sampling layer self.scale_factor = up_sampling_factor self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor) if smoothing: self.l_ave1 = MovingAverage(feature_dim, self.scale_factor) self.l_ave2 = MovingAverage(feature_dim, self.scale_factor) else: self.l_ave1 = torch_nn.Identity() self.l_ave2 = torch_nn.Identity() return def forward(self, x): # permute to (batchsize=1, dim, length) up_sampled_data = self.l_upsamp(x.permute(0, 2, 1)) # permute it backt to (batchsize=1, length, dim) # and do two moving average return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1))) # Neural filter block (1 block) class NeuralFilterBlock(torch_nn.Module): """ Wrapper over a single filter block """ def __init__(self, signal_size, hidden_size, \ kernel_size=3, conv_num=10): super(NeuralFilterBlock, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.conv_num = conv_num self.dilation_size = [np.power(2, x) for x in np.arange(conv_num)] # ff layer to expand dimension self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \ bias=False) self.l_ff_1_tanh = torch_nn.Tanh() # dilated conv layers tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \ kernel_size, causal=True, bias=False) \ for x in self.dilation_size] self.l_convs = torch_nn.ModuleList(tmp) # ff layer to de-expand dimension self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4, 
bias=False) self.l_ff_2_tanh = torch_nn.Tanh() self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size, bias=False) self.l_ff_3_tanh = torch_nn.Tanh() # a simple scale self.scale = torch_nn.Parameter(torch.tensor([0.1]), requires_grad=False) return def forward(self, signal, context): """ Assume: signal (batchsize=1, length, signal_size) context (batchsize=1, length, hidden_size) Output: (batchsize=1, length, signal_size) """ # expand dimension tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal)) # loop over dilated convs # output of a d-conv is input + context + d-conv(input) for l_conv in self.l_convs: tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context # to be consistent with legacy configuration in CURRENNT tmp_hidden = tmp_hidden * self.scale # compress the dimesion and skip-add tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden)) tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden)) output_signal = tmp_hidden + signal return output_signal class SineGen(torch_nn.Module): """ Definition of sine generator SineGen(samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False) samp_rate: sampling rate in Hz harmonic_num: number of harmonic overtones (default 0) sine_amp: amplitude of sine-wavefrom (default 0.1) noise_std: std of Gaussian noise (default 0.003) voiced_thoreshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SinGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced segment is always sin(np.pi) or cos(0) """ def __init__(self, samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False): super(SineGen, self).__init__() self.sine_amp = sine_amp self.noise_std = noise_std self.harmonic_num = harmonic_num self.dim = self.harmonic_num + 1 self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.flag_for_pulse = flag_for_pulse def _f02uv(self, f0): # generate uv signal uv = torch.ones_like(f0) uv = uv * (f0 > self.voiced_threshold) return uv def _f02sine(self, f0_values): """ f0_values: (batchsize, length, dim) where dim indicates fundamental tone and overtones """ # convert to F0 in rad. The interger part n can be ignored # because 2 * np.pi * n doesn't affect phase rad_values = (f0_values / self.sampling_rate) % 1 # initial phase noise (no noise for fundamental component) rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\ device = f0_values.device) rand_ini[:, 0] = 0 rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) if not self.flag_for_pulse: # for normal case # To prevent torch.cumsum numerical overflow, # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. # Buffer tmp_over_one_idx indicates the time step to add -1. 
# This will not change F0 of sine because sin((x-1) * 2*pi) = sin(x * 2*pi) tmp_over_one = torch.cumsum(rad_values, 1) % 1 tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) \ * 2 * np.pi) else: # If necessary, make sure that the first time step of every # voiced segment is sin(pi) or cos(0) # This is used for pulse-train generation # identify the last time step in unvoiced segments uv = self._f02uv(f0_values) uv_1 = torch.roll(uv, shifts=-1, dims=1) uv_1[:, -1, :] = 1 u_loc = (uv < 1) * (uv_1 > 0) # get the instantaneous phase tmp_cumsum = torch.cumsum(rad_values, dim=1) # each item in the batch needs to be processed separately for idx in range(f0_values.shape[0]): temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] # stores the accumulation of i.phase within # each voiced segment tmp_cumsum[idx, :, :] = 0 tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum # rad_values - tmp_cumsum: remove the accumulation of i.phase # within the previous voiced segment. i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) # get the sines sines = torch.cos(i_phase * 2 * np.pi) return sines def forward(self, f0): """ sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 output sine_tensor: tensor(batchsize=1, length, dim) output uv: tensor(batchsize=1, length, 1) """ with torch.no_grad(): f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \ device=f0.device) # fundamental component f0_buf[:, :, 0] = f0[:, :, 0] for idx in np.arange(self.harmonic_num): # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2) # generate sine waveforms sine_waves = self._f02sine(f0_buf) * self.sine_amp # generate uv signal #uv = torch.ones(f0.shape) #uv = uv * (f0 > self.voiced_threshold) uv = self._f02uv(f0) # noise: for unvoiced should be similar to sine_amp # std = self.sine_amp/3 -> max value ~ self.sine_amp #.
for voiced regions is self.noise_std noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) # first: set the unvoiced part to 0 by uv # then: additive noise sine_waves = sine_waves * uv + noise return sine_waves, uv, noise ##### ## Model definition ## ## For condition module only provide Spectral feature to Filter block class CondModule(torch_nn.Module): """ Conditiona module Upsample and transform input features CondModule(input_dimension, output_dimension, up_sample_rate, blstm_dimension = 64, cnn_kernel_size = 3) Spec, F0 = CondModule(features, F0) Both input features should be frame-level features If x doesn't contain F0, just ignore the returned F0 """ def __init__(self, input_dim, output_dim, up_sample, \ blstm_s = 64, cnn_kernel_s = 3): super(CondModule, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.up_sample = up_sample self.blstm_s = blstm_s self.cnn_kernel_s = cnn_kernel_s self.l_blstm = BLSTMLayer(input_dim, self.blstm_s) self.l_conv1d = Conv1dKeepLength(self.blstm_s, output_dim, 1, \ self.cnn_kernel_s) self.l_upsamp = UpSampleLayer(self.output_dim, self.up_sample, True) # Upsampling for F0: don't smooth up-sampled F0 self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False) def forward(self, feature, f0): """ spec, f0 = forward(self, feature, f0) feature: (batchsize, length, dim) f0: (batchsize, length, dim=1), which should be F0 at frame-level spec: (batchsize, length, self.output_dim), at wave-level f0: (batchsize, length, 1), at wave-level """ spec = self.l_upsamp(self.l_conv1d(self.l_blstm(feature))) f0 = self.l_upsamp_F0(f0) return spec, f0 # For source module class SourceModuleHnNSF(torch_nn.Module): """ SourceModule for hn-nsf SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0) sampling_rate: sampling_rate in Hz harmonic_num: number of harmonic above F0 (default: 0) sine_amp: amplitude of sine source signal (default: 0.1) add_noise_std: std of additive Gaussian noise (default: 0.003) note that amplitude of noise in unvoiced is decided by sine_amp voiced_threshold: threhold to set U/V given F0 (default: 0) Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length 1) uv (batchsize, length, 1) """ def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0): super(SourceModuleHnNSF, self).__init__() self.sine_amp = sine_amp self.noise_std = add_noise_std # to produce sine waveforms self.l_sin_gen = SineGen(sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod) # to merge source harmonics into a single excitation self.l_linear = torch_nn.Linear(harmonic_num+1, 1) self.l_tanh = torch_nn.Tanh() def forward(self, x): """ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length 1) """ # source for harmonic branch sine_wavs, uv, _ = self.l_sin_gen(x) sine_merge = self.l_tanh(self.l_linear(sine_wavs)) # source for noise branch, in the same shape as uv noise = torch.randn_like(uv) * self.sine_amp / 3 return sine_merge, noise, uv # For Filter module class FilterModuleHnNSF(torch_nn.Module): """ Filter for Hn-NSF FilterModuleHnNSF(signal_size, hidden_size, fir_coef, block_num = 5, kernel_size = 3, conv_num_in_block = 10) signal_size: signal dimension (should be 1) hidden_size: dimension of 
hidden features inside neural filter block fir_coef: list of FIR filter coeffs, (low_pass_1, low_pass_2, high_pass_1, high_pass_2) block_num: number of neural filter blocks in harmonic branch kernel_size: kernel size in dilated CNN conv_num_in_block: number of d-conv1d in one neural filter block output = FilterModuleHnNSF(harmonic_source, noise_source, uv, context) harmonic_source (batchsize, length, dim=1) noise_source (batchsize, length, dim=1) context (batchsize, length, dim) uv (batchsize, length, dim) output: (batchsize, length, dim=1) """ def __init__(self, signal_size, hidden_size, filter_coef, \ block_num = 5, kernel_size = 3, conv_num_in_block = 10): super(FilterModuleHnNSF, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.block_num = block_num self.conv_num_in_block = conv_num_in_block self.filter_coef = filter_coef # filter blocks for harmonic branch tmp = [NeuralFilterBlock(signal_size, hidden_size, \ kernel_size, conv_num_in_block) \ for x in range(self.block_num)] self.l_har_blocks = torch_nn.ModuleList(tmp) # filter blocks for noise branch (only one block, 5 sub-blocks) tmp = [NeuralFilterBlock(signal_size, hidden_size, \ kernel_size, conv_num_in_block // 2) \ for x in range(1)] self.l_noi_blocks = torch_nn.ModuleList(tmp) # FIR filter groups # lp_v: filter for voiced region, harmonic component # lp_u: filter for unvoiced region, harmonic component # hp_v: filter for voiced region, noise component # hp_u: filter for unvoiced region, noise component self.l_fir_lp_v = TimeInvFIRFilter(signal_size, filter_coef[0]) self.l_fir_lp_u = TimeInvFIRFilter(signal_size, filter_coef[1]) self.l_fir_hp_v = TimeInvFIRFilter(signal_size, filter_coef[2]) self.l_fir_hp_u = TimeInvFIRFilter(signal_size, filter_coef[3]) def forward(self, har_component, noi_component, condition_feat, uv): """ """ # harmonic component for l_har_block in self.l_har_blocks: har_component = l_har_block(har_component, condition_feat) # noise componebt for l_noi_block in self.l_noi_blocks: noi_component = l_noi_block(noi_component, condition_feat) # harmonic + noise in time-domain # assume uv is {0, 1}, produce a weight vector for voiced/unvoiced # sigmoid is used to avoid {0, 1}, and uv is scaled to {-5, 5} w_voi = torch.sigmoid((uv - 0.5) * 10) w_unv = 1.0 - w_voi har_v = self.l_fir_lp_v(har_component) har_u = self.l_fir_lp_u(har_component) noi_v = self.l_fir_hp_v(noi_component) noi_u = self.l_fir_hp_u(noi_component) output = (har_v + noi_v) * w_voi + (har_u + noi_u) * w_unv return output ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None): super(Model, self).__init__() # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) self.input_dim = in_dim self.output_dim = out_dim # configurations # amplitude of sine waveform (for each harmonic) self.sine_amp = 0.1 # standard deviation of Gaussian noise for additive noise self.noise_std = 0.003 # dimension of hidden features in filter blocks self.hidden_dim = 64 # upsampling rate on input acoustic features (16kHz * 5ms = 80) # assume input_reso has the same value self.upsamp_rate = prj_conf.input_reso[0] # sampling 
rate (Hz) self.sampling_rate = prj_conf.wav_samp_rate # CNN kernel size in filter blocks self.cnn_kernel_size = 3 # number of filter blocks (for harmonic branch) # noise branch only uses 1 block self.filter_block_num = 5 # number of dilated CNN in each filter block self.cnn_num_in_block = 10 # number of harmonic overtone in source self.harmonic_num = 7 # fixed filter coefficients # computed using PM algorithm # (tool: http://t-filter.engineerjs.com) # # low-pass for harmonic-component in voiced region # 16kHz, pass-band 0-5K, gain 1, ripple 5dB, # stop-band 7-8k, gain 0, ripple -40dB) lp_v = [0.08538414199291068, 0.04920229475534168, -0.1470178606967731, 0.24737764593887432, 0.7103067853166558, 0.24737764593887432, -0.1470178606967731, 0.04920229475534168, 0.08538414199291068] # low-pass for harmonic-copmonent in unvoiced region # 16kHz, pass-band 0-1K, gain 1, ripple 5dB, # stop-band 3-8k, gain 0, ripple -40dB) lp_u = [0.00936455546502, 0.0416254862901, 0.0878313219556, 0.146086321198, 0.192602581136, 0.211221591449, 0.192602581136, 0.146086321198, 0.0878313219556, 0.0416254862901, 0.00936455546502] # # high-pass for noise-component in voiced region # 16kHz, pass-band 7-8K, gain 1, ripple 5dB, # stop-band 0-5k, gain 0, ripple -40dB) hp_v = [-0.00936455546502148, 0.04162548629009957, -0.08783132195564508, 0.1460863211980122, -0.19260258113649556, 0.21122159144894015, -0.19260258113649556, 0.1460863211980122, -0.08783132195564508, 0.04162548629009957, -0.00936455546502148] # # high-pass for noise-component in unvoiced region # 16kHz, pass-band 3-8K, gain 1, ripple 5dB, # stop-band 0-1k, gain 0, ripple -40dB) hp_u = [0.0853841419929, -0.0492022947553, -0.147017860697, -0.247377645939, 0.710306785317, -0.247377645939, -0.147017860697, -0.0492022947553, 0.0853841419929] self.fir_filters = [torch.tensor(lp_v), torch.tensor(lp_u), torch.tensor(hp_v), torch.tensor(hp_u)] # the three modules self.m_condition = CondModule(self.input_dim, self.hidden_dim, \ self.upsamp_rate, \ cnn_kernel_s = self.cnn_kernel_size) self.m_source = SourceModuleHnNSF(self.sampling_rate, self.harmonic_num, self.sine_amp, self.noise_std) self.m_filter = FilterModuleHnNSF(self.output_dim, self.hidden_dim,\ self.fir_filters, self.filter_block_num, \ self.cnn_kernel_size, \ self.cnn_num_in_block) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return 
y * self.output_std + self.output_mean def forward(self, x): """ definition of forward method Assume x (batchsize=1, length, dim) Return output(batchsize=1, length) """ # assume x[:, :, -1] is F0, denormalize F0 f0 = x[:, :, -1:] # normalize the data feat = self.normalize_input(x) # condition module # features_for_filter_block, up-sampled F0 cond_feat, f0_upsamped = self.m_condition(feat, f0) # source module # harmonic-source, noise-source (for noise branch), uv flag har_source, noi_source, uv = self.m_source(f0_upsamped) # filter module (including FIR filtering) # output signal output = self.m_filter(har_source, noi_source, cond_feat, uv) # output return output.squeeze(-1) class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ # frame shift (number of points) self.frame_hops = [80, 40, 640] # frame length self.frame_lens = [320, 80, 1920] # FFT length self.fft_n = [512, 128, 2048] # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # loss self.loss = torch_nn.MSELoss() return def _stft(self, signal, fft_p, frame_shift, frame_len): """ wrapper of torch.stft Remember to use onesided=True, pad_mode="constant" Signal (batchsize, length) Output (batchsize, fft_p/2+1, frame_num, 2) """ # to be compatible with different torch versions if torch.__version__.split('.')[1].isnumeric() and \ int(torch.__version__.split('.')[1]) < 7: return torch.stft( signal, fft_p, frame_shift, frame_len, window=self.win(frame_len, dtype=signal.dtype, device=signal.device), onesided=True, pad_mode="constant") else: return torch.stft( signal, fft_p, frame_shift, frame_len, window=self.win(frame_len, dtype=signal.dtype, device=signal.device), onesided=True, pad_mode="constant", return_complex=False) def _amp(self, x): """ _amp(stft) x_stft: (batchsize, fft_p/2+1, frame_num, 2) output: (batchsize, fft_p/2+1, frame_num) output[x, y, z] = log(x_stft[x, y, z, 1]^2 + x_stft[x, y, z, 2]^2 + floor) """ return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor) def compute(self, output, target): """ Loss().compute(output, target) should return the Loss in torch.tensor format Assume output and target as (batchsize=1, length) """ # convert from (batchsize=1, length, dim=1) to (1, length) if target.ndim == 3: target.squeeze_(-1) # compute loss loss = 0 for frame_shift, frame_len, fft_p in \ zip(self.frame_hops, self.frame_lens, self.fft_n): x_stft = self._stft(output, fft_p, frame_shift, frame_len) y_stft = self._stft(target, fft_p, frame_shift, frame_len) x_sp_amp = self._amp(x_stft) y_sp_amp = self._amp(y_stft) loss += self.loss(x_sp_amp, y_sp_amp) return loss if __name__ == "__main__": print("Definition of model")
32,145
39.384422
78
py
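A minimal, self-contained sketch (not part of the repository) of the phase-accumulation trick used by SineGen._f02sine in the model.py above: F0 is converted to per-sample phase increments, accumulated with torch.cumsum, and an integer is subtracted whenever the wrapped sum crosses 1, so the accumulator stays small without changing sin(2*pi*phase). The 16 kHz rate and the (batch, length, dim) shapes follow the code; the 220 Hz test tone is an arbitrary choice.

import numpy as np
import torch

sampling_rate = 16000
# one second of a constant 220 Hz tone, shaped (batch, length, dim)
f0 = torch.full((1, sampling_rate, 1), 220.0)

# per-sample phase increment in cycles; the integer part is irrelevant
rad_values = (f0 / sampling_rate) % 1
# subtract 1 where the wrapped cumulative phase decreases, so the unwrapped
# cumsum stays bounded while sin(2*pi*cumsum) is unchanged
tmp_over_one = torch.cumsum(rad_values, dim=1) % 1
over_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = over_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
print(sines.shape)  # torch.Size([1, 16000, 1])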
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-nsf/config.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'cmu_all_trn' val_set_name = 'cmu_all_val' # for convenience tmp = '../DATA/cmu-arctic-data-set' # File lists (text file, one data name per line, without name extension) # trin_file_list: list of files for training set trn_list = [tmp + '/scp/train.lst'] # val_file_list: list of files for validation set. It can be None val_list = [tmp + '/scp/val.lst'] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [80, 1] # File name extension for input features # input_exts = [name_extention_of_feature_1, ...] # Please put ".f0" as the last feature input_exts = ['.mfbsp', '.f0'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [80, 80] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [True, True] # Similar configurations for output features output_dirs = [[tmp + '/wav_16k_norm']] output_dims = [1] output_exts = ['.wav'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU mem required # If you don't want truncating, please truncate_seq = None truncate_seq = 16000 * 3 # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = 80 * 50 ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = ['cmu_all_test_tiny'] # List of test set data # for convenience, you may directly load test_set list here test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476', 'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476', 'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476', 'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']] # Directories for output features, which are [] test_output_dirs = [[]]
3,430
32.31068
75
py
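The resolution convention in this config.py can be checked with a two-line sketch; the values are the ones used above, and the check itself is illustrative rather than part of the project.

# frame-level features at a 5 ms shift on 16 kHz waveforms:
# input_reso must equal wav_samp_rate * frame_shift = 16000 * 0.005 = 80
wav_samp_rate = 16000
frame_shift_in_sec = 0.005
assert int(wav_samp_rate * frame_shift_in_sec) == 80  # matches input_reso = [80, 80]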
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/cyc-noise-nsf-4/main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The training/inference process wrapper. Dataset API is replaced with NII_MergeDataSetLoader. It is more convenient to train model on corpora stored in different directories. Requires model.py and config.py (config_merge_datasets.py) Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.default_data_io as nii_default_dset import core_scripts.data_io.customize_dataset as nii_dset import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed, args) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers, 'sampler': args.sampler} in_trans_fns = prj_conf.input_trans_fns \ if hasattr(prj_conf, 'input_trans_fns') else None out_trans_fns = prj_conf.output_trans_fns \ if hasattr(prj_conf, 'output_trans_fns') else None # Load file list and create data loader trn_lst = prj_conf.trn_list trn_set = nii_dset.NII_MergeDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) if prj_conf.val_list is not None: val_lst = prj_conf.val_list val_set = nii_dset.NII_MergeDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) else: val_set = None # initialize the model and loss function model 
= prj_model.Model(trn_set.get_in_dim(), \ trn_set.get_out_dim(), \ args, prj_conf, trn_set.get_data_mean_std()) loss_wrapper = prj_model.Loss(args) # initialize the optimizer optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args) # if necessary, resume training if args.trained_model == "": checkpoint = None else: checkpoint = torch.load(args.trained_model) # start training nii_nn_wrapper.f_train_wrapper(args, model, loss_wrapper, device, optimizer_wrapper, trn_set, val_set, checkpoint) # done for traing else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': args.num_workers} in_trans_fns = prj_conf.test_input_trans_fns \ if hasattr(prj_conf, 'test_input_trans_fns') else None out_trans_fns = prj_conf.test_output_trans_fns \ if hasattr(prj_conf, 'test_output_trans_fns') else None if type(prj_conf.test_list) is list: t_lst = prj_conf.test_list else: t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list) test_set = nii_dset.NII_MergeDataSetLoader( prj_conf.test_set_name, \ t_lst, \ prj_conf.test_input_dirs, prj_conf.input_exts, prj_conf.input_dims, prj_conf.input_reso, prj_conf.input_norm, prj_conf.test_output_dirs, prj_conf.output_exts, prj_conf.output_dims, prj_conf.output_reso, prj_conf.output_norm, './', params = params, truncate_seq= None, min_seq_len = None, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) # initialize model model = prj_model.Model(test_set.get_in_dim(), \ test_set.get_out_dim(), \ args, prj_conf) if args.trained_model == "": print("No model is loaded by ---trained-model for inference") print("By default, load %s%s" % (args.save_trained_name, args.save_model_ext)) checkpoint = torch.load("%s%s" % (args.save_trained_name, args.save_model_ext)) else: checkpoint = torch.load(args.trained_model) # do inference and output data nii_nn_wrapper.f_inference_wrapper(args, model, device, \ test_set, checkpoint) # done return if __name__ == "__main__": main()
7,819
35.886792
80
py
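main.py resolves the configuration and model modules by name at run time through importlib, which is what lets every project directory reuse the same wrapper. A stripped-down sketch of that mechanism is below; the module names "config" and "model" are the defaults implied by the files in this directory (the real names come from command-line arguments), and it assumes the sketch is run inside a project directory that contains those files.

import importlib

# equivalent to prj_conf = importlib.import_module(args.module_config)
prj_conf = importlib.import_module("config")   # loads config.py from the path
prj_model = importlib.import_module("model")   # loads model.py from the path

print(prj_conf.wav_samp_rate)        # 16000
model_cls = prj_model.Model          # class instantiated by the training wrapper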
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/cyc-noise-nsf-4/model.py
#!/usr/bin/env python """ model.py for Cyclic-noise-NSF version: 4 """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import core_scripts.other_tools.debug as nii_debug __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## # Building blocks (torch.nn modules + dimension operation) # # For blstm class BLSTMLayer(torch_nn.Module): """ Wrapper over BLSTM Input tensor: (batchsize, length, dim_in) Output tensor: (batchsize, length, dim_out) We want to keep the length the same """ def __init__(self, input_dim, output_dim): super(BLSTMLayer, self).__init__() if output_dim % 2 != 0: print("Output_dim of BLSTMLayer is {:d}".format(output_dim)) print("BLSTMLayer expects a layer size of even number") sys.exit(1) # bi-directional LSTM self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \ bidirectional=True) def forward(self, x): # permute to (length, batchsize=1, dim) blstm_data, _ = self.l_blstm(x.permute(1, 0, 2)) # permute it backt to (batchsize=1, length, dim) return blstm_data.permute(1, 0, 2) # # 1D dilated convolution that keep the input/output length class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize, length, dim_in) Output tensor: (batchsize, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal = False, stride = 1, groups=1, bias=True, \ tanh = True, pad_mode='constant'): super(Conv1dKeepLength, self).__init__( input_dim, output_dim, kernel_s, stride=stride, padding = 0, dilation = dilation_s, groups=groups, bias=bias) self.pad_mode = pad_mode self.causal = causal # input & output length will be the same if self.causal: # left pad to make the convolution causal self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: # pad on both sizes self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): # permute to (batchsize=1, dim, length) # add one dimension (batchsize=1, dim, ADDED_DIM, length) # pad to ADDED_DIM # squeeze and return to (batchsize=1, dim, length) # https://github.com/pytorch/pytorch/issues/1333 x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \ (self.pad_le, self.pad_ri,0,0), \ mode = self.pad_mode).squeeze(2) # tanh(conv1()) # permmute back to (batchsize=1, length, dim) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) # # Moving average class MovingAverage(Conv1dKeepLength): """ Wrapper to define a moving average smoothing layer Note: MovingAverage can be implemented using TimeInvFIRFilter too. 
Here we define another Module directly on Conv1dKeepLength """ def __init__(self, feature_dim, window_len, causal=False, \ pad_mode='replicate'): super(MovingAverage, self).__init__( feature_dim, feature_dim, 1, window_len, causal, groups=feature_dim, bias=False, tanh=False, \ pad_mode=pad_mode) # set the weighting coefficients torch_nn.init.constant_(self.weight, 1/window_len) # turn off grad for this layer for p in self.parameters(): p.requires_grad = False def forward(self, data): return super(MovingAverage, self).forward(data) # # FIR filter layer class TimeInvFIRFilter(Conv1dKeepLength): """ Wrapper to define a FIR filter over Conv1d FIR filtering is conducted on each dimension (channel) independently, i.e., groups=channel_num in conv1d """ def __init__(self, feature_dim, filter_coef, causal=True, flag_trn=False): """ __init__(self, feature_dim, filter_coef, causal=True, flag_trn=False) feature_dim: dimension of input data filter_coef: 1-D tensor of filter coefficients causal: whether the FIR is causal (default: True) flag_trn: whether to learn the filter coefficients (default: False) Input data: (batchsize=1, length, feature_dim) Output data: (batchsize=1, length, feature_dim) """ super(TimeInvFIRFilter, self).__init__( feature_dim, feature_dim, 1, filter_coef.shape[0], causal, groups=feature_dim, bias=False, tanh=False) if filter_coef.ndim == 1: # initialize weight using provided filter_coef with torch.no_grad(): tmp_coef = torch.zeros([feature_dim, 1, filter_coef.shape[0]]) tmp_coef[:, 0, :] = filter_coef tmp_coef = torch.flip(tmp_coef, dims=[2]) self.weight = torch.nn.Parameter(tmp_coef, requires_grad=flag_trn) else: print("TimeInvFIRFilter expects filter_coef to be a 1-D tensor") print("Please implement the code in __init__ if necessary") sys.exit(1) def forward(self, data): return super(TimeInvFIRFilter, self).forward(data) class TimeVarFIRFilter(torch_nn.Module): """ TimeVarFIRFilter Given sequences of filter coefficients and a signal, do filtering Filter coefs: (batchsize, signal_length, filter_order = K) Signal: (batchsize, signal_length, 1) For batch 0: For n in [1, sequence_length): output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k) Note: filter coef (0, n, :) is only used to compute the output at (0, n, 1) """ def __init__(self): super(TimeVarFIRFilter, self).__init__() def forward(self, signal, f_coef): """ Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) Output: (batchsize=1, signal_length, 1) For n in [1, sequence_length): output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k) This method may not be efficient: Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K] output [y_1, y_2, y_3, ..., y_N, *, * ...
*] = a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] """ signal_l = signal.shape[1] order_k = f_coef.shape[-1] # pad to (batchsize=1, signal_length + filter_order-1, dim) padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1)) y = torch.zeros_like(signal) # roll and weighted sum, only take [0:signal_length] for k in range(order_k): y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \ * f_coef[:, :, k:k+1] # done return y class SignalsConv1d(torch_nn.Module): """ Filtering input signal with time invariant filter Note: FIRFilter conducted filtering given fixed FIR weight SignalsConv1d convolves two signals Note: this is based on torch.nn.functional.conv1d """ def __init__(self): super(SignalsConv1d, self).__init__() def forward(self, signal, system_ir): """ output = forward(signal, system_ir) signal: (batchsize, length1, dim) system_ir: (length2, dim) output: (batchsize, length1, dim) """ if signal.shape[-1] != system_ir.shape[-1]: print("Error: SignalsConv1d expects shape:") print("signal (batchsize, length1, dim)") print("system_id (batchsize, length2, dim)") print("But received signal: {:s}".format(str(signal.shape))) print(" system_ir: {:s}".format(str(system_ir.shape))) sys.exit(1) padding_length = system_ir.shape[0] - 1 groups = signal.shape[-1] # pad signal on the left signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1),\ (padding_length, 0)) # prepare system impulse response as (dim, 1, length2) # also flip the impulse response ir = torch.flip(system_ir.unsqueeze(1).permute(2, 1, 0), \ dims=[2]) # convolute output = torch_nn_func.conv1d(signal_pad, ir, groups=groups) return output.permute(0, 2, 1) # Sinc filter generator class SincFilter(torch_nn.Module): """ SincFilter Given the cut-off-frequency, produce the low-pass and high-pass windowed-sinc-filters. If input cut-off-frequency is (batchsize=1, signal_length, 1), output filter coef is (batchsize=1, signal_length, filter_order). For each time step in [1, signal_length), we calculate one filter for low-pass sinc filter and another for high-pass filter. Example: import scipy import scipy.signal import numpy as np filter_order = 31 cut_f = 0.2 sinc_layer = SincFilter(filter_order) lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f) w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1]) w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1]) plt.plot(w, 20*np.log10(np.abs(h1))) plt.plot(w, 20*np.log10(np.abs(h2))) plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0]) """ def __init__(self, filter_order): super(SincFilter, self).__init__() # Make the filter oder an odd number # [-(M-1)/2, ... 0, (M-1)/2] # self.half_k = (filter_order - 1) // 2 self.order = self.half_k * 2 +1 def hamming_w(self, n_index): """ prepare hamming window for each time step n_index (batchsize=1, signal_length, filter_order) For each step, n_index.shape is [-(M-1)/2, ... 0, (M-1)/2] where, n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2] ... output (batchsize=1, signal_length, filter_order) output[0, 0, :] = hamming_window output[0, 1, :] = hamming_window ... """ # Hamming window return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order) def sinc(self, x): """ Normalized sinc-filter sin( pi * x) / pi * x https://en.wikipedia.org/wiki/Sinc_function Assume x (batchsize, signal_length, filter_order) and x[0, 0, :] = [-half_order, - half_order+1, ... 
0 ..., half_order] x[:, :, self.half_order] -> time index = 0, sinc(0)=1 """ y = torch.zeros_like(x) y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \ / (np.pi * x[:, :, 0:self.half_k]) y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:])\ / (np.pi * x[:, :, self.half_k+1:]) y[:,:,self.half_k] = 1 return y def forward(self, cut_f): """ lp_coef, hp_coef = forward(self, cut_f) cut-off frequency cut_f (batchsize=1, length, dim = 1) lp_coef: low-pass filter coefs (batchsize, length, filter_order) hp_coef: high-pass filter coefs (batchsize, length, filter_order) """ # create the filter order index with torch.no_grad(): # [- (M-1) / 2, ..., 0, ..., (M-1)/2] lp_coef = torch.arange(-self.half_k, self.half_k + 1, device=cut_f.device) # [[[- (M-1) / 2, ..., 0, ..., (M-1)/2], # [- (M-1) / 2, ..., 0, ..., (M-1)/2], # ... # ], # [[- (M-1) / 2, ..., 0, ..., (M-1)/2], # [- (M-1) / 2, ..., 0, ..., (M-1)/2], # ... # ]] lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) hp_coef = torch.arange(-self.half_k, self.half_k + 1, device=cut_f.device) hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) # temporary buffer of [-1^n] for gain norm in hp_coef tmp_one = torch.pow(-1, hp_coef) # unnormalized filter coefs with hamming window lp_coef = cut_f * self.sinc(cut_f * lp_coef) \ * self.hamming_w(lp_coef) hp_coef = (self.sinc(hp_coef) \ - cut_f * self.sinc(cut_f * hp_coef)) \ * self.hamming_w(hp_coef) # normalize the coef to make gain at 0/pi is 0 dB # sum_n lp_coef[n] lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1) # sum_n hp_coef[n] * -1^n hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1) lp_coef = lp_coef / lp_coef_norm hp_coef = hp_coef / hp_coef_norm # return normed coef return lp_coef, hp_coef # # Up sampling class UpSampleLayer(torch_nn.Module): """ Wrapper over up-sampling Input tensor: (batchsize, length, dim) Ouput tensor: (batchsize, length * up-sampling_factor, dim) """ def __init__(self, feature_dim, up_sampling_factor, smoothing=False): super(UpSampleLayer, self).__init__() # wrap a up_sampling layer self.scale_factor = up_sampling_factor self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor) if smoothing: self.l_ave1 = MovingAverage(feature_dim, self.scale_factor) self.l_ave2 = MovingAverage(feature_dim, self.scale_factor) else: self.l_ave1 = torch_nn.Identity() self.l_ave2 = torch_nn.Identity() return def forward(self, x): # permute to (batchsize=1, dim, length) up_sampled_data = self.l_upsamp(x.permute(0, 2, 1)) # permute it backt to (batchsize=1, length, dim) # and do two moving average return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1))) # Neural filter block (1 block) class NeuralFilterBlock(torch_nn.Module): """ Wrapper over a single filter block """ def __init__(self, signal_size, hidden_size, for_har_component=True,\ kernel_size=3, conv_num=10): super(NeuralFilterBlock, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.conv_num = conv_num self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)] self.for_har = for_har_component # ff layer to expand dimension self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size) self.l_ff_1_tanh = torch_nn.Tanh() # dilated conv layers tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \ kernel_size, causal=True, bias=True) \ for x in self.dilation_s] self.l_convs = torch_nn.ModuleList(tmp) # ff layer to de-expand dimension self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4) 
self.l_ff_2_tanh = torch_nn.Tanh() self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size) self.l_ff_3_tanh = torch_nn.Tanh() # a simple scale self.scale = torch_nn.Parameter(torch.tensor([0.1]), requires_grad=False) return def forward(self, signal, context): """ Assume: signal (batchsize=1, length, signal_size) context (batchsize=1, length, hidden_size) Output: (batchsize=1, length, signal_size) """ # expand dimension tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal)) # loop over dilated convs # output of a d-conv is input + context + d-conv(input) for l_conv in self.l_convs: tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context # to be consistent with legacy configuration in CURRENNT tmp_hidden = tmp_hidden * self.scale # compress the dimesion and skip-add tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden)) if self.for_har: # if this block is used for harmonic component tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden)) output_signal = tmp_hidden + signal else: # for noise component, no need to use skip-connection output_signal = self.l_ff_3(tmp_hidden) return output_signal # # Sine waveform generator class SineGen(torch_nn.Module): """ Definition of sine generator SineGen(samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False) samp_rate: sampling rate in Hz harmonic_num: number of harmonic overtones (default 0) sine_amp: amplitude of sine-wavefrom (default 0.1) noise_std: std of Gaussian noise (default 0.003) voiced_thoreshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SinGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced segment is always sin(np.pi) or cos(0) """ def __init__(self, samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False): super(SineGen, self).__init__() self.sine_amp = sine_amp self.noise_std = noise_std self.harmonic_num = harmonic_num self.dim = self.harmonic_num + 1 self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.flag_for_pulse = flag_for_pulse def _f02uv(self, f0): # generate uv signal uv = torch.ones_like(f0) uv = uv * (f0 > self.voiced_threshold) return uv def _f02sine(self, f0_values): """ f0_values: (batchsize, length, dim) where dim indicates fundamental tone and overtones """ # convert to F0 in rad. The interger part n can be ignored # because 2 * np.pi * n doesn't affect phase rad_values = (f0_values / self.sampling_rate) % 1 # initial phase noise (no noise for fundamental component) rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\ device = f0_values.device) rand_ini[:, 0] = 0 rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) if not self.flag_for_pulse: # for normal case # To prevent torch.cumsum numerical overflow, # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. # Buffer tmp_over_one_idx indicates the time step to add -1. 
# This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi tmp_over_one = torch.cumsum(rad_values, 1) % 1 tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi) else: # If necessary, make sure that the first time step of every # voiced segments is sin(pi) or cos(0) # This is used for pulse-train generation # identify the last time step in unvoiced segments uv = self._f02uv(f0_values) uv_1 = torch.roll(uv, shifts=-1, dims=1) uv_1[:, -1, :] = 1 u_loc = (uv < 1) * (uv_1 > 0) # get the instantanouse phase tmp_cumsum = torch.cumsum(rad_values, dim=1) # different batch needs to be processed differently for idx in range(f0_values.shape[0]): temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] # stores the accumulation of i.phase within # each voiced segments tmp_cumsum[idx, :, :] = 0 tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum # rad_values - tmp_cumsum: remove the accumulation of i.phase # within the previous voiced segment. i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) # get the sines sines = torch.cos(i_phase * 2 * np.pi) return sines def forward(self, f0): """ sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 output sine_tensor: tensor(batchsize=1, length, dim) output uv: tensor(batchsize=1, length, 1) """ with torch.no_grad(): f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \ device=f0.device) # fundamental component f0_buf[:, :, 0] = f0[:, :, 0] for idx in np.arange(self.harmonic_num): # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2) # generate sine waveforms sine_waves = self._f02sine(f0_buf) * self.sine_amp # generate uv signal #uv = torch.ones(f0.shape) #uv = uv * (f0 > self.voiced_threshold) uv = self._f02uv(f0) # noise: for unvoiced should be similar to sine_amp # std = self.sine_amp/3 -> max value ~ self.sine_amp #. for voiced regions is self.noise_std noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) # first: set the unvoiced part to 0 by uv # then: additive noise sine_waves = sine_waves * uv + noise return sine_waves, uv, noise class PulseGen(torch_nn.Module): """ Definition of Pulse train generator There are many ways to implement pulse generator. Here, PulseGen is based on SinGen. 
For a perfect """ def __init__(self, samp_rate, pulse_amp = 0.1, noise_std = 0.003, voiced_threshold = 0): super(PulseGen, self).__init__() self.pulse_amp = pulse_amp self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.noise_std = noise_std self.l_sinegen = SineGen(self.sampling_rate, harmonic_num=0,\ sine_amp=self.pulse_amp, noise_std=0,\ voiced_threshold=self.voiced_threshold,\ flag_for_pulse=True) def forward(self, f0): """ Pulse train generator pulse_train, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 output pulse_train: tensor(batchsize=1, length, dim) output uv: tensor(batchsize=1, length, 1) Note: self.l_sine doesn't make sure that the initial phase of a voiced segment is np.pi, the first pulse in a voiced segment may not be at the first time step within a voiced segment """ with torch.no_grad(): sine_wav, uv, noise = self.l_sinegen(f0) # sine without additive noise pure_sine = sine_wav - noise # step t corresponds to a pulse if # sine[t] > sine[t+1] & sine[t] > sine[t-1] # & sine[t-1], sine[t+1], and sine[t] are voiced # or # sine[t] is voiced, sine[t-1] is unvoiced # we use torch.roll to simulate sine[t+1] and sine[t-1] sine_1 = torch.roll(pure_sine, shifts=1, dims=1) uv_1 = torch.roll(uv, shifts=1, dims=1) uv_1[:, 0, :] = 0 sine_2 = torch.roll(pure_sine, shifts=-1, dims=1) uv_2 = torch.roll(uv, shifts=-1, dims=1) uv_2[:, -1, :] = 0 loc = (pure_sine > sine_1) * (pure_sine > sine_2) \ * (uv_1 > 0) * (uv_2 > 0) * (uv > 0) \ + (uv_1 < 1) * (uv > 0) # pulse train without noise pulse_train = pure_sine * loc # additive noise to pulse train # note that noise from sinegen is zero in voiced regions pulse_noise = torch.randn_like(pure_sine) * self.noise_std # with additive noise on pulse, and unvoiced regions pulse_train += pulse_noise * loc + pulse_noise * (1 - uv) return pulse_train, sine_wav, uv, pulse_noise class CyclicNoiseGen_v1(torch_nn.Module): """ CyclicnoiseGen_v1 Cyclic noise with a single parameter of beta. 
Pytorch v1 implementation assumes f_t is also fixed """ def __init__(self, samp_rate, noise_std = 0.003, voiced_threshold = 0): super(CyclicNoiseGen_v1, self).__init__() self.samp_rate = samp_rate self.noise_std = noise_std self.voiced_threshold = voiced_threshold self.l_pulse = PulseGen(samp_rate, pulse_amp=1.0, \ noise_std=noise_std, \ voiced_threshold=voiced_threshold) self.l_conv = SignalsConv1d() def noise_decay(self, beta, f0mean): """ decayed_noise = noise_decay(beta, f0mean) decayed_noise = n[t]exp(-t * f_mean / beta / samp_rate) beta: (dim=1) or (batchsize=1, 1, dim=1) f0mean (batchsize=1, 1, dim=1) decayed_noise (batchsize=1, length, dim=1) """ with torch.no_grad(): # exp(-1.0 n / T) < 0.01 => n > -log(0.01)*T = 4.60*T # truncate the noise when decayed by -40 dB length = 4.6 * self.samp_rate / f0mean length = length.int() time_idx = torch.arange(0, length, device=beta.device) time_idx = time_idx.unsqueeze(0).unsqueeze(2) time_idx = time_idx.repeat(beta.shape[0], 1, beta.shape[2]) noise = torch.randn(time_idx.shape, device=beta.device) # due to Pytorch implementation, use f0_mean as the f0 factor decay = torch.exp(-time_idx * f0mean / beta / self.samp_rate) return noise * self.noise_std * decay def forward(self, f0s, beta): """ Producde cyclic-noise """ # pulse train pulse_train, sine_wav, uv, noise = self.l_pulse(f0s) pure_pulse = pulse_train - noise * (1.0 - uv) # decayed_noise (length, dim=1) if (uv<1).all(): # all unvoiced cyc_noise = torch.zeros_like(sine_wav) else: f0mean = f0s[uv>0].mean() decayed_noise = self.noise_decay(beta, f0mean)[0, :, :] # convolute cyc_noise = self.l_conv(pure_pulse, decayed_noise) # add noise in invoiced segments cyc_noise = cyc_noise + noise * (1.0 - uv) return cyc_noise, pulse_train, sine_wav, uv, noise ##### ## Model definition ## ## For condition module only provide Spectral feature to Filter block class CondModuleHnSincNSF(torch_nn.Module): """ Condition module for hn-sinc-NSF Upsample and transform input features CondModuleHnSincNSF(input_dimension, output_dimension, up_sample_rate, blstm_dimension = 64, cnn_kernel_size = 3) Spec, F0, cut_off_freq = CondModuleHnSincNSF(features, F0) Both input features should be frame-level features If x doesn't contain F0, just ignore the returned F0 CondModuleHnSincNSF(input_dim, output_dim, up_sample, blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0): input_dim: sum of dimensions of input features output_dim: dim of the feature Spec to be used by neural filter-block up_sample: up sampling rate of input features blstm_s: dimension of the features from blstm (default 64) cnn_kernel_s: kernel size of CNN in condition module (default 3) voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced """ def __init__(self, input_dim, output_dim, up_sample, \ blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0): super(CondModuleHnSincNSF, self).__init__() # input feature dimension self.input_dim = input_dim self.output_dim = output_dim self.up_sample = up_sample self.blstm_s = blstm_s self.cnn_kernel_s = cnn_kernel_s self.cut_f_smooth = up_sample * 4 self.voiced_threshold = voiced_threshold # the blstm layer self.l_blstm = BLSTMLayer(input_dim, self.blstm_s) # the CNN layer (+1 dim for cut_off_frequence of sinc filter) self.l_conv1d = Conv1dKeepLength(self.blstm_s, \ self.output_dim + 1, \ dilation_s = 1, \ kernel_s = self.cnn_kernel_s) # Upsampling layer for hidden features self.l_upsamp = UpSampleLayer(self.output_dim + 1, \ self.up_sample, True) # Upsampling for F0: don't smooth up-sampled F0 
self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False) # Another smoothing layer to smooth the cut-off frequency # for sinc filters. Use a larger window to smooth self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth) def get_cut_f(self, hidden_feat, f0): """ cut_f = get_cut_f(self, feature, f0) feature: (batchsize, length, dim=1) f0: (batchsize, length, dim=1) """ # generate uv signal uv = torch.ones_like(f0) * (f0 > self.voiced_threshold) # hidden_feat is between (-1, 1) after conv1d with tanh # (-0.2, 0.2) + 0.3 = (0.1, 0.5) # voiced: (0.1, 0.5) + 0.4 = (0.5, 0.9) # unvoiced: (0.1, 0.5) = (0.1, 0.5) return hidden_feat * 0.2 + uv * 0.4 + 0.3 def forward(self, feature, f0): """ spec, f0 = forward(self, feature, f0) feature: (batchsize, length, dim) f0: (batchsize, length, dim=1), which should be F0 at frame-level spec: (batchsize, length, self.output_dim), at wave-level f0: (batchsize, length, 1), at wave-level """ # Different from the paper, for simplicitiy, output of conv1d # is fed to the neural filter blocks without concatenating F0 tmp = self.l_upsamp(self.l_conv1d(self.l_blstm(feature))) spec = tmp[:, :, 0:self.output_dim] # directly up-sample F0 without smoothing f0_upsamp = self.l_upsamp_F0(f0) # get the cut-off-frequency from output of CNN cut_f = self.get_cut_f(tmp[:, :, self.output_dim:], f0_upsamp) # smooth the cut-off-frequency using fixed average smoothing cut_f_smoothed = self.l_cut_f_smooth(cut_f) # return return spec, f0_upsamp, cut_f_smoothed # For source module class SourceModuleCycNoise_v1(torch_nn.Module): """ SourceModuleCycNoise_v1 SourceModule(sampling_rate, noise_std=0.003, voiced_threshod=0) sampling_rate: sampling_rate in Hz noise_std: std of Gaussian noise (default: 0.003) voiced_threshold: threhold to set U/V given F0 (default: 0) cyc, noise, uv = SourceModuleCycNoise_v1(F0_upsampled, beta) F0_upsampled (batchsize, length, 1) beta (1) cyc (batchsize, length, 1) noise (batchsize, length, 1) uv (batchsize, length, 1) """ def __init__(self, sampling_rate, \ noise_std=0.003, voiced_threshod=0): super(SourceModuleCycNoise_v1, self).__init__() self.sampling_rate = sampling_rate self.noise_std = noise_std self.l_cyc_gen = CyclicNoiseGen_v1(sampling_rate, noise_std, voiced_threshod) def forward(self, f0_upsamped, beta): """ cyc, noise, uv = SourceModuleCycNoise_v1(F0, beta) F0_upsampled (batchsize, length, 1) beta (1) cyc (batchsize, length, 1) noise (batchsize, length, 1) uv (batchsize, length, 1) """ # source for harmonic branch cyc, pulse, sine, uv, add_noi = self.l_cyc_gen(f0_upsamped, beta) # source for noise branch, in the same shape as uv noise = torch.randn_like(uv) * self.noise_std / 3 return cyc, noise, uv # For Filter module class FilterModuleCycNoiseNSF(torch_nn.Module): """ Filter for cyclic noise nsf FilterModuleCycNoiseNSF(signal_size, hidden_size, sinc_order = 31, block_num = 5, kernel_size = 3, conv_num_in_block = 10) signal_size: signal dimension (should be 1) hidden_size: dimension of hidden features inside neural filter block sinc_order: order of the sinc filter block_num: number of neural filter blocks in harmonic branch kernel_size: kernel size in dilated CNN conv_num_in_block: number of d-conv1d in one neural filter block Usage: out = FilterModuleCycNoiseNSF(har_source, noi_source, cut_f, context) har_source: source for harmonic branch (batchsize, length, dim=1) noi_source: source for noise branch (batchsize, length, dim=1) cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1) context: hidden features to be added (batchsize, 
length, dim) out: (batchsize, length, dim=1) """ def __init__(self, signal_size, hidden_size, sinc_order = 31, \ block_num = 5, kernel_size = 3, conv_num_in_block = 10): super(FilterModuleCycNoiseNSF, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.block_num = block_num self.conv_num_in_block = conv_num_in_block self.sinc_order = sinc_order # filter blocks for harmonic branch tmp = [NeuralFilterBlock(signal_size, hidden_size, True, \ kernel_size, conv_num_in_block) \ for x in range(self.block_num)] self.l_har_blocks = torch_nn.ModuleList(tmp) # filter blocks for noise branch (only one block, 5 sub-blocks) tmp = [NeuralFilterBlock(signal_size, hidden_size, False, \ kernel_size, conv_num_in_block // 2) \ for x in range(1)] self.l_noi_blocks = torch_nn.ModuleList(tmp) # sinc filter generators and time-variant filtering layer self.l_sinc_coef = SincFilter(self.sinc_order) self.l_tv_filtering = TimeVarFIRFilter() # done def forward(self, har_component, noi_component, cond_feat, cut_f): """ """ # harmonic component # hidden_signals = [] for l_har_block in self.l_har_blocks: hidden_signal = l_har_block(har_component, cond_feat) hidden_signals.append(hidden_signal) har_component = hidden_signal # noise componebt for l_noi_block in self.l_noi_blocks: noi_component = l_noi_block(noi_component, cond_feat) # get sinc filter coefficients lp_coef, hp_coef = self.l_sinc_coef(cut_f) # time-variant filtering har_signal = self.l_tv_filtering(har_component, lp_coef) noi_signal = self.l_tv_filtering(noi_component, hp_coef) # get output return har_signal + noi_signal, hidden_signals ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None): super(Model, self).__init__() # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) self.input_dim = in_dim self.output_dim = out_dim # configurations # amplitude of sine waveform (for each harmonic) self.sine_amp = 0.1 # standard deviation of Gaussian noise for additive noise self.noise_std = 0.003 # dimension of hidden features in filter blocks self.hidden_dim = 64 # upsampling rate on input acoustic features (16kHz * 5ms = 80) # assume input_reso has the same value self.upsamp_rate = prj_conf.input_reso[0] # sampling rate (Hz) self.sampling_rate = prj_conf.wav_samp_rate # CNN kernel size in filter blocks self.cnn_kernel_s = 3 # number of filter blocks (for harmonic branch) # noise branch only uses 1 block self.filter_block_num = 5 # number of dilated CNN in each filter block self.cnn_num_in_block = 10 # sinc filter order (odd number) self.sinc_order = 31 # number of harmonics for sine mask # note: cyclis-noise-nsf doesn't use harmonic overtone in source self.harmonic_num = 7 # beta parameter for cyclic-noise self.beta = 0.870 # the three modules self.m_cond = CondModuleHnSincNSF(self.input_dim, \ self.hidden_dim, \ self.upsamp_rate, \ cnn_kernel_s=self.cnn_kernel_s) self.m_source = SourceModuleCycNoise_v1(self.sampling_rate, self.noise_std) self.m_filter = FilterModuleCycNoiseNSF(self.output_dim, \ self.hidden_dim, \ self.sinc_order, \ self.filter_block_num, \ self.cnn_kernel_s, \ self.cnn_num_in_block) # one 
additional module to generate sine mask self.m_sinemask = SineGen(self.sampling_rate, self.harmonic_num) # done return def prepare_mean_std(self, in_dim, out_dim, args, \ data_mean_std=None): """ Load mean/std of input/output features """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def forward(self, x): """ definition of forward method Assume x (batchsize=1, length, dim) Return output(batchsize=1, length) """ # assume x[:, :, -1] is F0, denormalize F0 f0 = x[:, :, -1:] # normalize the input features data feat = self.normalize_input(x) # condition module # features_for_filter_block, up-sampled F0, cut-off-frequency cond_feat, f0_upsamp, cut_f = self.m_cond(feat, f0) # source module # here we assume beta is fixed beta = torch.ones(1, 1, 1, device=f0_upsamp.device) * self.beta # harmonic-source signal, noise-source signal, uv flag har_source, noi_source, uv = self.m_source(f0_upsamp, beta) # neural filter module (including sinc-based FIR filtering) # output signal, hidden signals output, hidden = self.m_filter(har_source, noi_source, \ cond_feat, cut_f) if self.training: # hidden signals shape as (batchsize=1, length) hidden = [x.squeeze(-1) for x in hidden] # sine for masking with torch.no_grad(): sine_mask, uv, noise = self.m_sinemask(f0_upsamp) sine_mask = (sine_mask - noise).mean(axis=-1) # return return [output.squeeze(-1), hidden, sine_mask] else: return output.squeeze(-1) class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ # frame shift (number of points) self.frame_hops = [80, 40, 640] # frame length self.frame_lens = [320, 80, 1920] # FFT length self.fft_n = [512, 128, 2048] # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # floor to determine the frames to be masked self.mask_power_threshold = 0.0000001 # loss function self.loss = torch_nn.MSELoss() return def _stft(self, signal, fft_p, frame_shift, frame_len): """ wrapper of torch.stft Remember to use onesided=True, pad_mode="constant" Signal (batchsize, length) Output (batchsize, fft_p/2+1, frame_num, 2) """ # to be compatible with different torch versions if torch.__version__.split('.')[1].isnumeric() and \ int(torch.__version__.split('.')[1]) < 7: return torch.stft( signal, fft_p, frame_shift, frame_len, window=self.win(frame_len, dtype=signal.dtype, device=signal.device), onesided=True, 
pad_mode="constant") else: return torch.stft( signal, fft_p, frame_shift, frame_len, window=self.win(frame_len, dtype=signal.dtype, device=signal.device), onesided=True, pad_mode="constant", return_complex=False) def _amp(self, x): """ _amp(stft) x_stft: (batchsize, fft_p/2+1, frame_num, 2) output: (batchsize, fft_p/2+1, frame_num) output[x, y, z] = log(x_stft[x, y, z, 0]^2 + x_stft[x, y, z, 1]^2 + floor) """ return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor) def _mask_stft(self, mask_signal, fft_p, frame_shift, frame_len): """ calculate the masking weights from input mask signal (sine) """ # power spectrum of the masking signal (sine signal) x_stft = self._stft(mask_signal, fft_p, frame_shift, frame_len) # x_pow (batchsize, fft_p/2+1, frame_num) x_stft_pow = torch.norm(x_stft, 2, -1).pow(2) # get the normalizing weight for each frame # x_flag (batchsize, frame_num) x_flag = x_stft_pow.mean(axis=1) > self.mask_power_threshold # x_stft_max (batchsize, frame_num) x_stft_max = x_stft_pow.max(axis=1)[0] x_stft_max[~x_flag] = 1.0 # x_stft_weight (batchsize, frame_num) x_stft_weight = 1 / x_stft_max * x_flag # normalizing the mask # mask_normed (batchsize, fft_p/2+1, frame_num, 2) mask_normed = torch.ones_like(x_stft) # normalize the mask, so that maximum mask weight = 1 # mask_normed[:, :, :, 0] is used to mask the real-part # of a spectrum # mask_normed[:, :, :, 1] is used to mask the imaginary-part # of a spectrum mask_normed[:, :, :, 0] = x_stft_pow * x_stft_weight.unsqueeze(1) mask_normed[:, :, :, 1] = mask_normed[:, :, :, 0] return mask_normed def stft_amp(self, signal, fft_p, frame_shift, frame_len, mask=None): """ compute STFT log amplitude signal: (batchsize, length) output: (batchsize, fft_p/2+1, frame_num) mask: (batchsize, fft_p/2+1, frame_num, 2) """ x_stft = self._stft(signal, fft_p, frame_shift, frame_len) if mask is None: x_sp_amp = self._amp(x_stft) else: # apply mask if necessary # mask[:, :, :, 0] is used to mask the real-part # of a spectrum # mask[:, :, :, 1] is used to mask the imaginary-part # of a spectrum x_sp_amp = self._amp(x_stft * mask) return x_sp_amp def compute(self, outputs, target): """ Loss().compute(output, target) should return the Loss in torch.tensor format Assume output and target as (batchsize=1, length) """ # generated signal output = outputs[0] # hidden signals from each filter block in harmonic branch hiddens = outputs[1] # sine mask signal sinemask = outputs[2] # convert from (batchsize=1, length, dim=1) to (1, length) if target.ndim == 3: target.squeeze_(-1) # compute loss over target and output loss = 0 for frame_s, frame_l, fft_p in \ zip(self.frame_hops, self.frame_lens, self.fft_n): # between generated signal and target gen_sp_amp = self.stft_amp(output, fft_p, frame_s, frame_l) tar_sp_amp = self.stft_amp(target, fft_p, frame_s, frame_l) loss += self.loss(gen_sp_amp, tar_sp_amp) # masked spectral loss between hidden signals & target with torch.no_grad(): # produce stft of sine mask mask = self._mask_stft(sinemask, fft_p, frame_s, frame_l) # apply mask to target signal tar_sp_masked_amp = self.stft_amp(target, fft_p, \ frame_s, frame_l, mask) for hidden in hiddens: h_sp_masked_amp = self.stft_amp(hidden, fft_p, frame_s, \ frame_l, mask) loss += self.loss(h_sp_masked_amp, tar_sp_masked_amp) # done return loss if __name__ == "__main__": print("Definition of model")
49,402
38.937753
77
py
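The affine mapping in the condition module's get_cut_f above turns a tanh-bounded hidden feature and the U/V decision into a normalized sinc cut-off frequency: (0.1, 0.5) for unvoiced frames and (0.5, 0.9) for voiced frames. A minimal stand-alone sketch of just that mapping (toy shapes and values chosen here for illustration, not part of the repo):

import torch

def get_cut_f(hidden_feat, f0, voiced_threshold=0):
    # same affine mapping as get_cut_f above: hidden_feat in (-1, 1)
    # is scaled to (-0.2, 0.2), shifted by 0.3, and raised by 0.4
    # for voiced frames
    uv = torch.ones_like(f0) * (f0 > voiced_threshold)
    return hidden_feat * 0.2 + uv * 0.4 + 0.3

hidden = torch.tensor([[[-1.0], [1.0], [-1.0], [1.0]]])  # (batch, length, 1)
f0 = torch.tensor([[[0.0], [0.0], [200.0], [200.0]]])    # 2 unvoiced, 2 voiced
print(get_cut_f(hidden, f0).squeeze())
# tensor([0.1000, 0.5000, 0.5000, 0.9000]): the unvoiced and voiced ranges
# match the comments in the module above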
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/cyc-noise-nsf-4/config.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'cmu_all_trn' val_set_name = 'cmu_all_val' # for convenience tmp = '../DATA/cmu-arctic-data-set' # File lists (text file, one data name per line, without name extension) # trn_file_list: list of files for training set trn_list = [tmp + '/scp/train.lst'] # val_file_list: list of files for validation set. It can be None val_list = [tmp + '/scp/val.lst'] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [80, 1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # Please put ".f0" as the last feature input_exts = ['.mfbsp', '.f0'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [80, 80] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [True, True] # Similar configurations for output features output_dirs = [[tmp + '/wav_16k_norm']] output_dims = [1] output_exts = ['.wav'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU mem required # If you don't want truncation, set truncate_seq = None truncate_seq = 16000 * 3 # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = 80 * 50 ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = ['cmu_all_test_tiny'] # List of test set data # for convenience, you may directly load test_set list here test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476', 'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476', 'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476', 'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume all test data are put in the same sub-directory test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']] # Directories for output features, which are [] test_output_dirs = [[]]
3,430
32.31068
75
py
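The temporal-resolution comment in config.py above ties the frame-level features to the waveform: 80 waveform samples per frame = 16000 Hz * 5 ms. A one-line sanity check of that arithmetic (a sketch; the values are copied from the config above):

wav_samp_rate = 16000       # Hz, wav_samp_rate in the config above
frame_shift_sec = 0.005     # 5 ms frame shift of the acoustic features
input_reso = int(wav_samp_rate * frame_shift_sec)
assert input_reso == 80     # matches input_reso = [80, 80] above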
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-hifigan/main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The default training/inference process wrapper Requires model.py and config.py Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.data_io.default_data_io as nii_default_dset import core_scripts.data_io.customize_dataset as nii_dset import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.nn_manager.nn_manager_GAN as nii_nn_wrapper_GAN import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2021, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed, args) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers, 'sampler': args.sampler, 'pin_memory': True} in_trans_fns = prj_conf.input_trans_fns \ if hasattr(prj_conf, 'input_trans_fns') else None out_trans_fns = prj_conf.output_trans_fns \ if hasattr(prj_conf, 'output_trans_fns') else None inout_trans_fns = prj_conf.input_output_trans_fn \ if hasattr(prj_conf, 'input_output_trans_fn') else None # Load file list and create data loader trn_lst = prj_conf.trn_list trn_set = nii_dset.NII_MergeDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns, inoutput_augment_func = inout_trans_fns) if prj_conf.val_list is not None: val_lst = prj_conf.val_list val_set = nii_dset.NII_MergeDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = 
out_trans_fns, inoutput_augment_func = inout_trans_fns) else: val_set = None # initialize the model and loss function model_G = prj_model.ModelGenerator( trn_set.get_in_dim(), trn_set.get_out_dim(), \ args, prj_conf, trn_set.get_data_mean_std()) model_D = prj_model.ModelDiscriminator( trn_set.get_in_dim(), trn_set.get_out_dim(), args, prj_conf, trn_set.get_data_mean_std()) loss_wrapper = None # initialize the optimizer optimizer_G_wrap = nii_op_wrapper.OptimizerWrapper(model_G, args) optimizer_D_wrap = nii_op_wrapper.OptimizerWrapper(model_D, args) # if necessary, resume training if args.trained_model == "": checkpoint_G = None checkpoint_D = None else: tmp_str = args.trained_model.split(",") checkpoint_G = torch.load(tmp_str[0]) if len(tmp_str) > 1: checkpoint_D = torch.load(tmp_str[1]) else: checkpoint_D = None # start training nii_nn_wrapper_GAN.f_train_wrapper_GAN( args, model_G, model_D, loss_wrapper, device, optimizer_G_wrap, optimizer_D_wrap, trn_set, val_set, checkpoint_G, checkpoint_D) # done for training else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': args.num_workers} in_trans_fns = prj_conf.test_input_trans_fns \ if hasattr(prj_conf, 'test_input_trans_fns') else None out_trans_fns = prj_conf.test_output_trans_fns \ if hasattr(prj_conf, 'test_output_trans_fns') else None inout_trans_fns = prj_conf.test_input_output_trans_fn \ if hasattr(prj_conf, 'test_input_output_trans_fn') \ else None if type(prj_conf.test_list) is list: t_lst = prj_conf.test_list else: t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list) test_set = nii_dset.NII_MergeDataSetLoader( prj_conf.test_set_name, \ t_lst, \ prj_conf.test_input_dirs, prj_conf.input_exts, prj_conf.input_dims, prj_conf.input_reso, prj_conf.input_norm, prj_conf.test_output_dirs, prj_conf.output_exts, prj_conf.output_dims, prj_conf.output_reso, prj_conf.output_norm, './', params = params, truncate_seq = None, min_seq_len = None, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns, inoutput_augment_func = inout_trans_fns) # initialize model model = prj_model.ModelGenerator( test_set.get_in_dim(), test_set.get_out_dim(), args, prj_conf) if args.trained_model == "": print("Please provide --trained-model") sys.exit(1) else: checkpoint = torch.load(args.trained_model) # do inference and output data nii_nn_wrapper.f_inference_wrapper( args, model, device, test_set, checkpoint) # done return if __name__ == "__main__": main()
8,338
35.574561
79
py
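main.py above resumes GAN training by splitting args.trained_model on a comma, so a generator and a discriminator checkpoint can be passed together. A minimal sketch of that convention (the file names in the comment are hypothetical):

import torch

def load_checkpoints(trained_model):
    # mirrors the comma-split logic in main.py above; returns
    # (checkpoint_G, checkpoint_D), where checkpoint_D may be None
    if trained_model == "":
        return None, None
    parts = trained_model.split(",")
    checkpoint_G = torch.load(parts[0])
    checkpoint_D = torch.load(parts[1]) if len(parts) > 1 else None
    return checkpoint_G, checkpoint_D

# e.g. --trained-model epoch010_G.pt,epoch010_D.pt (hypothetical names)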
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-hifigan/model.py
#!/usr/bin/env python """ model.py for hn-nsf + hifigan discriminator HifiGAN part is adopted from https://github.com/jik876/hifi-gan HiFi-GAN: Generative Adversarial Networks for Efficient and High Fidelity Speech Synthesis By Jungil Kong, Jaehyeon Kim, Jaekyoung Bae """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm import core_scripts.other_tools.debug as nii_debug __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2021, Xin Wang" ######### ## Loss definition ######### class LossAuxGen(): """ Wrapper to define loss function """ def __init__(self): """ Multi-resolution STFT loss """ # frame shift (number of points) self.frame_hops = [80, 40, 640] # frame length self.frame_lens = [320, 80, 1920] # fft length self.fft_n = [512, 128, 2048] # window type in stft self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # loss function self.loss = torch_nn.L1Loss() # weight for this loss self.loss_weight = 45 # a buffer to store the window coefficients self.win_buf = {} return def _stft(self, signal, fft_p, frame_shift, frame_len): """ output = _stft(signal, fft_p, frame_shift, frame_len) wrapper of torch.stft Remember to use onesided=True, pad_mode="constant" input ----- signal: tensor, (batchsize, length) fft_p: int, FFT points frame_shift: int, frame shift, in number of waveform points frame_len: int, frame length, in number of waveform points output ------ output: tensor (batchsize, fft_p/2+1, frame_num, 2) """ # buffer to store the window coefficients if not frame_len in self.win_buf: win_coef = self.win(frame_len, dtype=signal.dtype, device=signal.device) self.win_buf[frame_len] = win_coef win_coef = self.win_buf[frame_len] # to be compatible with different torch versions if torch.__version__.split('.')[1].isnumeric() and \ int(torch.__version__.split('.')[1]) < 7: return torch.stft( signal, fft_p, frame_shift, frame_len, window=win_coef, onesided=True, pad_mode="constant") else: return torch.stft( signal, fft_p, frame_shift, frame_len, window=win_coef, onesided=True, pad_mode="constant", return_complex=False) def _amp(self, x): """ output = _amp(stft) compute STFT amplitude input ----- x_stft: tensor (batchsize, fft_p/2+1, frame_num, 2) output: output: (batchsize, fft_p/2+1, frame_num) Note that output[x, y, z] = log(x_stft[x, y, z, 0]^2 + x_stft[x, y, z, 1]^2 + floor) """ return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor) def compute(self, output, target): """ loss = compute(output, target) input ----- output: tensor, output signal from a model, (batch, length, 1) target: tensor, natural target signal, (batch, length, 1) output ------ loss: scalar """ if output.ndim == 3: output_tmp = output.squeeze(-1) if target.ndim == 3: target_tmp = target.squeeze(-1) # compute loss loss = 0 for frame_shift, frame_len, fft_p in \ zip(self.frame_hops, self.frame_lens, self.fft_n): x_stft = self._stft(output_tmp, fft_p, frame_shift, frame_len) y_stft = self._stft(target_tmp, fft_p, frame_shift, frame_len) x_sp_amp = self._amp(x_stft) y_sp_amp = self._amp(y_stft) loss += self.loss(x_sp_amp, y_sp_amp) return loss * self.loss_weight ##### ## Model Generator definition ##### class BLSTMLayer(torch_nn.Module): """ Wrapper over BLSTM Assume hidden layer = 1 """ def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__() if output_dim % 2 != 0: print("Output_dim of BLSTMLayer is {:d}".format(output_dim)) print("BLSTMLayer expects an even layer size") sys.exit(1) # bi-directional LSTM self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \ bidirectional=True, batch_first=True) def forward(self, x): """output = forward(x) input ----- x: tensor (batchsize=1, length, dim_in) output ------ output: tensor, (batchsize=1, length, dim_out) """ blstm_data, _ = self.l_blstm(x) return blstm_data class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for conv1d that keeps input and output lengths equal (optionally causal) Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal = False, stride = 1, groups=1, bias=True, \ tanh = True, pad_mode='constant'): super(Conv1dKeepLength, self).__init__( input_dim, output_dim, kernel_s, stride=stride, padding = 0, dilation = dilation_s, groups=groups, bias=bias) self.pad_mode = pad_mode self.causal = causal # input & output length will be the same if self.causal: # left pad to make the convolution causal self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: # pad on both sides self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): # permute to (batchsize=1, dim, length) # add one dimension (batchsize=1, dim, ADDED_DIM, length) # pad to ADDED_DIM # squeeze and return to (batchsize=1, dim, length) # https://github.com/pytorch/pytorch/issues/1333 x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \ (self.pad_le, self.pad_ri, 0, 0), mode = self.pad_mode).squeeze(2) # tanh(conv1()) # permute back to (batchsize=1, length, dim) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) # # Moving average class MovingAverage(Conv1dKeepLength): """ Wrapper to define a moving average smoothing layer Note: MovingAverage can be implemented using TimeInvFIRFilter too.
Here we define another Module directly on Conv1dKeepLength """ def __init__(self, feature_dim, window_len, causal=False, \ pad_mode='replicate'): super(MovingAverage, self).__init__( feature_dim, feature_dim, 1, window_len, causal, groups=feature_dim, bias=False, tanh=False, \ pad_mode=pad_mode) # set the weighting coefficients torch_nn.init.constant_(self.weight, 1/window_len) # turn off grad for this layer for p in self.parameters(): p.requires_grad = False def forward(self, data): return super(MovingAverage, self).forward(data) # # FIR filter layer class TimeInvFIRFilter(Conv1dKeepLength): """ Wrapper to define a FIR filter over Conv1d Note: FIR Filtering is conducted on each dimension (channel) independently: groups=channel_num in conv1d """ def __init__(self, feature_dim, filter_coef, causal=True, flag_train=False): """ __init__(self, feature_dim, filter_coef, causal=True, flag_train=False) feature_dim: dimension of input data filter_coef: 1-D tensor of filter coefficients causal: FIR is causal or not (default: True) flag_train: whether to train the filter coefficients (default: False) Input data: (batchsize=1, length, feature_dim) Output data: (batchsize=1, length, feature_dim) """ super(TimeInvFIRFilter, self).__init__( feature_dim, feature_dim, 1, filter_coef.shape[0], causal, groups=feature_dim, bias=False, tanh=False) if filter_coef.ndim == 1: # initialize weight using provided filter_coef with torch.no_grad(): tmp_coef = torch.zeros([feature_dim, 1, filter_coef.shape[0]]) tmp_coef[:, 0, :] = filter_coef tmp_coef = torch.flip(tmp_coef, dims=[2]) self.weight = torch.nn.Parameter(tmp_coef, requires_grad=flag_train) else: print("TimeInvFIRFilter expects filter_coef to be 1-D tensor") print("Please implement the code in __init__ if necessary") sys.exit(1) def forward(self, data): return super(TimeInvFIRFilter, self).forward(data) class TimeVarFIRFilter(torch_nn.Module): """ TimeVarFIRFilter Given sequences of filter coefficients and a signal, do filtering Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) For batch 0: For n in [1, sequence_length): output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k+1, 1)*coef(0, n, k) Note: filter coef (0, n, :) is only used to compute the output at (0, n, 1) """ def __init__(self): super(TimeVarFIRFilter, self).__init__() def forward(self, signal, f_coef): """ Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) Output: (batchsize=1, signal_length, 1) For n in [1, sequence_length): output(0, n, 1)= \sum_{k=1}^{K} signal(0, n-k+1, 1)*coef(0, n, k) This method may not be efficient: Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K] output [y_1, y_2, y_3, ..., y_N, *, * ... *] = a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] """ signal_l = signal.shape[1] order_k = f_coef.shape[-1] # pad to (batchsize=1, signal_length + filter_order-1, dim) padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1)) y = torch.zeros_like(signal) # roll and weighted sum, only take [0:signal_length] for k in range(order_k): y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \ * f_coef[:, :, k:k+1] # done return y # Sinc filter generator class SincFilter(torch_nn.Module): """ SincFilter Given the cut-off-frequency, produce the low-pass and high-pass windowed-sinc-filters.
If input cut-off-frequency is (batchsize=1, signal_length, 1), output filter coef is (batchsize=1, signal_length, filter_order). For each time step in [1, signal_length), we calculate one low-pass sinc filter and one high-pass sinc filter. Example: import scipy import scipy.signal import numpy as np import matplotlib.pyplot as plt filter_order = 31 cut_f = 0.2 sinc_layer = SincFilter(filter_order) lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f) w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1]) w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1]) plt.plot(w, 20*np.log10(np.abs(h1))) plt.plot(w, 20*np.log10(np.abs(h2))) plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0]) """ def __init__(self, filter_order): super(SincFilter, self).__init__() # Make the filter order an odd number # [-(M-1)/2, ... 0, (M-1)/2] # self.half_k = (filter_order - 1) // 2 self.order = self.half_k * 2 + 1 def hamming_w(self, n_index): """ prepare hamming window for each time step n_index (batchsize=1, signal_length, filter_order) For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2] ... output (batchsize=1, signal_length, filter_order) output[0, 0, :] = hamming_window output[0, 1, :] = hamming_window ... """ # Hamming window return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order) def sinc(self, x): """ Normalized sinc-filter sin(pi * x) / (pi * x) https://en.wikipedia.org/wiki/Sinc_function Assume x (batchsize, signal_length, filter_order) and x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order] x[:, :, self.half_order] -> time index = 0, sinc(0)=1 """ y = torch.zeros_like(x) y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \ / (np.pi * x[:, :, 0:self.half_k]) y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:]) \ / (np.pi * x[:, :, self.half_k+1:]) y[:,:,self.half_k] = 1 return y def forward(self, cut_f): """ lp_coef, hp_coef = forward(self, cut_f) cut-off frequency cut_f (batchsize=1, length, dim = 1) lp_coef: low-pass filter coefs (batchsize, length, filter_order) hp_coef: high-pass filter coefs (batchsize, length, filter_order) """ # create the filter order index with torch.no_grad(): # [- (M-1) / 2, ..., 0, ..., (M-1)/2] lp_coef = torch.arange(-self.half_k, self.half_k + 1, device=cut_f.device) # [[[- (M-1) / 2, ..., 0, ..., (M-1)/2], # [- (M-1) / 2, ..., 0, ..., (M-1)/2], # ... # ], # [[- (M-1) / 2, ..., 0, ..., (M-1)/2], # [- (M-1) / 2, ..., 0, ..., (M-1)/2], # ...
# ]] lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) hp_coef = torch.arange(-self.half_k, self.half_k + 1, device=cut_f.device) hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) # temporary buffer of [-1^n] for gain norm in hp_coef tmp_one = torch.pow(-1, hp_coef) # unnormalized filter coefs with hamming window lp_coef = cut_f * self.sinc(cut_f * lp_coef) \ * self.hamming_w(lp_coef) hp_coef = (self.sinc(hp_coef) \ - cut_f * self.sinc(cut_f * hp_coef)) \ * self.hamming_w(hp_coef) # normalize the coef so that the gain at 0/pi is 0 dB # sum_n lp_coef[n] lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1) # sum_n hp_coef[n] * -1^n hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1) lp_coef = lp_coef / lp_coef_norm hp_coef = hp_coef / hp_coef_norm # return normed coef return lp_coef, hp_coef # # Up sampling class UpSampleLayer(torch_nn.Module): """ Wrapper over up-sampling Input tensor: (batchsize=1, length, dim) Output tensor: (batchsize=1, length * up-sampling_factor, dim) """ def __init__(self, feature_dim, up_sampling_factor, smoothing=False): super(UpSampleLayer, self).__init__() # wrap an up-sampling layer self.scale_factor = up_sampling_factor self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor) if smoothing: self.l_ave1 = MovingAverage(feature_dim, self.scale_factor) self.l_ave2 = MovingAverage(feature_dim, self.scale_factor) else: self.l_ave1 = torch_nn.Identity() self.l_ave2 = torch_nn.Identity() return def forward(self, x): # permute to (batchsize=1, dim, length) up_sampled_data = self.l_upsamp(x.permute(0, 2, 1)) # permute it back to (batchsize=1, length, dim) # and do two moving averages return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1))) # Neural filter block (1 block) class NeuralFilterBlock(torch_nn.Module): """ Wrapper over a single filter block """ def __init__(self, signal_size, hidden_size,\ kernel_size=3, conv_num=10): super(NeuralFilterBlock, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.conv_num = conv_num self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)] # ff layer to expand dimension self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \ bias=False) self.l_ff_1_tanh = torch_nn.Tanh() # dilated conv layers tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \ kernel_size, causal=True, bias=False) \ for x in self.dilation_s] self.l_convs = torch_nn.ModuleList(tmp) # ff layer to de-expand dimension self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4, \ bias=False) self.l_ff_2_tanh = torch_nn.Tanh() self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size, \ bias=False) self.l_ff_3_tanh = torch_nn.Tanh() # a simple scale self.scale = torch_nn.Parameter(torch.tensor([1/len(self.l_convs)]), requires_grad=False) return def forward(self, signal, context): """ Assume: signal (batchsize=1, length, signal_size) context (batchsize=1, length, hidden_size) Output: (batchsize=1, length, signal_size) """ # expand dimension tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal)) # loop over dilated convs # output of a d-conv is input + context + d-conv(input) for l_conv in self.l_convs: tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context # to be consistent with legacy configuration in CURRENNT tmp_hidden = tmp_hidden * self.scale # compress the dimension and skip-add tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden)) tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden)) output_signal = tmp_hidden + signal return output_signal # #
Sine waveform generator class SineGen(torch_nn.Module): """ Definition of sine generator SineGen(samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False) samp_rate: sampling rate in Hz harmonic_num: number of harmonic overtones (default 0) sine_amp: amplitude of sine-waveform (default 0.1) noise_std: std of Gaussian noise (default 0.003) voiced_threshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SineGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced segment is always sin(np.pi) or cos(0) """ def __init__(self, samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False): super(SineGen, self).__init__() self.sine_amp = sine_amp self.noise_std = noise_std self.harmonic_num = harmonic_num self.dim = self.harmonic_num + 1 self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.flag_for_pulse = flag_for_pulse def _f02uv(self, f0): # generate uv signal uv = torch.ones_like(f0) uv = uv * (f0 > self.voiced_threshold) return uv def _f02sine(self, f0_values): """ f0_values: (batchsize, length, dim) where dim indicates fundamental tone and overtones """ # convert to F0 in rad. The integer part n can be ignored # because 2 * np.pi * n doesn't affect phase rad_values = (f0_values / self.sampling_rate) % 1 # initial phase noise (no noise for fundamental component) rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\ device = f0_values.device) rand_ini[:, 0] = 0 rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) if not self.flag_for_pulse: # for normal case # To prevent torch.cumsum numerical overflow, # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. # Buffer tmp_over_one_idx indicates the time step to add -1. # This will not change F0 of sine because (x-1)*2*pi and x*2*pi # correspond to the same phase tmp_over_one = torch.cumsum(rad_values, 1) % 1 tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi) else: # If necessary, make sure that the first time step of every # voiced segment is sin(pi) or cos(0) # This is used for pulse-train generation # identify the last time step in unvoiced segments uv = self._f02uv(f0_values) uv_1 = torch.roll(uv, shifts=-1, dims=1) uv_1[:, -1, :] = 1 u_loc = (uv < 1) * (uv_1 > 0) # get the instantaneous phase tmp_cumsum = torch.cumsum(rad_values, dim=1) # each sequence in the batch needs to be processed separately for idx in range(f0_values.shape[0]): temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] # stores the accumulation of i.phase within # each voiced segment tmp_cumsum[idx, :, :] = 0 tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum # rad_values - tmp_cumsum: remove the accumulation of i.phase # within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) # get the sines sines = torch.cos(i_phase * 2 * np.pi) return sines def forward(self, f0): """ sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 output sine_tensor: tensor(batchsize=1, length, dim) output uv: tensor(batchsize=1, length, 1) """ with torch.no_grad(): f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \ device=f0.device) # fundamental component f0_buf[:, :, 0] = f0[:, :, 0] for idx in np.arange(self.harmonic_num): # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2) # generate sine waveforms sine_waves = self._f02sine(f0_buf) * self.sine_amp # generate uv signal #uv = torch.ones(f0.shape) #uv = uv * (f0 > self.voiced_threshold) uv = self._f02uv(f0) # noise: for unvoiced should be similar to sine_amp # std = self.sine_amp/3 -> max value ~ self.sine_amp #. for voiced regions is self.noise_std noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) # first: set the unvoiced part to 0 by uv # then: additive noise sine_waves = sine_waves * uv + noise return sine_waves, uv, noise ##### ## Model definition ## ## For condition module only provide Spectral feature to Filter block class CondModuleHnSincNSF(torch_nn.Module): """ Condition module for hn-sinc-NSF Upsample and transform input features CondModuleHnSincNSF(input_dimension, output_dimension, up_sample_rate, blstm_dimension = 64, cnn_kernel_size = 3) Spec, F0, cut_off_freq = CondModuleHnSincNSF(features, F0) Both input features should be frame-level features If x doesn't contain F0, just ignore the returned F0 CondModuleHnSincNSF(input_dim, output_dim, up_sample, blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0): input_dim: sum of dimensions of input features output_dim: dim of the feature Spec to be used by neural filter-block up_sample: up sampling rate of input features blstm_s: dimension of the features from blstm (default 64) cnn_kernel_s: kernel size of CNN in condition module (default 3) voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced """ def __init__(self, input_dim, output_dim, up_sample, \ blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0): super(CondModuleHnSincNSF, self).__init__() # input feature dimension self.input_dim = input_dim self.output_dim = output_dim self.up_sample = up_sample self.blstm_s = blstm_s self.cnn_kernel_s = cnn_kernel_s self.cut_f_smooth = up_sample * 4 self.voiced_threshold = voiced_threshold # the blstm layer self.l_blstm = BLSTMLayer(input_dim, self.blstm_s) # the CNN layer (+1 dim for cut_off_frequence of sinc filter) self.l_conv1d = Conv1dKeepLength(self.blstm_s, \ self.output_dim, \ dilation_s = 1, \ kernel_s = self.cnn_kernel_s) # Upsampling layer for hidden features self.l_upsamp = UpSampleLayer(self.output_dim, \ self.up_sample, True) # separate layer for up-sampling normalized F0 values self.l_upsamp_f0_hi = UpSampleLayer(1, self.up_sample, True) # Upsampling for F0: don't smooth up-sampled F0 self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False) # Another smoothing layer to smooth the cut-off frequency # for sinc filters. 
Use a larger window to smooth self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth) def get_cut_f(self, hidden_feat, f0): """ cut_f = get_cut_f(self, feature, f0) feature: (batchsize, length, dim=1) f0: (batchsize, length, dim=1) """ # generate uv signal uv = torch.ones_like(f0) * (f0 > self.voiced_threshold) # hidden_feat is between (-1, 1) after conv1d with tanh # (-0.2, 0.2) + 0.3 = (0.1, 0.5) # voiced: (0.1, 0.5) + 0.4 = (0.5, 0.9) # unvoiced: (0.1, 0.5) = (0.1, 0.5) return hidden_feat * 0.2 + uv * 0.4 + 0.3 def forward(self, feature, f0): """ spec, f0 = forward(self, feature, f0) feature: (batchsize, length, dim) f0: (batchsize, length, dim=1), which should be F0 at frame-level spec: (batchsize, length, self.output_dim), at wave-level f0: (batchsize, length, 1), at wave-level """ tmp = self.l_upsamp(self.l_conv1d(self.l_blstm(feature))) # concatenate normed F0 with hidden spectral features context = torch.cat((tmp[:, :, 0:self.output_dim-1], \ self.l_upsamp_f0_hi(feature[:, :, -1:])), \ dim=2) # hidden feature for cut-off frequency hidden_cut_f = tmp[:, :, self.output_dim-1:] # directly up-sample F0 without smoothing f0_upsamp = self.l_upsamp_F0(f0) # get the cut-off-frequency from output of CNN cut_f = self.get_cut_f(hidden_cut_f, f0_upsamp) # smooth the cut-off-frequency using fixed average smoothing cut_f_smoothed = self.l_cut_f_smooth(cut_f) # return return context, f0_upsamp, cut_f_smoothed, hidden_cut_f # For source module class SourceModuleHnNSF(torch_nn.Module): """ SourceModule for hn-nsf SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0) sampling_rate: sampling_rate in Hz harmonic_num: number of harmonics above F0 (default: 0) sine_amp: amplitude of sine source signal (default: 0.1) add_noise_std: std of additive Gaussian noise (default: 0.003) note that amplitude of noise in unvoiced is decided by sine_amp voiced_threshold: threshold to set U/V given F0 (default: 0) Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length, 1) uv (batchsize, length, 1) """ def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0): super(SourceModuleHnNSF, self).__init__() self.sine_amp = sine_amp self.noise_std = add_noise_std # to produce sine waveforms self.l_sin_gen = SineGen(sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod) # to merge source harmonics into a single excitation self.l_linear = torch_nn.Linear(harmonic_num+1, 1) self.l_tanh = torch_nn.Tanh() def forward(self, x): """ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length, 1) """ # source for harmonic branch sine_wavs, uv, _ = self.l_sin_gen(x) sine_merge = self.l_tanh(self.l_linear(sine_wavs)) # source for noise branch, in the same shape as uv noise = torch.randn_like(uv) * self.sine_amp / 3 return sine_merge, noise, uv # For Filter module class FilterModuleHnSincNSF(torch_nn.Module): """ Filter for Hn-sinc-NSF FilterModuleHnSincNSF(signal_size, hidden_size, sinc_order = 31, block_num = 5, kernel_size = 3, conv_num_in_block = 10) signal_size: signal dimension (should be 1) hidden_size: dimension of hidden features inside neural filter block sinc_order: order of the sinc filter block_num: number of neural filter blocks in harmonic branch kernel_size: kernel size in dilated CNN conv_num_in_block: number of
d-conv1d in one neural filter block Usage: output = FilterModuleHnSincNSF(har_source, noi_source, cut_f, context) har_source: source for harmonic branch (batchsize, length, dim=1) noi_source: source for noise branch (batchsize, length, dim=1) cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1) context: hidden features to be added (batchsize, length, dim) output: (batchsize, length, dim=1) """ def __init__(self, signal_size, hidden_size, sinc_order = 31, \ block_num = 5, kernel_size = 3, conv_num_in_block = 10): super(FilterModuleHnSincNSF, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.block_num = block_num self.conv_num_in_block = conv_num_in_block self.sinc_order = sinc_order # filter blocks for harmonic branch tmp = [NeuralFilterBlock(signal_size, hidden_size, \ kernel_size, conv_num_in_block) \ for x in range(self.block_num)] self.l_har_blocks = torch_nn.ModuleList(tmp) # filter blocks for noise branch (only one block, 5 sub-blocks) tmp = [NeuralFilterBlock(signal_size, hidden_size, \ kernel_size, conv_num_in_block // 2) \ for x in range(1)] self.l_noi_blocks = torch_nn.ModuleList(tmp) # sinc filter generators and time-variant filtering layer self.l_sinc_coef = SincFilter(self.sinc_order) self.l_tv_filtering = TimeVarFIRFilter() # done def forward(self, har_component, noi_component, cond_feat, cut_f): """ """ # harmonic component for l_har_block in self.l_har_blocks: har_component = l_har_block(har_component, cond_feat) # noise component for l_noi_block in self.l_noi_blocks: noi_component = l_noi_block(noi_component, cond_feat) # get sinc filter coefficients lp_coef, hp_coef = self.l_sinc_coef(cut_f) # time-variant filtering har_signal = self.l_tv_filtering(har_component, lp_coef) noi_signal = self.l_tv_filtering(noi_component, hp_coef) # get output return har_signal + noi_signal class ModelGenerator(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None): super(ModelGenerator, self).__init__() ########## basic config ######## # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) self.input_dim = in_dim self.output_dim = out_dim ############################### # configurations # amplitude of sine waveform (for each harmonic) self.sine_amp = 0.1 # standard deviation of Gaussian noise for additive noise self.noise_std = 0.003 # dimension of hidden features in filter blocks self.hidden_dim = 64 # upsampling rate on input acoustic features (16kHz * 5ms = 80) # assume input_reso has the same value self.upsamp_rate = prj_conf.input_reso[0] # sampling rate (Hz) self.sampling_rate = prj_conf.wav_samp_rate # CNN kernel size in filter blocks self.cnn_kernel_s = 3 # number of filter blocks (for harmonic branch) # noise branch only uses 1 block self.filter_block_num = 5 # number of dilated CNN in each filter block self.cnn_num_in_block = 10 # number of harmonic overtones in source self.harmonic_num = 7 # order of sinc-windowed-FIR-filter self.sinc_order = 31 # the three modules self.m_cond = CondModuleHnSincNSF(self.input_dim, \ self.hidden_dim, \ self.upsamp_rate, \ cnn_kernel_s=self.cnn_kernel_s) self.m_source = SourceModuleHnNSF(self.sampling_rate,
self.harmonic_num, self.sine_amp, self.noise_std) self.m_filter = FilterModuleHnSincNSF(self.output_dim, \ self.hidden_dim, \ self.sinc_order, \ self.filter_block_num, \ self.cnn_kernel_s, \ self.cnn_num_in_block) # loss function on spectra self.m_aux_loss = LossAuxGen() # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def forward(self, x): """ definition of forward method Assume x (batchsize=1, length, dim) Return output(batchsize=1, length) """ # assume x[:, :, -1] is F0, denormalize F0 f0 = x[:, :, -1:] # normalize the input features data feat = self.normalize_input(x) # condition module # feature-to-filter-block, f0-up-sampled, cut-off-f-for-sinc, # hidden-feature-for-cut-off-f cond_feat, f0_upsamped, cut_f, hid_cut_f = self.m_cond(feat, f0) # source module # harmonic-source, noise-source (for noise branch), uv har_source, noi_source, uv = self.m_source(f0_upsamped) # neural filter module (including sinc-based FIR filtering) # output output = self.m_filter(har_source, noi_source, cond_feat, cut_f) return output def loss_aux(self, nat_wav, gen_tuple, data_in): return self.m_aux_loss.compute(gen_tuple, nat_wav) ######### ## Model Discriminator definition ######### def get_padding(kernel_size, dilation=1): """Function to compute the padding length for CNN layers """ # L_out = (L_in + 2*pad - dila * (ker - 1) - 1) // stride + 1 # stride -> 1 # L_out = L_in + 2*pad - dila * (ker - 1) # L_out == L_in -> # 2 * pad = dila * (ker - 1) return int((kernel_size*dilation - dilation)/2) class DiscriminatorP(torch_nn.Module): def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): super(DiscriminatorP, self).__init__() self.leaky_relu_slope = 0.1 self.period = period norm_f = weight_norm if use_spectral_norm == False else spectral_norm self.convs = torch_nn.ModuleList([ norm_f( torch_nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f( torch_nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f( torch_nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f( torch_nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f( torch_nn.Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), ]) self.conv_post = norm_f( 
torch_nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) return def forward(self, x): fmap = [] # 1d to 2d b, c, t = x.shape if t % self.period != 0: # pad first n_pad = self.period - (t % self.period) x = torch_nn_func.pad(x, (0, n_pad), "reflect") t = t + n_pad x = x.view(b, c, t // self.period, self.period) for l in self.convs: x = l(x) x = torch_nn_func.leaky_relu(x, self.leaky_relu_slope) fmap.append(x) x = self.conv_post(x) fmap.append(x) x = torch.flatten(x, 1, -1) return x, fmap class MultiPeriodDiscriminator(torch_nn.Module): def __init__(self): super(MultiPeriodDiscriminator, self).__init__() self.discriminators = torch_nn.ModuleList([ DiscriminatorP(2), DiscriminatorP(3), DiscriminatorP(5), DiscriminatorP(7), DiscriminatorP(11), ]) def forward(self, y, y_hat): y_d_rs = [] y_d_gs = [] fmap_rs = [] fmap_gs = [] for i, d in enumerate(self.discriminators): y_d_r, fmap_r = d(y) y_d_g, fmap_g = d(y_hat) y_d_rs.append(y_d_r) fmap_rs.append(fmap_r) y_d_gs.append(y_d_g) fmap_gs.append(fmap_g) return y_d_rs, y_d_gs, fmap_rs, fmap_gs class DiscriminatorS(torch_nn.Module): def __init__(self, use_spectral_norm=False): super(DiscriminatorS, self).__init__() self.leaky_relu_slope = 0.1 norm_f = weight_norm if use_spectral_norm == False else spectral_norm self.convs = torch_nn.ModuleList([ norm_f( torch_nn.Conv1d(1, 128, 15, 1, padding=7)), norm_f( torch_nn.Conv1d(128, 128, 41, 2, groups=4, padding=20)), norm_f( torch_nn.Conv1d(128, 256, 41, 2, groups=16, padding=20)), norm_f( torch_nn.Conv1d(256, 512, 41, 4, groups=16, padding=20)), norm_f( torch_nn.Conv1d(512, 1024, 41, 4, groups=16, padding=20)), norm_f( torch_nn.Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), norm_f( torch_nn.Conv1d(1024, 1024, 5, 1, padding=2)), ]) self.conv_post = norm_f(torch_nn.Conv1d(1024, 1, 3, 1, padding=1)) return def forward(self, x): fmap = [] for l in self.convs: x = l(x) x = torch_nn_func.leaky_relu(x, self.leaky_relu_slope) fmap.append(x) x = self.conv_post(x) fmap.append(x) x = torch.flatten(x, 1, -1) return x, fmap class MultiScaleDiscriminator(torch_nn.Module): def __init__(self): super(MultiScaleDiscriminator, self).__init__() self.discriminators = torch_nn.ModuleList([ DiscriminatorS(use_spectral_norm=True), DiscriminatorS(), DiscriminatorS(), ]) self.meanpools = torch_nn.ModuleList([ torch_nn.AvgPool1d(4, 2, padding=2), torch_nn.AvgPool1d(4, 2, padding=2) ]) def forward(self, y, y_hat): y_d_rs = [] y_d_gs = [] fmap_rs = [] fmap_gs = [] for i, d in enumerate(self.discriminators): if i != 0: y = self.meanpools[i-1](y) y_hat = self.meanpools[i-1](y_hat) y_d_r, fmap_r = d(y) y_d_g, fmap_g = d(y_hat) y_d_rs.append(y_d_r) fmap_rs.append(fmap_r) y_d_gs.append(y_d_g) fmap_gs.append(fmap_g) return y_d_rs, y_d_gs, fmap_rs, fmap_gs class ModelDiscriminator(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None): super(ModelDiscriminator, self).__init__() self.m_mpd = MultiPeriodDiscriminator() self.m_msd = MultiScaleDiscriminator() # done return def _feature_loss(self, fmap_r, fmap_g): loss = 0 for dr, dg in zip(fmap_r, fmap_g): for rl, gl in zip(dr, dg): loss += torch.mean(torch.abs(rl - gl)) return loss*2 def _discriminator_loss(self, disc_real_outputs, disc_generated_outputs): loss = 0 r_losses = [] g_losses = [] for dr, dg in zip(disc_real_outputs, disc_generated_outputs): r_loss = torch.mean((1-dr)**2) g_loss = torch.mean(dg**2) loss += (r_loss + g_loss) r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) return loss, r_losses, g_losses def 
_generator_loss(self, disc_outputs): loss = 0 gen_losses = [] for dg in disc_outputs: l = torch.mean((1-dg)**2) gen_losses.append(l) loss += l return loss, gen_losses def loss_for_D(self, nat_wav, gen_wav_detached, input_feat): # gen_wav has been detached nat_wav_tmp = nat_wav.permute(0, 2, 1) gen_wav_tmp = gen_wav_detached.permute(0, 2, 1) # MPD y_df_hat_r, y_df_hat_g, _, _ = self.m_mpd(nat_wav_tmp, gen_wav_tmp) loss_disc_f, _, _ = self._discriminator_loss(y_df_hat_r, y_df_hat_g) # MSD y_ds_hat_r, y_ds_hat_g, _, _ = self.m_msd(nat_wav_tmp, gen_wav_tmp) loss_disc_s, _, _ = self._discriminator_loss(y_ds_hat_r, y_ds_hat_g) return loss_disc_f + loss_disc_s def loss_for_G(self, nat_wav, gen_wav, input_feat): nat_wav_tmp = nat_wav.permute(0, 2, 1) gen_wav_tmp = gen_wav.permute(0, 2, 1) # MPD y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = self.m_mpd(nat_wav_tmp, gen_wav_tmp) # MSD y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = self.m_msd(nat_wav_tmp, gen_wav_tmp) loss_fm_f = self._feature_loss(fmap_f_r, fmap_f_g) loss_fm_s = self._feature_loss(fmap_s_r, fmap_s_g) loss_gen_f, _ = self._generator_loss(y_df_hat_g) loss_gen_s, _ = self._generator_loss(y_ds_hat_g) return loss_fm_f + loss_fm_s + loss_gen_f + loss_gen_s if __name__ == "__main__": print("Definition of model")
48,552
37.170597
77
py
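get_padding in model.py above keeps L_out == L_in for stride-1 convolutions by choosing pad = dilation * (kernel - 1) / 2. A quick stand-alone check of that identity (a sketch, not part of the repo):

import torch
import torch.nn as nn

def get_padding(kernel_size, dilation=1):
    # same formula as model.py above: 2 * pad = dilation * (kernel - 1)
    return int((kernel_size * dilation - dilation) / 2)

x = torch.randn(1, 1, 100)
for k, d in [(3, 1), (5, 1), (5, 3)]:
    conv = nn.Conv1d(1, 1, k, stride=1, dilation=d,
                     padding=get_padding(k, d))
    assert conv(x).shape[-1] == x.shape[-1]  # length is preserved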
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-hifigan/config.py
#!/usr/bin/env python """ config.py To merge different corpora (or just one corpus), *_set_name are lists *_list are lists of lists *_dirs are lists of lists """ import os __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = ['cmu_all_trn'] val_set_name = ['cmu_all_val'] # for convenience tmp1 = '../DATA/cmu-arctic-data-set' # File lists (text file, one data name per line, without name extension) # trn_file_list: list of files for training set trn_list = [tmp1 + '/scp/train.lst'] # val_file_list: list of files for validation set. It can be None val_list = [tmp1 + '/scp/val.lst'] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [[tmp1 + '/5ms/melspec', tmp1 + '/5ms/f0']] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [80, 1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # Please put ".f0" as the last feature input_exts = ['.mfbsp', '.f0'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [80, 80] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [True, True] # Similar configurations for output features output_dirs = [[tmp1 + '/wav_16k_norm']] output_dims = [1] output_exts = ['.wav'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU mem required # If you don't want truncation, set truncate_seq = None truncate_seq = None # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = None # Optional argument # Just a buffer for convenience # It can contain anything optional_argument = [''] # Data transformation function, you can import here # these functions are applied before casting np.array data into tensor arrays # #input_trans_fns = [[func_for_mel, fun_for_f0]] #output_trans_fns = [[func_for_wav]] # import numpy as np def augfunc(data_in, data_out): """ Use this function to trim the input and output sequences data_in_modified, data_out_modified, length = augfunc(data_in, data_out) """ orig_length = data_out.shape[0] ups_rate = input_reso[0] seg_length = ups_rate * 32 if orig_length > seg_length: # random start rs = np.random.randint(0, (orig_length - seg_length)) rs = rs // ups_rate * ups_rate data_in_new = data_in[rs // ups_rate : (rs + seg_length) // ups_rate] data_out_new = data_out[rs : rs + seg_length] tmp_len = seg_length else: data_in_new = data_in data_out_new = data_out tmp_len = orig_length return data_in_new, data_out_new, tmp_len input_output_trans_fn = augfunc ######################################################### ## Configuration for
inference stage ######################################################### # similar options to training stage test_set_name = ['cmu_all_test_tiny'] # List of test set data # for convenience, you may directly load test_set list here test_list = [['slt_arctic_b0474']] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory test_input_dirs = [[tmp1 + '/5ms/melspec/', tmp1 + '/5ms/f0/']] # Directories for output features, which are [] test_output_dirs = [[]] # Data transformation function, you can import here # these functions are applied before casting np.array data into tensor arrays # #test_input_trans_fns = [[func_for_mel, fun_for_f0]] #test_output_trans_fns = [[func_for_wav]]
4,529
30.027397
78
py
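The augfunc hook in the config above crops aligned input/output segments: the waveform start index is snapped to a frame boundary so the frame-level features stay aligned through the 80x upsampling. A minimal numpy sketch of that logic (the shapes are hypothetical, not part of the repository):

import numpy as np

# hypothetical sizes: 100 frames of mel (80) + F0 (1), 80 samples per frame
ups_rate = 80
data_in = np.zeros([100, 81])
data_out = np.zeros([100 * ups_rate, 1])

seg_length = ups_rate * 32                       # crop 32 frames of waveform
rs = np.random.randint(0, data_out.shape[0] - seg_length)
rs = rs // ups_rate * ups_rate                   # snap start to a frame boundary

data_in_new = data_in[rs // ups_rate : (rs + seg_length) // ups_rate]
data_out_new = data_out[rs : rs + seg_length]
assert data_in_new.shape[0] * ups_rate == data_out_new.shape[0]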
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-9/main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The training/inference process wrapper. Dataset API is replaced with NII_MergeDataSetLoader. It is more convenient to train model on corpora stored in different directories. Requires model.py and config.py (config_merge_datasets.py) Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.default_data_io as nii_default_dset import core_scripts.data_io.customize_dataset as nii_dset import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed, args) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers, 'sampler': args.sampler} in_trans_fns = prj_conf.input_trans_fns \ if hasattr(prj_conf, 'input_trans_fns') else None out_trans_fns = prj_conf.output_trans_fns \ if hasattr(prj_conf, 'output_trans_fns') else None # Load file list and create data loader trn_lst = prj_conf.trn_list trn_set = nii_dset.NII_MergeDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) if prj_conf.val_list is not None: val_lst = prj_conf.val_list val_set = nii_dset.NII_MergeDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) else: val_set = None # initialize the model and loss function model 
= prj_model.Model(trn_set.get_in_dim(), \ trn_set.get_out_dim(), \ args, prj_conf, trn_set.get_data_mean_std()) loss_wrapper = prj_model.Loss(args) # initialize the optimizer optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args) # if necessary, resume training if args.trained_model == "": checkpoint = None else: checkpoint = torch.load(args.trained_model) # start training nii_nn_wrapper.f_train_wrapper(args, model, loss_wrapper, device, optimizer_wrapper, trn_set, val_set, checkpoint) # done for training else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': args.num_workers} in_trans_fns = prj_conf.test_input_trans_fns \ if hasattr(prj_conf, 'test_input_trans_fns') else None out_trans_fns = prj_conf.test_output_trans_fns \ if hasattr(prj_conf, 'test_output_trans_fns') else None if type(prj_conf.test_list) is list: t_lst = prj_conf.test_list else: t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list) test_set = nii_dset.NII_MergeDataSetLoader( prj_conf.test_set_name, \ t_lst, \ prj_conf.test_input_dirs, prj_conf.input_exts, prj_conf.input_dims, prj_conf.input_reso, prj_conf.input_norm, prj_conf.test_output_dirs, prj_conf.output_exts, prj_conf.output_dims, prj_conf.output_reso, prj_conf.output_norm, './', params = params, truncate_seq = None, min_seq_len = None, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, way_to_merge = args.way_to_merge_datasets, global_arg = args, dset_config = prj_conf, input_augment_funcs = in_trans_fns, output_augment_funcs = out_trans_fns) # initialize model model = prj_model.Model(test_set.get_in_dim(), \ test_set.get_out_dim(), \ args, prj_conf) if args.trained_model == "": print("No model is loaded by --trained-model for inference") print("By default, load %s%s" % (args.save_trained_name, args.save_model_ext)) checkpoint = torch.load("%s%s" % (args.save_trained_name, args.save_model_ext)) else: checkpoint = torch.load(args.trained_model) # do inference and output data nii_nn_wrapper.f_inference_wrapper(args, model, device, \ test_set, checkpoint) # done return if __name__ == "__main__": main()
7,819
35.886792
80
py
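main.py above binds a project to the generic wrapper through importlib: --module-config and --module-model name the Python modules to load. A minimal sketch of that pattern, assuming config.py and model.py are importable from the working directory:

import importlib

# equivalent of `python main.py --module-config config --module-model model`
prj_conf = importlib.import_module('config')    # file lists, dims, resolutions
prj_model = importlib.import_module('model')    # must define Model and Loss

model_cls = prj_model.Model
loss_cls = prj_model.Loss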
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-9/model.py
#!/usr/bin/env python """ model.py for harmonic-plus-noise NSF with trainable sinc filter version: 9 """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import core_scripts.other_tools.debug as nii_debug __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## # Building blocks (torch.nn modules + dimension operation) # # For blstm class BLSTMLayer(torch_nn.Module): """ Wrapper over BLSTM Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) Recurrency is conducted along "length" """ def __init__(self, input_dim, output_dim): super(BLSTMLayer, self).__init__() if output_dim % 2 != 0: print("Output_dim of BLSTMLayer is {:d}".format(output_dim)) print("BLSTMLayer expects a layer size of even number") sys.exit(1) # bi-directional LSTM self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \ bidirectional=True) def forward(self, x): # permute to (length, batchsize=1, dim) blstm_data, _ = self.l_blstm(x.permute(1, 0, 2)) # permute it backt to (batchsize=1, length, dim) return blstm_data.permute(1, 0, 2) # # 1D dilated convolution that keep the input/output length class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal = False, stride = 1, groups=1, bias=True, \ tanh = True, pad_mode='constant'): super(Conv1dKeepLength, self).__init__( input_dim, output_dim, kernel_s, stride=stride, padding = 0, dilation = dilation_s, groups=groups, bias=bias) self.pad_mode = pad_mode self.causal = causal # input & output length will be the same if self.causal: # left pad to make the convolution causal self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: # pad on both sizes self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): # permute to (batchsize=1, dim, length) # add one dimension (batchsize=1, dim, ADDED_DIM, length) # pad to ADDED_DIM # squeeze and return to (batchsize=1, dim, length) # https://github.com/pytorch/pytorch/issues/1333 x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \ (self.pad_le, self.pad_ri, 0, 0), mode = self.pad_mode).squeeze(2) # tanh(conv1()) # permmute back to (batchsize=1, length, dim) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) # # Moving average class MovingAverage(Conv1dKeepLength): """ Wrapper to define a moving average smoothing layer Note: MovingAverage can be implemented using TimeInvFIRFilter too. 
Here we define another Module dicrectly on Conv1DKeepLength """ def __init__(self, feature_dim, window_len, causal=False, \ pad_mode='replicate'): super(MovingAverage, self).__init__( feature_dim, feature_dim, 1, window_len, causal, groups=feature_dim, bias=False, tanh=False, \ pad_mode=pad_mode) # set the weighting coefficients torch_nn.init.constant_(self.weight, 1/window_len) # turn off grad for this layer for p in self.parameters(): p.requires_grad = False def forward(self, data): return super(MovingAverage, self).forward(data) # # FIR filter layer class TimeInvFIRFilter(Conv1dKeepLength): """ Wrapper to define a FIR filter over Conv1d Note: FIR Filtering is conducted on each dimension (channel) independently: groups=channel_num in conv1d """ def __init__(self, feature_dim, filter_coef, causal=True, flag_train=False): """ __init__(self, feature_dim, filter_coef, causal=True, flag_train=False) feature_dim: dimension of input data filter_coef: 1-D tensor of filter coefficients causal: FIR is causal or not (default: true) flag_train: whether train the filter coefficients (default false) Input data: (batchsize=1, length, feature_dim) Output data: (batchsize=1, length, feature_dim) """ super(TimeInvFIRFilter, self).__init__( feature_dim, feature_dim, 1, filter_coef.shape[0], causal, groups=feature_dim, bias=False, tanh=False) if filter_coef.ndim == 1: # initialize weight using provided filter_coef with torch.no_grad(): tmp_coef = torch.zeros([feature_dim, 1, filter_coef.shape[0]]) tmp_coef[:, 0, :] = filter_coef tmp_coef = torch.flip(tmp_coef, dims=[2]) self.weight = torch.nn.Parameter(tmp_coef, requires_grad=flag_train) else: print("TimeInvFIRFilter expects filter_coef to be 1-D tensor") print("Please implement the code in __init__ if necessary") sys.exit(1) def forward(self, data): return super(TimeInvFIRFilter, self).forward(data) class TimeVarFIRFilter(torch_nn.Module): """ TimeVarFIRFilter Given sequences of filter coefficients and a signal, do filtering Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) For batch 0: For n in [1, sequence_length): output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k) Note: filter coef (0, n, :) is only used to compute the output at (0, n, 1) """ def __init__(self): super(TimeVarFIRFilter, self).__init__() def forward(self, signal, f_coef): """ Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) Output: (batchsize=1, signal_length, 1) For n in [1, sequence_length): output(0, n, 1)= \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k) This method may be not efficient: Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K] output [y_1, y_2, y_3, ..., y_N, *, * ... *] = a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] """ signal_l = signal.shape[1] order_k = f_coef.shape[-1] # pad to (batchsize=1, signal_length + filter_order-1, dim) padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1)) y = torch.zeros_like(signal) # roll and weighted sum, only take [0:signal_length] for k in range(order_k): y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \ * f_coef[:, :, k:k+1] # done return y # Sinc filter generator class SincFilter(torch_nn.Module): """ SincFilter Given the cut-off-frequency, produce the low-pass and high-pass windowed-sinc-filters. 
If input cut-off-frequency is (batchsize=1, signal_length, 1), output filter coef is (batchsize=1, signal_length, filter_order). For each time step in [1, signal_length), we calculate one filter for low-pass sinc filter and another for high-pass filter. Example: import scipy import scipy.signal import numpy as np filter_order = 31 cut_f = 0.2 sinc_layer = SincFilter(filter_order) lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f) w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1]) w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1]) plt.plot(w, 20*np.log10(np.abs(h1))) plt.plot(w, 20*np.log10(np.abs(h2))) plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0]) """ def __init__(self, filter_order): super(SincFilter, self).__init__() # Make the filter oder an odd number # [-(M-1)/2, ... 0, (M-1)/2] # self.half_k = (filter_order - 1) // 2 self.order = self.half_k * 2 +1 def hamming_w(self, n_index): """ prepare hamming window for each time step n_index (batchsize=1, signal_length, filter_order) For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2] ... output (batchsize=1, signal_length, filter_order) output[0, 0, :] = hamming_window output[0, 1, :] = hamming_window ... """ # Hamming window return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order) def sinc(self, x): """ Normalized sinc-filter sin( pi * x) / pi * x https://en.wikipedia.org/wiki/Sinc_function Assume x (batchsize, signal_length, filter_order) and x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order] x[:, :, self.half_order] -> time index = 0, sinc(0)=1 """ y = torch.zeros_like(x) y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \ / (np.pi * x[:, :, 0:self.half_k]) y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:]) \ / (np.pi * x[:, :, self.half_k+1:]) y[:,:,self.half_k] = 1 return y def forward(self, cut_f): """ lp_coef, hp_coef = forward(self, cut_f) cut-off frequency cut_f (batchsize=1, length, dim = 1) lp_coef: low-pass filter coefs (batchsize, length, filter_order) hp_coef: high-pass filter coefs (batchsize, length, filter_order) """ # create the filter order index with torch.no_grad(): # [- (M-1) / 2, ..., 0, ..., (M-1)/2] lp_coef = torch.arange(-self.half_k, self.half_k + 1, device=cut_f.device) # [[[- (M-1) / 2, ..., 0, ..., (M-1)/2], # [- (M-1) / 2, ..., 0, ..., (M-1)/2], # ... # ], # [[- (M-1) / 2, ..., 0, ..., (M-1)/2], # [- (M-1) / 2, ..., 0, ..., (M-1)/2], # ... 
# ]] lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) hp_coef = torch.arange(-self.half_k, self.half_k + 1, device=cut_f.device) hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) # temporary buffer of [-1^n] for gain norm in hp_coef tmp_one = torch.pow(-1, hp_coef) # unnormalized filter coefs with hamming window lp_coef = cut_f * self.sinc(cut_f * lp_coef) \ * self.hamming_w(lp_coef) hp_coef = (self.sinc(hp_coef) \ - cut_f * self.sinc(cut_f * hp_coef)) \ * self.hamming_w(hp_coef) # normalize the coef to make gain at 0/pi is 0 dB # sum_n lp_coef[n] lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1) # sum_n hp_coef[n] * -1^n hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1) lp_coef = lp_coef / lp_coef_norm hp_coef = hp_coef / hp_coef_norm # return normed coef return lp_coef, hp_coef # # Up sampling class UpSampleLayer(torch_nn.Module): """ Wrapper over up-sampling Input tensor: (batchsize=1, length, dim) Ouput tensor: (batchsize=1, length * up-sampling_factor, dim) """ def __init__(self, feature_dim, up_sampling_factor, smoothing=False): super(UpSampleLayer, self).__init__() # wrap a up_sampling layer self.scale_factor = up_sampling_factor self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor) if smoothing: self.l_ave1 = MovingAverage(feature_dim, self.scale_factor) self.l_ave2 = MovingAverage(feature_dim, self.scale_factor) else: self.l_ave1 = torch_nn.Identity() self.l_ave2 = torch_nn.Identity() return def forward(self, x): # permute to (batchsize=1, dim, length) up_sampled_data = self.l_upsamp(x.permute(0, 2, 1)) # permute it backt to (batchsize=1, length, dim) # and do two moving average return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1))) # Neural filter block (1 block) class NeuralFilterBlock(torch_nn.Module): """ Wrapper over a single filter block """ def __init__(self, signal_size, hidden_size,\ kernel_size=3, conv_num=10): super(NeuralFilterBlock, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.conv_num = conv_num self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)] # ff layer to expand dimension self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \ bias=False) self.l_ff_1_tanh = torch_nn.Tanh() # dilated conv layers tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \ kernel_size, causal=True, bias=False) \ for x in self.dilation_s] self.l_convs = torch_nn.ModuleList(tmp) # ff layer to de-expand dimension self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4, \ bias=False) self.l_ff_2_tanh = torch_nn.Tanh() self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size, \ bias=False) self.l_ff_3_tanh = torch_nn.Tanh() # a simple scale self.scale = torch_nn.Parameter(torch.tensor([0.1]), requires_grad=False) return def forward(self, signal, context): """ Assume: signal (batchsize=1, length, signal_size) context (batchsize=1, length, hidden_size) Output: (batchsize=1, length, signal_size) """ # expand dimension tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal)) # loop over dilated convs # output of a d-conv is input + context + d-conv(input) for l_conv in self.l_convs: tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context # to be consistent with legacy configuration in CURRENNT tmp_hidden = tmp_hidden * self.scale # compress the dimesion and skip-add tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden)) tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden)) output_signal = tmp_hidden + signal return output_signal # # Sine waveform 
generator class SineGen(torch_nn.Module): """ Definition of sine generator SineGen(samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False) samp_rate: sampling rate in Hz harmonic_num: number of harmonic overtones (default 0) sine_amp: amplitude of sine-waveform (default 0.1) noise_std: std of Gaussian noise (default 0.003) voiced_threshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SineGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced segment is always sin(np.pi) or cos(0) """ def __init__(self, samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False): super(SineGen, self).__init__() self.sine_amp = sine_amp self.noise_std = noise_std self.harmonic_num = harmonic_num self.dim = self.harmonic_num + 1 self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.flag_for_pulse = flag_for_pulse def _f02uv(self, f0): # generate uv signal uv = torch.ones_like(f0) uv = uv * (f0 > self.voiced_threshold) return uv def _f02sine(self, f0_values): """ f0_values: (batchsize, length, dim) where dim indicates fundamental tone and overtones """ # convert to F0 in rad. The integer part n can be ignored # because 2 * np.pi * n doesn't affect phase rad_values = (f0_values / self.sampling_rate) % 1 # initial phase noise (no noise for fundamental component) rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\ device = f0_values.device) rand_ini[:, 0] = 0 rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) if not self.flag_for_pulse: # for normal case # To prevent torch.cumsum numerical overflow, # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. # Buffer tmp_over_one_idx indicates the time step to add -1. # This will not change F0 of sine because (x-1) * 2*pi = x *2*pi tmp_over_one = torch.cumsum(rad_values, 1) % 1 tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi) else: # If necessary, make sure that the first time step of every # voiced segment is sin(pi) or cos(0) # This is used for pulse-train generation # identify the last time step in unvoiced segments uv = self._f02uv(f0_values) uv_1 = torch.roll(uv, shifts=-1, dims=1) uv_1[:, -1, :] = 1 u_loc = (uv < 1) * (uv_1 > 0) # get the instantaneous phase tmp_cumsum = torch.cumsum(rad_values, dim=1) # different batch needs to be processed differently for idx in range(f0_values.shape[0]): temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] # stores the accumulation of i.phase within # each voiced segment tmp_cumsum[idx, :, :] = 0 tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum # rad_values - tmp_cumsum: remove the accumulation of i.phase # within the previous voiced segment. 
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) # get the sines sines = torch.cos(i_phase * 2 * np.pi) return sines def forward(self, f0): """ sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 output sine_tensor: tensor(batchsize=1, length, dim) output uv: tensor(batchsize=1, length, 1) """ with torch.no_grad(): f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \ device=f0.device) # fundamental component f0_buf[:, :, 0] = f0[:, :, 0] for idx in np.arange(self.harmonic_num): # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2) # generate sine waveforms sine_waves = self._f02sine(f0_buf) * self.sine_amp # generate uv signal #uv = torch.ones(f0.shape) #uv = uv * (f0 > self.voiced_threshold) uv = self._f02uv(f0) # noise: for unvoiced should be similar to sine_amp # std = self.sine_amp/3 -> max value ~ self.sine_amp #. for voiced regions is self.noise_std noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) # first: set the unvoiced part to 0 by uv # then: additive noise sine_waves = sine_waves * uv + noise return sine_waves, uv, noise ##### ## Model definition ## ## For condition module only provide Spectral feature to Filter block class CondModuleHnSincNSF(torch_nn.Module): """ Condition module for hn-sinc-NSF Upsample and transform input features CondModuleHnSincNSF(input_dimension, output_dimension, up_sample_rate, blstm_dimension = 64, cnn_kernel_size = 3) Spec, F0, cut_off_freq = CondModuleHnSincNSF(features, F0) Both input features should be frame-level features If x doesn't contain F0, just ignore the returned F0 CondModuleHnSincNSF(input_dim, output_dim, up_sample, blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0): input_dim: sum of dimensions of input features output_dim: dim of the feature Spec to be used by neural filter-block up_sample: up sampling rate of input features blstm_s: dimension of the features from blstm (default 64) cnn_kernel_s: kernel size of CNN in condition module (default 3) voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced """ def __init__(self, input_dim, output_dim, up_sample, \ blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0): super(CondModuleHnSincNSF, self).__init__() # input feature dimension self.input_dim = input_dim self.output_dim = output_dim self.up_sample = up_sample self.blstm_s = blstm_s self.cnn_kernel_s = cnn_kernel_s self.cut_f_smooth = up_sample * 4 self.voiced_threshold = voiced_threshold # the blstm layer self.l_blstm = BLSTMLayer(input_dim, self.blstm_s) # the CNN layer (+1 dim for cut_off_frequence of sinc filter) self.l_conv1d = Conv1dKeepLength(self.blstm_s, \ self.output_dim, \ dilation_s = 1, \ kernel_s = self.cnn_kernel_s) # Upsampling layer for hidden features self.l_upsamp = UpSampleLayer(self.output_dim, \ self.up_sample, True) # separate layer for up-sampling normalized F0 values self.l_upsamp_f0_hi = UpSampleLayer(1, self.up_sample, True) # Upsampling for F0: don't smooth up-sampled F0 self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False) # Another smoothing layer to smooth the cut-off frequency # for sinc filters. 
Use a larger window to smooth self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth) def get_cut_f(self, hidden_feat, f0): """ cut_f = get_cut_f(self, feature, f0) feature: (batchsize, length, dim=1) f0: (batchsize, length, dim=1) """ # generate uv signal uv = torch.ones_like(f0) * (f0 > self.voiced_threshold) # hidden_feat is between (-1, 1) after conv1d with tanh # (-0.2, 0.2) + 0.3 = (0.1, 0.5) # voiced: (0.1, 0.5) + 0.4 = (0.5, 0.9) # unvoiced: (0.1, 0.5) = (0.1, 0.5) return hidden_feat * 0.2 + uv * 0.4 + 0.3 def forward(self, feature, f0): """ spec, f0 = forward(self, feature, f0) feature: (batchsize, length, dim) f0: (batchsize, length, dim=1), which should be F0 at frame-level spec: (batchsize, length, self.output_dim), at wave-level f0: (batchsize, length, 1), at wave-level """ tmp = self.l_upsamp(self.l_conv1d(self.l_blstm(feature))) # concatenat normed F0 with hidden spectral features context = torch.cat((tmp[:, :, 0:self.output_dim-1], \ self.l_upsamp_f0_hi(feature[:, :, -1:])), \ dim=2) # hidden feature for cut-off frequency hidden_cut_f = tmp[:, :, self.output_dim-1:] # directly up-sample F0 without smoothing f0_upsamp = self.l_upsamp_F0(f0) # get the cut-off-frequency from output of CNN cut_f = self.get_cut_f(hidden_cut_f, f0_upsamp) # smooth the cut-off-frequency using fixed average smoothing cut_f_smoothed = self.l_cut_f_smooth(cut_f) # return return context, f0_upsamp, cut_f_smoothed, hidden_cut_f # For source module class SourceModuleHnNSF(torch_nn.Module): """ SourceModule for hn-nsf SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0) sampling_rate: sampling_rate in Hz harmonic_num: number of harmonic above F0 (default: 0) sine_amp: amplitude of sine source signal (default: 0.1) add_noise_std: std of additive Gaussian noise (default: 0.003) note that amplitude of noise in unvoiced is decided by sine_amp voiced_threshold: threhold to set U/V given F0 (default: 0) Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length 1) uv (batchsize, length, 1) """ def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0): super(SourceModuleHnNSF, self).__init__() self.sine_amp = sine_amp self.noise_std = add_noise_std # to produce sine waveforms self.l_sin_gen = SineGen(sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod) # to merge source harmonics into a single excitation self.l_linear = torch_nn.Linear(harmonic_num+1, 1) self.l_tanh = torch_nn.Tanh() def forward(self, x): """ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length 1) """ # source for harmonic branch sine_wavs, uv, _ = self.l_sin_gen(x) sine_merge = self.l_tanh(self.l_linear(sine_wavs)) # source for noise branch, in the same shape as uv noise = torch.randn_like(uv) * self.sine_amp / 3 return sine_merge, noise, uv # For Filter module class FilterModuleHnSincNSF(torch_nn.Module): """ Filter for Hn-sinc-NSF FilterModuleHnSincNSF(signal_size, hidden_size, sinc_order = 31, block_num = 5, kernel_size = 3, conv_num_in_block = 10) signal_size: signal dimension (should be 1) hidden_size: dimension of hidden features inside neural filter block sinc_order: order of the sinc filter block_num: number of neural filter blocks in harmonic branch kernel_size: kernel size in dilated CNN conv_num_in_block: number of 
d-conv1d in one neural filter block Usage: output = FilterModuleHnSincNSF(har_source, noi_source, cut_f, context) har_source: source for harmonic branch (batchsize, length, dim=1) noi_source: source for noise branch (batchsize, length, dim=1) cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1) context: hidden features to be added (batchsize, length, dim) output: (batchsize, length, dim=1) """ def __init__(self, signal_size, hidden_size, sinc_order = 31, \ block_num = 5, kernel_size = 3, conv_num_in_block = 10): super(FilterModuleHnSincNSF, self).__init__() self.signal_size = signal_size self.hidden_size = hidden_size self.kernel_size = kernel_size self.block_num = block_num self.conv_num_in_block = conv_num_in_block self.sinc_order = sinc_order # filter blocks for harmonic branch tmp = [NeuralFilterBlock(signal_size, hidden_size, \ kernel_size, conv_num_in_block) \ for x in range(self.block_num)] self.l_har_blocks = torch_nn.ModuleList(tmp) # filter blocks for noise branch (only one block, 5 sub-blocks) tmp = [NeuralFilterBlock(signal_size, hidden_size, \ kernel_size, conv_num_in_block // 2) \ for x in range(1)] self.l_noi_blocks = torch_nn.ModuleList(tmp) # sinc filter generators and time-variant filtering layer self.l_sinc_coef = SincFilter(self.sinc_order) self.l_tv_filtering = TimeVarFIRFilter() # done def forward(self, har_component, noi_component, cond_feat, cut_f): """ """ # harmonic component for l_har_block in self.l_har_blocks: har_component = l_har_block(har_component, cond_feat) # noise componebt for l_noi_block in self.l_noi_blocks: noi_component = l_noi_block(noi_component, cond_feat) # get sinc filter coefficients lp_coef, hp_coef = self.l_sinc_coef(cut_f) # time-variant filtering har_signal = self.l_tv_filtering(har_component, lp_coef) noi_signal = self.l_tv_filtering(noi_component, hp_coef) # get output return har_signal + noi_signal ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None): super(Model, self).__init__() # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) self.input_dim = in_dim self.output_dim = out_dim # configurations # amplitude of sine waveform (for each harmonic) self.sine_amp = 0.1 # standard deviation of Gaussian noise for additive noise self.noise_std = 0.003 # dimension of hidden features in filter blocks self.hidden_dim = 64 # upsampling rate on input acoustic features (16kHz * 5ms = 80) # assume input_reso has the same value self.upsamp_rate = prj_conf.input_reso[0] # sampling rate (Hz) self.sampling_rate = prj_conf.wav_samp_rate # CNN kernel size in filter blocks self.cnn_kernel_s = 3 # number of filter blocks (for harmonic branch) # noise branch only uses 1 block self.filter_block_num = 5 # number of dilated CNN in each filter block self.cnn_num_in_block = 10 # number of harmonic overtones in source self.harmonic_num = 7 # order of sinc-windowed-FIR-filter self.sinc_order = 31 # the three modules self.m_cond = CondModuleHnSincNSF(self.input_dim, \ self.hidden_dim, \ self.upsamp_rate, \ cnn_kernel_s=self.cnn_kernel_s) self.m_source = SourceModuleHnNSF(self.sampling_rate, self.harmonic_num, self.sine_amp, self.noise_std) self.m_filter = 
FilterModuleHnSincNSF(self.output_dim, \ self.hidden_dim, \ self.sinc_order, \ self.filter_block_num, \ self.cnn_kernel_s, \ self.cnn_num_in_block) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def forward(self, x): """ definition of forward method Assume x (batchsize=1, length, dim) Return output(batchsize=1, length) """ # assume x[:, :, -1] is F0, denormalize F0 f0 = x[:, :, -1:] # normalize the input features data feat = self.normalize_input(x) # condition module # feature-to-filter-block, f0-up-sampled, cut-off-f-for-sinc, # hidden-feature-for-cut-off-f cond_feat, f0_upsamped, cut_f, hid_cut_f = self.m_cond(feat, f0) # source module # harmonic-source, noise-source (for noise branch), uv har_source, noi_source, uv = self.m_source(f0_upsamped) # neural filter module (including sinc-based FIR filtering) # output output = self.m_filter(har_source, noi_source, cond_feat, cut_f) if self.training: # just in case we need to penalize the hidden feauture for # cut-off-freq. 
return [output.squeeze(-1), hid_cut_f] else: return output.squeeze(-1) class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ # frame shift (number of points) self.frame_hops = [80, 40, 640] # frame length self.frame_lens = [320, 80, 1920] # fft length self.fft_n = [512, 128, 2048] # window type in stft self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # loss function self.loss = torch_nn.MSELoss() # weight to penalize hidden features for cut-off-frequency # for experiments on CMU-arctic, ATR-F009, VCTK, cutoff_w = 0.0 self.cutoff_w = 0.0 return def _stft(self, signal, fft_p, frame_shift, frame_len): """ wrapper of torch.stft Remember to use onesided=True, pad_mode="constant" Signal (batchsize, length) Output (batchsize, fft_p/2+1, frame_num, 2) """ # to be compatible with different torch versions if torch.__version__.split('.')[1].isnumeric() and \ int(torch.__version__.split('.')[1]) < 7: return torch.stft( signal, fft_p, frame_shift, frame_len, window=self.win(frame_len, dtype=signal.dtype, device=signal.device), onesided=True, pad_mode="constant") else: return torch.stft( signal, fft_p, frame_shift, frame_len, window=self.win(frame_len, dtype=signal.dtype, device=signal.device), onesided=True, pad_mode="constant", return_complex=False) def _amp(self, x): """ _amp(stft) x_stft: (batchsize, fft_p/2+1, frame_num, 2) output: (batchsize, fft_p/2+1, frame_num) output[x, y, z] = log(x_stft[x, y, z, 1]^2 + x_stft[x, y, z, 2]^2 + floor) """ return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor) def compute(self, outputs, target): """ Loss().compute(outputs, target) should return the Loss in torch.tensor format Assume output and target as (batchsize=1, length) """ # hidden-feature for cut-off-frequency cut_f = outputs[1] # generated signal output = outputs[0] # convert from (batchsize=1, length, dim=1) to (1, length) if target.ndim == 3: target.squeeze_(-1) # compute loss loss = 0 for frame_shift, frame_len, fft_p in \ zip(self.frame_hops, self.frame_lens, self.fft_n): x_stft = self._stft(output, fft_p, frame_shift, frame_len) y_stft = self._stft(target, fft_p, frame_shift, frame_len) x_sp_amp = self._amp(x_stft) y_sp_amp = self._amp(y_stft) loss += self.loss(x_sp_amp, y_sp_amp) # A norm on cut_f, which forces sinc-cut-off-frequency # to be close to the U/V-decided value # Experiments on CMU-arctic, ATR-F009, and VCTK don't use it # by setting self.cutoff_w = 0.0 # However, just in case loss += self.cutoff_w * self.loss(cut_f, torch.zeros_like(cut_f)) return loss if __name__ == "__main__": print("Definition of model")
39,990
38.713009
77
py
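The SincFilter module in model.py above derives low- and high-pass FIR coefficients analytically from a cut-off frequency. A minimal numpy sketch of the same construction for a single time step (filter order and cut-off chosen arbitrarily; this mirrors the math in forward(), it is not a drop-in replacement):

import numpy as np

order = 31                            # odd sinc filter order, as in SincFilter
half_k = (order - 1) // 2
n = np.arange(-half_k, half_k + 1)    # [-(M-1)/2, ..., 0, ..., (M-1)/2]
cut_f = 0.2                           # normalized cut-off, 1.0 = Nyquist

win = 0.54 + 0.46 * np.cos(2 * np.pi * n / order)   # Hamming, as hamming_w()

# low-pass: windowed sinc, normalized to 0 dB gain at 0 Hz
lp = cut_f * np.sinc(cut_f * n) * win    # np.sinc(x) = sin(pi x) / (pi x)
lp = lp / lp.sum()

# high-pass: unit impulse minus low-pass, normalized to 0 dB gain at Nyquist
hp = (np.sinc(n) - cut_f * np.sinc(cut_f * n)) * win
hp = hp / (hp * (-1.0) ** n).sum()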
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-9/config.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'cmu_all_trn' val_set_name = 'cmu_all_val' # for convenience tmp = '../DATA/cmu-arctic-data-set' # File lists (text file, one data name per line, without name extension) # trn_file_list: list of files for training set trn_list = [tmp + '/scp/train.lst'] # val_file_list: list of files for validation set. It can be None val_list = [tmp + '/scp/val.lst'] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [80, 1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # Please put ".f0" as the last feature input_exts = ['.mfbsp', '.f0'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [80, 80] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [True, True] # Similar configurations for output features output_dirs = [[tmp + '/wav_16k_norm']] output_dims = [1] output_exts = ['.wav'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU mem required # If you don't want truncation, set truncate_seq = None truncate_seq = 16000 * 3 # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = 80 * 50 ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = ['cmu_all_test_tiny'] # List of test set data # for convenience, you may directly load test_set list here test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476', 'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476', 'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476', 'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']] # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume test data are put in the same sub-directory test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']] # Directories for output features; empty here test_output_dirs = [[]]
3,430
32.31068
75
py
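input_reso = [80, 80] in this config states that one frame of acoustic features covers 80 waveform samples (16000 Hz * 5 ms). A short sanity check of the arithmetic and of the frame-to-waveform alignment it implies (shapes hypothetical):

import numpy as np

wav_samp_rate = 16000
frame_shift = 0.005                               # 5 ms
assert int(wav_samp_rate * frame_shift) == 80     # the input_reso entry

# 100 frames of mel (80) + F0 (1) cover 100 * 80 = 8000 waveform samples
frames = np.zeros([100, 81])
upsampled = np.repeat(frames, 80, axis=0)
assert upsampled.shape == (8000, 81)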
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/01_main.py
#!/usr/bin/env python """ main.py for project-NN-pytorch/projects The default training/inference process wrapper Requires model.py and config.py Usage: $: python main.py [options] """ from __future__ import absolute_import import os import sys import torch import importlib import core_scripts.other_tools.display as nii_warn import core_scripts.data_io.default_data_io as nii_dset import core_scripts.data_io.conf as nii_dconf import core_scripts.other_tools.list_tools as nii_list_tool import core_scripts.config_parse.config_parse as nii_config_parse import core_scripts.config_parse.arg_parse as nii_arg_parse import core_scripts.op_manager.op_manager as nii_op_wrapper import core_scripts.nn_manager.nn_manager as nii_nn_wrapper import core_scripts.startup_config as nii_startup __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" def main(): """ main(): the default wrapper for training and inference process Please prepare config.py and model.py """ # arguments initialization args = nii_arg_parse.f_args_parsed() # nii_warn.f_print_w_date("Start program", level='h') nii_warn.f_print("Load module: %s" % (args.module_config)) nii_warn.f_print("Load module: %s" % (args.module_model)) prj_conf = importlib.import_module(args.module_config) prj_model = importlib.import_module(args.module_model) # initialization nii_startup.set_random_seed(args.seed, args) use_cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # prepare data io if not args.inference: params = {'batch_size': args.batch_size, 'shuffle': args.shuffle, 'num_workers': args.num_workers, 'sampler': args.sampler} # Load file list and create data loader trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list) trn_set = nii_dset.NIIDataSetLoader( prj_conf.trn_set_name, \ trn_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', params = params, truncate_seq = prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = True, wav_samp_rate = prj_conf.wav_samp_rate, global_arg = args) if prj_conf.val_list is not None: val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list) val_set = nii_dset.NIIDataSetLoader( prj_conf.val_set_name, val_lst, prj_conf.input_dirs, \ prj_conf.input_exts, \ prj_conf.input_dims, \ prj_conf.input_reso, \ prj_conf.input_norm, \ prj_conf.output_dirs, \ prj_conf.output_exts, \ prj_conf.output_dims, \ prj_conf.output_reso, \ prj_conf.output_norm, \ './', \ params = params, truncate_seq= prj_conf.truncate_seq, min_seq_len = prj_conf.minimum_len, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, global_arg = args) else: val_set = None # initialize the model and loss function model = prj_model.Model(trn_set.get_in_dim(), \ trn_set.get_out_dim(), \ args, trn_set.get_data_mean_std()) loss_wrapper = prj_model.Loss(args) # initialize the optimizer optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args) # if necessary, resume training if args.trained_model == "": checkpoint = None else: checkpoint = torch.load(args.trained_model) # start training nii_nn_wrapper.f_train_wrapper(args, model, loss_wrapper, device, optimizer_wrapper, trn_set, val_set, checkpoint) # done for traing else: # for inference # default, no truncating, no shuffling params = {'batch_size': args.batch_size, 'shuffle': False, 
'num_workers': args.num_workers} if type(prj_conf.test_list) is list: t_lst = prj_conf.test_list else: t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list) test_set = nii_dset.NIIDataSetLoader( prj_conf.test_set_name, \ t_lst, \ prj_conf.test_input_dirs, prj_conf.input_exts, prj_conf.input_dims, prj_conf.input_reso, prj_conf.input_norm, prj_conf.test_output_dirs, prj_conf.output_exts, prj_conf.output_dims, prj_conf.output_reso, prj_conf.output_norm, './', params = params, truncate_seq = None, min_seq_len = None, save_mean_std = False, wav_samp_rate = prj_conf.wav_samp_rate, global_arg = args) # initialize model model = prj_model.Model(test_set.get_in_dim(), \ test_set.get_out_dim(), \ args) if args.trained_model == "": print("No model is loaded by --trained-model for inference") print("By default, load %s%s" % (args.save_trained_name, args.save_model_ext)) checkpoint = torch.load("%s%s" % (args.save_trained_name, args.save_model_ext)) else: checkpoint = torch.load(args.trained_model) # do inference and output data nii_nn_wrapper.f_inference_wrapper(args, model, device, \ test_set, checkpoint) # done return if __name__ == "__main__": main()
6,513
34.595628
74
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/03_fuse_score_evaluate.py
#!/usr/bin/python """ Wrapper to fuse score and compute EER and min tDCF Simple score averaging. Usage: python 03_fuse_score_evaluate.py log_output_testset_1 log_output_testset_2 ... The log_output_testset is produced by the pytorch code, for example, ./lfcc-lcnn-lstmsum-am/01/__pretrained/log_output_testset It has information like: ... Generating 71230,LA_E_9999427,0,43237,0, time: 0.005s Output, LA_E_9999487, 0, 0.172325 ... (See README for the format of this log) This script will extract the line starts with "Output, ..." """ import os import sys import numpy as np from sandbox import eval_asvspoof def parse_txt(file_path): bonafide = [] bonafide_file_name = [] spoofed = [] spoofed_file_name = [] with open(file_path, 'r') as file_ptr: for line in file_ptr: if line.startswith('Output,'): #Output, LA_E_9999487, 0, 0.172325 temp = line.split(',') flag = int(temp[2]) name = temp[1] if flag: bonafide_file_name.append(name) bonafide.append(float(temp[-1])) else: spoofed.append(float(temp[-1])) spoofed_file_name.append(name) bonafide = np.array(bonafide) spoofed = np.array(spoofed) return bonafide, spoofed, bonafide_file_name, spoofed_file_name def fuse_score(file_path_lists): bonafide_score = {} spoofed_score = {} for data_path in file_path_lists: bonafide, spoofed, bona_name, spoof_name = parse_txt(data_path) for score, name in zip(bonafide, bona_name): if name in bonafide_score: bonafide_score[name].append(score) else: bonafide_score[name] = [score] for score, name in zip(spoofed, spoof_name): if name in spoofed_score: spoofed_score[name].append(score) else: spoofed_score[name] = [score] fused_bonafide = np.array([np.mean(y) for x, y in bonafide_score.items()]) fused_spoofed = np.array([np.mean(y) for x, y in spoofed_score.items()]) return fused_bonafide, fused_spoofed if __name__ == "__main__": data_paths = sys.argv[1:] bonafide, spoofed = fuse_score(data_paths) mintDCF, eer, threshold = eval_asvspoof.tDCF_wrapper(bonafide, spoofed) print("Score file: {:s}".format(str(data_paths))) print("mintDCF: {:1.4f}".format(mintDCF)) print("EER: {:2.3f}%".format(eer * 100)) print("Threshold: {:f}".format(threshold))
2,584
31.3125
78
py
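Fusion in 03_fuse_score_evaluate.py is plain per-trial score averaging across systems. A minimal sketch with made-up scores (the trial names and values are hypothetical):

import numpy as np

system_a = {'LA_E_0001': 0.9, 'LA_E_0002': -1.2}
system_b = {'LA_E_0001': 0.4, 'LA_E_0002': -0.8}

# average the per-trial scores, exactly as fuse_score() does per file name
fused = {name: np.mean([system_a[name], system_b[name]]) for name in system_a}
# fused -> {'LA_E_0001': 0.65, 'LA_E_0002': -1.0}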
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/01_config.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ import os __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets (any string you wish to use) # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'asvspoof2019_trn' val_set_name = 'asvspoof2019_val' # for convenience # we will use resources in this directory tmp = os.path.dirname(__file__) + '/../../DATA/asvspoof2019_LA' # File lists (text file, one data name per line, without name extension) # trn_file_list: list of files for training set trn_list = tmp + '/scp/train.lst' # val_file_list: list of files for validation set. It can be None val_list = tmp + '/scp/val.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [tmp + '/train_dev'] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # If you have waveform in *.flac, please use input_exts = ['.flac'] input_exts = ['.wav'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [1] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [False] # Similar configurations for output features output_dirs = [] output_dims = [1] output_exts = ['.bin'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU mem required # If you don't want truncation, set truncate_seq = None truncate_seq = None # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = None # Optional argument # Just a buffer for convenience # It can contain anything optional_argument = [tmp + '/protocol.txt'] ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = 'asvspoof2019_test' # List of test set data # for convenience, you may directly load test_set list here test_list = tmp + '/scp/test.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume test data are put in the same sub-directory test_input_dirs = [tmp + '/eval'] # Directories for output features; empty here test_output_dirs = []
3,334
30.462264
75
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/01_config_rawnet.py
#!/usr/bin/env python """ config.py for project-NN-pytorch/projects Usage: For training, change Configuration for training stage For inference, change Configuration for inference stage """ import os __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ######################################################### ## Configuration for training stage ######################################################### # Name of datasets (any string you wish to use) # after data preparation, trn/val_set_name are used to save statistics # about the data sets trn_set_name = 'asvspoof2019_trn' val_set_name = 'asvspoof2019_val' # for convenience # we will use resources in this directory tmp = os.path.dirname(__file__) + '/../../DATA/asvspoof2019_LA' # File lists (text file, one data name per line, without name extension) # trn_file_list: list of files for training set trn_list = tmp + '/scp/train.lst' # val_file_list: list of files for validation set. It can be None val_list = tmp + '/scp/val.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume train and validation data are put in the same sub-directory input_dirs = [tmp + '/train_dev'] # Dimensions of input features # input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...] input_dims = [1] # File name extension for input features # input_exts = [name_extension_of_feature_1, ...] # If you have waveform in *.flac, please use input_exts = ['.flac'] input_exts = ['.wav'] # Temporal resolution for input features # input_reso = [reso_feature_1, reso_feature_2, ...] # for waveform modeling, temporal resolution of input acoustic features # may be = waveform_sampling_rate * frame_shift_of_acoustic_features # for example, 80 = 16000 Hz * 5 ms input_reso = [1] # Whether input features should be z-normalized # input_norm = [normalize_feature_1, normalize_feature_2] input_norm = [False] # Similar configurations for output features output_dirs = [] output_dims = [1] output_exts = ['.bin'] output_reso = [1] output_norm = [False] # Waveform sampling rate # wav_samp_rate can be None if no waveform data is used wav_samp_rate = 16000 # Truncating input sequences so that the maximum length = truncate_seq # When truncate_seq is larger, more GPU mem required # If you don't want truncation, set truncate_seq = None truncate_seq = 64600 # Minimum sequence length # If sequence length < minimum_len, this sequence is not used for training # minimum_len can be None minimum_len = 16000 # Optional argument # Just a buffer for convenience # It can contain anything optional_argument = [tmp + '/protocol.txt'] ######################################################### ## Configuration for inference stage ######################################################### # similar options to training stage test_set_name = 'asvspoof2019_test' # List of test set data # for convenience, you may directly load test_set list here test_list = tmp + '/scp/test.lst' # Directories for input features # input_dirs = [path_of_feature_1, path_of_feature_2, ..., ] # we assume test data are put in the same sub-directory test_input_dirs = [tmp + '/eval'] # Directories for output features; empty here test_output_dirs = []
3,336
30.481132
75
py
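Unlike the other asvspoof configs, this RawNet config truncates waveforms to truncate_seq = 64600 samples and drops utterances shorter than minimum_len = 16000. In seconds:

wav_samp_rate = 16000
print(64600 / wav_samp_rate)   # ~4.04 s per training segment
print(16000 / wav_samp_rate)   # 1.0 s minimum utterance length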
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/02_evaluate.py
#!/usr/bin/python """ Wrapper to parse the score file and compute EER and min tDCF Usage: python 02_evaluate.py log_output_testset The log_output_testset is produced by the pytorch code, for example, ./lfcc-lcnn-lstmsum-am/01/__pretrained/log_output_testset It has information like: ... Generating 71230,LA_E_9999427,0,43237,0, time: 0.005s Output, LA_E_9999487, 0, 0.172325 ... (See README for the format of this log) This script will extract the lines that start with "Output, ..." """ import os import sys import numpy as np from sandbox import eval_asvspoof def parse_txt(file_path): bonafide = [] spoofed = [] with open(file_path, 'r') as file_ptr: for line in file_ptr: if line.startswith('Output,'): #Output, LA_E_9999487, 0, 0.172325 temp = line.split(',') flag = int(temp[2]) name = temp[1] if flag: bonafide.append(float(temp[-1])) else: spoofed.append(float(temp[-1])) bonafide = np.array(bonafide) spoofed = np.array(spoofed) return bonafide, spoofed if __name__ == "__main__": data_path = sys.argv[1] bonafide, spoofed = parse_txt(data_path) mintDCF, eer, threshold = eval_asvspoof.tDCF_wrapper(bonafide, spoofed) print("Score file: {:s}".format(data_path)) print("mintDCF: {:1.4f}".format(mintDCF)) print("EER: {:2.3f}%".format(eer * 100)) print("Threshold: {:f}".format(threshold))
1,514
27.055556
75
py
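eval_asvspoof.tDCF_wrapper returns the min t-DCF, the EER, and the threshold at the EER from the two score arrays. For intuition, a minimal self-contained EER sketch (a naive threshold sweep over synthetic scores, not the repository's implementation):

import numpy as np

def simple_eer(bonafide, spoofed):
    # sweep the decision threshold over all observed scores
    gap, eer, thr_at_eer = np.inf, None, None
    for thr in np.sort(np.concatenate([bonafide, spoofed])):
        far = np.mean(spoofed >= thr)    # spoofed trials accepted
        frr = np.mean(bonafide < thr)    # bona fide trials rejected
        if abs(far - frr) < gap:
            gap, eer, thr_at_eer = abs(far - frr), (far + frr) / 2, thr
    return eer, thr_at_eer

rng = np.random.default_rng(0)
eer, thr = simple_eer(rng.normal(1.0, 1.0, 100), rng.normal(-1.0, 1.0, 900))
print(eer, thr)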
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/01_main_rawnet.py
#!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects

The default training/inference process wrapper
Requires model.py and config.py

Usage: $: python main.py [options]
"""
from __future__ import absolute_import

import os
import sys
import torch
import importlib

import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"


def main():
    """ main(): the default wrapper for the training and inference process.
    Please prepare config.py and model.py
    """
    # argument initialization
    args = nii_arg_parse.f_args_parsed()

    #
    nii_warn.f_print_w_date("Start program", level='h')
    nii_warn.f_print("Load module: %s" % (args.module_config))
    nii_warn.f_print("Load module: %s" % (args.module_model))
    prj_conf = importlib.import_module(args.module_config)
    prj_model = importlib.import_module(args.module_model)

    # initialization
    nii_startup.set_random_seed(args.seed, args)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # prepare data io
    if not args.inference:
        # for training

        params = {'batch_size': args.batch_size,
                  'shuffle': args.shuffle,
                  'num_workers': args.num_workers,
                  'sampler': args.sampler}

        # load the file list and create the data loader
        trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
        trn_set = nii_dset.NIIDataSetLoader(
            prj_conf.trn_set_name,
            trn_lst,
            prj_conf.input_dirs,
            prj_conf.input_exts,
            prj_conf.input_dims,
            prj_conf.input_reso,
            prj_conf.input_norm,
            prj_conf.output_dirs,
            prj_conf.output_exts,
            prj_conf.output_dims,
            prj_conf.output_reso,
            prj_conf.output_norm,
            './',
            params=params,
            truncate_seq=prj_conf.truncate_seq,
            min_seq_len=prj_conf.minimum_len,
            save_mean_std=True,
            wav_samp_rate=prj_conf.wav_samp_rate,
            global_arg=args)

        if prj_conf.val_list is not None:
            val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
            val_set = nii_dset.NIIDataSetLoader(
                prj_conf.val_set_name,
                val_lst,
                prj_conf.input_dirs,
                prj_conf.input_exts,
                prj_conf.input_dims,
                prj_conf.input_reso,
                prj_conf.input_norm,
                prj_conf.output_dirs,
                prj_conf.output_exts,
                prj_conf.output_dims,
                prj_conf.output_reso,
                prj_conf.output_norm,
                './',
                params=params,
                truncate_seq=prj_conf.truncate_seq,
                min_seq_len=prj_conf.minimum_len,
                save_mean_std=False,
                wav_samp_rate=prj_conf.wav_samp_rate,
                global_arg=args)
        else:
            val_set = None

        # initialize the model and loss function
        model = prj_model.Model(trn_set.get_in_dim(),
                                trn_set.get_out_dim(),
                                args, prj_conf,
                                trn_set.get_data_mean_std())
        loss_wrapper = prj_model.Loss(args)

        # initialize the optimizer
        optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)

        # if necessary, resume training
        if args.trained_model == "":
            checkpoint = None
        else:
            checkpoint = torch.load(args.trained_model)

        # start training
        nii_nn_wrapper.f_train_wrapper(args, model,
                                       loss_wrapper, device,
                                       optimizer_wrapper,
                                       trn_set, val_set, checkpoint)
        # done for training
    else:
        # for inference

        # by default, no truncating, no shuffling
        params = {'batch_size': args.batch_size,
                  'shuffle': False,
                  'num_workers': args.num_workers}

        if type(prj_conf.test_list) is list:
            t_lst = prj_conf.test_list
        else:
            t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)

        test_set = nii_dset.NIIDataSetLoader(
            prj_conf.test_set_name,
            t_lst,
            prj_conf.test_input_dirs,
            prj_conf.input_exts,
            prj_conf.input_dims,
            prj_conf.input_reso,
            prj_conf.input_norm,
            prj_conf.test_output_dirs,
            prj_conf.output_exts,
            prj_conf.output_dims,
            prj_conf.output_reso,
            prj_conf.output_norm,
            './',
            params=params,
            truncate_seq=None,
            min_seq_len=None,
            save_mean_std=False,
            wav_samp_rate=prj_conf.wav_samp_rate,
            global_arg=args)

        # initialize the model
        model = prj_model.Model(test_set.get_in_dim(),
                                test_set.get_out_dim(),
                                args, prj_conf)

        if args.trained_model == "":
            print("No model is loaded by --trained-model for inference")
            print("By default, load %s%s" % (args.save_trained_name,
                                             args.save_model_ext))
            checkpoint = torch.load("%s%s" % (args.save_trained_name,
                                              args.save_model_ext))
        else:
            checkpoint = torch.load(args.trained_model)

        # do inference and output data
        nii_nn_wrapper.f_inference_wrapper(args, model, device,
                                           test_set, checkpoint)
    # done
    return


if __name__ == "__main__":
    main()
6,533
34.704918
76
py
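The wrapper above runs only when the module named by args.module_config exposes the attributes it reads (trn_list, input_dirs, truncate_seq, and so on). Below is a minimal sketch of what such a config.py could look like for this recipe; every path and value is an illustrative assumption, not the project's actual configuration.

# config.py sketch -- all paths and values below are placeholder assumptions
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'

# lists of trial names, one name per line; set val_list to None to skip validation
trn_list = 'scp/train.lst'
val_list = 'scp/val.lst'

# input side: waveform directories, extensions, dims, resolutions, norm flags
input_dirs = ['data/train_dev/wav']
input_exts = ['.wav']
input_dims = [1]
input_reso = [1]
input_norm = [False]

# output side: no acoustic target is loaded; labels come from the protocol
output_dirs = []
output_exts = []
output_dims = [1]
output_reso = [1]
output_norm = [False]

wav_samp_rate = 16000
truncate_seq = None
minimum_len = None

# passed through to model.py (e.g., the path to the protocol file)
optional_argument = ['protocol/train_dev.protocol.txt']

# test-set entries read in the inference branch
test_set_name = 'asvspoof2019_eval'
test_list = 'scp/eval.lst'
test_input_dirs = ['data/eval/wav']
test_output_dirs = []

main.py would then be pointed at this module through args.module_config, which it loads with importlib; the exact command-line flag names are defined in core_scripts.config_parse.arg_parse.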
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/05/model.py
#!/usr/bin/env python
"""
model.py

Self defined model definition.
Usage:

"""
from __future__ import absolute_import
from __future__ import print_function

import sys
import numpy as np

import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func

import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"

##############
##

def protocol_parse(protocol_filepath):
    """ Parse the protocol file and map each trial name to
    1 (bonafide) or 0 (spoof).
    """
    data_buffer = {}
    temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
    for row in temp_buffer:
        if row[-1] == 'bonafide':
            data_buffer[row[1]] = 1
        else:
            data_buffer[row[1]] = 0
    return data_buffer


## FOR MODEL
class Model(torch_nn.Module):
    """ Model definition
    """
    def __init__(self, in_dim, out_dim, args, mean_std=None):
        super(Model, self).__init__()

        ##### required part, no need to change #####

        # mean and std of input and output
        in_m, in_s, out_m, out_s = self.prepare_mean_std(
            in_dim, out_dim, args, mean_std)
        self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
        self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
        self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
        self.output_std = torch_nn.Parameter(out_s, requires_grad=False)

        # a flag for debugging (by default False)
        self.model_debug = False
        self.validation = False
        #####

        # target data
        protocol_file = prj_conf.optional_argument[0]
        self.protocol_parser = protocol_parse(protocol_file)

        # working sampling rate; torchaudio can be used to change it
        self.m_target_sr = 16000

        # re-sampling (optional)
        #self.m_resampler = torchaudio.transforms.Resample(
        #    prj_conf.wav_samp_rate, self.m_target_sr)

        # vad (optional)
        #self.m_vad = torchaudio.transforms.Vad(sample_rate=self.m_target_sr)

        # flag for balanced class (temporary use)
        self.v_flag = 1

        # frame shift (number of waveform points)
        self.frame_hops = [160]
        # frame length
        self.frame_lens = [320]
        # FFT length
        self.fft_n = [512]

        # LFCC dim (base component)
        self.lfcc_dim = [20]
        self.lfcc_with_delta = True

        # window type
        self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculation
        self.amp_floor = 0.00001

        # number of frames kept from each trial
        # (10 * 16 * 750 // 160 = 750 frames for the 160-point frame shift)
        self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]

        # number of sub-models
        self.v_submodels = len(self.frame_lens)

        # dimension of embedding vectors
        self.v_emd_dim = 64
        # number of output classes
        self.v_out_class = 2

        self.m_transform = []
        self.m_output_act = []
        self.m_frontend = []
        self.m_angle = []

        for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
                self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
            fft_n_bins = fft_n // 2 + 1
            if self.lfcc_with_delta:
                lfcc_dim = lfcc_dim * 3

            self.m_transform.append(
                torch_nn.Sequential(
                    torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),
                    torch_nn.BatchNorm2d(48, affine=False),

                    torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(48, affine=False),
                    torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(64, affine=False),
                    torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2])
                )
            )
            self.m_output_act.append(
                torch_nn.Sequential(
                    torch_nn.Dropout(0.7),
                    torch_nn.Linear(
                        (trunc_len // 16) * (lfcc_dim // 16) * 32, 160),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.Linear(80, self.v_emd_dim)
                )
            )
            self.m_frontend.append(
                nii_front_end.LFCC(self.frame_lens[idx],
                                   self.frame_hops[idx],
                                   self.fft_n[idx],
                                   self.m_target_sr,
                                   self.lfcc_dim[idx],
                                   with_energy=True)
            )
            self.m_angle.append(
                nii_p2sgrad.P2SActivationLayer(
                    self.v_emd_dim, self.v_out_class)
            )

        self.m_transform = torch_nn.ModuleList(self.m_transform)
        self.m_output_act = torch_nn.ModuleList(self.m_output_act)
        self.m_frontend = torch_nn.ModuleList(self.m_frontend)
        self.m_angle = torch_nn.ModuleList(self.m_angle)
        # done
        return

    def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
        """ prepare the mean and std vectors for input and output data
        """
        if data_mean_std is not None:
            in_m = torch.from_numpy(data_mean_std[0])
            in_s = torch.from_numpy(data_mean_std[1])
            out_m = torch.from_numpy(data_mean_std[2])
            out_s = torch.from_numpy(data_mean_std[3])
            if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
                print("Input dim: {:d}".format(in_dim))
                print("Mean dim: {:d}".format(in_m.shape[0]))
                print("Std dim: {:d}".format(in_s.shape[0]))
                print("Input dimension incompatible")
                sys.exit(1)
            if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
                print("Output dim: {:d}".format(out_dim))
                print("Mean dim: {:d}".format(out_m.shape[0]))
                print("Std dim: {:d}".format(out_s.shape[0]))
                print("Output dimension incompatible")
                sys.exit(1)
        else:
            # by default, mean 0 and std 1 (identity normalization);
            # the std must be ones, not zeros, to avoid division by zero
            # in normalize_input / normalize_target
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
        return in_m, in_s, out_m, out_s

    def normalize_input(self, x):
        """ normalizing the input data
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from the network
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features

        fs: frame shift
        fl: frame length
        fn: fft points
        trunc_len: number of frames per file (by truncating)
        datalength: original length of data
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))

            # permute to (batch, fft_bin, frame_length)
            x_sp_amp = x_sp_amp.permute(0, 2, 1)

            # make sure the buffer is long enough
            x_sp_amp_buff = torch.zeros(
                [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
                dtype=x_sp_amp.dtype, device=x_sp_amp.device)

            # for a batch of data, handle padding and trimming independently
            fs = self.frame_hops[idx]
            for fileidx in range(x_sp_amp.shape[0]):
                # roughly, this is the number of frames
                true_frame_num = datalength[fileidx] // fs

                if true_frame_num > trunc_len:
                    # trim randomly
                    pos = torch.rand([1]) * (true_frame_num - trunc_len)
                    pos = torch.floor(pos[0]).long()
                    tmp = x_sp_amp[fileidx, :, pos:trunc_len + pos]
                    x_sp_amp_buff[fileidx] = tmp
                else:
                    # pad by repeating the utterance, then cut to trunc_len
                    rep = int(np.ceil(trunc_len / true_frame_num))
                    tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
                    x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]

            # permute to (batch, frame_length, fft_bin)
            x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)

        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of the forward computation
        Assume x (batchsize, length, dim)
        Output (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        batch_size = x.shape[0]

        # buffer to store output embeddings from the sub-models
        output_emb = torch.zeros(
            [batch_size * self.v_submodels, self.v_emd_dim],
            device=x.device, dtype=x.dtype)

        # compute the embedding for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_output_act)):

            # extract the front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))
            #  3. flatten and transform through the output function
            tmp_score = m_output(torch.flatten(hidden_features, 1))
            output_emb[idx * batch_size: (idx+1) * batch_size] = tmp_score

        return output_emb

    def _compute_score(self, x, inference=False):
        """ compute class scores from embeddings through the P2SGrad layer
        """
        batch_size = x.shape[0]

        # compute scores through the p2sgrad layer
        out_score = torch.zeros(
            [batch_size * self.v_submodels, self.v_out_class],
            device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, m_score in enumerate(self.m_angle):
            tmp_score = m_score(x[idx * batch_size: (idx+1) * batch_size])
            out_score[idx * batch_size: (idx+1) * batch_size] = tmp_score

        if inference:
            # out_score[:, 1] corresponds to the positive (bonafide) class
            return out_score[:, 1]
        else:
            return out_score

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):
        #with torch.no_grad():
        #    vad_waveform = self.m_vad(x.squeeze(-1))
        #    vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
        #    if vad_waveform.shape[-1] > 0:
        #        x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
        #    else:
        #        pass

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)
            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=scores.dtype)
            target_vec = target_vec.repeat(self.v_submodels)
            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0], scores.mean()))
            # don't write the output score as a single file
            return None


class Loss():
    """ Wrapper to define the loss function
    """
    def __init__(self, args):
        self.m_loss = nii_p2sgrad.P2SGradLoss()

    def compute(self, outputs, target):
        """ compute the P2SGrad loss given [scores, target_vec, ...]
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss


if __name__ == "__main__":
    print("Definition of model")
14,347
34.691542
80
py
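In the LCNN trunk of this model.py, every Conv2d is followed by nii_nn.MaxFeatureMap2D(), and the next layer always consumes half of the Conv2d output channels (64 -> 32, 96 -> 48, 128 -> 64). Below is a minimal sketch of a max-feature-map layer consistent with those channel counts, assuming nii_nn.MaxFeatureMap2D follows the standard LCNN definition of an element-wise max over the two halves of the channel axis; the class name is my own.

import torch
import torch.nn as nn

class MaxFeatureMap2DSketch(nn.Module):
    """Max-feature-map over the channel axis: split (batch, C, H, W)
    into two halves of C//2 channels and keep the element-wise max.
    This is an assumption of what nii_nn.MaxFeatureMap2D computes,
    consistent with the halved channel counts in the model above."""
    def __init__(self, max_dim=1):
        super().__init__()
        self.max_dim = max_dim

    def forward(self, x):
        shape = list(x.size())
        assert shape[self.max_dim] % 2 == 0, "channel number must be even"
        shape[self.max_dim] = shape[self.max_dim] // 2
        shape.insert(self.max_dim, 2)
        # view as (batch, 2, C//2, H, W) and take the max over the pair
        return x.view(*shape).max(self.max_dim)[0]

# quick check: 64 channels in, 32 channels out, spatial size unchanged
y = MaxFeatureMap2DSketch()(torch.randn(2, 64, 48, 60))
assert y.shape == (2, 32, 48, 60)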
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/04/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## def protocol_parse(protocol_filepath): data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) self.model_debug = False self.validation = False ##### # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) self.v_flag = 1 # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manual choose the first 600 frames in the data self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), 
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfcc_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughtly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) 
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,347
34.691542
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/01/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## def protocol_parse(protocol_filepath): data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) self.model_debug = False self.validation = False ##### # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) self.v_flag = 1 # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manual choose the first 600 frames in the data self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), 
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfcc_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughtly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) 
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,347
34.691542
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/06/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## def protocol_parse(protocol_filepath): data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) self.model_debug = False self.validation = False ##### # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) self.v_flag = 1 # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manual choose the first 600 frames in the data self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), 
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfcc_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughtly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) 
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,347
34.691542
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/03/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## def protocol_parse(protocol_filepath): data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) self.model_debug = False self.validation = False ##### # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) self.v_flag = 1 # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manual choose the first 600 frames in the data self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), 
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfcc_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughtly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) 
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,347
34.691542
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/02/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## def protocol_parse(protocol_filepath): data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) self.model_debug = False self.validation = False ##### # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) self.v_flag = 1 # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manual choose the first 600 frames in the data self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), 
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfcc_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) 
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,347
34.691542
80
py
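The _front_end logic in the model above fits every trial into a fixed number of frames: long files are trimmed with a random offset, short files are tiled until the buffer is full. A minimal standalone sketch of that step, assuming features shaped (batch, feat_dim, frame_num) after the permute, as in the code; the function name pad_or_trim is hypothetical, not part of the repository:

import numpy as np
import torch

def pad_or_trim(x_sp_amp, datalength, trunc_len, frame_hop):
    # output buffer of fixed length: (batch, feat_dim, trunc_len)
    buff = torch.zeros([x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
                       dtype=x_sp_amp.dtype, device=x_sp_amp.device)
    for i in range(x_sp_amp.shape[0]):
        # roughly the number of valid frames for this trial
        true_frame_num = datalength[i] // frame_hop
        if true_frame_num > trunc_len:
            # long file: keep a randomly positioned window of trunc_len frames
            pos = int(torch.randint(0, true_frame_num - trunc_len + 1, [1]))
            buff[i] = x_sp_amp[i, :, pos:pos + trunc_len]
        else:
            # short file: tile the valid frames, then cut to trunc_len
            rep = int(np.ceil(trunc_len / true_frame_num))
            buff[i] = x_sp_amp[i, :, 0:true_frame_num].repeat(1, rep)[:, 0:trunc_len]
    return buff

With frame_hop = 160 and trunc_len = 750 this reproduces the v_truncate_lens setting used by the model above.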
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/05/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.am_softmax as nii_amsoftmax import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) # self.model_debug = False # self.flag_validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part for output layer self.m_angle = [] # it can handle models 
with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ 
denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: index of the sub-model / front-end configuration to use trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch_size batch_size = x.shape[0] // self.v_submodels # buffer to store output scores from sub-models output_emb = torch.zeros([x.shape[0], self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models * batch_size batch_size = x.shape[0] // self.v_submodels # buffer to save the scores # for non-target classes out_score_neg = torch.zeros( [x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype) # for target classes out_score_pos = torch.zeros_like(out_score_neg) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp_score = m_score(x[s_idx:e_idx], inference) out_score_neg[s_idx:e_idx] = tmp_score[0] out_score_pos[s_idx:e_idx] = tmp_score[1] if inference: return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1] else: return out_score_neg, out_score_pos def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=torch.long) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss() def compute(self, input_data, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(input_data[0], input_data[1]) return loss if __name__ == "__main__": print("Definition of model")
15,207
33.642369
80
py
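nii_amsoftmax.AMAngleLayer above returns a pair of logit tensors, one without and one with the additive margin, which _compute_score stores as out_score_neg and out_score_pos. A minimal sketch of such a layer, assuming cosine similarity between L2-normalized embeddings and class weights; the class name and the scale s and margin m values are illustrative, not the settings used by core_modules.am_softmax:

import torch
import torch.nn as nn
import torch.nn.functional as F

class AMAngleLayerSketch(nn.Module):
    # hypothetical re-implementation for illustration only
    def __init__(self, in_dim, out_class, s=20.0, m=0.9):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_dim, out_class))
        self.s, self.m = s, m

    def forward(self, x, inference=False):
        # cosine between normalized embedding and normalized class weights
        cos_theta = F.normalize(x, dim=1) @ F.normalize(self.weight, dim=0)
        if inference:
            # no margin penalty at test time
            return self.s * cos_theta, self.s * cos_theta
        # (plain logits, margin-penalized logits); the loss decides which
        # entries of the penalized tensor apply to the target class
        return self.s * cos_theta, self.s * (cos_theta - self.m)

layer = AMAngleLayerSketch(64, 2)
neg, pos = layer(torch.randn(8, 64))
assert neg.shape == pos.shape == (8, 2)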
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/04/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.am_softmax as nii_amsoftmax import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) # self.model_debug = False # self.flag_validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part for output layer self.m_angle = [] # it can handle models 
with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ 
denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: index of the sub-model / front-end configuration to use trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch_size batch_size = x.shape[0] // self.v_submodels # buffer to store output scores from sub-models output_emb = torch.zeros([x.shape[0], self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models * batch_size batch_size = x.shape[0] // self.v_submodels # buffer to save the scores # for non-target classes out_score_neg = torch.zeros( [x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype) # for target classes out_score_pos = torch.zeros_like(out_score_neg) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp_score = m_score(x[s_idx:e_idx], inference) out_score_neg[s_idx:e_idx] = tmp_score[0] out_score_pos[s_idx:e_idx] = tmp_score[1] if inference: return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1] else: return out_score_neg, out_score_pos def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=torch.long) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss() def compute(self, input_data, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(input_data[0], input_data[1]) return loss if __name__ == "__main__": print("Definition of model")
15,207
33.642369
80
py
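protocol_parse above keys the label dictionary on the second column (the trial name) and reads the bonafide/spoof tag from the last column. A small self-contained check of that behaviour; the two protocol rows are made-up examples in the ASVspoof2019 format:

import io
import numpy as np

demo_protocol = (
    "LA_0001 LA_T_1000001 - A01 spoof\n"
    "LA_0002 LA_T_1000002 - - bonafide\n"
)
rows = np.loadtxt(io.StringIO(demo_protocol), dtype='str')
labels = {row[1]: (1 if row[-1] == 'bonafide' else 0) for row in rows}
assert labels == {'LA_T_1000001': 0, 'LA_T_1000002': 1}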
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/01/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.am_softmax as nii_amsoftmax import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) # self.model_debug = False # self.flag_validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part for output layer self.m_angle = [] # it can handle models 
with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ 
denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: index of the sub-model / front-end configuration to use trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch_size batch_size = x.shape[0] // self.v_submodels # buffer to store output scores from sub-models output_emb = torch.zeros([x.shape[0], self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models * batch_size batch_size = x.shape[0] // self.v_submodels # buffer to save the scores # for non-target classes out_score_neg = torch.zeros( [x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype) # for target classes out_score_pos = torch.zeros_like(out_score_neg) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp_score = m_score(x[s_idx:e_idx], inference) out_score_neg[s_idx:e_idx] = tmp_score[0] out_score_pos[s_idx:e_idx] = tmp_score[1] if inference: return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1] else: return out_score_neg, out_score_pos def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=torch.long) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss() def compute(self, input_data, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(input_data[0], input_data[1]) return loss if __name__ == "__main__": print("Definition of model")
15,207
33.642369
80
py
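The channel bookkeeping in the m_transform stacks above (Conv2d(1, 64) feeding a layer that expects 32 input channels, 96 feeding 48, and so on) works because nii_nn.MaxFeatureMap2D halves the channel dimension. A minimal sketch of that operation under the usual Light CNN definition, an element-wise max over the two channel halves; sandbox.block_nn may differ in details:

import torch
import torch.nn as nn

class MaxFeatureMap2DSketch(nn.Module):
    # illustrative re-implementation of the Max-Feature-Map activation
    def forward(self, x):
        # x: (batch, 2 * C, height, width) -> (batch, C, height, width)
        part_a, part_b = x.chunk(2, dim=1)
        return torch.max(part_a, part_b)

mfm = MaxFeatureMap2DSketch()
out = mfm(torch.randn(4, 64, 100, 60))
assert out.shape == (4, 32, 100, 60)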
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/06/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.am_softmax as nii_amsoftmax import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) # self.model_debug = False # self.flag_validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part for output layer self.m_angle = [] # it can handle models 
with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ 
denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: index of the sub-model / front-end configuration to use trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch_size batch_size = x.shape[0] // self.v_submodels # buffer to store output scores from sub-models output_emb = torch.zeros([x.shape[0], self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models * batch_size batch_size = x.shape[0] // self.v_submodels # buffer to save the scores # for non-target classes out_score_neg = torch.zeros( [x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype) # for target classes out_score_pos = torch.zeros_like(out_score_neg) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp_score = m_score(x[s_idx:e_idx], inference) out_score_neg[s_idx:e_idx] = tmp_score[0] out_score_pos[s_idx:e_idx] = tmp_score[1] if inference: return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1] else: return out_score_neg, out_score_pos def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=torch.long) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss() def compute(self, input_data, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(input_data[0], input_data[1]) return loss if __name__ == "__main__": print("Definition of model")
15,207
33.642369
80
py
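SelfWeightedPooling above collapses the (batch, frame, dim) hidden sequence into one utterance-level vector, and the Linear layer that follows takes 2 * dim inputs, which suggests the pooled output concatenates an attention-weighted mean with a matching spread statistic. A sketch under that assumption; the real sandbox.block_nn implementation may differ:

import torch
import torch.nn as nn

class SelfWeightedPoolingSketch(nn.Module):
    # hypothetical attentive-statistics pooling for illustration
    def __init__(self, feat_dim):
        super().__init__()
        self.att = nn.Linear(feat_dim, 1, bias=False)

    def forward(self, x):
        # x: (batch, frame, dim); softmax over frames gives attention weights
        w = torch.softmax(self.att(x), dim=1)                   # (batch, frame, 1)
        mean = torch.sum(w * x, dim=1)                          # (batch, dim)
        var = torch.sum(w * (x - mean.unsqueeze(1)) ** 2, dim=1)
        return torch.cat([mean, torch.sqrt(var + 1e-8)], dim=1) # (batch, 2 * dim)

# with lfcc_dim = 60 after deltas, (lfcc_dim // 16) * 32 = 96 as in the model
pool = SelfWeightedPoolingSketch(96)
y = pool(torch.randn(8, 50, 96))
assert y.shape == (8, 192)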
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/03/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.am_softmax as nii_amsoftmax import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) # self.model_debug = False # self.flag_validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part for output layer self.m_angle = [] # it can handle models 
with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ 
denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch_size batch_size = x.shape[0] // self.v_submodels # buffer to store output scores from sub-models output_emb = torch.zeros([x.shape[0], self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by the strided pooling layers hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5.
pass through the output layer tmp_emb = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models * batch_size batch_size = x.shape[0] // self.v_submodels # buffer to save the scores # for non-target classes out_score_neg = torch.zeros( [x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype) # for target classes out_score_pos = torch.zeros_like(out_score_neg) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp_score = m_score(x[s_idx:e_idx], inference) out_score_neg[s_idx:e_idx] = tmp_score[0] out_score_pos[s_idx:e_idx] = tmp_score[1] if inference: return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1] else: return out_score_neg, out_score_pos def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=torch.long) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss() def compute(self, input_data, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(input_data[0], input_data[1]) return loss if __name__ == "__main__": print("Definition of model")
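# --- Usage sketch added for illustration (not part of the original file).
# It shows what protocol_parse above returns, using a tiny, made-up
# ASVspoof2019-style protocol; only column 2 (trial name) and the last
# column (bonafide/spoof) matter to the parser. It assumes protocol_parse
# and numpy from the file above are in scope.
import tempfile

_demo_rows = ("SPK_01 TRIAL_0001 - A01 spoof\n"
              "SPK_02 TRIAL_0002 - - bonafide\n")
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as _fp:
    _fp.write(_demo_rows)
    _demo_path = _fp.name

# spoof -> 0, bonafide -> 1, keyed by the trial name in column 2
assert protocol_parse(_demo_path) == {'TRIAL_0001': 0, 'TRIAL_0002': 1}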
15,207
33.642369
80
py
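# --- A stand-in sketch (written for this document, not taken from
# sandbox.block_nn) of the Max-Feature-Map activation used throughout the
# LCNN stacks in these model files: it splits the channel axis in half and
# keeps the element-wise maximum, which is why torch_nn.Conv2d(1, 64, ...)
# can be followed by torch_nn.Conv2d(32, ...). The real MaxFeatureMap2D may
# differ in detail.
import torch

class MaxFeatureMap2DSketch(torch.nn.Module):
    def forward(self, x):
        # (batch, 2C, H, W) -> two (batch, C, H, W) halves -> element-wise max
        first_half, second_half = torch.chunk(x, 2, dim=1)
        return torch.max(first_half, second_half)

_feat = torch.randn(2, 64, 16, 16)
assert MaxFeatureMap2DSketch()(_feat).shape == (2, 32, 16, 16)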
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/05/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.oc_softmax as nii_oc_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFB dim (base component) self.lfb_dim = [60] self.lfb_with_delta = False # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manually keep a fixed number of frames per trial (750 for the 160-point hop) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 256 # output class self.v_out_class = 1 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_a_softmax = [] for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfb_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfb_with_delta: lfb_dim = lfb_dim * 3 self.m_transform.append( torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfb_dim // 16) * 32, 512), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(256, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFB(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfb_dim[idx], with_energy=False, with_emphasis=True, with_delta=self.lfb_with_delta) ) self.m_a_softmax.append( nii_oc_softmax.OCAngleLayer(self.v_emd_dim) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff =
torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch size batch_size = x.shape[0] # buffer to store output scores from sub-models output_score = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_score def _compute_score(self, feature_vec, inference=False): """ """ # compute a-softmax output for each feature configuration batch_size = feature_vec.shape[0] // self.v_submodels x_cos_val = torch.zeros( [feature_vec.shape[0], self.v_out_class], dtype=feature_vec.dtype, device=feature_vec.device) x_phi_val = torch.zeros_like(x_cos_val) for idx in range(self.v_submodels): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], inference) x_cos_val[s_idx:e_idx] = tmp1 x_phi_val[s_idx:e_idx] = tmp2 if inference: return x_cos_val else: return [x_cos_val, x_phi_val] def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) a_softmax_act = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device).long() target_vec = target_vec.repeat(self.v_submodels) return [a_softmax_act, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) score = self._compute_score(feature_vec, True) target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0], target[0], score.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,313
34.367206
80
py
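# --- Arithmetic check (added for this document) for the first Linear layer in
# m_output_act above: the four MaxPool2d([2, 2], [2, 2]) stages divide both
# the frame axis and the feature axis by 16, and the last max-feature-map
# leaves 32 channels, so the flattened CNN output has
# (trunc_len // 16) * (lfb_dim // 16) * 32 elements.
trunc_len = 10 * 16 * 750 // 160   # = 750 frames kept per trial (160-pt hop)
lfb_dim = 60                       # base LFB dim; lfb_with_delta is False
flat_dim = (trunc_len // 16) * (lfb_dim // 16) * 32
assert (trunc_len, flat_dim) == (750, 4416)   # hence Linear(4416, 512)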
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/04/model.py
#!/usr/bin/env python """ model.py 1;95;0cSelf defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.oc_softmax as nii_oc_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFB dim (base component) self.lfb_dim = [60] self.lfb_with_delta = False # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manual choose the first 600 frames in the data self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 256 # output class self.v_out_class = 1 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_a_softmax = [] for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfb_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfb_with_delta: lfb_dim = lfb_dim * 3 self.m_transform.append( torch_nn.Sequential( 
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfb_dim // 16) * 32, 512), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(256, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFB(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfb_dim[idx], with_energy=False, with_emphasis=True, with_delta=self.lfb_with_delta) ) self.m_a_softmax.append( nii_oc_softmax.OCAngleLayer(self.v_emd_dim) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = 
torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughtly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_score = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_score def _compute_score(self, feature_vec, inference=False): """ """ # compute a-softmax output for each feature configuration batch_size = feature_vec.shape[0] // self.v_submodels x_cos_val = torch.zeros( [feature_vec.shape[0], self.v_out_class], dtype=feature_vec.dtype, device=feature_vec.device) x_phi_val = torch.zeros_like(x_cos_val) for idx in range(self.v_submodels): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], inference) x_cos_val[s_idx:e_idx] = tmp1 x_phi_val[s_idx:e_idx] = tmp2 if inference: return x_cos_val else: return [x_cos_val, x_phi_val] def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) a_softmax_act = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device).long() target_vec = target_vec.repeat(self.v_submodels) return [a_softmax_act, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) score = self._compute_score(feature_vec, True) target = self._get_target(filenames) 
print("Output, %s, %d, %f" % (filenames[0], target[0], score.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,313
34.367206
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/01/model.py
#!/usr/bin/env python """ model.py 1;95;0cSelf defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.oc_softmax as nii_oc_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFB dim (base component) self.lfb_dim = [60] self.lfb_with_delta = False # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manual choose the first 600 frames in the data self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 256 # output class self.v_out_class = 1 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_a_softmax = [] for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfb_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfb_with_delta: lfb_dim = lfb_dim * 3 self.m_transform.append( torch_nn.Sequential( 
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfb_dim // 16) * 32, 512), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(256, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFB(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfb_dim[idx], with_energy=False, with_emphasis=True, with_delta=self.lfb_with_delta) ) self.m_a_softmax.append( nii_oc_softmax.OCAngleLayer(self.v_emd_dim) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = 
torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughtly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_score = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_score def _compute_score(self, feature_vec, inference=False): """ """ # compute a-softmax output for each feature configuration batch_size = feature_vec.shape[0] // self.v_submodels x_cos_val = torch.zeros( [feature_vec.shape[0], self.v_out_class], dtype=feature_vec.dtype, device=feature_vec.device) x_phi_val = torch.zeros_like(x_cos_val) for idx in range(self.v_submodels): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], inference) x_cos_val[s_idx:e_idx] = tmp1 x_phi_val[s_idx:e_idx] = tmp2 if inference: return x_cos_val else: return [x_cos_val, x_phi_val] def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) a_softmax_act = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device).long() target_vec = target_vec.repeat(self.v_submodels) return [a_softmax_act, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) score = self._compute_score(feature_vec, True) target = self._get_target(filenames) 
print("Output, %s, %d, %f" % (filenames[0], target[0], score.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,313
34.367206
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/06/model.py
#!/usr/bin/env python """ model.py 1;95;0cSelf defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.oc_softmax as nii_oc_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFB dim (base component) self.lfb_dim = [60] self.lfb_with_delta = False # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manual choose the first 600 frames in the data self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 256 # output class self.v_out_class = 1 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_a_softmax = [] for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfb_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfb_with_delta: lfb_dim = lfb_dim * 3 self.m_transform.append( torch_nn.Sequential( 
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfb_dim // 16) * 32, 512), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(256, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFB(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfb_dim[idx], with_energy=False, with_emphasis=True, with_delta=self.lfb_with_delta) ) self.m_a_softmax.append( nii_oc_softmax.OCAngleLayer(self.v_emd_dim) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = 
torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughtly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_score = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_score def _compute_score(self, feature_vec, inference=False): """ """ # compute a-softmax output for each feature configuration batch_size = feature_vec.shape[0] // self.v_submodels x_cos_val = torch.zeros( [feature_vec.shape[0], self.v_out_class], dtype=feature_vec.dtype, device=feature_vec.device) x_phi_val = torch.zeros_like(x_cos_val) for idx in range(self.v_submodels): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], inference) x_cos_val[s_idx:e_idx] = tmp1 x_phi_val[s_idx:e_idx] = tmp2 if inference: return x_cos_val else: return [x_cos_val, x_phi_val] def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) a_softmax_act = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device).long() target_vec = target_vec.repeat(self.v_submodels) return [a_softmax_act, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) score = self._compute_score(feature_vec, True) target = self._get_target(filenames) 
print("Output, %s, %d, %f" % (filenames[0], target[0], score.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,313
34.367206
80
py
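The `_front_end` padding/trimming loop in the record above is easier to see on a toy tensor. Below is a minimal, self-contained sketch (not part of the repository; the function name and sizes are invented for illustration) of the same trim-or-tile behaviour: long trials are randomly cropped, short trials are tiled until they fill `trunc_len` frames.

import torch

def trim_or_tile(x, true_frame_nums, trunc_len):
    """x: (batch, feat_dim, frames) -> (batch, feat_dim, trunc_len)."""
    out = torch.zeros(x.shape[0], x.shape[1], trunc_len, dtype=x.dtype)
    for i, n in enumerate(true_frame_nums):
        if n > trunc_len:
            # long trial: crop a random trunc_len-frame window (acts as augmentation)
            pos = torch.randint(0, n - trunc_len + 1, (1,)).item()
            out[i] = x[i, :, pos:pos + trunc_len]
        else:
            # short trial: tile the valid frames until trunc_len frames are filled
            rep = -(-trunc_len // n)  # ceiling division
            out[i] = x[i, :, :n].repeat(1, rep)[:, :trunc_len]
    return out

x = torch.randn(2, 60, 900)
print(trim_or_tile(x, [900, 200], 750).shape)  # torch.Size([2, 60, 750])

Random cropping gives a different window of each long trial per epoch, while tiling keeps every batch entry at exactly `trunc_len` frames so the flattened feature size fed to the Linear layer is constant.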
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/03/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.oc_softmax as nii_oc_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFB dim (base component) self.lfb_dim = [60] self.lfb_with_delta = False # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation self.amp_floor = 0.00001 # manually choose the number of frames kept per trial (750 at the 160-point frame hop) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 256 # output class self.v_out_class = 1 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_a_softmax = [] for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfb_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfb_with_delta: lfb_dim = lfb_dim * 3 self.m_transform.append( torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfb_dim // 16) * 32, 512), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(256, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFB(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfb_dim[idx], with_energy=False, with_emphasis=True, with_delta=self.lfb_with_delta) ) self.m_a_softmax.append( nii_oc_softmax.OCAngleLayer(self.v_emd_dim) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = 
torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for each trial in the batch, handle padding and trimming independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch size batch_size = x.shape[0] # buffer to store output scores from sub-models output_score = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_score def _compute_score(self, feature_vec, inference=False): """ """ # compute a-softmax output for each feature configuration batch_size = feature_vec.shape[0] // self.v_submodels x_cos_val = torch.zeros( [feature_vec.shape[0], self.v_out_class], dtype=feature_vec.dtype, device=feature_vec.device) x_phi_val = torch.zeros_like(x_cos_val) for idx in range(self.v_submodels): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], inference) x_cos_val[s_idx:e_idx] = tmp1 x_phi_val[s_idx:e_idx] = tmp2 if inference: return x_cos_val else: return [x_cos_val, x_phi_val] def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) a_softmax_act = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device).long() target_vec = target_vec.repeat(self.v_submodels) return [a_softmax_act, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) score = self._compute_score(feature_vec, True) target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0], target[0], score.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,313
34.367206
80
py
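The channel bookkeeping in the LCNN stacks above (a Conv2d(1, 64) feeding a layer that expects 32 input channels, 96 feeding 48, 128 feeding 64) is explained by the max-feature-map activation. A minimal sketch, assuming nii_nn.MaxFeatureMap2D implements the standard MFM used in light CNNs (split the channel axis in half and take the element-wise max):

import torch

def max_feature_map_2d(x):
    """(batch, 2*C, H, W) -> (batch, C, H, W): element-wise max over channel halves."""
    a, b = x.chunk(2, dim=1)
    return torch.max(a, b)

y = max_feature_map_2d(torch.randn(4, 64, 30, 30))
print(y.shape)  # torch.Size([4, 32, 30, 30]); Conv2d(1, 64) -> 32 channels after MFM

MFM acts as a learned feature selector: each output channel keeps whichever of its two candidate activations is larger, which is why every Conv2d in the stack declares twice the channels the next layer consumes.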
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/02/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.oc_softmax as nii_oc_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate, torchaudio is used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFB dim (base component) self.lfb_dim = [60] self.lfb_with_delta = False # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation self.amp_floor = 0.00001 # manually choose the number of frames kept per trial (750 at the 160-point frame hop) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 256 # output class self.v_out_class = 1 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_a_softmax = [] for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfb_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfb_with_delta: lfb_dim = lfb_dim * 3 self.m_transform.append( torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (lfb_dim // 16) * 32, 512), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(256, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.LFB(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfb_dim[idx], with_energy=False, with_emphasis=True, with_delta=self.lfb_with_delta) ) self.m_a_softmax.append( nii_oc_softmax.OCAngleLayer(self.v_emd_dim) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = 
torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for each trial in the batch, handle padding and trimming independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch size batch_size = x.shape[0] # buffer to store output scores from sub-models output_score = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. flatten and transform through output function tmp_score = m_output(torch.flatten(hidden_features, 1)) output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_score def _compute_score(self, feature_vec, inference=False): """ """ # compute a-softmax output for each feature configuration batch_size = feature_vec.shape[0] // self.v_submodels x_cos_val = torch.zeros( [feature_vec.shape[0], self.v_out_class], dtype=feature_vec.dtype, device=feature_vec.device) x_phi_val = torch.zeros_like(x_cos_val) for idx in range(self.v_submodels): s_idx = idx * batch_size e_idx = idx * batch_size + batch_size tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], inference) x_cos_val[s_idx:e_idx] = tmp1 x_phi_val[s_idx:e_idx] = tmp2 if inference: return x_cos_val else: return [x_cos_val, x_phi_val] def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) a_softmax_act = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device).long() target_vec = target_vec.repeat(self.v_submodels) return [a_softmax_act, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) score = self._compute_score(feature_vec, True) target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0], target[0], score.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,313
34.367206
80
py
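protocol_parse in the records above reduces each ASVspoof protocol line to a filename-to-label pair. A toy run of the same logic on an in-memory protocol (speaker and trial names below are made up for illustration):

import io
import numpy as np

protocol = io.StringIO(
    "LA_0001 LA_E_1000001 - A07 spoof\n"
    "LA_0002 LA_E_1000002 - - bonafide\n")
rows = np.loadtxt(protocol, dtype='str')          # array of string columns per line
labels = {row[1]: 1 if row[-1] == 'bonafide' else 0 for row in rows}
print(labels)  # {'LA_E_1000001': 0, 'LA_E_1000002': 1}

The second column (the trial name) is the dictionary key and the last column decides the label, which matches how _get_target later looks up one label per file name in the mini-batch.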
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/05/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # self.m_before_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end 
configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_before_pooling.append( torch_nn.Sequential( nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32), nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32) ) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_angle = torch_nn.ModuleList(self.m_angle) self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code 
""" return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_before_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pass through LSTM then summing hidden_features_lstm = m_be_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of embedding vectors (batch size * number of sub-models) batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros( [batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,979
33.675926
80
py
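The m_angle modules in the record above come from core_modules.p2sgrad, whose internals are not shown in this file. As a hedged sketch only, assuming P2SActivationLayer returns the cosine between the embedding and one trainable weight vector per class (the usual P2SGrad formulation), its forward pass could look like this:

import torch
import torch.nn as nn
import torch.nn.functional as F

class CosineActivation(nn.Module):
    """Cosine between L2-normalized embeddings and per-class weight vectors."""
    def __init__(self, emb_dim, num_classes):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(emb_dim, num_classes))

    def forward(self, x):
        # (batch, emb_dim) @ (emb_dim, num_classes) -> (batch, num_classes), in [-1, 1]
        return F.normalize(x, dim=1) @ F.normalize(self.weight, dim=0)

act = CosineActivation(64, 2)
print(act(torch.randn(8, 64)).shape)  # torch.Size([8, 2])

Under this assumption, column 1 of the output is the bonafide-class cosine, which is consistent with _compute_score returning out_score[:, 1] at inference time.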
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/04/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # self.m_before_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end 
configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_before_pooling.append( torch_nn.Sequential( nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32), nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32) ) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_angle = torch_nn.ModuleList(self.m_angle) self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code 
""" return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_before_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pass through LSTM then summing hidden_features_lstm = m_be_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of embedding vectors (batch size * number of sub-models) batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros( [batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,979
33.675926
80
py
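Steps 1-5 in _compute_embedding above are mostly shape bookkeeping. Below is a toy walk-through with stand-in tensors (the m_trans and BLSTM outputs are replaced by random tensors of the documented shapes, so only the reshaping logic is real; sizes follow the record's config: 20 base LFCCs with deltas gives dimension 60):

import torch

batch, frames, lfcc_dim = 4, 320, 60          # 60 = 20 LFCCs * 3 (static + deltas)
x = torch.randn(batch, frames, lfcc_dim)      # stand-in for the LFCC front-end output
x = x.unsqueeze(1)                            # 1. (batch, 1, frames, lfcc_dim)
h = torch.randn(batch, 32, frames // 16, lfcc_dim // 16)  # 2. stand-in for m_trans(x)
h = h.permute(0, 2, 1, 3).contiguous()        # 3. (batch, frames//16, 32, lfcc_dim//16)
h = h.view(batch, h.shape[1], -1)             # (batch, 20, 32 * 3) = (4, 20, 96)
h_lstm = torch.randn_like(h)                  # 4. stand-in for the residual BLSTM stack
emb = (h_lstm + h).sum(1)                     # 5. sum over time -> one vector per trial
print(emb.shape)                              # torch.Size([4, 96]); m_output maps 96 -> 64

The factor 16 comes from the four 2x2 max-pooling layers in the LCNN, and the residual sum (h_lstm + h) followed by summation over the frame axis is what turns the variable-length sequence into a fixed-size embedding.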
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/01/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # self.m_before_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end 
configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_before_pooling.append( torch_nn.Sequential( nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32), nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32) ) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_angle = torch_nn.ModuleList(self.m_angle) self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code 
""" return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_before_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pass through LSTM then summing hidden_features_lstm = m_be_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of embedding vectors (batch size * number of sub-models) batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros( [batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,979
33.675926
80
py
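When v_submodels > 1, forward() in the record above stacks the per-sub-model scores block-wise along the batch axis, which is why the target vector is repeat()ed once per sub-model. A tiny illustration of that alignment:

import torch

batch, submodels = 3, 2
target = torch.tensor([1, 0, 1])        # one label per trial in the mini-batch
target_vec = target.repeat(submodels)   # block order matches the stacked scores
print(target_vec)                       # tensor([1, 0, 1, 1, 0, 1])

Each block of batch entries in the score tensor belongs to one sub-model, so repeating the labels (rather than interleaving them) keeps score i paired with target i for the loss.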
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/06/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # self.m_before_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end 
configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_before_pooling.append( torch_nn.Sequential( nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32), nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32) ) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_angle = torch_nn.ModuleList(self.m_angle) self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code 
""" return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_before_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pass through LSTM then summing hidden_features_lstm = m_be_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros( [batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,979
33.675926
80
py
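The protocol_parse utility in the record above maps each trial name to a binary bonafide/spoof label via np.loadtxt. Below is a minimal, self-contained usage sketch, assuming an ASVspoof2019-style whitespace-separated protocol; the file name and the two example rows are invented for illustration.

import numpy as np

def protocol_parse(protocol_filepath):
    # same logic as the record above: trial name -> 1 (bonafide) / 0 (spoof)
    data_buffer = {}
    for row in np.loadtxt(protocol_filepath, dtype='str'):
        data_buffer[row[1]] = 1 if row[-1] == 'bonafide' else 0
    return data_buffer

if __name__ == "__main__":
    # invented two-trial protocol, following the 5-column ASVspoof2019 layout
    with open("toy_protocol.txt", "w") as file_ptr:
        file_ptr.write("LA_0001 LA_T_1000001 - A01 spoof\n")
        file_ptr.write("LA_0002 LA_T_1000002 - - bonafide\n")
    labels = protocol_parse("toy_protocol.txt")
    assert labels["LA_T_1000002"] == 1 and labels["LA_T_1000001"] == 0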
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/03/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # self.m_before_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end 
configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_before_pooling.append( torch_nn.Sequential( nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32), nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32) ) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_angle = torch_nn.ModuleList(self.m_angle) self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code 
""" return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_before_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pass through LSTM then summing hidden_features_lstm = m_be_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros( [batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,979
33.675926
80
py
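This record trains its embedding through core_modules.p2sgrad, which is imported but not shown in the dump. For orientation, here is a self-contained sketch of the P2SGrad idea (cosine scores against trainable class vectors, regressed onto one-hot targets with MSE); it assumes the published P2SGrad formulation rather than the exact code in that module, and all names below are invented.

import torch
import torch.nn as nn
import torch.nn.functional as F

class CosineLayer(nn.Module):
    # cosine similarity between length-normalized embeddings and class vectors,
    # playing the role of P2SActivationLayer in the record above (assumption)
    def __init__(self, emb_dim, num_classes):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(emb_dim, num_classes))
    def forward(self, emb):
        w = F.normalize(self.weight, dim=0)
        x = F.normalize(emb, dim=1)
        return x @ w          # (batch, num_classes), values in [-1, 1]

def p2sgrad_style_loss(cos_scores, target_index):
    # MSE between cosine scores and one-hot targets: the P2SGrad training signal
    one_hot = F.one_hot(target_index, cos_scores.shape[1]).to(cos_scores.dtype)
    return F.mse_loss(cos_scores, one_hot)

emb = torch.randn(4, 64)                     # dummy 64-dim embeddings
layer = CosineLayer(64, 2)                   # 2 classes: spoof / bonafide
loss = p2sgrad_style_loss(layer(emb), torch.tensor([0, 1, 1, 0]))
loss.backward()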
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/02/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # self.m_before_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end 
configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_before_pooling.append( torch_nn.Sequential( nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32), nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32) ) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_angle = torch_nn.ModuleList(self.m_angle) self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code 
""" return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_before_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pass through LSTM then summing hidden_features_lstm = m_be_pool(hidden_features) # 5. 
pass through the output layer tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros( [batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,979
33.675926
80
py
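The Linear and BLSTM layers in these records expect (lfcc_dim//16) * 32 features per frame: the 20-dim LFCC with delta and delta-delta gives 60 dims, the four 2x2 max-pool stages divide that axis by 16, and the last convolution's 64 channels are halved to 32 by max-feature-map. A shape-only sketch verifying this bookkeeping; it drops the 1x1 convolutions and batch-norm layers (they do not change the shape), and MaxFeatureMap2D is re-implemented inline for self-containment.

import torch
import torch.nn as nn

class MaxFeatureMap2D(nn.Module):
    # max-feature-map: split channels in half, take the element-wise max
    def forward(self, x):
        a, b = x.chunk(2, dim=1)
        return torch.max(a, b)

lfcc_dim = 20 * 3                        # base 20 + delta + delta-delta = 60
x = torch.randn(2, 1, 100, lfcc_dim)     # (batch, channel, frames, feat)
net = nn.Sequential(                     # shape-relevant layers only
    nn.Conv2d(1, 64, 5, 1, 2), MaxFeatureMap2D(), nn.MaxPool2d(2, 2),
    nn.Conv2d(32, 96, 3, 1, 1), MaxFeatureMap2D(), nn.MaxPool2d(2, 2),
    nn.Conv2d(48, 128, 3, 1, 1), MaxFeatureMap2D(), nn.MaxPool2d(2, 2),
    nn.Conv2d(64, 64, 3, 1, 1), MaxFeatureMap2D(), nn.MaxPool2d(2, 2),
)
h = net(x)                               # (2, 32, 100 // 16, 60 // 16)
flat = h.permute(0, 2, 1, 3).flatten(2)  # (batch, frames // 16, 32 * 3)
assert flat.shape[-1] == (lfcc_dim // 16) * 32   # 96 features per frame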
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/05/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = 
len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data 
This is required for the PyTorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the PyTorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the PyTorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: index of the sub-model / front-end to use trunc_len: number of frames to be kept after truncation datalength: list of data lengths in the mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ embedding computation used by forward() Assume x (batchsize, length, dim) Output x (batchsize * num_submodels, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub-models batch_size = x.shape[0] # buffer to store output embeddings from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute embeddings for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute the embedding # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5.
pass through the output layer tmp_score = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros( [batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,147
33.349206
80
py
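Unlike the LFCC records above, the spec2 records start m_transform with TrainableLinearFb: the spectrogram is projected through a filterbank matrix that is initialized like a fixed linear-frequency filterbank but stays trainable, and the forward pass returns log10 of the squared (power) output with an eps floor. A hedged, self-contained sketch of that idea follows; the rectangular-band initializer is an invented stand-in for nii_front_end.linear_fb, whose exact filter shapes are not reproduced here.

import torch
import torch.nn as nn

def toy_linear_fb(fft_bins, fb_dim):
    # crude stand-in: rectangular, equally spaced bands over the FFT bins
    fb = torch.zeros(fft_bins, fb_dim)
    edges = torch.linspace(0, fft_bins, fb_dim + 1).long()
    for k in range(fb_dim):
        fb[edges[k]:edges[k + 1], k] = 1.0
    return fb

class ToyTrainableLinearFb(nn.Module):
    def __init__(self, fft_bins, fb_dim):
        super().__init__()
        # initialized from a fixed filterbank, but trainable thereafter
        self.weight = nn.Parameter(toy_linear_fb(fft_bins, fb_dim))
    def forward(self, spec_amp):
        power = torch.pow(spec_amp @ self.weight, 2)
        return torch.log10(power + torch.finfo(torch.float32).eps)

spec = torch.rand(2, 100, 257)               # (batch, frames, fft_n // 2 + 1)
feat = ToyTrainableLinearFb(257, 60)(spec)   # (2, 100, 60) log-power features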
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/04/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = 
len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data 
This is required for the PyTorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the PyTorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the PyTorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: index of the sub-model / front-end to use trunc_len: number of frames to be kept after truncation datalength: list of data lengths in the mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ embedding computation used by forward() Assume x (batchsize, length, dim) Output x (batchsize * num_submodels, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub-models batch_size = x.shape[0] # buffer to store output embeddings from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute embeddings for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute the embedding # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5.
pass through the output layer tmp_score = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros( [batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,147
33.349206
80
py
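In the spec2 records the utterance-level vector comes from nii_nn.SelfWeightedPooling, and the Linear layer that consumes it takes twice the per-frame width ((spec_fb_dim//16) * 32 * 2), which suggests the pooled output concatenates two statistics. A sketch under that assumption, using an attention-weighted mean and standard deviation; the real module may differ in detail, and all names here are invented.

import torch
import torch.nn as nn
import torch.nn.functional as F

class ToySelfWeightedPooling(nn.Module):
    # attention weights over frames, then weighted mean and std, concatenated;
    # output dim = 2 * in_dim, matching the Linear layer in the record above
    def __init__(self, in_dim):
        super().__init__()
        self.att = nn.Linear(in_dim, 1)
    def forward(self, x):                      # x: (batch, frames, in_dim)
        w = F.softmax(self.att(x), dim=1)      # (batch, frames, 1)
        mean = (w * x).sum(dim=1)              # weighted mean over frames
        var = (w * (x - mean.unsqueeze(1)) ** 2).sum(dim=1)
        return torch.cat([mean, (var + 1e-8).sqrt()], dim=1)

h = torch.randn(2, 25, 96)                     # dummy frame-level features
print(ToySelfWeightedPooling(96)(h).shape)     # torch.Size([2, 192])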
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/01/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = 
len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data 
This is required for the Pytorch project, but not relevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features
        input:
        ------
          wav: waveform
          idx: idx of the trial in mini-batch
          trunc_len: number of frames to be kept after truncation
          datalength: list of data length in mini-batch
        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # batch size
        batch_size = x.shape[0]

        # buffer to store output embeddings from sub-models
        output_emb = torch.zeros([batch_size * self.v_submodels,
                                  self.v_emd_dim],
                                 device=x.device, dtype=x.dtype)

        # compute embeddings for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
            enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_pooling, self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pooling
            hidden_features = m_pool(hidden_features)

            #  5.
pass through the output layer
            tmp_score = m_output(hidden_features)
            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score

        return output_emb

    def _compute_score(self, x, inference=False):
        """ compute classification scores from embeddings
        """
        # batch size per sub-model (x stacks embeddings from all sub-models)
        batch_size = x.shape[0] // self.v_submodels

        # compute score through p2sgrad layer
        out_score = torch.zeros(
            [batch_size * self.v_submodels, self.v_out_class],
            device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, m_score in enumerate(self.m_angle):
            tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
            out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score

        if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
            return out_score[:, 1]
        else:
            return out_score

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)

            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=scores.dtype)
            target_vec = target_vec.repeat(self.v_submodels)

            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0], scores.mean()))
            # don't write output score as a single file
            return None


class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        self.m_loss = nii_p2sgrad.P2SGradLoss()

    def compute(self, outputs, target):
        """ compute loss from [scores, target_vec] returned by Model.forward()
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss


if __name__ == "__main__":
    print("Definition of model")
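Note on the pooling step in the model above: the Linear layer that follows nii_nn.SelfWeightedPooling takes (self.spec_fb_dim // 16) * 32 * 2 inputs, i.e. twice the frame feature dimension, which suggests the pooled vector concatenates two per-utterance statistics. Below is a minimal sketch of such an attentive pooling layer, assuming it returns the attention-weighted mean and standard deviation; the class name AttentivePoolSketch is hypothetical and the actual SelfWeightedPooling may differ in detail.

import torch
import torch.nn as nn

class AttentivePoolSketch(nn.Module):
    """Self-weighted (attentive) pooling over frames.

    (batch, frame_num, feat_dim) -> (batch, feat_dim * 2), concatenating
    the attention-weighted mean and standard deviation.
    """
    def __init__(self, feat_dim):
        super().__init__()
        # one scalar attention score per frame
        self.score = nn.Linear(feat_dim, 1, bias=False)

    def forward(self, x):
        # (batch, frame_num, 1), normalized over the frame axis
        w = torch.softmax(self.score(x), dim=1)
        mean = torch.sum(w * x, dim=1)
        var = torch.sum(w * (x - mean.unsqueeze(1)) ** 2, dim=1)
        std = torch.sqrt(var.clamp(min=1e-8))
        return torch.cat([mean, std], dim=1)

# usage: 60-dim filter bank -> (60 // 16) * 32 = 96-dim frames
pool = AttentivePoolSketch(96)
print(pool(torch.randn(4, 50, 96)).shape)   # torch.Size([4, 192])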
15,147
33.349206
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/06/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = 
len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data 
This is required for the Pytorch project, but not relevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features
        input:
        ------
          wav: waveform
          idx: idx of the trial in mini-batch
          trunc_len: number of frames to be kept after truncation
          datalength: list of data length in mini-batch
        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # batch size
        batch_size = x.shape[0]

        # buffer to store output embeddings from sub-models
        output_emb = torch.zeros([batch_size * self.v_submodels,
                                  self.v_emd_dim],
                                 device=x.device, dtype=x.dtype)

        # compute embeddings for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
            enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_pooling, self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pooling
            hidden_features = m_pool(hidden_features)

            #  5.
pass through the output layer
            tmp_score = m_output(hidden_features)
            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score

        return output_emb

    def _compute_score(self, x, inference=False):
        """ compute classification scores from embeddings
        """
        # batch size per sub-model (x stacks embeddings from all sub-models)
        batch_size = x.shape[0] // self.v_submodels

        # compute score through p2sgrad layer
        out_score = torch.zeros(
            [batch_size * self.v_submodels, self.v_out_class],
            device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, m_score in enumerate(self.m_angle):
            tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
            out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score

        if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
            return out_score[:, 1]
        else:
            return out_score

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)

            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=scores.dtype)
            target_vec = target_vec.repeat(self.v_submodels)

            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0], scores.mean()))
            # don't write output score as a single file
            return None


class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        self.m_loss = nii_p2sgrad.P2SGradLoss()

    def compute(self, outputs, target):
        """ compute loss from [scores, target_vec] returned by Model.forward()
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss


if __name__ == "__main__":
    print("Definition of model")
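The classifier head above ends in nii_p2sgrad.P2SActivationLayer, paired with nii_p2sgrad.P2SGradLoss in the Loss wrapper. A rough sketch of the idea follows, assuming the activation layer outputs cosine similarities between the embedding and per-class weight vectors, and that the loss is a mean-squared error against one-hot targets; the names ending in "Sketch" are hypothetical, and the library implementation may differ in detail.

import torch
import torch.nn as nn
import torch.nn.functional as F

class P2SActivationSketch(nn.Module):
    """Cosine-similarity head: output[i, j] = cos(theta) between
    embedding i and the weight vector of class j."""
    def __init__(self, emb_dim, num_class):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(emb_dim, num_class))

    def forward(self, x):
        # normalize embeddings and class weights to unit length
        x_n = F.normalize(x, dim=1)
        w_n = F.normalize(self.weight, dim=0)
        return x_n @ w_n          # (batch, num_class), values in [-1, 1]

def p2s_loss_sketch(cos_scores, target):
    """MSE between cos(theta) scores and a one-hot target vector."""
    one_hot = F.one_hot(target, cos_scores.shape[1]).to(cos_scores.dtype)
    return F.mse_loss(cos_scores, one_hot)

# usage with the 64-dim embeddings and 2 classes used above
scores = P2SActivationSketch(64, 2)(torch.randn(8, 64))
loss = p2s_loss_sketch(scores, torch.randint(0, 2, (8,)))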
15,147
33.349206
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/03/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = 
len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data 
This is required for the Pytorch project, but not relevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features
        input:
        ------
          wav: waveform
          idx: idx of the trial in mini-batch
          trunc_len: number of frames to be kept after truncation
          datalength: list of data length in mini-batch
        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # batch size
        batch_size = x.shape[0]

        # buffer to store output embeddings from sub-models
        output_emb = torch.zeros([batch_size * self.v_submodels,
                                  self.v_emd_dim],
                                 device=x.device, dtype=x.dtype)

        # compute embeddings for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
            enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_pooling, self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pooling
            hidden_features = m_pool(hidden_features)

            #  5.
pass through the output layer
            tmp_score = m_output(hidden_features)
            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score

        return output_emb

    def _compute_score(self, x, inference=False):
        """ compute classification scores from embeddings
        """
        # batch size per sub-model (x stacks embeddings from all sub-models)
        batch_size = x.shape[0] // self.v_submodels

        # compute score through p2sgrad layer
        out_score = torch.zeros(
            [batch_size * self.v_submodels, self.v_out_class],
            device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, m_score in enumerate(self.m_angle):
            tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
            out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score

        if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
            return out_score[:, 1]
        else:
            return out_score

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)

            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=scores.dtype)
            target_vec = target_vec.repeat(self.v_submodels)

            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0], scores.mean()))
            # don't write output score as a single file
            return None


class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        self.m_loss = nii_p2sgrad.P2SGradLoss()

    def compute(self, outputs, target):
        """ compute loss from [scores, target_vec] returned by Model.forward()
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss


if __name__ == "__main__":
    print("Definition of model")
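The LCNN blocks above interleave Conv2d with nii_nn.MaxFeatureMap2D, which is why each Conv2d output channel count (64, 96, 128) is halved (to 32, 48, 64) before the next layer. A minimal sketch of the Max-Feature-Map operation, standard in LCNNs (the class name below is hypothetical):

import torch
import torch.nn as nn

class MaxFeatureMap2DSketch(nn.Module):
    """Max-Feature-Map: split the channel axis into two halves and take
    the element-wise max, so C channels in -> C // 2 channels out."""
    def forward(self, x):
        b, c, h, w = x.shape
        assert c % 2 == 0, "channel number must be even"
        half1, half2 = torch.split(x, c // 2, dim=1)
        return torch.max(half1, half2)

mfm = MaxFeatureMap2DSketch()
y = mfm(torch.randn(2, 64, 10, 10))
print(y.shape)   # torch.Size([2, 32, 10, 10]) -- Conv2d(1, 64) -> 32 channels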
15,147
33.349206
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/02/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.p2sgrad as nii_p2sgrad import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = 
len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 2 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) self.m_angle = torch_nn.ModuleList(self.m_angle) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data 
This is required for the Pytorch project, but not relevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features
        input:
        ------
          wav: waveform
          idx: idx of the trial in mini-batch
          trunc_len: number of frames to be kept after truncation
          datalength: list of data length in mini-batch
        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # batch size
        batch_size = x.shape[0]

        # buffer to store output embeddings from sub-models
        output_emb = torch.zeros([batch_size * self.v_submodels,
                                  self.v_emd_dim],
                                 device=x.device, dtype=x.dtype)

        # compute embeddings for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
            enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_pooling, self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pooling
            hidden_features = m_pool(hidden_features)

            #  5.
pass through the output layer
            tmp_score = m_output(hidden_features)
            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score

        return output_emb

    def _compute_score(self, x, inference=False):
        """ compute classification scores from embeddings
        """
        # batch size per sub-model (x stacks embeddings from all sub-models)
        batch_size = x.shape[0] // self.v_submodels

        # compute score through p2sgrad layer
        out_score = torch.zeros(
            [batch_size * self.v_submodels, self.v_out_class],
            device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, m_score in enumerate(self.m_angle):
            tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
            out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score

        if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
            return out_score[:, 1]
        else:
            return out_score

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)

            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=scores.dtype)
            target_vec = target_vec.repeat(self.v_submodels)

            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0], scores.mean()))
            # don't write output score as a single file
            return None


class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        self.m_loss = nii_p2sgrad.P2SGradLoss()

    def compute(self, outputs, target):
        """ compute loss from [scores, target_vec] returned by Model.forward()
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss


if __name__ == "__main__":
    print("Definition of model")
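The front-end in this model family feeds the spectrogram through TrainableLinearFb, a trainable linear layer initialized by nii_front_end.linear_fb(fn, sr, filter_num) and followed by log10(power + eps). Assuming linear_fb builds triangular filters spaced linearly in frequency (an assumption; the library may construct the bank differently), a sketch of such an initializer:

import torch

def linear_fb_sketch(fft_n, sr, filter_num):
    """Triangular filters spaced linearly in frequency, returned as a
    (fft_n // 2 + 1, filter_num) projection matrix. sr is unused here
    because linear spacing over FFT bins does not depend on it."""
    n_bins = fft_n // 2 + 1
    # filter_num + 2 equally spaced edge positions (in bins)
    edges = torch.linspace(0, n_bins - 1, filter_num + 2)
    fb = torch.zeros(n_bins, filter_num)
    bins = torch.arange(n_bins, dtype=torch.float32)
    for m in range(filter_num):
        left, center, right = edges[m], edges[m + 1], edges[m + 2]
        up = (bins - left) / (center - left)
        down = (right - bins) / (right - center)
        fb[:, m] = torch.clamp(torch.min(up, down), min=0.0)
    return fb

# spectrogram (batch, frame, 257) @ fb (257, 60) -> (batch, frame, 60),
# then log10(power + eps) as in TrainableLinearFb.forward()
fb = linear_fb_sketch(512, 16000, 60)
print(fb.shape)    # torch.Size([257, 60])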
15,147
33.349206
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-oc/05/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.oc_softmax as nii_ocsoftmax import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 1 #### # create network #### # 1st part of the classifier self.m_transform = [] # self.m_before_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple 
front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip( self.v_truncate_lens, self.fft_n, self.lfcc_dim)): fft_n_bins = fft_n // 2 + 1 if self.lfcc_with_delta: lfcc_dim = lfcc_dim * 3 self.m_transform.append( torch_nn.Sequential( torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_before_pooling.append( torch_nn.Sequential( nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32), nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32) ) ) self.m_output_act.append( torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim) ) self.m_angle.append( nii_ocsoftmax.OCAngleLayer(self.v_emd_dim) ) self.m_frontend.append( nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_angle = torch_nn.ModuleList(self.m_angle) self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling) # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required for the Pytorch project, but not relevant to this code """ return (y 
- self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features
        input:
        ------
          wav: waveform
          idx: idx of the trial in mini-batch
          trunc_len: number of frames to be kept after truncation
          datalength: list of data length in mini-batch
        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # batch size per sub-model
        batch_size = x.shape[0] // self.v_submodels

        # buffer to store output embeddings from sub-models
        output_emb = torch.zeros([batch_size * self.v_submodels,
                                  self.v_emd_dim],
                                 device=x.device, dtype=x.dtype)

        # compute embeddings for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
            enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_before_pooling, self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pass through LSTM then summing
            hidden_features_lstm = m_be_pool(hidden_features)

            #  5.
pass through the output layer
            tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb

        return output_emb

    def _compute_score(self, x, inference=False):
        """ compute one-class scores from embeddings
        """
        # batch size per sub-model (x stacks embeddings from all sub-models)
        batch_size = x.shape[0] // self.v_submodels

        # buffer to save the scores
        # for non-target classes
        out_score_neg = torch.zeros(
            [x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
        # for target classes
        out_score_pos = torch.zeros_like(out_score_neg)

        # compute scores for each sub-model
        for idx, m_score in enumerate(self.m_angle):
            s_idx = idx * batch_size
            e_idx = idx * batch_size + batch_size
            tmp_score = m_score(x[s_idx:e_idx], inference)
            out_score_neg[s_idx:e_idx] = tmp_score[0]
            out_score_pos[s_idx:e_idx] = tmp_score[1]

        if inference:
            return out_score_neg
        else:
            return out_score_neg, out_score_pos

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)

            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=torch.long)
            target_vec = target_vec.repeat(self.v_submodels)

            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0], scores.mean()))
            # don't write output score as a single file
            return None


class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()

    def compute(self, input_data, target):
        """loss = compute(input_data, target_data)

        Note:
          1. input_data will be the output from Model.forward();
             input_data will be a tuple of [scores, target_vec]
          2. we will not use the target given by the system script;
             we will use the target_vec in input_data[1]
        """
        loss = self.m_loss(input_data[0], input_data[1])
        return loss


if __name__ == "__main__":
    print("Definition of model")
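The model above scores trials with nii_ocsoftmax.OCAngleLayer plus OCSoftmaxWithLoss, i.e. one-class softmax: a single target direction for bona fide speech, with different angular margins for the two classes. The sketch below folds the layer and loss into one module and uses commonly cited hyper-parameters (alpha=20, margins 0.9/0.2), which are assumptions here; the library instead splits this into an angle layer returning negative/positive scores and a separate loss wrapper.

import torch
import torch.nn as nn
import torch.nn.functional as F

class OCSoftmaxSketch(nn.Module):
    """One-class softmax: a single direction w for the bona fide class,
    with different angular margins for bona fide and spoof trials."""
    def __init__(self, emb_dim, m_real=0.9, m_fake=0.2, alpha=20.0):
        super().__init__()
        self.w = nn.Parameter(torch.randn(emb_dim))
        self.m_real, self.m_fake, self.alpha = m_real, m_fake, alpha

    def forward(self, emb, target):
        # cos(theta) between each embedding and the bona fide direction
        cos = F.normalize(emb, dim=1) @ F.normalize(self.w, dim=0)
        # bona fide (target == 1): push cos above m_real
        # spoof     (target == 0): push cos below m_fake
        loss = torch.where(
            target == 1,
            F.softplus(self.alpha * (self.m_real - cos)),
            F.softplus(self.alpha * (cos - self.m_fake)))
        return loss.mean(), cos    # cos can serve as the detection score

oc = OCSoftmaxSketch(64)
loss, score = oc(torch.randn(8, 64), torch.randint(0, 2, (8,)))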
15,459
33.89842
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-oc/04/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import core_modules.oc_softmax as nii_ocsoftmax import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # LFCC dim (base component) self.lfcc_dim = [20] self.lfcc_with_delta = True # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output classes self.v_out_class = 1 #### # create network #### # 1st part of the classifier self.m_transform = [] # self.m_before_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # final part on training self.m_angle = [] # it can handle models with multiple 
front-end configuration
        # by default, only a single front-end
        for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
                self.v_truncate_lens, self.fft_n, self.lfcc_dim)):

            fft_n_bins = fft_n // 2 + 1
            if self.lfcc_with_delta:
                lfcc_dim = lfcc_dim * 3

            self.m_transform.append(
                torch_nn.Sequential(
                    torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),
                    torch_nn.BatchNorm2d(48, affine=False),

                    torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(48, affine=False),
                    torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(64, affine=False),
                    torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),
                    torch_nn.Dropout(0.7)
                )
            )
            self.m_before_pooling.append(
                torch_nn.Sequential(
                    nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
                    nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
                )
            )
            self.m_output_act.append(
                torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
            )
            self.m_angle.append(
                nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
            )
            self.m_frontend.append(
                nii_front_end.LFCC(self.frame_lens[idx],
                                   self.frame_hops[idx],
                                   self.fft_n[idx],
                                   self.m_target_sr,
                                   self.lfcc_dim[idx],
                                   with_energy=True)
            )

        self.m_frontend = torch_nn.ModuleList(self.m_frontend)
        self.m_transform = torch_nn.ModuleList(self.m_transform)
        self.m_output_act = torch_nn.ModuleList(self.m_output_act)
        self.m_angle = torch_nn.ModuleList(self.m_angle)
        self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
        # done
        return

    def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
        """ prepare mean and std for data processing
        This is required for the Pytorch project, but not relevant to this code
        """
        if data_mean_std is not None:
            in_m = torch.from_numpy(data_mean_std[0])
            in_s = torch.from_numpy(data_mean_std[1])
            out_m = torch.from_numpy(data_mean_std[2])
            out_s = torch.from_numpy(data_mean_std[3])
            if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
                print("Input dim: {:d}".format(in_dim))
                print("Mean dim: {:d}".format(in_m.shape[0]))
                print("Std dim: {:d}".format(in_s.shape[0]))
                print("Input dimension incompatible")
                sys.exit(1)
            if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
                print("Output dim: {:d}".format(out_dim))
                print("Mean dim: {:d}".format(out_m.shape[0]))
                print("Std dim: {:d}".format(out_s.shape[0]))
                print("Output dimension incompatible")
                sys.exit(1)
        else:
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
        return in_m, in_s, out_m, out_s

    def normalize_input(self, x):
        """ normalizing the input data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features

        input:
        ------
          wav: waveform
          idx: idx of the trial in mini-batch
          trunc_len: number of frames to be kept after truncation
          datalength: list of data length in mini-batch

        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # number of sub-models
        batch_size = x.shape[0] // self.v_submodels

        # buffer to store output scores from sub-models
        output_emb = torch.zeros([batch_size * self.v_submodels,
                                  self.v_emd_dim],
                                 device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
            enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_before_pooling, self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pass through LSTM then summing
            hidden_features_lstm = m_be_pool(hidden_features)

            #  5. pass through the output layer
            tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))

            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb

        return output_emb

    def _compute_score(self, x, inference=False):
        """
        """
        # number of sub-models
        batch_size = x.shape[0] // self.v_submodels

        # buffer to save the scores
        # for non-target classes
        out_score_neg = torch.zeros(
            [x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
        # for target classes
        out_score_pos = torch.zeros_like(out_score_neg)

        # compute scores for each sub-model
        for idx, m_score in enumerate(self.m_angle):
            s_idx = idx * batch_size
            e_idx = idx * batch_size + batch_size
            tmp_score = m_score(x[s_idx:e_idx], inference)
            out_score_neg[s_idx:e_idx] = tmp_score[0]
            out_score_pos[s_idx:e_idx] = tmp_score[1]

        if inference:
            return out_score_neg
        else:
            return out_score_neg, out_score_pos

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)

            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=torch.long)
            target_vec = target_vec.repeat(self.v_submodels)

            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0], scores.mean()))
            # don't write output score as a single file
            return None

class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        """
        """
        self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()

    def compute(self, input_data, target):
        """loss = compute(input_data, target_data)

        Note:
          1. input_data will be the output from Model.forward()
             input_data will be a list of [scores, target_vec]
          2. we will not use target given by the system script
             we will use the target_vec in input_data[1]
        """
        loss = self.m_loss(input_data[0], input_data[1])
        return loss

if __name__ == "__main__":
    print("Definition of model")
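As a quick illustration of the label convention that the protocol parsers above rely on, here is a minimal, self-contained sketch. It only assumes the ASVspoof2019-style row layout described in the docstrings (trial name in the second column, "bonafide"/"spoof" tag in the last column); the function name and the two demo rows are hypothetical, not taken from a real protocol file.

# Hypothetical standalone sketch of the protocol label convention.
def parse_protocol_lines(lines):
    """Map trial name (2nd column) to 1 (bonafide) or 0 (spoof)."""
    labels = {}
    for line in lines:
        cols = line.split()
        if len(cols) > 1:
            labels[cols[1]] = 1 if cols[-1] == 'bonafide' else 0
    return labels

demo_rows = ["LA_0001 LA_E_1234567 - A07 spoof",   # hypothetical trial
             "LA_0002 LA_E_7654321 - - bonafide"]  # hypothetical trial
print(parse_protocol_lines(demo_rows))
# -> {'LA_E_1234567': 0, 'LA_E_7654321': 1}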
15459
33.89842
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-oc/01/model.py
#!/usr/bin/env python
"""
model.py

Self defined model definition.
Usage:

"""
from __future__ import absolute_import
from __future__ import print_function

import sys
import numpy as np

import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func

import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax

import config as prj_conf

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"

##############
## util
##############

def protocol_parse(protocol_filepath):
    """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial

    input:
    -----
      protocol_filepath: string, path to the protocol file
      for convenience, I put train/dev/eval trials into a single protocol file

    output:
    -------
      data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
    """
    data_buffer = {}
    temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
    for row in temp_buffer:
        if row[-1] == 'bonafide':
            data_buffer[row[1]] = 1
        else:
            data_buffer[row[1]] = 0
    return data_buffer

##############
## FOR MODEL
##############

class Model(torch_nn.Module):
    """ Model definition
    """
    def __init__(self, in_dim, out_dim, args, mean_std=None):
        super(Model, self).__init__()

        ##### required part, no need to change #####
        # mean std of input and output
        in_m, in_s, out_m, out_s = self.prepare_mean_std(
            in_dim, out_dim, args, mean_std)
        self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
        self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
        self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
        self.output_std = torch_nn.Parameter(out_s, requires_grad=False)

        # a flag for debugging (by default False)
        #self.model_debug = False
        #self.validation = False
        #####

        ####
        # on input waveform and output target
        ####
        # Load protocol and prepare the target data for network training
        protocol_file = prj_conf.optional_argument[0]
        self.protocol_parser = protocol_parse(protocol_file)

        # Working sampling rate
        #  torchaudio may be used to change sampling rate
        self.m_target_sr = 16000

        ####
        # optional configs (not used)
        ####
        # re-sampling (optional)
        #self.m_resampler = torchaudio.transforms.Resample(
        #    prj_conf.wav_samp_rate, self.m_target_sr)

        # vad (optional)
        #self.m_vad = torchaudio.transforms.Vad(sample_rate=self.m_target_sr)

        # flag for balanced class (temporary use)
        #self.v_flag = 1

        ####
        # front-end configuration
        #  multiple front-end configurations may be used
        #  by default, use a single front-end
        ####
        # frame shift (number of waveform points)
        self.frame_hops = [160]
        # frame length
        self.frame_lens = [320]
        # FFT length
        self.fft_n = [512]

        # LFCC dim (base component)
        self.lfcc_dim = [20]
        self.lfcc_with_delta = True

        # window type
        self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculating (not used)
        self.amp_floor = 0.00001

        # number of frames to be kept for each trial
        # no truncation
        self.v_truncate_lens = [None for x in self.frame_hops]

        # number of sub-models (by default, a single model)
        self.v_submodels = len(self.frame_lens)

        # dimension of embedding vectors
        self.v_emd_dim = 64
        # output classes
        self.v_out_class = 1

        ####
        # create network
        ####
        # 1st part of the classifier
        self.m_transform = []
        #
        self.m_before_pooling = []
        # 2nd part of the classifier
        self.m_output_act = []
        # front-end
        self.m_frontend = []
        # final part on training
        self.m_angle = []

        # it can handle models with multiple front-end configurations
        # by default, only a single front-end
        for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
                self.v_truncate_lens, self.fft_n, self.lfcc_dim)):

            fft_n_bins = fft_n // 2 + 1
            if self.lfcc_with_delta:
                lfcc_dim = lfcc_dim * 3

            self.m_transform.append(
                torch_nn.Sequential(
                    torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),
                    torch_nn.BatchNorm2d(48, affine=False),

                    torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(48, affine=False),
                    torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(64, affine=False),
                    torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),
                    torch_nn.Dropout(0.7)
                )
            )
            self.m_before_pooling.append(
                torch_nn.Sequential(
                    nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
                    nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
                )
            )
            self.m_output_act.append(
                torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
            )
            self.m_angle.append(
                nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
            )
            self.m_frontend.append(
                nii_front_end.LFCC(self.frame_lens[idx],
                                   self.frame_hops[idx],
                                   self.fft_n[idx],
                                   self.m_target_sr,
                                   self.lfcc_dim[idx],
                                   with_energy=True)
            )

        self.m_frontend = torch_nn.ModuleList(self.m_frontend)
        self.m_transform = torch_nn.ModuleList(self.m_transform)
        self.m_output_act = torch_nn.ModuleList(self.m_output_act)
        self.m_angle = torch_nn.ModuleList(self.m_angle)
        self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
        # done
        return

    def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
        """ prepare mean and std for data processing
        This is required for the Pytorch project, but not relevant to this code
        """
        if data_mean_std is not None:
            in_m = torch.from_numpy(data_mean_std[0])
            in_s = torch.from_numpy(data_mean_std[1])
            out_m = torch.from_numpy(data_mean_std[2])
            out_s = torch.from_numpy(data_mean_std[3])
            if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
                print("Input dim: {:d}".format(in_dim))
                print("Mean dim: {:d}".format(in_m.shape[0]))
                print("Std dim: {:d}".format(in_s.shape[0]))
                print("Input dimension incompatible")
                sys.exit(1)
            if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
                print("Output dim: {:d}".format(out_dim))
                print("Mean dim: {:d}".format(out_m.shape[0]))
                print("Std dim: {:d}".format(out_s.shape[0]))
                print("Output dimension incompatible")
                sys.exit(1)
        else:
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
        return in_m, in_s, out_m, out_s

    def normalize_input(self, x):
        """ normalizing the input data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features

        input:
        ------
          wav: waveform
          idx: idx of the trial in mini-batch
          trunc_len: number of frames to be kept after truncation
          datalength: list of data length in mini-batch

        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # number of sub-models
        batch_size = x.shape[0] // self.v_submodels

        # buffer to store output scores from sub-models
        output_emb = torch.zeros([batch_size * self.v_submodels,
                                  self.v_emd_dim],
                                 device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
            enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_before_pooling, self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pass through LSTM then summing
            hidden_features_lstm = m_be_pool(hidden_features)

            #  5. pass through the output layer
            tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))

            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb

        return output_emb

    def _compute_score(self, x, inference=False):
        """
        """
        # number of sub-models
        batch_size = x.shape[0] // self.v_submodels

        # buffer to save the scores
        # for non-target classes
        out_score_neg = torch.zeros(
            [x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
        # for target classes
        out_score_pos = torch.zeros_like(out_score_neg)

        # compute scores for each sub-model
        for idx, m_score in enumerate(self.m_angle):
            s_idx = idx * batch_size
            e_idx = idx * batch_size + batch_size
            tmp_score = m_score(x[s_idx:e_idx], inference)
            out_score_neg[s_idx:e_idx] = tmp_score[0]
            out_score_pos[s_idx:e_idx] = tmp_score[1]

        if inference:
            return out_score_neg
        else:
            return out_score_neg, out_score_pos

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)

            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=torch.long)
            target_vec = target_vec.repeat(self.v_submodels)

            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0], scores.mean()))
            # don't write output score as a single file
            return None

class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        """
        """
        self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()

    def compute(self, input_data, target):
        """loss = compute(input_data, target_data)

        Note:
          1. input_data will be the output from Model.forward()
             input_data will be a list of [scores, target_vec]
          2. we will not use target given by the system script
             we will use the target_vec in input_data[1]
        """
        loss = self.m_loss(input_data[0], input_data[1])
        return loss

if __name__ == "__main__":
    print("Definition of model")
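To make the batch layout in _compute_embedding and forward above concrete: embeddings produced by each sub-model are stacked along the batch axis, and the per-trial targets are tiled with repeat() so that row i of the embedding buffer and entry i of target_vec describe the same trial. The sketch below uses hypothetical sizes (batch_size=2, v_submodels=3, emd_dim=4); the dumped models default to a single sub-model.

import torch

batch_size, v_submodels, emd_dim = 2, 3, 4  # hypothetical sizes
output_emb = torch.zeros([batch_size * v_submodels, emd_dim])
for idx in range(v_submodels):
    # stand-in for the embedding computed by sub-model idx
    tmp_emb = torch.full([batch_size, emd_dim], float(idx))
    # sub-model idx owns rows [idx*batch_size : (idx+1)*batch_size]
    output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb

target = torch.tensor([1, 0])            # one label per trial
target_vec = target.repeat(v_submodels)  # tensor([1, 0, 1, 0, 1, 0])
print(output_emb.shape, target_vec)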
15459
33.89842
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/05/model.py
#!/usr/bin/env python
"""
model.py

Self defined model definition.
Usage:

"""
from __future__ import absolute_import
from __future__ import print_function

import sys
import numpy as np

import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func

import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax

import config as prj_conf

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"

##############
## util
##############

def protocol_parse(protocol_filepath):
    """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial

    input:
    -----
      protocol_filepath: string, path to the protocol file
      for convenience, I put train/dev/eval trials into a single protocol file

    output:
    -------
      data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
    """
    data_buffer = {}
    temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
    for row in temp_buffer:
        if row[-1] == 'bonafide':
            data_buffer[row[1]] = 1
        else:
            data_buffer[row[1]] = 0
    return data_buffer

##############
## FOR MODEL
##############

class Model(torch_nn.Module):
    """ Model definition
    """
    def __init__(self, in_dim, out_dim, args, mean_std=None):
        super(Model, self).__init__()

        ##### required part, no need to change #####
        # mean std of input and output
        in_m, in_s, out_m, out_s = self.prepare_mean_std(
            in_dim, out_dim, args, mean_std)
        self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
        self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
        self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
        self.output_std = torch_nn.Parameter(out_s, requires_grad=False)

        # a flag for debugging (by default False)
        #self.model_debug = False
        #self.validation = False
        #####

        ####
        # on input waveform and output target
        ####
        # Load protocol and prepare the target data for network training
        protocol_file = prj_conf.optional_argument[0]
        self.protocol_parser = protocol_parse(protocol_file)

        # Working sampling rate
        #  torchaudio may be used to change sampling rate
        self.m_target_sr = 16000

        ####
        # optional configs (not used)
        ####
        # re-sampling (optional)
        #self.m_resampler = torchaudio.transforms.Resample(
        #    prj_conf.wav_samp_rate, self.m_target_sr)

        # vad (optional)
        #self.m_vad = torchaudio.transforms.Vad(sample_rate=self.m_target_sr)

        # flag for balanced class (temporary use)
        #self.v_flag = 1

        ####
        # front-end configuration
        #  multiple front-end configurations may be used
        #  by default, use a single front-end
        ####
        # frame shift (number of waveform points)
        self.frame_hops = [160]
        # frame length
        self.frame_lens = [320]
        # FFT length
        self.fft_n = [512]

        # LFB dim (base component)
        self.lfb_dim = [60]
        self.lfb_with_delta = False

        # window type
        self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculating (not used)
        self.amp_floor = 0.00001

        # number of frames to be kept for each trial
        # no truncation
        self.v_truncate_lens = [None for x in self.frame_hops]

        # number of sub-models (by default, a single model)
        self.v_submodels = len(self.frame_lens)

        # dimension of embedding vectors
        self.v_emd_dim = 64
        # output classes
        self.v_out_class = 2

        ####
        # create network
        ####
        # 1st part of the classifier
        self.m_transform = []
        #
        self.m_before_pooling = []
        # 2nd part of the classifier
        self.m_output_act = []
        # front-end
        self.m_frontend = []
        # final part on training
        self.m_angle = []

        # it can handle models with multiple front-end configurations
        # by default, only a single front-end
        for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
                self.v_truncate_lens, self.fft_n, self.lfb_dim)):

            fft_n_bins = fft_n // 2 + 1
            if self.lfb_with_delta:
                lfb_dim = lfb_dim * 3

            self.m_transform.append(
                torch_nn.Sequential(
                    torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),
                    torch_nn.BatchNorm2d(48, affine=False),

                    torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(48, affine=False),
                    torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch.nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(64, affine=False),
                    torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),
                    torch_nn.Dropout(0.7)
                )
            )
            self.m_before_pooling.append(
                torch_nn.Sequential(
                    nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
                    nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
                )
            )
            self.m_output_act.append(
                torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
            )
            self.m_angle.append(
                nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class,
                                           s=10, m=0.35)
            )
            self.m_frontend.append(
                nii_front_end.LFB(self.frame_lens[idx],
                                  self.frame_hops[idx],
                                  self.fft_n[idx],
                                  self.m_target_sr,
                                  self.lfb_dim[idx],
                                  with_energy=False,
                                  with_emphasis=True,
                                  with_delta=self.lfb_with_delta)
            )

        self.m_frontend = torch_nn.ModuleList(self.m_frontend)
        self.m_transform = torch_nn.ModuleList(self.m_transform)
        self.m_output_act = torch_nn.ModuleList(self.m_output_act)
        self.m_angle = torch_nn.ModuleList(self.m_angle)
        self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
        # done
        return

    def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
        """ prepare mean and std for data processing
        This is required for the Pytorch project, but not relevant to this code
        """
        if data_mean_std is not None:
            in_m = torch.from_numpy(data_mean_std[0])
            in_s = torch.from_numpy(data_mean_std[1])
            out_m = torch.from_numpy(data_mean_std[2])
            out_s = torch.from_numpy(data_mean_std[3])
            if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
                print("Input dim: {:d}".format(in_dim))
                print("Mean dim: {:d}".format(in_m.shape[0]))
                print("Std dim: {:d}".format(in_s.shape[0]))
                print("Input dimension incompatible")
                sys.exit(1)
            if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
                print("Output dim: {:d}".format(out_dim))
                print("Mean dim: {:d}".format(out_m.shape[0]))
                print("Std dim: {:d}".format(out_s.shape[0]))
                print("Output dimension incompatible")
                sys.exit(1)
        else:
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
        return in_m, in_s, out_m, out_s

    def normalize_input(self, x):
        """ normalizing the input data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features

        input:
        ------
          wav: waveform
          idx: idx of the trial in mini-batch
          trunc_len: number of frames to be kept after truncation
          datalength: list of data length in mini-batch

        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # number of sub-models
        batch_size = x.shape[0] // self.v_submodels

        # buffer to store output scores from sub-models
        output_emb = torch.zeros([batch_size * self.v_submodels,
                                  self.v_emd_dim],
                                 device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
            enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_before_pooling, self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pass through LSTM then summing
            hidden_features_lstm = m_be_pool(hidden_features)

            #  5. pass through the output layer
            tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))

            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb

        return output_emb

    def _compute_score(self, x, inference=False):
        """
        """
        # number of sub-models
        batch_size = x.shape[0] // self.v_submodels

        # buffer to save the scores
        # for non-target classes
        out_score_neg = torch.zeros(
            [x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
        # for target classes
        out_score_pos = torch.zeros_like(out_score_neg)

        # compute scores for each sub-model
        for idx, m_score in enumerate(self.m_angle):
            s_idx = idx * batch_size
            e_idx = idx * batch_size + batch_size
            tmp_score = m_score(x[s_idx:e_idx], inference)
            out_score_neg[s_idx:e_idx] = tmp_score[0]
            out_score_pos[s_idx:e_idx] = tmp_score[1]

        if inference:
            return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
        else:
            return out_score_neg, out_score_pos

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)

            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=torch.long)
            target_vec = target_vec.repeat(self.v_submodels)

            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0], scores.mean()))
            # don't write output score as a single file
            return None

class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        """
        """
        self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()

    def compute(self, input_data, target):
        """loss = compute(input_data, target_data)

        Note:
          1. input_data will be the output from Model.forward()
             input_data will be a list of [scores, target_vec]
          2. we will not use target given by the system script
             we will use the target_vec in input_data[1]
        """
        loss = self.m_loss(input_data[0], input_data[1])
        return loss

if __name__ == "__main__":
    print("Definition of model")
15,629
34.123596
90
py
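For reference, a minimal self-contained sketch of the protocol_parse logic in the file above, run on two made-up ASVspoof2019-LA-style protocol lines (the speaker and trial names below are hypothetical, chosen only to match the SPEAKER TRIAL - TAG LABEL column layout):

import io
import numpy as np

def protocol_parse_sketch(protocol_text):
    # same logic as protocol_parse above, but reading from a string
    data_buffer = {}
    for row in np.loadtxt(io.StringIO(protocol_text), dtype='str'):
        # row[1] is the trial name, row[-1] is 'bonafide' or 'spoof'
        data_buffer[row[1]] = 1 if row[-1] == 'bonafide' else 0
    return data_buffer

# hypothetical protocol lines in the ASVspoof2019 LA column layout
demo_protocol = (
    "LA_0001 LA_T_1000001 - A01 spoof\n"
    "LA_0002 LA_T_1000002 - - bonafide\n"
)
print(protocol_parse_sketch(demo_protocol))
# {'LA_T_1000001': 0, 'LA_T_1000002': 1}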
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/04/model.py
15,629
34.123596
90
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/01/model.py
15,629
34.123596
90
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/06/model.py
15,629
34.123596
90
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/03/model.py
15,629
34.123596
90
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/02/model.py
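Finally, the training-time contract between Model.forward and Loss.compute in the file above is that the loss ignores the target passed by the framework and uses the protocol-derived target_vec packed into the model output. A minimal sketch of that hand-off, with a plain cross-entropy standing in for AMSoftmaxWithLoss (whose exact signature lives in core_modules.am_softmax; the tensors below are illustrative stand-ins):

import torch
import torch.nn.functional as F

# stand-in for the [scores, target_vec, True] list Model.forward returns
# during training; scores is the (neg_logits, pos_logits) pair from
# _compute_score
neg = torch.randn(4, 2)
pos = neg - 0.35 * 10                  # margin-shifted logits (illustrative)
target_vec = torch.tensor([1, 0, 1, 1])
model_output = [(neg, pos), target_vec, True]

def loss_sketch(input_data, target=None):
    # like Loss.compute: ignore `target`, use the packed target_vec;
    # AM-softmax cross-entropy uses the margin logits for the true class
    # and the margin-free logits for the others
    (neg_logits, pos_logits), tgt = input_data[0], input_data[1]
    logits = torch.where(
        F.one_hot(tgt, 2).bool(), pos_logits, neg_logits)
    return F.cross_entropy(logits, tgt)

print(loss_sketch(model_output))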
15,629
34.123596
90
py
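The record above replaces the sigmoid output head used by the other models in this dump with an additive-margin (AM) softmax: m_angle holds one AMAngleLayer per sub-model, and _compute_score collects a (no-margin, with-margin) score pair that AMSoftmaxWithLoss consumes. Below is a minimal, hypothetical sketch of such a layer; the class name ToyAMAngleLayer and its internals are illustrative assumptions, not the code of core_modules.am_softmax.

import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func

class ToyAMAngleLayer(torch_nn.Module):
    # illustrative stand-in for nii_amsoftmax.AMAngleLayer (hypothetical)
    def __init__(self, in_dim, num_class, s=10.0, m=0.35):
        super().__init__()
        self.w = torch_nn.Parameter(torch.randn(in_dim, num_class))
        self.s, self.m = s, m

    def forward(self, x, inference=False):
        # cosine between L2-normalized embeddings and class weight vectors
        cos = torch_nn_func.normalize(x, dim=1) \
              @ torch_nn_func.normalize(self.w, dim=0)
        if inference:
            # no margin at inference time
            return self.s * cos, self.s * cos
        # scores without margin (used for non-target classes) and with an
        # additive margin (used at the target-class positions by the loss)
        return self.s * cos, self.s * (cos - self.m)

emb = torch.randn(4, 64)                 # (batch, v_emd_dim)
neg, pos = ToyAMAngleLayer(64, 2)(emb)
print(neg.shape, pos.shape)              # torch.Size([4, 2]) twice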
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/05/model.py
#!/usr/bin/env python
"""
model.py

Self defined model definition.
Usage:

"""
from __future__ import absolute_import
from __future__ import print_function

import sys
import numpy as np

import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func

import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf

__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"

##############
## util
##############

def protocol_parse(protocol_filepath):
    """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial

    input:
    -----
      protocol_filepath: string, path to the protocol file
        for convenience, I put train/dev/eval trials into a single protocol
        file

    output:
    -------
      data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
    """
    data_buffer = {}
    temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
    for row in temp_buffer:
        if row[-1] == 'bonafide':
            data_buffer[row[1]] = 1
        else:
            data_buffer[row[1]] = 0
    return data_buffer

##############
## FOR MODEL
##############

class TrainableLinearFb(nii_nn.LinearInitialized):
    """Linear layer initialized with linear filter bank
    """
    def __init__(self, fn, sr, filter_num):
        super(TrainableLinearFb, self).__init__(
            nii_front_end.linear_fb(fn, sr, filter_num))
        return

    def forward(self, x):
        return torch.log10(
            torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
            torch.finfo(torch.float32).eps)


class Model(torch_nn.Module):
    """ Model definition
    """
    def __init__(self, in_dim, out_dim, args, mean_std=None):
        super(Model, self).__init__()

        ##### required part, no need to change #####
        # mean std of input and output
        in_m, in_s, out_m, out_s = self.prepare_mean_std(
            in_dim, out_dim, args, mean_std)
        self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
        self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
        self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
        self.output_std = torch_nn.Parameter(out_s, requires_grad=False)

        # a flag for debugging (by default False)
        #self.model_debug = False
        #self.validation = False
        #####

        ####
        # on input waveform and output target
        ####
        # Load protocol and prepare the target data for network training
        protocol_file = prj_conf.optional_argument[0]
        self.protocol_parser = protocol_parse(protocol_file)

        # Working sampling rate
        #  torchaudio may be used to change sampling rate
        self.m_target_sr = 16000

        ####
        # optional configs (not used)
        ####
        # re-sampling (optional)
        #self.m_resampler = torchaudio.transforms.Resample(
        #    prj_conf.wav_samp_rate, self.m_target_sr)

        # vad (optional)
        #self.m_vad = torchaudio.transforms.Vad(sample_rate=self.m_target_sr)

        # flag for balanced class (temporary use)
        #self.v_flag = 1

        ####
        # front-end configuration
        #  multiple front-end configurations may be used
        #  by default, use a single front-end
        ####
        # frame shift (number of waveform points)
        self.frame_hops = [160]
        # frame length
        self.frame_lens = [320]
        # FFT length
        self.fft_n = [512]

        # spectrogram dim (base component)
        self.spec_with_delta = False
        self.spec_fb_dim = 60

        # window type
        self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculating (not used)
        self.amp_floor = 0.00001

        # number of frames to be kept for each trial
        # no truncation
        self.v_truncate_lens = [None for x in self.frame_hops]

        # number of sub-models (by default, a single model)
        self.v_submodels = len(self.frame_lens)

        # dimension of embedding vectors
        # here, the embedding is just the activation before sigmoid()
        self.v_emd_dim = 1

        ####
        # create network
        ####
        # 1st part of the classifier
        self.m_transform = []
        # pooling layer
        self.m_pooling = []
        # 2nd part of the classifier
        self.m_output_act = []
        # front-end
        self.m_frontend = []

        # it can handle models with multiple front-end configurations
        # by default, only a single front-end
        for idx, (trunc_len, fft_n) in enumerate(zip(
                self.v_truncate_lens, self.fft_n)):

            fft_n_bins = fft_n // 2 + 1

            self.m_transform.append(
                torch_nn.Sequential(
                    TrainableLinearFb(fft_n, self.m_target_sr,
                                      self.spec_fb_dim),

                    torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),
                    torch_nn.BatchNorm2d(48, affine=False),

                    torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(48, affine=False),
                    torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(64, affine=False),
                    torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),

                    torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.BatchNorm2d(32, affine=False),
                    torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
                    nii_nn.MaxFeatureMap2D(),
                    torch_nn.MaxPool2d([2, 2], [2, 2]),

                    torch_nn.Dropout(0.7)
                )
            )
            self.m_pooling.append(
                nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
            )
            self.m_output_act.append(
                torch_nn.Linear((self.spec_fb_dim // 16) * 32 * 2,
                                self.v_emd_dim)
            )
            self.m_frontend.append(
                nii_front_end.Spectrogram(self.frame_lens[idx],
                                          self.frame_hops[idx],
                                          self.fft_n[idx],
                                          self.m_target_sr)
            )

        self.m_frontend = torch_nn.ModuleList(self.m_frontend)
        self.m_transform = torch_nn.ModuleList(self.m_transform)
        self.m_output_act = torch_nn.ModuleList(self.m_output_act)
        self.m_pooling = torch_nn.ModuleList(self.m_pooling)
        # output
        # done
        return

    def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
        """ prepare mean and std for data processing
        This is required for the Pytorch project, but not relevant to this code
        """
        if data_mean_std is not None:
            in_m = torch.from_numpy(data_mean_std[0])
            in_s = torch.from_numpy(data_mean_std[1])
            out_m = torch.from_numpy(data_mean_std[2])
            out_s = torch.from_numpy(data_mean_std[3])
            if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
                print("Input dim: {:d}".format(in_dim))
                print("Mean dim: {:d}".format(in_m.shape[0]))
                print("Std dim: {:d}".format(in_s.shape[0]))
                print("Input dimension incompatible")
                sys.exit(1)
            if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
                print("Output dim: {:d}".format(out_dim))
                print("Mean dim: {:d}".format(out_m.shape[0]))
                print("Std dim: {:d}".format(out_s.shape[0]))
                print("Output dimension incompatible")
                sys.exit(1)
        else:
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
        return in_m, in_s, out_m, out_s

    def normalize_input(self, x):
        """ normalizing the input data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (x - self.input_mean) / self.input_std

    def normalize_target(self, y):
        """ normalizing the target data
        This is required for the Pytorch project, but not relevant to this code
        """
        return (y - self.output_mean) / self.output_std

    def denormalize_output(self, y):
        """ denormalizing the generated output from network
        This is required for the Pytorch project, but not relevant to this code
        """
        return y * self.output_std + self.output_mean

    def _front_end(self, wav, idx, trunc_len, datalength):
        """ simple fixed front-end to extract features

        input:
        ------
          wav: waveform
          idx: idx of the trial in mini-batch
          trunc_len: number of frames to be kept after truncation
          datalength: list of data length in mini-batch

        output:
        -------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
        """
        with torch.no_grad():
            x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
        # return
        return x_sp_amp

    def _compute_embedding(self, x, datalength):
        """ definition of forward method
        Assume x (batchsize, length, dim)
        Output x (batchsize * number_filter, output_dim)
        """
        # resample if necessary
        #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)

        # number of sub models
        batch_size = x.shape[0]

        # buffer to store output scores from sub-models
        output_emb = torch.zeros([batch_size * self.v_submodels,
                                  self.v_emd_dim],
                                 device=x.device, dtype=x.dtype)

        # compute scores for each sub-model
        for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
            enumerate(
                zip(self.frame_hops, self.frame_lens, self.fft_n,
                    self.v_truncate_lens, self.m_transform,
                    self.m_pooling, self.m_output_act)):

            # extract front-end feature
            x_sp_amp = self._front_end(x, idx, trunc_len, datalength)

            # compute scores
            #  1. unsqueeze to (batch, 1, frame_length, fft_bin)
            #  2. compute hidden features
            hidden_features = m_trans(x_sp_amp.unsqueeze(1))

            #  3. (batch, channel, frame//N, feat_dim//N) ->
            #     (batch, frame//N, channel * feat_dim//N)
            #     where N is caused by conv with stride
            hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
            frame_num = hidden_features.shape[1]
            hidden_features = hidden_features.view(batch_size, frame_num, -1)

            #  4. pooling
            hidden_features = m_pool(hidden_features)

            #  5. pass through the output layer
            tmp_score = m_output(hidden_features)

            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score

        return output_emb

    def _compute_score(self, feature_vec, inference=False):
        """
        """
        # feature_vec is [batch * submodel, 1]
        if inference:
            return feature_vec.squeeze(1)
        else:
            return torch.sigmoid(feature_vec).squeeze(1)

    def _get_target(self, filenames):
        try:
            return [self.protocol_parser[x] for x in filenames]
        except KeyError:
            print("Cannot find target data for %s" % (str(filenames)))
            sys.exit(1)

    def forward(self, x, fileinfo):
        #with torch.no_grad():
        #    vad_waveform = self.m_vad(x.squeeze(-1))
        #    vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
        #    if vad_waveform.shape[-1] > 0:
        #        x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
        #    else:
        #        pass

        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]

        if self.training:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec)

            # target
            target = self._get_target(filenames)
            target_vec = torch.tensor(target, device=x.device,
                                      dtype=scores.dtype)
            target_vec = target_vec.repeat(self.v_submodels)

            return [scores, target_vec, True]
        else:
            feature_vec = self._compute_embedding(x, datalength)
            scores = self._compute_score(feature_vec, True)
            target = self._get_target(filenames)
            print("Output, %s, %d, %f" % (filenames[0],
                                          target[0],
                                          scores.mean()))
            # don't write output score as a single file
            return None


class Loss():
    """ Wrapper to define loss function
    """
    def __init__(self, args):
        """
        """
        self.m_loss = torch_nn.BCELoss()

    def compute(self, outputs, target):
        """
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss


if __name__ == "__main__":
    print("Definition of model")
14,697
33.421546
80
py
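All spec2-lcnn-attention-sig records in this dump share the same LCNN trunk, in which each Conv2d outputs twice the channels consumed by the next layer because it is followed by nii_nn.MaxFeatureMap2D (e.g. Conv2d(1, 64, ...) feeding a layer that expects 32 channels). A minimal sketch of that activation, assuming the usual LCNN definition (split the channel axis in half, take the element-wise max); this is an illustration, not the repository's exact implementation.

import torch

def max_feature_map_2d(x):
    # x: (batch, channel, height, width); channel must be even
    a, b = torch.chunk(x, 2, dim=1)
    return torch.max(a, b)

x = torch.randn(8, 64, 100, 60)
print(max_feature_map_2d(x).shape)   # torch.Size([8, 32, 100, 60])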
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/04/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors # 
here, the embedding is just the activation before sigmoid() self.v_emd_dim = 1 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required 
for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5. 
pass through the output layer tmp_score = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # feature_vec is [batch * submodel, 1] if inference: return feature_vec.squeeze(1) else: return torch.sigmoid(feature_vec).squeeze(1) def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = torch_nn.BCELoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,697
33.421546
80
py
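In these models, frame-level LCNN features are aggregated by nii_nn.SelfWeightedPooling before the final Linear layer. The Linear layer's input size, (spec_fb_dim // 16) * 32 * 2, suggests the pooling concatenates two utterance-level statistics. A sketch under that assumption (attention-weighted mean and standard deviation; the class name and internals are illustrative, not the repository's exact code).

import torch
import torch.nn as torch_nn

class ToySelfWeightedPooling(torch_nn.Module):
    def __init__(self, feat_dim):
        super().__init__()
        self.att = torch_nn.Linear(feat_dim, 1)

    def forward(self, x):
        # x: (batch, frame_num, feat_dim)
        w = torch.softmax(self.att(x), dim=1)      # attention over frames
        mean = torch.sum(w * x, dim=1)             # weighted mean
        std = torch.sqrt(torch.clamp(
            torch.sum(w * x ** 2, dim=1) - mean ** 2, min=1e-8))
        return torch.cat([mean, std], dim=1)       # (batch, 2 * feat_dim)

pool = ToySelfWeightedPooling(96)                  # (60 // 16) * 32 = 96
print(pool(torch.randn(4, 50, 96)).shape)          # torch.Size([4, 192])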
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/01/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors # 
here, the embedding is just the activation before sigmoid() self.v_emd_dim = 1 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required 
for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5. 
pass through the output layer tmp_score = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # feature_vec is [batch * submodel, 1] if inference: return feature_vec.squeeze(1) else: return torch.sigmoid(feature_vec).squeeze(1) def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = torch_nn.BCELoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,697
33.421546
80
py
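TrainableLinearFb is a Linear layer initialized from nii_front_end.linear_fb(fn, sr, filter_num) and then trained jointly with the classifier; its forward pass squares the filterbank output and takes log10. One plausible construction of such an initializer is a triangular filterbank with linearly spaced centres; the helper below is an assumption for illustration, not the repository's exact function.

import numpy as np
import torch

def toy_linear_fb(fft_n, sr, filter_num):
    # triangular filters with linearly spaced centres over the FFT bins
    n_bins = fft_n // 2 + 1
    centres = np.linspace(0, n_bins - 1, filter_num + 2)
    fb = np.zeros([n_bins, filter_num])
    for i in range(filter_num):
        left, mid, right = centres[i], centres[i + 1], centres[i + 2]
        for j in range(n_bins):
            if left <= j <= mid and mid > left:
                fb[j, i] = (j - left) / (mid - left)
            elif mid < j <= right and right > mid:
                fb[j, i] = (right - j) / (right - mid)
    return torch.from_numpy(fb.astype(np.float32))

weight = toy_linear_fb(512, 16000, 60)   # would initialise the Linear layer
print(weight.shape)                       # torch.Size([257, 60])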
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/06/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors # 
here, the embedding is just the activation before sigmoid() self.v_emd_dim = 1 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required 
for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5. 
pass through the output layer tmp_score = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # feature_vec is [batch * submodel, 1] if inference: return feature_vec.squeeze(1) else: return torch.sigmoid(feature_vec).squeeze(1) def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = torch_nn.BCELoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
14,697
33.421546
80
py
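The training branch of forward() in these models returns sigmoid scores stacked across sub-models together with a target vector repeated v_submodels times, so Loss can apply BCELoss element-wise. A toy example of that pairing, with made-up batch sizes and targets:

import torch

v_submodels, batch = 2, 3
scores = torch.sigmoid(torch.randn(batch * v_submodels))  # training branch
target = torch.tensor([1., 0., 1.])                       # bonafide=1, spoof=0
target_vec = target.repeat(v_submodels)                   # shape (6,)
loss = torch.nn.BCELoss()(scores, target_vec)
print(target_vec, loss.item())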
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/03/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors # 
here, the embedding is just the activation before sigmoid() self.v_emd_dim = 1 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required 
for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub models batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5. 
pass through the output layer tmp_score = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # feature_vec is [batch * submodel, 1] if inference: return feature_vec.squeeze(1) else: return torch.sigmoid(feature_vec).squeeze(1) def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = torch_nn.BCELoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
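# --- illustrative sketch, not part of the original file ---
# Why Conv2d(1, 64, ...) in the stack above is followed by layers that
# expect 32 input channels: the Max-Feature-Map halves the channel
# dimension. This is a minimal re-implementation sketch, assuming
# nii_nn.MaxFeatureMap2D follows the standard LCNN definition
# (element-wise max over the two channel halves); the project's own
# class may differ in detail.
import torch

def max_feature_map_2d(x):
    # x: (batch, channel, height, width) with an even channel count
    half_a, half_b = torch.split(x, x.shape[1] // 2, dim=1)
    return torch.max(half_a, half_b)

# example: (8, 64, 100, 60) -> (8, 32, 100, 60)
y = max_feature_map_2d(torch.randn(8, 64, 100, 60))
assert y.shape == (8, 32, 100, 60)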
14697
33.421546
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/02/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # Working sampling rate # torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of waveform points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] # spectrogram dim (base component) self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating (not used) self.amp_floor = 0.00001 # number of frames to be kept for each trial # no truncation self.v_truncate_lens = [None for x in self.frame_hops] # number of sub-models (by default, a single model) self.v_submodels = len(self.frame_lens) # dimension of embedding vectors # 
here, the embedding is just the activation before sigmoid() self.v_emd_dim = 1 #### # create network #### # 1st part of the classifier self.m_transform = [] # pooling layer self.m_pooling = [] # 2nd part of the classifier self.m_output_act = [] # front-end self.m_frontend = [] # it can handle models with multiple front-end configuration # by default, only a single front-end for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7) ) ) self.m_pooling.append( nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32) ) self.m_output_act.append( torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_pooling = torch_nn.ModuleList(self.m_pooling) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing This is required for the Pytorch project, but not relevant to this code """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data This is required for the Pytorch project, but not relevant to this code """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data This is required 
for the Pytorch project, but not relevant to this code """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network This is required for the Pytorch project, but not relevant to this code """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features input: ------ wav: waveform idx: idx of the trial in mini-batch trunc_len: number of frames to be kept after truncation datalength: list of data length in mini-batch output: ------- x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim) """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch size batch_size = x.shape[0] # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \ enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_pooling, self.m_output_act)): # extract front-end feature x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. (batch, channel, frame//N, feat_dim//N) -> # (batch, frame//N, channel * feat_dim//N) # where N is caused by conv with stride hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous() frame_num = hidden_features.shape[1] hidden_features = hidden_features.view(batch_size, frame_num, -1) # 4. pooling hidden_features = m_pool(hidden_features) # 5.
pass through the output layer tmp_score = m_output(hidden_features) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # feature_vec is [batch * submodel, 1] if inference: return feature_vec.squeeze(1) else: return torch.sigmoid(feature_vec).squeeze(1) def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = torch_nn.BCELoss() def compute(self, outputs, target): """ """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
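# --- illustrative sketch, not part of the original file ---
# Usage sketch for protocol_parse() above. The trial names and the
# temporary file are hypothetical; the column layout follows the
# docstring (SPEAKER TRIAL - SPOOF_TYPE TAG, one trial per row).
import tempfile

_demo_rows = ("LA_0031 LA_E_1111111 - A13 spoof\n"
              "LA_0030 LA_E_2222222 - - bonafide\n")
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as ptr:
    ptr.write(_demo_rows)
    _demo_path = ptr.name

# expected result: {'LA_E_1111111': 0, 'LA_E_2222222': 1}
# labels = protocol_parse(_demo_path)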
14697
33.421546
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/05/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate; torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manually set the number of frames kept per trial (750 frames for the 160-point frame shift) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class 
self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (self.spec_fb_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: # default: zero mean, unit std, i.e., normalization is an identity map in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by 
truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for each trial in the batch, handle padding and trimming independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch size per sub-model batch_size = x.shape[0] // self.v_submodels # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. 
flatten and transform through output function tmp_emb = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
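# --- illustrative sketch, not part of the original file ---
# Quick check of the truncation length configured in Model.__init__()
# above: with a 160-point frame shift at 16 kHz, each trial is cut (or
# repeat-padded) to 750 frames, i.e. about 7.5 seconds of audio.
hop = 160                                # self.frame_hops[0]
trunc_len = 10 * 16 * 750 // hop         # the expression used above
assert trunc_len == 750
assert trunc_len * hop / 16000.0 == 7.5  # seconds covered per trial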
15800
34.587838
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/04/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate; torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manually set the number of frames kept per trial (750 frames for the 160-point frame shift) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class 
self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (self.spec_fb_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: # default: zero mean, unit std, i.e., normalization is an identity map in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by 
truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for each trial in the batch, handle padding and trimming independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch size per sub-model batch_size = x.shape[0] // self.v_submodels # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. 
flatten and transform through output function tmp_emb = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
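# --- illustrative sketch, not part of the original file ---
# What the angular output layer above is assumed to compute. Assumption:
# nii_p2sgrad.P2SActivationLayer returns cos(theta) between the
# L2-normalised embedding and one weight vector per class, following the
# P2SGrad formulation; the project's implementation may differ in detail.
import torch
import torch.nn.functional as F

class CosineScoreSketch(torch.nn.Module):
    def __init__(self, in_dim, out_class):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(in_dim, out_class))

    def forward(self, emb):
        # emb: (batch, in_dim) -> (batch, out_class) cosine similarities
        return F.normalize(emb, dim=1) @ F.normalize(self.weight, dim=0)

# scores = CosineScoreSketch(64, 2)(torch.randn(4, 64))  # values in [-1, 1]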
15800
34.587838
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/01/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate; torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manually set the number of frames kept per trial (750 frames for the 160-point frame shift) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class 
self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (self.spec_fb_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: # default: zero mean, unit std, i.e., normalization is an identity map in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by 
truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for each trial in the batch, handle padding and trimming independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch size per sub-model batch_size = x.shape[0] // self.v_submodels # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. 
flatten and transform through output function tmp_emb = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
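# --- illustrative sketch, not part of the original file ---
# Stand-alone view of the repeat-padding branch in _front_end() above:
# a trial shorter than trunc_len frames is tiled along the frame axis
# and then cut to exactly trunc_len. Shapes here are toy values.
import numpy as np
import torch

spec = torch.arange(6.0).reshape(2, 3)         # (fft_bin=2, true_frame_num=3)
trunc_len = 7
rep = int(np.ceil(trunc_len / spec.shape[1]))  # 3 repetitions
padded = spec.repeat(1, rep)[:, 0:trunc_len]   # (2, 9) cut to (2, 7)
assert padded.shape == (2, trunc_len)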
15800
34.587838
80
py
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/06/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dic, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate; torchaudio may be used to change sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculating self.amp_floor = 0.00001 # manually set the number of frames kept per trial (750 frames for the 160-point frame shift) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class 
self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (self.spec_fb_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ prepare mean and std for data processing """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: # default: zero mean, unit std, i.e., normalization is an identity map in_m = torch.zeros([in_dim]) in_s = torch.ones([in_dim]) out_m = torch.zeros([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by 
truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for each trial in the batch, handle padding and trimming independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # batch size per sub-model batch_size = x.shape[0] // self.v_submodels # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3. 
flatten and transform through output function tmp_emb = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,800
34.587838
80
py
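
The LCNN back-end above scores each utterance by passing its 64-dim embedding through nii_p2sgrad.P2SActivationLayer and training with nii_p2sgrad.P2SGradLoss. A minimal sketch of that scheme, assuming the activation layer returns the cosine similarity between the embedding and one learned vector per class, and the loss is the mean-squared error against a one-hot target (an illustrative re-implementation, not the library code):

import torch
import torch.nn as torch_nn
import torch.nn.functional as F

class P2SActivationSketch(torch_nn.Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.weight = torch_nn.Parameter(torch.randn(in_dim, out_dim))
    def forward(self, x):
        # cos(theta) between L2-normalized embeddings and class vectors
        return F.normalize(x, dim=1) @ F.normalize(self.weight, dim=0)

def p2sgrad_loss_sketch(cos_theta, target):
    # MSE between cos(theta) and the one-hot class target
    one_hot = F.one_hot(target, cos_theta.shape[1]).to(cos_theta.dtype)
    return F.mse_loss(cos_theta, one_hot)

layer = P2SActivationSketch(64, 2)
cos_theta = layer(torch.randn(4, 64))   # (batch, 2), values in [-1, 1]
loss = p2sgrad_loss_sketch(cos_theta, torch.tensor([1, 0, 1, 0]))
# at inference, cos_theta[:, 1] plays the role of out_score[:, 1] above
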
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/03/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate; torchaudio is used to change the sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation self.amp_floor = 0.00001 # number of frames kept per utterance (750 for a 160-point hop; see _front_end) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class
self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (self.spec_fb_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by 
truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly, this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub-models batch_size = x.shape[0] // self.v_submodels # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3.
flatten and transform through output function tmp_emb = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,800
34.587838
80
py
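
The _front_end method above fixes every spectrogram to trunc_len frames: longer utterances are cropped at a random offset, shorter ones are tiled until the buffer is full. A standalone sketch of that rule for a single file (trim_or_pad and the toy shapes are made up for illustration):

import torch

def trim_or_pad(x_sp_amp, true_frame_num, trunc_len):
    # x_sp_amp: (fft_bin, frame_num) spectrogram of one file
    if true_frame_num > trunc_len:
        # crop trunc_len frames starting at a random offset
        pos = torch.randint(0, true_frame_num - trunc_len + 1, (1,)).item()
        return x_sp_amp[:, pos:pos + trunc_len]
    # tile the valid frames, then cut to the buffer length
    rep = -(-trunc_len // true_frame_num)          # ceil division
    return x_sp_amp[:, :true_frame_num].repeat(1, rep)[:, :trunc_len]

spec = torch.arange(12.0).reshape(2, 6)            # 2 bins, 6 frames
print(trim_or_pad(spec, true_frame_num=6, trunc_len=4).shape)  # (2, 4)
print(trim_or_pad(spec, true_frame_num=3, trunc_len=8).shape)  # (2, 8), tiled
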
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/02/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.p2sgrad as nii_p2sgrad import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate; torchaudio is used to change the sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation self.amp_floor = 0.00001 # number of frames kept per utterance (750 for a 160-point hop; see _front_end) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim = 64 # output class
self.v_out_class = 2 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] self.m_angle = [] for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (self.spec_fb_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_angle.append( nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) self.m_angle = torch_nn.ModuleList(self.m_angle) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by 
truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly, this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub-models batch_size = x.shape[0] // self.v_submodels # buffer to store output embedding from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3.
flatten and transform through output function tmp_emb = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, x, inference=False): """ """ # number of sub models batch_size = x.shape[0] # compute score through p2sgrad layer out_score = torch.zeros([batch_size * self.v_submodels, self.v_out_class], device=x.device, dtype=x.dtype) # compute scores for each sub-models for idx, m_score in enumerate(self.m_angle): tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size]) out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score if inference: # output_score [:, 1] corresponds to the positive class return out_score[:, 1] else: return out_score def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = nii_p2sgrad.P2SGradLoss() def compute(self, outputs, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,800
34.587838
80
py
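
A detail worth checking in the models above is the input width of the first Linear layer, (trunc_len // 16) * (self.spec_fb_dim // 16) * 32: the four stride-2 MaxPool2d layers shrink both spatial axes by a factor of roughly 16, and MaxFeatureMap2D halves the final conv's 64 channels to 32. A quick arithmetic check with the configuration values used above:

# configuration values from the models above
trunc_len = 10 * 16 * 750 // 160       # 750 frames per utterance
spec_fb_dim = 60                       # filter-bank dimension
frames, bins = trunc_len, spec_fb_dim
for _ in range(4):                     # four MaxPool2d([2, 2], [2, 2]) layers
    frames, bins = frames // 2, bins // 2
# the chained floors happen to agree with a single // 16 for these values
assert (frames, bins) == (trunc_len // 16, spec_fb_dim // 16)   # (46, 3)
print(frames * bins * 32)              # 4416 inputs to Linear(..., 160)
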
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-sig/05/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.a_softmax as nii_a_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate; torchaudio is used to change the sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation self.amp_floor = 0.00001 # number of frames kept per utterance (750 for a 160-point hop; see _front_end) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim =
1 # output class self.v_out_class = 1 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] #self.m_a_softmax = [] for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (self.spec_fb_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = 
self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly, this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub-models batch_size = x.shape[0] // self.v_submodels # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3.
flatten and transform through output function tmp_emb = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # feature_vec is [batch * submodel, 1] if inference: return feature_vec.squeeze(1) else: return torch.sigmoid(feature_vec).squeeze(1) def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = torch_nn.BCELoss() def compute(self, outputs, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,079
34.233645
80
py
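
Unlike the p2s variants, the -sig models above use a 1-dim embedding as a logit: _compute_score applies a sigmoid during training so that BCELoss can be used, and returns the raw value at inference time. A minimal sketch of that split (toy tensors for illustration):

import torch

emb = torch.randn(4, 1)                      # (batch * submodels, v_emd_dim=1)
train_score = torch.sigmoid(emb).squeeze(1)  # in (0, 1), consumed by BCELoss
infer_score = emb.squeeze(1)                 # raw logit; higher means bonafide
target = torch.tensor([1., 0., 1., 0.])
print(torch.nn.BCELoss()(train_score, target))

Folding the sigmoid into the loss with BCEWithLogitsLoss would be the numerically safer equivalent, at the cost of changing the Loss wrapper.
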
project-NN-Pytorch-scripts
project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-sig/04/model.py
#!/usr/bin/env python """ model.py Self defined model definition. Usage: """ from __future__ import absolute_import from __future__ import print_function import sys import numpy as np import torch import torch.nn as torch_nn import torch.nn.functional as torch_nn_func import sandbox.block_nn as nii_nn import sandbox.util_frontend as nii_front_end import core_scripts.other_tools.debug as nii_debug import core_modules.a_softmax as nii_a_softmax import core_scripts.data_io.seq_info as nii_seq_tk import config as prj_conf __author__ = "Xin Wang" __email__ = "[email protected]" __copyright__ = "Copyright 2020, Xin Wang" ############## ## util ############## def protocol_parse(protocol_filepath): """ Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial input: ----- protocol_filepath: string, path to the protocol file for convenience, I put train/dev/eval trials into a single protocol file output: ------- data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof) """ data_buffer = {} temp_buffer = np.loadtxt(protocol_filepath, dtype='str') for row in temp_buffer: if row[-1] == 'bonafide': data_buffer[row[1]] = 1 else: data_buffer[row[1]] = 0 return data_buffer ############## ## FOR MODEL ############## class TrainableLinearFb(nii_nn.LinearInitialized): """Linear layer initialized with linear filter bank """ def __init__(self, fn, sr, filter_num): super(TrainableLinearFb, self).__init__( nii_front_end.linear_fb(fn, sr, filter_num)) return def forward(self, x): return torch.log10( torch.pow(super(TrainableLinearFb, self).forward(x), 2) + torch.finfo(torch.float32).eps) ## FOR MODEL class Model(torch_nn.Module): """ Model definition """ def __init__(self, in_dim, out_dim, args, mean_std=None): super(Model, self).__init__() ##### required part, no need to change ##### # mean std of input and output in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\ args, mean_std) self.input_mean = torch_nn.Parameter(in_m, requires_grad=False) self.input_std = torch_nn.Parameter(in_s, requires_grad=False) self.output_mean = torch_nn.Parameter(out_m, requires_grad=False) self.output_std = torch_nn.Parameter(out_s, requires_grad=False) # a flag for debugging (by default False) #self.model_debug = False #self.validation = False ##### #### # on input waveform and output target #### # Load protocol and prepare the target data for network training # target data protocol_file = prj_conf.optional_argument[0] self.protocol_parser = protocol_parse(protocol_file) # working sampling rate; torchaudio is used to change the sampling rate self.m_target_sr = 16000 #### # optional configs (not used) #### # re-sampling (optional) #self.m_resampler = torchaudio.transforms.Resample( # prj_conf.wav_samp_rate, self.m_target_sr) # vad (optional) #self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr) # flag for balanced class (temporary use) #self.v_flag = 1 #### # front-end configuration # multiple front-end configurations may be used # by default, use a single front-end #### # frame shift (number of points) self.frame_hops = [160] # frame length self.frame_lens = [320] # FFT length self.fft_n = [512] self.spec_with_delta = False self.spec_fb_dim = 60 # window type self.win = torch.hann_window # floor in log-spectrum-amplitude calculation self.amp_floor = 0.00001 # number of frames kept per utterance (750 for a 160-point hop; see _front_end) self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops] # number of sub-models self.v_submodels = len(self.frame_lens) # dimension of embedding vectors self.v_emd_dim =
1 # output class self.v_out_class = 1 self.m_transform = [] self.m_output_act = [] self.m_frontend = [] #self.m_a_softmax = [] for idx, (trunc_len, fft_n) in enumerate(zip( self.v_truncate_lens, self.fft_n)): fft_n_bins = fft_n // 2 + 1 self.m_transform.append( torch_nn.Sequential( TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim), torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]) ) ) self.m_output_act.append( torch_nn.Sequential( torch_nn.Dropout(0.7), torch_nn.Linear((trunc_len // 16) * (self.spec_fb_dim // 16) * 32, 160), nii_nn.MaxFeatureMap2D(), torch_nn.Linear(80, self.v_emd_dim) ) ) self.m_frontend.append( nii_front_end.Spectrogram(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr) ) self.m_transform = torch_nn.ModuleList(self.m_transform) self.m_output_act = torch_nn.ModuleList(self.m_output_act) self.m_frontend = torch_nn.ModuleList(self.m_frontend) # output # done return def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None): """ """ if data_mean_std is not None: in_m = torch.from_numpy(data_mean_std[0]) in_s = torch.from_numpy(data_mean_std[1]) out_m = torch.from_numpy(data_mean_std[2]) out_s = torch.from_numpy(data_mean_std[3]) if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim: print("Input dim: {:d}".format(in_dim)) print("Mean dim: {:d}".format(in_m.shape[0])) print("Std dim: {:d}".format(in_s.shape[0])) print("Input dimension incompatible") sys.exit(1) if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim: print("Output dim: {:d}".format(out_dim)) print("Mean dim: {:d}".format(out_m.shape[0])) print("Std dim: {:d}".format(out_s.shape[0])) print("Output dimension incompatible") sys.exit(1) else: in_m = torch.zeros([in_dim]) in_s = torch.zeros([in_dim]) out_m = torch.ones([out_dim]) out_s = torch.ones([out_dim]) return in_m, in_s, out_m, out_s def normalize_input(self, x): """ normalizing the input data """ return (x - self.input_mean) / self.input_std def normalize_target(self, y): """ normalizing the target data """ return (y - self.output_mean) / self.output_std def denormalize_output(self, y): """ denormalizing the generated output from network """ return y * self.output_std + self.output_mean def _front_end(self, wav, idx, trunc_len, datalength): """ simple fixed front-end to extract features fs: frame shift fl: frame length fn: fft points trunc_len: number of frames per file (by truncating) datalength: original length of data """ with torch.no_grad(): x_sp_amp = 
self.m_frontend[idx](wav.squeeze(-1)) # permute to (batch, fft_bin, frame_length) x_sp_amp = x_sp_amp.permute(0, 2, 1) # make sure the buffer is long enough x_sp_amp_buff = torch.zeros( [x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len], dtype=x_sp_amp.dtype, device=x_sp_amp.device) # for batch of data, handle the padding and trim independently fs = self.frame_hops[idx] for fileidx in range(x_sp_amp.shape[0]): # roughly, this is the number of frames true_frame_num = datalength[fileidx] // fs if true_frame_num > trunc_len: # trim randomly pos = torch.rand([1]) * (true_frame_num-trunc_len) pos = torch.floor(pos[0]).long() tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos] x_sp_amp_buff[fileidx] = tmp else: rep = int(np.ceil(trunc_len / true_frame_num)) tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep) x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len] # permute to (batch, frame_length, fft_bin) x_sp_amp = x_sp_amp_buff.permute(0, 2, 1) # return return x_sp_amp def _compute_embedding(self, x, datalength): """ definition of forward method Assume x (batchsize, length, dim) Output x (batchsize * number_filter, output_dim) """ # resample if necessary #x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1) # number of sub-models batch_size = x.shape[0] // self.v_submodels # buffer to store output scores from sub-models output_emb = torch.zeros([batch_size * self.v_submodels, self.v_emd_dim], device=x.device, dtype=x.dtype) # compute scores for each sub-model for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate( zip(self.frame_hops, self.frame_lens, self.fft_n, self.v_truncate_lens, self.m_transform, self.m_output_act)): # extract feature (stft spectrogram) x_sp_amp = self._front_end(x, idx, trunc_len, datalength) # compute scores # 1. unsqueeze to (batch, 1, frame_length, fft_bin) # 2. compute hidden features hidden_features = m_trans(x_sp_amp.unsqueeze(1)) # 3.
flatten and transform through output function tmp_emb = m_output(torch.flatten(hidden_features, 1)) output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb return output_emb def _compute_score(self, feature_vec, inference=False): """ """ # feature_vec is [batch * submodel, 1] if inference: return feature_vec.squeeze(1) else: return torch.sigmoid(feature_vec).squeeze(1) def _get_target(self, filenames): try: return [self.protocol_parser[x] for x in filenames] except KeyError: print("Cannot find target data for %s" % (str(filenames))) sys.exit(1) def forward(self, x, fileinfo): #with torch.no_grad(): # vad_waveform = self.m_vad(x.squeeze(-1)) # vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1])) # if vad_waveform.shape[-1] > 0: # x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1) # else: # pass filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo] datalength = [nii_seq_tk.parse_length(y) for y in fileinfo] if self.training: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec) # target target = self._get_target(filenames) target_vec = torch.tensor(target, device=x.device, dtype=scores.dtype) target_vec = target_vec.repeat(self.v_submodels) return [scores, target_vec, True] else: feature_vec = self._compute_embedding(x, datalength) scores = self._compute_score(feature_vec, True) target = self._get_target(filenames) print("Output, %s, %d, %f" % (filenames[0], target[0], scores.mean())) # don't write output score as a single file return None class Loss(): """ Wrapper to define loss function """ def __init__(self, args): """ """ self.m_loss = torch_nn.BCELoss() def compute(self, outputs, target): """loss = compute(input_data, target_data) Note: 1. input_data will be the output from Model.forward() input_data will be a tuple of [scores, target_vec] 2. we will not use target given by the system script we will use the target_vec in input_data[1] """ loss = self.m_loss(outputs[0], outputs[1]) return loss if __name__ == "__main__": print("Definition of model")
15,079
34.233645
80
py
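
All of the LCNN stacks above rely on nii_nn.MaxFeatureMap2D, which is why a Conv2d with 64 output channels is always followed by 32-channel BatchNorm and Conv2d layers. A sketch of the max-feature-map operation as assumed by those channel counts (an illustrative re-implementation, not the library code):

import torch

def max_feature_map_2d(x):
    # split (batch, 2c, h, w) into two halves along channels and take
    # the element-wise max: acts as the activation and halves channels
    a, b = torch.chunk(x, 2, dim=1)
    return torch.max(a, b)

x = torch.randn(1, 64, 8, 8)
print(max_feature_map_2d(x).shape)      # torch.Size([1, 32, 8, 8])
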