repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
cyphercat | cyphercat-master/cyphercat/datadefs/__init__.py | from .cyphercat_dataset import CCATDataset
from .voices_dataset import *
| 74 | 17.75 | 42 | py |
cyphercat | cyphercat-master/cyphercat/tests/test_libri_load.py | import sys
sys.path.insert(0, '../../')
import cyphercat as cc
print('Downloading')
cc.download_and_preprocess_data(cc.DATASETS_DIR)
print('Loading splits')
dfs = cc.Libri_preload_and_split()
print('Initializing dataset')
test_set = cc.LibriSpeechDataset(df=dfs[4])
print('Successfully loaded LibriSpeech')
| 308 | 24.75 | 48 | py |
cyphercat | cyphercat-master/cyphercat/tests/test_VOiCES_load.py | import sys
sys.path.insert(0, '../../')
import cyphercat as cc
print('Loading splits')
dfs = cc.Voices_preload_and_split()
print('Initializing dataset')
test_set = cc.LibriSpeechDataset(df=dfs[4])
print('Successfully loaded VOiCES')
| 233 | 22.4 | 43 | py |
cyphercat | cyphercat-master/cyphercat/tests/test_dim_reduction.py | import sys
sys.path.insert(0, '../../')
import cyphercat as cc
import torch
import torch.nn as nn
import numpy as np
class test_cnn(nn.Module):
def __init__(self, n_in=3, n_classes=10, n_filters=64, size=64):
super(test_cnn, self).__init__()
self.size = size
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, 2*n_filters, kernel_size=5, stride=1,
padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc = nn.Linear(2*n_filters * (self.size//4) * (self.size//4),
2*n_filters)
self.output = nn.Linear(2*n_filters, n_classes)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(x.size(0), -1)
# x = x.view(-1, 2*self.n_filters * (self.size//4) * (self.size//4))
x = self.fc(x)
out = self.output(x)
return out
def comparison(model, wrap, wrap2, image):
print('Batch size = ', image.shape[0])
print(' - Original model: returns full vector ')
out = model(image)
print('Batch labels = ', out.argmax(dim=1))
print('Full label vectors\n', out)
print(' - Wrapped 1 : returns top 3 ')
out = wrap(image)
print('Batch labels = ', out.argmax(dim=1))
print('Full label vectors\n', out)
print(' - Wrapped breaking probabilities : returns top 3 ')
out = wrap2(image)
print('Batch labels = ', out.argmax(dim=1))
print('Full label vectors\n', out)
conv_net = test_cnn(size=32)
wrapped = cc.dimensionality_reduction(model=conv_net, n_top=3,
break_posterior=False)
wrapped2 = cc.dimensionality_reduction(model=conv_net, n_top=3,
break_posterior=True)
img = torch.randn((2, 3, 32, 32))
print(' ------- Training -------\n')
comparison(conv_net, wrapped, wrapped2, img)
print(' ------- Eval -------\n')
conv_net.eval()
wrapped.eval()
wrapped2.eval()
comparison(conv_net, wrapped, wrapped2, img)
| 2,350 | 31.205479 | 77 | py |
cyphercat | cyphercat-master/cyphercat/tests/__init__.py |
| 0 | 0 | 0 | py |
cyphercat | cyphercat-master/cyphercat/utils/config_utils.py | from __future__ import print_function
import os
import sys
import yaml
from .utils import set_to_string, keys_to_string, color_mode_dict
from cyphercat.definitions import REPO_DIR
# Ensure basic, necessary fields are in the config file
def check_fields(cfg=None, tset=None):
seen = set()
for key, value in cfg.items():
seen.add(key)
return tset.issubset(seen)
# Test if path is absolute or relative
def test_abs_path(path=''):
    if path.startswith('/'):
        return path
    else:
        return os.path.join(REPO_DIR, path)
class Configurator(object):
"""
Configuration file reader
"""
# Fields, subfields required in configuration file
reqs = set(["data", "train"])
def __init__(self, config_file=""):
# Get configuration file
self.filepath = os.path.abspath(config_file)
with open(config_file, 'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)
# Loaded config object
self.cfg = cfg
# Ensure necessary header fields exist
if not check_fields(cfg=cfg, tset=self.reqs):
raise AssertionError("Some fields in {} not found. "
"Required fields: {}".format(self.filepath,
self.reqs))
# Extract config parameters
self.dataset = cfg['data']
self.train_model = cfg['train']
# self.avail_models = cfg.get('models_to_run', '').split(',')
# self.head_outpath = cfg.get('outpath',
# os.path.join(self.datapath,
# 'saved_models'))
class ModelConfig(object):
"""
Expected information defining a model.
"""
reqs = set(["model", "runtrain"])
def __init__(self, modelconfig=None):
self.modelconfig = modelconfig
if not check_fields(cfg=modelconfig, tset=self.reqs):
raise AssertionError("Some subfields for 'model' field not found.\n"
" Required fields: {}\nExiting...\n".format(set_to_string(self.reqs)))
self.name = modelconfig.get('model')
self.runtrain = modelconfig.get('runtrain')
self.model_path = self.test_abs_path(str(modelconfig.get('modelpath')))
self.epochs = modelconfig.get('epochs')
self.batchsize = modelconfig.get('batchsize')
self.learnrate = modelconfig.get('learnrate')
# Test if path is absolute or relative
def test_abs_path(self, path=''):
if path.startswith('/'):
return path
else:
return os.path.join(REPO_DIR, path)
class DataStruct(object):
"""
Expected directory structure
for accessing image data sets.
Generalization to data & audio forthcoming.
"""
# Mandatory fields for 'data' yaml config file keyword
reqs = set(["name", "datapath", "datatype"])
image_reqs = set(["nclasses", "height", "width", "channels"])
audio_reqs = set(["length", "seconds"])
data_reqs = [image_reqs, audio_reqs]
# Data types dictionary
data_type_dict = {"image": 0,
"audio": 1}
def __init__(self, dataset=None):
self.dataset = dataset
if not check_fields(cfg=dataset, tset=self.reqs):
raise AssertionError("Some subfields under 'data' field not found.\n"
" Required fields: {}\nExiting...\n".format(set_to_string(self.reqs)))
self.name = dataset.get('name')
self.data_path = self.test_abs_path(str(dataset.get('datapath')))
self.data_type = dataset.get('datatype').lower()
url_list = dataset.get('url', '').replace(" ", "").split(",")
self.url = [x for x in url_list if x]
self.save_path = os.path.join(self.data_path, self.name)
# Ensure data type is permitted
if (self.data_type not in self.data_type_dict):
print("\nUnknown data type '{}'!\n Allowed data types: {}\nExiting...\n".format(self.data_type,
keys_to_string(self.data_type_dict)))
sys.exit()
# Get index from dictionary to access specific data reqs
dtype_ind = self.data_type_dict[self.data_type]
# Check subfields
if not check_fields(cfg=dataset, tset=self.data_reqs[dtype_ind]):
raise AssertionError("\nSome subfields under 'data' field not found.\n "
" Required fields for {} data: {}\nExiting...\n".format(self.data_type,
set_to_string(self.data_reqs[dtype_ind])))
# Image data specific
if dtype_ind == 0:
self.height = int(dataset.get('height'))
self.width = int(dataset.get('width'))
self.channels = int(dataset.get('channels'))
self.n_classes = int(dataset.get('nclasses'))
self.color_mode = color_mode_dict[self.channels]
self.labels = dataset.get('labels',
self.default_labels()).replace(" ", "").split(',')
# Audio data specific
elif dtype_ind == 1:
self.length = float(dataset.get('length'))
self.seconds = float(dataset.get('seconds'))
# Test if path is absolute or relative
def test_abs_path(self, path=''):
if path.startswith('/'):
return path
else:
return os.path.join(REPO_DIR, path)
# Consecutive integers default data labels
def default_labels(self):
return str(list(range(0, self.n_classes))).strip('[]')
| 5,803 | 35.049689 | 132 | py |
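A minimal usage sketch for Configurator and DataStruct, assuming the import path implied by the file layout above; the keys under 'train' are illustrative, since only the presence of the 'data' and 'train' headers is validated by Configurator itself:

from cyphercat.utils.config_utils import Configurator, DataStruct

cfg_text = """\
data:
  name: CIFAR10
  datapath: Datasets
  datatype: image
  nclasses: 10
  height: 32
  width: 32
  channels: 3
train:
  model: example_cnn
  runtrain: true
"""
with open('example_config.yml', 'w') as f:
    f.write(cfg_text)

conf = Configurator('example_config.yml')  # checks that 'data' and 'train' headers exist
data = DataStruct(conf.dataset)            # checks image subfields, resolves relative paths
print(data.save_path, data.color_mode)     # <REPO_DIR>/Datasets/CIFAR10 rgb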
cyphercat | cyphercat-master/cyphercat/utils/utils.py | from __future__ import print_function
# Dictionary printer
def print_dict(dct):
for key, value in sorted(dct.items(), reverse=True):
print("{}: {}".format(key, value))
# Set string printer
def set_to_string(iset=None):
sstr = ', '.join([str(i) for i in iset])
return sstr
# Dictionary string key-printer
def keys_to_string(struct=None):
kstr = ', '.join([k for k in struct.keys()])
return kstr
# Color mode dictionary for specifying
# color_mode in data generators
color_mode_dict = {1: 'grayscale',
3: 'rgb'}
| 565 | 20.769231 | 56 | py |
cyphercat | cyphercat-master/cyphercat/utils/file_utils.py | import os
import sys
import shutil
import requests
import zipfile
import tarfile
def downloader(save_dir='', url=''):
"""
Function to download file from
url to specified destination file.
If file already exists, or the url
is a path to a valid local file,
then simply returns path to local file.
Parameters
----------
save_dir : string
directory used for saving file
url : string
url or path to existing compressed
dataset file
Returns
-------
dest_file : string
path to compressed file
"""
# Need defined url for dataset
if url == '':
print('The url to download the dataset or path to the compressed data file was not provided.')
print('Please provide a url, or download and unpack the dataset.\n Exiting...')
sys.exit()
file_bname = os.path.basename(url)
dest_file = os.path.join(save_dir, file_bname)
# Check if url is really path to local file
if os.path.isfile(url):
dest_file = url
# Else if dataset zipfile doesn't exist, download it from url
if not os.path.isfile(dest_file):
print('Downloading file {}...'.format(file_bname))
resp = requests.get(url, stream=True)
with open(dest_file, 'wb') as f:
shutil.copyfileobj(resp.raw, f)
else:
print('File found, no need to download.')
return dest_file
def unpacker(compressed_file_name='', out_directory=''):
"""
Function to extract compressed
file to specified directory.
Currently supports extraction of
- zip
- gz
file types.
Parameters
----------
compressed_file_name : string
file to unpack
out_directory : string
output directory
"""
print('Unpacking {} to {}...'.format(compressed_file_name, out_directory))
file_ext = os.path.splitext(compressed_file_name)[1]
# Unpack zipfile
if 'zip' in file_ext:
with zipfile.ZipFile(compressed_file_name) as zf:
zf.extractall(os.path.split(out_directory)[0])
# Unpack gzipfile
elif 'gz' in file_ext:
with tarfile.open(compressed_file_name) as tar:
#tar.extractall(os.path.split(out_directory)[0])
tar.extractall(path=out_directory)
else:
print('File extension {} not recognized for unpacking.\nExiting...'.format(file_ext))
sys.exit()
| 2,507 | 27.179775 | 102 | py |
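A short sketch of the download-then-unpack flow these helpers implement; the URL is a placeholder and the import path is assumed from the file layout (a local path to an existing .zip/.tar.gz also works in place of the URL):

import os
from cyphercat.utils.file_utils import downloader, unpacker

save_dir = 'Datasets'
os.makedirs(save_dir, exist_ok=True)
# downloader returns the path to the (possibly already existing) archive
archive = downloader(save_dir=save_dir, url='https://example.com/some_dataset.tar.gz')
unpacker(compressed_file_name=archive, out_directory=os.path.join(save_dir, 'some_dataset'))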
cyphercat | cyphercat-master/cyphercat/utils/__init__.py | # __init__.py
from .utils import *
from .svc_utils import *
from .file_utils import *
from .config_utils import *
| 115 | 15.571429 | 27 | py |
cyphercat | cyphercat-master/cyphercat/utils/visualize_utils.py | #!/usr/bin/python3
"""
Set of functions used to call a series of algorithms used to visualize the object localization of a pre-trained
network in PyTorch. The different algorithms are discussed in several papers, while the implementation is based,
roughly, on work in the following repository (https://github.com/sar-gupta/weakly-supervised-localization-survey)
"""
import numpy as np
import PIL
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
def saliency_map_general(model, input, label, plot = False):
"""
saliency_map_general: implementation to return the most general form of the saliency map, informing
on the regions of interest that activate a specific label.
Args:
- model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
return: numpy array with heatmap data
"""
input = Variable(input.unsqueeze_(0), requires_grad = True)
output = model.forward(input)
model.zero_grad()
output[0][label].backward()
grads = input.grad.data.clamp(min=0)
grads.squeeze_()
grads.transpose_(0, 1)
grads.transpose_(1, 2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def guided_saliency_map(model, input, label, plot = False):
"""
guided_saliency_map: implementation to return a guided saliency map, informing
on the regions of interest that activate a specific label.
Args:
- model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
return: numpy array with heatmap data
"""
input = Variable(input.unsqueeze_(0), requires_grad=True)
try:
h = [0]*len(list(model.modules()))
def hookfunc(module, gradInput, gradOutput):
return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
for j, i in enumerate(list(model.modules())):
h[j] = i.register_backward_hook(hookfunc)
output = model.forward(input)
model.zero_grad()
output[0][label].backward()
for i in range(len(list(model.modules()))):
h[i].remove()
except Exception as e:
print(e)
for i in range(len(list(model.modules()))):
h[i].remove()
grads = input.grad.data.clamp(min=0)
grads.squeeze_()
grads.transpose_(0, 1)
grads.transpose_(1, 2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def gradcam(model, input, label, layer_name, plot=False):
"""
gradcam: implementation to return a class activation map using the gradient of class score with each
of last conv layer filters. Calculate weighted sum of gradients and filters to finally obtain a map
of size equal to size of filters.
Args:
- model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
        - layer_name: Name of the layer to target; should be the last convolutional layer.
    return:
        numpy array with the activation map
"""
imgs_shape = (input.shape[1], input.shape[2])
rs = torchvision.transforms.Resize(imgs_shape)
#find the right layer
last_conv = None
for name, item in model._modules.items():
if name == layer_name:
last_conv = item
    if last_conv is None:
        print('Cannot find target layer')
return None
pre_image = input
global gcdata
global gcgrads
def bhook(module, gradInputs, gradOutputs):
global gcgrads
gcgrads = gradOutputs
def fhook(module, input, output):
global gcdata
gcdata = output
hb = last_conv.register_backward_hook(bhook)
hf = last_conv.register_forward_hook(fhook)
out = model(input.unsqueeze_(0))
model.zero_grad()
out[0, label].backward()
hb.remove()
hf.remove()
gcdata = gcdata[0]
gcgrads = gcgrads[0].squeeze()
gcgrads = gcgrads.mean(dim=2, keepdim=True)
gcgrads = gcgrads.mean(dim=1, keepdim=True)
#
gcdata = gcdata.mul(gcgrads)
gcdata = gcdata.sum(dim=0, keepdim=True)
gcdata = gcdata.clamp(min=0)
gcdata -= gcdata.min()
gcdata /= gcdata.max()
toi = torchvision.transforms.ToPILImage()
gcdata = np.array(rs(toi(gcdata.data.cpu())))
input.squeeze()
return gcdata
def guided_gradcam(model, input, label, layer_name, plot = False):
"""
    guided_gradcam: returns a combination of a guided saliency map and a class activation map. This combines
    the class sensitivity of gradcam together with the greater resolution of the
    saliency map.
Args:
- model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
        - layer_name: Name of the layer to target; should be the last convolutional layer.
    return:
        numpy array with the activation map
"""
gc = gradcam(model, input, label, layer_name, plot=False)
guided = guided_saliency_map(model=model, input=input[0], label=label, plot=False)
gc = gc * guided
rs = torchvision.transforms.Resize((32, 32))
gc -= gc.min()
gc = np.divide(gc, gc.max())
gc *= 255
gc = gc.astype(int)
return gc
def smooth_guided_saliency_map(model, input, label, transform, x=10, percent_noise=10, plot = True):
"""
    smooth_guided_saliency_map: implementation of a guided saliency map that accounts for the fact that
    small, local variations in the derivatives produce the apparent noise one sees; this implementation
    averages the gradients over several noisy samples to smooth the map.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
        - x: Number of times to sample for the smoothing
        - percent_noise: Percentage of noise to be introduced during sampling for smoothing
    return:
        numpy array with the smoothed saliency map
"""
tensor_input = input
final_grad = torch.zeros(input.shape).cuda()
final_grad = final_grad.unsqueeze(0)
h = [0]*len(list(model.modules()))
def hookfunc(module, gradInput, gradOutput):
return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
for j, i in enumerate(list(model.modules())):
h[j] = i.register_backward_hook(hookfunc)
for i in range(x):
temp_input = tensor_input
noise = torch.from_numpy(np.random.normal(loc=0, scale=(percent_noise/100) *
(tensor_input.max() - tensor_input.min()),
size=temp_input.shape)).type(torch.cuda.FloatTensor)
temp_input = (temp_input.cuda() + noise).cpu().numpy()
temp_input = np.transpose(temp_input, (1, 2, 0) )
temp_input = PIL.Image.fromarray(temp_input.astype(np.uint8))
temp_input = Variable(transform(temp_input).unsqueeze(0).cuda(), requires_grad=True)
output = model.forward(temp_input)
model.zero_grad()
output[0][label].backward()
final_grad += temp_input.grad.data
for i in range(len(list(model.modules()))):
h[i].remove()
grads = final_grad/x
grads = grads.clamp(min=0)
grads.squeeze_()
grads.transpose_(0, 1)
grads.transpose_(1, 2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def smooth_guided_gradcam(model, input, label, transform, layer_name, plot = False ):
guided = smooth_guided_saliency_map(model, input, label, transform = transform, plot = False)
gc = gradcam(model, input, label, layer_name = layer_name, plot=False)
gc = gc * guided
rs = torchvision.transforms.Resize((32, 32))
gc -= gc.min()
gc = np.divide(gc, gc.max())
gc *= 255
gc = gc.astype(int)
return gc
| 8,630 | 31.085502 | 117 | py |
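A quick sketch of calling the saliency and Grad-CAM helpers on a stock torchvision model; 'layer4' is the last conv block of ResNet-18, the random tensor stands in for a normalized image, and pretrained=True is the older torchvision API:

import torch
import torchvision
from cyphercat.utils.visualize_utils import saliency_map_general, gradcam

model = torchvision.models.resnet18(pretrained=True).eval()
img = torch.randn(3, 224, 224)  # C x H x W stand-in for a preprocessed image
# each helper unsqueezes its input in place, so pass a fresh clone per call
heat = saliency_map_general(model, img.clone(), label=0)
cam = gradcam(model, img.clone(), label=0, layer_name='layer4')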
cyphercat | cyphercat-master/cyphercat/utils/svc_utils.py | from __future__ import print_function
import os
import numpy as np
import torch
import torchvision
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
import joblib  # sklearn.externals.joblib was removed from modern scikit-learn
def load(dataloader):
"""Loads/flattens inputs and targets for use in SVM. Returns inputs and targets."""
for data in dataloader:
x, y = data
x = x.view(x.shape[0], -1)
return x, y
def hp_grid(n_components, C_range, gamma_range):
"""Creates and returns list of classifiers with grid of hyperparameters given by C_range and gamma_range."""
clfs = []
pca = PCA(n_components=n_components)
scaling = MinMaxScaler(feature_range=(-1, 1))
for i in C_range:
for j in gamma_range:
svc = svm.SVC(C=i, gamma=j)
clf = make_pipeline(pca, scaling, svc)
clfs.append(clf)
return clfs
def train_grid(clfs, inputs, targets):
"""Trains classifiers in a list; returns list of trained classifiers."""
fitted_clfs = []
for i in range(len(clfs)):
x = clfs[i].fit(inputs, targets)
fitted_clfs.append(x)
print("Fitted: {} / {}".format(i+1, len(clfs)))
return fitted_clfs
def predict_eval(clf, inputs, targets, training=False):
"""Given a classifier and inputs, returns predictions and evaluated classifier accuracy."""
preds = clf.predict(inputs)
num_correct = torch.eq(torch.from_numpy(preds), targets).sum().item()
acc = (num_correct / len(targets)) * 100
if training:
# print('C: ', clf.get_params(deep=True)['svc__C'], 'gamma: ', clf.get_params(deep=True)['svc__gamma'])
print("C: {} gamma: {}".format(clf.get_params(deep=True)['svc__C'], clf.get_params(deep=True)['svc__gamma']))
print('Training Accuracy: {}'.format(acc))
else:
print('Testing Accuracy: {}'.format(acc))
return preds, acc
def maxacc_gen(test_accs, train_accs, clfs):
"""Finds and returns model with highest test accuracy and model with train/test accuracy ratio closest to 1."""
test = np.array(test_accs)
train = np.array(train_accs)
maxacc = clfs[np.argmax(test)]
gen = clfs[np.argmin(train-test)]
return maxacc, gen
def save_proba(fn, pipe, inputs, targets):
"""Fits svm with probabilities and saves to disk."""
params = pipe.get_params(deep=True)
pca = PCA(n_components=180)
scaling = MinMaxScaler(feature_range=(-1, 1))
pipe_prob = make_pipeline(pca, scaling, svm.SVC(C=params['svc__C'], gamma=params['svc__gamma'], probability=True))
pipe_prob.fit(inputs, targets)
joblib.dump(pipe_prob, fn)
def load_svm(directory, gen=True):
"""Returns loaded SVM saved with classification baselines.
'gen' : Model with train/test accuracy ratio closest to 1.
'maxacc' : Model with highest test accuracy."""
    clf = 'gen' if gen else 'maxacc'
dataset = directory.split('/')[-1]
path = 'SVM' + dataset + '_' + clf + '_proba.pkl'
svm = joblib.load(os.path.join(directory, path))
return svm
def class_acc(preds, targets, classes):
"Returns classifier accuracy for each class."
correct = 0
class_correct = np.zeros(len(classes))
class_total = np.zeros(len(classes))
for j in range(len(targets)):
class_total[targets[j]] += 1
if np.argmax(preds[j]) == targets[j]:
class_correct[targets[j]] += 1
correct += 1
class_accuracies = (class_correct/class_total) * 100
accuracy = (correct / len(targets)) * 100
for i in range(len(class_accuracies)):
        print('Accuracy of {} : {} %'.format(classes[i], class_accuracies[i]))
    print('Total Accuracy: {} %'.format(accuracy))
| 3,817 | 29.790323 | 118 | py |
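A toy sketch of the SVM grid workflow above; the random features stand in for the flattened image batches that load() would produce:

import numpy as np
import torch
from cyphercat.utils.svc_utils import hp_grid, train_grid, predict_eval

X = np.random.randn(200, 64).astype(np.float32)          # flattened inputs
y = torch.from_numpy(np.random.randint(0, 2, size=200))  # integer class targets

clfs = hp_grid(n_components=10, C_range=[0.1, 1.0, 10.0], gamma_range=[0.01, 0.1])
fitted = train_grid(clfs, X, y.numpy())
accs = [predict_eval(clf, X, y, training=True)[1] for clf in fitted]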
3DDFA | 3DDFA-master/main.py | #!/usr/bin/env python3
# coding: utf-8
__author__ = 'cleardusk'
"""
The pipeline of 3DDFA prediction: given one image, predict the 3d face vertices, 68 landmarks and visualization.
[todo]
1. CPU optimization: https://pmchojnacki.wordpress.com/2018/10/07/slow-pytorch-cpu-performance
"""
import torch
import torchvision.transforms as transforms
import mobilenet_v1
import numpy as np
import cv2
import dlib
from utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool
import scipy.io as sio
from utils.inference import get_suffix, parse_roi_box_from_landmark, crop_img, predict_68pts, dump_to_ply, dump_vertex, \
draw_landmarks, predict_dense, parse_roi_box_from_bbox, get_colors, write_obj_with_colors
from utils.cv_plot import plot_pose_box
from utils.estimate_pose import parse_pose
from utils.render import get_depths_image, cget_depths_image, cpncc
from utils.paf import gen_img_paf
import argparse
import torch.backends.cudnn as cudnn
STD_SIZE = 120
def main(args):
    # 1. load pre-trained model
checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
arch = 'mobilenet_1'
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression)
model_dict = model.state_dict()
    # because the model was trained on multiple GPUs, the 'module.' prefix must be stripped
for k in checkpoint.keys():
model_dict[k.replace('module.', '')] = checkpoint[k]
model.load_state_dict(model_dict)
if args.mode == 'gpu':
cudnn.benchmark = True
model = model.cuda()
model.eval()
# 2. load dlib model for face detection and landmark used for face cropping
if args.dlib_landmark:
dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
face_regressor = dlib.shape_predictor(dlib_landmark_model)
if args.dlib_bbox:
face_detector = dlib.get_frontal_face_detector()
# 3. forward
tri = sio.loadmat('visualize/tri.mat')['tri']
transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
for img_fp in args.files:
img_ori = cv2.imread(img_fp)
if args.dlib_bbox:
rects = face_detector(img_ori, 1)
else:
rects = []
if len(rects) == 0:
rects = dlib.rectangles()
rect_fp = img_fp + '.bbox'
lines = open(rect_fp).read().strip().split('\n')[1:]
for l in lines:
l, r, t, b = [int(_) for _ in l.split(' ')[1:]]
rect = dlib.rectangle(l, r, t, b)
rects.append(rect)
pts_res = []
Ps = [] # Camera matrix collection
poses = [] # pose collection, [todo: validate it]
vertices_lst = [] # store multiple face vertices
ind = 0
suffix = get_suffix(img_fp)
for rect in rects:
# whether use dlib landmark to crop image, if not, use only face bbox to calc roi bbox for cropping
if args.dlib_landmark:
# - use landmark for cropping
pts = face_regressor(img_ori, rect).parts()
pts = np.array([[pt.x, pt.y] for pt in pts]).T
roi_box = parse_roi_box_from_landmark(pts)
else:
# - use detected face bbox
bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
roi_box = parse_roi_box_from_bbox(bbox)
img = crop_img(img_ori, roi_box)
# forward: one step
img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
input = transform(img).unsqueeze(0)
with torch.no_grad():
if args.mode == 'gpu':
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
# 68 pts
pts68 = predict_68pts(param, roi_box)
# two-step for more accurate bbox to crop face
if args.bbox_init == 'two':
roi_box = parse_roi_box_from_landmark(pts68)
img_step2 = crop_img(img_ori, roi_box)
img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
input = transform(img_step2).unsqueeze(0)
with torch.no_grad():
if args.mode == 'gpu':
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
pts68 = predict_68pts(param, roi_box)
pts_res.append(pts68)
P, pose = parse_pose(param)
Ps.append(P)
poses.append(pose)
# dense face 3d vertices
if args.dump_ply or args.dump_vertex or args.dump_depth or args.dump_pncc or args.dump_obj:
vertices = predict_dense(param, roi_box)
vertices_lst.append(vertices)
if args.dump_ply:
dump_to_ply(vertices, tri, '{}_{}.ply'.format(img_fp.replace(suffix, ''), ind))
if args.dump_vertex:
dump_vertex(vertices, '{}_{}.mat'.format(img_fp.replace(suffix, ''), ind))
if args.dump_pts:
wfp = '{}_{}.txt'.format(img_fp.replace(suffix, ''), ind)
np.savetxt(wfp, pts68, fmt='%.3f')
print('Save 68 3d landmarks to {}'.format(wfp))
if args.dump_roi_box:
wfp = '{}_{}.roibox'.format(img_fp.replace(suffix, ''), ind)
np.savetxt(wfp, roi_box, fmt='%.3f')
print('Save roi box to {}'.format(wfp))
if args.dump_paf:
wfp_paf = '{}_{}_paf.jpg'.format(img_fp.replace(suffix, ''), ind)
wfp_crop = '{}_{}_crop.jpg'.format(img_fp.replace(suffix, ''), ind)
paf_feature = gen_img_paf(img_crop=img, param=param, kernel_size=args.paf_size)
cv2.imwrite(wfp_paf, paf_feature)
cv2.imwrite(wfp_crop, img)
print('Dump to {} and {}'.format(wfp_crop, wfp_paf))
if args.dump_obj:
wfp = '{}_{}.obj'.format(img_fp.replace(suffix, ''), ind)
colors = get_colors(img_ori, vertices)
write_obj_with_colors(wfp, vertices, tri, colors)
print('Dump obj with sampled texture to {}'.format(wfp))
ind += 1
if args.dump_pose:
# P, pose = parse_pose(param) # Camera matrix (without scale), and pose (yaw, pitch, roll, to verify)
img_pose = plot_pose_box(img_ori, Ps, pts_res)
wfp = img_fp.replace(suffix, '_pose.jpg')
cv2.imwrite(wfp, img_pose)
print('Dump to {}'.format(wfp))
if args.dump_depth:
wfp = img_fp.replace(suffix, '_depth.png')
# depths_img = get_depths_image(img_ori, vertices_lst, tri-1) # python version
depths_img = cget_depths_image(img_ori, vertices_lst, tri - 1) # cython version
cv2.imwrite(wfp, depths_img)
print('Dump to {}'.format(wfp))
if args.dump_pncc:
wfp = img_fp.replace(suffix, '_pncc.png')
pncc_feature = cpncc(img_ori, vertices_lst, tri - 1) # cython version
cv2.imwrite(wfp, pncc_feature[:, :, ::-1]) # cv2.imwrite will swap RGB -> BGR
print('Dump to {}'.format(wfp))
if args.dump_res:
draw_landmarks(img_ori, pts_res, wfp=img_fp.replace(suffix, '_3DDFA.jpg'), show_flg=args.show_flg)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='3DDFA inference pipeline')
parser.add_argument('-f', '--files', nargs='+',
help='image files paths fed into network, single or multiple images')
parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode')
parser.add_argument('--show_flg', default='true', type=str2bool, help='whether show the visualization result')
parser.add_argument('--bbox_init', default='one', type=str,
help='one|two: one-step bbox initialization or two-step')
parser.add_argument('--dump_res', default='true', type=str2bool, help='whether write out the visualization image')
parser.add_argument('--dump_vertex', default='false', type=str2bool,
help='whether write out the dense face vertices to mat')
parser.add_argument('--dump_ply', default='true', type=str2bool)
parser.add_argument('--dump_pts', default='true', type=str2bool)
parser.add_argument('--dump_roi_box', default='false', type=str2bool)
parser.add_argument('--dump_pose', default='true', type=str2bool)
parser.add_argument('--dump_depth', default='true', type=str2bool)
parser.add_argument('--dump_pncc', default='true', type=str2bool)
parser.add_argument('--dump_paf', default='false', type=str2bool)
parser.add_argument('--paf_size', default=3, type=int, help='PAF feature kernel size')
parser.add_argument('--dump_obj', default='true', type=str2bool)
parser.add_argument('--dlib_bbox', default='true', type=str2bool, help='whether use dlib to predict bbox')
parser.add_argument('--dlib_landmark', default='true', type=str2bool,
help='whether use dlib landmark to crop image')
args = parser.parse_args()
main(args)
| 9,511 | 45.174757 | 121 | py |
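For reference, a sketch of how the 62-d parameter vector noted in the comment above ('62 = 12 pose + 40 shape + 10 expression') decomposes; this mirrors the slicing done by _parse_param_batch in the loss modules further down this dump:

import numpy as np

param = np.zeros(62, dtype=np.float32)    # stand-in for one network prediction
pose = param[:12].reshape(3, 4)           # [R | t3d]: rotation/scale plus a translation column
R, t3d = pose[:, :3], pose[:, 3:]         # 3x3 and 3x1
alpha_shp = param[12:52].reshape(40, 1)   # shape coefficients
alpha_exp = param[52:].reshape(10, 1)     # expression coefficients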
3DDFA | 3DDFA-master/video_demo.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import torchvision.transforms as transforms
import mobilenet_v1
import numpy as np
import cv2
import dlib
from utils.ddfa import ToTensorGjz, NormalizeGjz
import scipy.io as sio
from utils.inference import (
parse_roi_box_from_landmark,
crop_img,
predict_68pts,
predict_dense,
)
from utils.cv_plot import plot_kpt
from utils.render import get_depths_image, cget_depths_image, cpncc
from utils.paf import gen_img_paf
import argparse
import torch.backends.cudnn as cudnn
STD_SIZE = 120
def main(args):
# 0. open video
    vc = cv2.VideoCapture(int(args.video) if args.video.isdigit() else args.video)
    # 1. load pre-trained model
checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
arch = 'mobilenet_1'
tri = sio.loadmat('visualize/tri.mat')['tri']
transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)[
'state_dict'
]
model = getattr(mobilenet_v1, arch)(
num_classes=62
) # 62 = 12(pose) + 40(shape) +10(expression)
model_dict = model.state_dict()
    # because the model was trained on multiple GPUs, the 'module.' prefix must be stripped
for k in checkpoint.keys():
model_dict[k.replace('module.', '')] = checkpoint[k]
model.load_state_dict(model_dict)
if args.mode == 'gpu':
cudnn.benchmark = True
model = model.cuda()
model.eval()
# 2. load dlib model for face detection and landmark used for face cropping
dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
face_regressor = dlib.shape_predictor(dlib_landmark_model)
face_detector = dlib.get_frontal_face_detector()
# 3. forward
success, frame = vc.read()
last_frame_pts = []
while success:
if len(last_frame_pts) == 0:
rects = face_detector(frame, 1)
for rect in rects:
pts = face_regressor(frame, rect).parts()
pts = np.array([[pt.x, pt.y] for pt in pts]).T
last_frame_pts.append(pts)
vertices_lst = []
for lmk in last_frame_pts:
roi_box = parse_roi_box_from_landmark(lmk)
img = crop_img(frame, roi_box)
img = cv2.resize(
img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR
)
input = transform(img).unsqueeze(0)
with torch.no_grad():
if args.mode == 'gpu':
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
pts68 = predict_68pts(param, roi_box)
vertex = predict_dense(param, roi_box)
lmk[:] = pts68[:2]
vertices_lst.append(vertex)
pncc = cpncc(frame, vertices_lst, tri - 1) / 255.0
frame = frame / 255.0 * (1.0 - pncc)
cv2.imshow('3ddfa', frame)
cv2.waitKey(1)
success, frame = vc.read()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='3DDFA inference pipeline')
parser.add_argument(
'-v',
'--video',
default='0',
type=str,
help='video file path or opencv cam index',
)
parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode')
args = parser.parse_args()
main(args)
| 3,552 | 31.3 | 88 | py |
3DDFA | 3DDFA-master/benchmark.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import torch.nn as nn
import torch.utils.data as data
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import mobilenet_v1
import time
import numpy as np
from benchmark_aflw2000 import calc_nme as calc_nme_alfw2000
from benchmark_aflw2000 import ana as ana_alfw2000
from benchmark_aflw import calc_nme as calc_nme_alfw
from benchmark_aflw import ana as ana_aflw
from utils.ddfa import ToTensorGjz, NormalizeGjz, DDFATestDataset, reconstruct_vertex
import argparse
def extract_param(checkpoint_fp, root='', filelists=None, arch='mobilenet_1', num_classes=62, device_ids=[0],
batch_size=128, num_workers=4):
map_location = {f'cuda:{i}': 'cuda:0' for i in range(8)}
checkpoint = torch.load(checkpoint_fp, map_location=map_location)['state_dict']
torch.cuda.set_device(device_ids[0])
model = getattr(mobilenet_v1, arch)(num_classes=num_classes)
model = nn.DataParallel(model, device_ids=device_ids).cuda()
model.load_state_dict(checkpoint)
dataset = DDFATestDataset(filelists=filelists, root=root,
transform=transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)]))
data_loader = data.DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
cudnn.benchmark = True
model.eval()
end = time.time()
outputs = []
with torch.no_grad():
for _, inputs in enumerate(data_loader):
inputs = inputs.cuda()
output = model(inputs)
for i in range(output.shape[0]):
param_prediction = output[i].cpu().numpy().flatten()
outputs.append(param_prediction)
outputs = np.array(outputs, dtype=np.float32)
    print(f'Extracting params took {time.time() - end: .3f}s')
return outputs
def _benchmark_aflw(outputs):
return ana_aflw(calc_nme_alfw(outputs))
def _benchmark_aflw2000(outputs):
return ana_alfw2000(calc_nme_alfw2000(outputs))
def benchmark_alfw_params(params):
outputs = []
for i in range(params.shape[0]):
lm = reconstruct_vertex(params[i])
outputs.append(lm[:2, :])
return _benchmark_aflw(outputs)
def benchmark_aflw2000_params(params):
outputs = []
for i in range(params.shape[0]):
lm = reconstruct_vertex(params[i])
outputs.append(lm[:2, :])
return _benchmark_aflw2000(outputs)
def benchmark_pipeline(arch, checkpoint_fp):
device_ids = [0]
def aflw():
params = extract_param(
checkpoint_fp=checkpoint_fp,
root='test.data/AFLW_GT_crop',
filelists='test.data/AFLW_GT_crop.list',
arch=arch,
device_ids=device_ids,
batch_size=128)
benchmark_alfw_params(params)
def aflw2000():
params = extract_param(
checkpoint_fp=checkpoint_fp,
root='test.data/AFLW2000-3D_crop',
filelists='test.data/AFLW2000-3D_crop.list',
arch=arch,
device_ids=device_ids,
batch_size=128)
benchmark_aflw2000_params(params)
aflw2000()
aflw()
def main():
parser = argparse.ArgumentParser(description='3DDFA Benchmark')
parser.add_argument('--arch', default='mobilenet_1', type=str)
parser.add_argument('-c', '--checkpoint-fp', default='models/phase1_wpdc_vdc.pth.tar', type=str)
args = parser.parse_args()
benchmark_pipeline(args.arch, args.checkpoint_fp)
if __name__ == '__main__':
main()
| 3,554 | 28.87395 | 111 | py |
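The map_location dict in extract_param remaps tensors saved from any of eight GPUs onto cuda:0; a simpler equivalent sketch for single-device loading:

import torch

# same effect as {'cuda:0': 'cuda:0', ..., 'cuda:7': 'cuda:0'}
ckpt = torch.load('models/phase1_wpdc_vdc.pth.tar', map_location='cuda:0')
# or stay device-agnostic and load onto CPU first, as main.py does:
ckpt = torch.load('models/phase1_wpdc_vdc.pth.tar', map_location=lambda storage, loc: storage)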
3DDFA | 3DDFA-master/benchmark_aflw.py | #!/usr/bin/env python3
# coding: utf-8
import os.path as osp
import numpy as np
from math import sqrt
from utils.io import _load
d = 'test.configs'
yaw_list = _load(osp.join(d, 'AFLW_GT_crop_yaws.npy'))
roi_boxs = _load(osp.join(d, 'AFLW_GT_crop_roi_box.npy'))
pts68_all = _load(osp.join(d, 'AFLW_GT_pts68.npy'))
pts21_all = _load(osp.join(d, 'AFLW_GT_pts21.npy'))
def ana(nme_list):
yaw_list_abs = np.abs(yaw_list)
ind_yaw_1 = yaw_list_abs <= 30
ind_yaw_2 = np.bitwise_and(yaw_list_abs > 30, yaw_list_abs <= 60)
ind_yaw_3 = yaw_list_abs > 60
nme_1 = nme_list[ind_yaw_1]
nme_2 = nme_list[ind_yaw_2]
nme_3 = nme_list[ind_yaw_3]
mean_nme_1 = np.mean(nme_1) * 100
mean_nme_2 = np.mean(nme_2) * 100
mean_nme_3 = np.mean(nme_3) * 100
# mean_nme_all = np.mean(nme_list) * 100
std_nme_1 = np.std(nme_1) * 100
std_nme_2 = np.std(nme_2) * 100
std_nme_3 = np.std(nme_3) * 100
# std_nme_all = np.std(nme_list) * 100
mean_all = [mean_nme_1, mean_nme_2, mean_nme_3]
mean = np.mean(mean_all)
std = np.std(mean_all)
s1 = '[ 0, 30]\tMean: \x1b[32m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_1, std_nme_1)
s2 = '[30, 60]\tMean: \x1b[32m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_2, std_nme_2)
s3 = '[60, 90]\tMean: \x1b[32m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_3, std_nme_3)
# s4 = '[ 0, 90]\tMean: \x1b[31m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_all, std_nme_all)
s5 = '[ 0, 90]\tMean: \x1b[31m{:.3f}\x1b[0m, Std: \x1b[31m{:.3f}\x1b[0m'.format(mean, std)
s = '\n'.join([s1, s2, s3, s5])
print(s)
return mean_nme_1, mean_nme_2, mean_nme_3, mean, std
def calc_nme(pts68_fit_all):
std_size = 120
ind_68to21 = [[18], [20], [22], [23], [25], [27], [37], [37, 38, 39, 40, 41, 42], [40], [43],
[43, 44, 45, 46, 47, 48],
[46], [3], [32], [31], [36], [15], [49], [61, 62, 63, 64, 65, 66, 67, 68], [55], [9]]
for i in range(len(ind_68to21)):
for j in range(len(ind_68to21[i])):
ind_68to21[i][j] -= 1
nme_list = []
for i in range(len(roi_boxs)):
pts68_fit = pts68_fit_all[i]
pts68_gt = pts68_all[i]
pts21_gt = pts21_all[i]
# reconstruct 68 pts
sx, sy, ex, ey = roi_boxs[i]
scale_x = (ex - sx) / std_size
scale_y = (ey - sy) / std_size
pts68_fit[0, :] = pts68_fit[0, :] * scale_x + sx
pts68_fit[1, :] = pts68_fit[1, :] * scale_y + sy
# pts68 -> pts21
pts21_est = np.zeros_like(pts21_gt, dtype=np.float32)
        for k in range(21):
            ind = ind_68to21[k]
            tmp = np.mean(pts68_fit[:, ind], 1)
            pts21_est[:, k] = tmp
# build bbox
minx, maxx = np.min(pts68_gt[0, :]), np.max(pts68_gt[0, :])
miny, maxy = np.min(pts68_gt[1, :]), np.max(pts68_gt[1, :])
llength = sqrt((maxx - minx) * (maxy - miny))
# nme
pt_valid = (pts21_gt[0, :] != -1) & (pts21_gt[1, :] != -1)
dis = pts21_est[:, pt_valid] - pts21_gt[:, pt_valid]
dis = np.sqrt(np.sum(np.power(dis, 2), 0))
dis = np.mean(dis)
nme = dis / llength
nme_list.append(nme)
nme_list = np.array(nme_list, dtype=np.float32)
return nme_list
def main():
pass
if __name__ == '__main__':
main()
| 3,341 | 30.828571 | 103 | py |
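In equation form, the per-image metric computed by calc_nme above is the mean landmark error normalized by the geometric mean of the ground-truth bounding-box sides, restricted to the visible 21 points:

\mathrm{NME} = \frac{1}{|V|} \sum_{k \in V} \frac{\lVert \hat{p}_k - p_k \rVert_2}{\sqrt{w_{\mathrm{bbox}}\, h_{\mathrm{bbox}}}}

where V is the set of points with valid (non -1) annotations and the box is built from the 68-point ground truth.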
3DDFA | 3DDFA-master/speed_cpu.py | #!/usr/bin/env python3
# coding: utf-8
import timeit
import numpy as np
SETUP_CODE = '''
import mobilenet_v1
import torch
model = mobilenet_v1.mobilenet_1()
model.eval()
data = torch.rand(1, 3, 120, 120)
'''
TEST_CODE = '''
with torch.no_grad():
model(data)
'''
def main():
repeat, number = 5, 100
res = timeit.repeat(setup=SETUP_CODE,
stmt=TEST_CODE,
repeat=repeat,
number=number)
res = np.array(res, dtype=np.float32)
res /= number
    mean, std = np.mean(res), np.std(res)
    print('Inference speed: {:.2f}±{:.2f} ms'.format(mean * 1000, std * 1000))
if __name__ == '__main__':
main()
| 693 | 18.277778 | 78 | py |
3DDFA | 3DDFA-master/wpdc_loss.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import torch.nn as nn
from math import sqrt
from utils.io import _numpy_to_cuda
from utils.params import *
_to_tensor = _numpy_to_cuda # gpu
def _parse_param_batch(param):
"""Work for both numpy and tensor"""
N = param.shape[0]
p_ = param[:, :12].view(N, 3, -1)
p = p_[:, :, :3]
offset = p_[:, :, -1].view(N, 3, 1)
alpha_shp = param[:, 12:52].view(N, -1, 1)
alpha_exp = param[:, 52:].view(N, -1, 1)
return p, offset, alpha_shp, alpha_exp
class WPDCLoss(nn.Module):
"""Input and target are all 62-d param"""
def __init__(self, opt_style='resample', resample_num=132):
super(WPDCLoss, self).__init__()
self.opt_style = opt_style
self.param_mean = _to_tensor(param_mean)
self.param_std = _to_tensor(param_std)
self.u = _to_tensor(u)
self.w_shp = _to_tensor(w_shp)
self.w_exp = _to_tensor(w_exp)
self.w_norm = _to_tensor(w_norm)
self.w_shp_length = self.w_shp.shape[0] // 3
self.keypoints = _to_tensor(keypoints)
self.resample_num = resample_num
def reconstruct_and_parse(self, input, target):
# reconstruct
param = input * self.param_std + self.param_mean
param_gt = target * self.param_std + self.param_mean
# parse param
p, offset, alpha_shp, alpha_exp = _parse_param_batch(param)
pg, offsetg, alpha_shpg, alpha_expg = _parse_param_batch(param_gt)
return (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg)
def _calc_weights_resample(self, input_, target_):
# resample index
if self.resample_num <= 0:
keypoints_mix = self.keypoints
else:
index = torch.randperm(self.w_shp_length)[:self.resample_num].reshape(-1, 1)
keypoints_resample = torch.cat((3 * index, 3 * index + 1, 3 * index + 2), dim=1).view(-1).cuda()
keypoints_mix = torch.cat((self.keypoints, keypoints_resample))
w_shp_base = self.w_shp[keypoints_mix]
u_base = self.u[keypoints_mix]
w_exp_base = self.w_exp[keypoints_mix]
        input = input_.detach().clone()
        target = target_.detach().clone()
(p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
= self.reconstruct_and_parse(input, target)
input = self.param_std * input + self.param_mean
target = self.param_std * target + self.param_mean
N = input.shape[0]
offset[:, -1] = offsetg[:, -1]
weights = torch.zeros_like(input, dtype=torch.float)
tmpv = (u_base + w_shp_base @ alpha_shpg + w_exp_base @ alpha_expg).view(N, -1, 3).permute(0, 2, 1)
tmpv_norm = torch.norm(tmpv, dim=2)
offset_norm = sqrt(w_shp_base.shape[0] // 3)
# for pose
param_diff_pose = torch.abs(input[:, :11] - target[:, :11])
for ind in range(11):
if ind in [0, 4, 8]:
weights[:, ind] = param_diff_pose[:, ind] * tmpv_norm[:, 0]
elif ind in [1, 5, 9]:
weights[:, ind] = param_diff_pose[:, ind] * tmpv_norm[:, 1]
elif ind in [2, 6, 10]:
weights[:, ind] = param_diff_pose[:, ind] * tmpv_norm[:, 2]
else:
weights[:, ind] = param_diff_pose[:, ind] * offset_norm
        # This is the optimized version
# for shape_exp
magic_number = 0.00057339936 # scale
param_diff_shape_exp = torch.abs(input[:, 12:] - target[:, 12:])
# weights[:, 12:] = magic_number * param_diff_shape_exp * self.w_norm
w = torch.cat((w_shp_base, w_exp_base), dim=1)
w_norm = torch.norm(w, dim=0)
weights[:, 12:] = magic_number * param_diff_shape_exp * w_norm
eps = 1e-6
weights[:, :11] += eps
weights[:, 12:] += eps
# normalize the weights
maxes, _ = weights.max(dim=1)
maxes = maxes.view(-1, 1)
weights /= maxes
# zero the z
weights[:, 11] = 0
return weights
def forward(self, input, target, weights_scale=10):
if self.opt_style == 'resample':
weights = self._calc_weights_resample(input, target)
loss = weights * (input - target) ** 2
return loss.mean()
else:
raise Exception(f'Unknown opt style: {self.opt_style}')
if __name__ == '__main__':
pass
| 4,540 | 33.664122 | 108 | py |
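A minimal smoke-test sketch for WPDCLoss, assuming a CUDA device and the 3DMM bases loaded by utils.params; the random tensors stand in for normalized 62-d network outputs and ground truth:

import torch
from wpdc_loss import WPDCLoss

criterion = WPDCLoss(opt_style='resample', resample_num=132).cuda()
pred = torch.randn(8, 62, device='cuda', requires_grad=True)
gt = torch.randn(8, 62, device='cuda')
loss = criterion(pred, gt)  # scalar: weighted squared parameter distance
loss.backward()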
3DDFA | 3DDFA-master/vdc_loss.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import torch.nn as nn
from utils.io import _load, _numpy_to_cuda, _numpy_to_tensor
from utils.params import *
_to_tensor = _numpy_to_cuda # gpu
def _parse_param_batch(param):
"""Work for both numpy and tensor"""
N = param.shape[0]
p_ = param[:, :12].view(N, 3, -1)
p = p_[:, :, :3]
offset = p_[:, :, -1].view(N, 3, 1)
alpha_shp = param[:, 12:52].view(N, -1, 1)
alpha_exp = param[:, 52:].view(N, -1, 1)
return p, offset, alpha_shp, alpha_exp
class VDCLoss(nn.Module):
def __init__(self, opt_style='all'):
super(VDCLoss, self).__init__()
self.u = _to_tensor(u)
self.param_mean = _to_tensor(param_mean)
self.param_std = _to_tensor(param_std)
self.w_shp = _to_tensor(w_shp)
self.w_exp = _to_tensor(w_exp)
self.keypoints = _to_tensor(keypoints)
self.u_base = self.u[self.keypoints]
self.w_shp_base = self.w_shp[self.keypoints]
self.w_exp_base = self.w_exp[self.keypoints]
self.w_shp_length = self.w_shp.shape[0] // 3
self.opt_style = opt_style
def reconstruct_and_parse(self, input, target):
# reconstruct
param = input * self.param_std + self.param_mean
param_gt = target * self.param_std + self.param_mean
# parse param
p, offset, alpha_shp, alpha_exp = _parse_param_batch(param)
pg, offsetg, alpha_shpg, alpha_expg = _parse_param_batch(param_gt)
return (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg)
def forward_all(self, input, target):
(p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
= self.reconstruct_and_parse(input, target)
N = input.shape[0]
offset[:, -1] = offsetg[:, -1]
gt_vertex = pg @ (self.u + self.w_shp @ alpha_shpg + self.w_exp @ alpha_expg) \
.view(N, -1, 3).permute(0, 2, 1) + offsetg
vertex = p @ (self.u + self.w_shp @ alpha_shp + self.w_exp @ alpha_exp) \
.view(N, -1, 3).permute(0, 2, 1) + offset
diff = (gt_vertex - vertex) ** 2
loss = torch.mean(diff)
return loss
def forward_resample(self, input, target, resample_num=132):
(p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
= self.reconstruct_and_parse(input, target)
# resample index
index = torch.randperm(self.w_shp_length)[:resample_num].reshape(-1, 1)
keypoints_resample = torch.cat((3 * index, 3 * index + 1, 3 * index + 2), dim=1).view(-1).cuda()
keypoints_mix = torch.cat((self.keypoints, keypoints_resample))
w_shp_base = self.w_shp[keypoints_mix]
u_base = self.u[keypoints_mix]
w_exp_base = self.w_exp[keypoints_mix]
offset[:, -1] = offsetg[:, -1]
N = input.shape[0]
gt_vertex = pg @ (u_base + w_shp_base @ alpha_shpg + w_exp_base @ alpha_expg) \
.view(N, -1, 3).permute(0, 2, 1) + offsetg
vertex = p @ (u_base + w_shp_base @ alpha_shp + w_exp_base @ alpha_exp) \
.view(N, -1, 3).permute(0, 2, 1) + offset
diff = (gt_vertex - vertex) ** 2
loss = torch.mean(diff)
return loss
def forward(self, input, target):
if self.opt_style == 'all':
return self.forward_all(input, target)
elif self.opt_style == 'resample':
return self.forward_resample(input, target)
else:
            raise Exception(f'Unknown opt style: {self.opt_style}')
if __name__ == '__main__':
pass
| 3,606 | 34.362745 | 104 | py |
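VDCLoss is exercised the same way; opt_style='all' reconstructs every vertex, while 'resample' uses the 68 keypoints plus 132 randomly resampled vertices per step. A sketch under the same CUDA assumption:

import torch
from vdc_loss import VDCLoss

criterion = VDCLoss(opt_style='resample').cuda()
pred = torch.randn(4, 62, device='cuda', requires_grad=True)
gt = torch.randn(4, 62, device='cuda')
loss = criterion(pred, gt)
loss.backward()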
3DDFA | 3DDFA-master/benchmark_aflw2000.py | #!/usr/bin/env python3
# coding: utf-8
"""
Notation (2019.09.15): two versions of splitting AFLW2000-3D:
1) AFLW2000-3D.pose.npy: according to the fitted pose
2) AFLW2000-3D-new.pose: according to AFLW labels
There is no obvious difference between these two splits.
"""
import os.path as osp
import numpy as np
from math import sqrt
from utils.io import _load
d = 'test.configs'
# [1312, 383, 305], current version
yaws_list = _load(osp.join(d, 'AFLW2000-3D.pose.npy'))
# [1306, 462, 232], same as paper
# yaws_list = _load(osp.join(d, 'AFLW2000-3D-new.pose.npy'))
# origin
pts68_all_ori = _load(osp.join(d, 'AFLW2000-3D.pts68.npy'))
# reannotated
pts68_all_re = _load(osp.join(d, 'AFLW2000-3D-Reannotated.pts68.npy'))
roi_boxs = _load(osp.join(d, 'AFLW2000-3D_crop.roi_box.npy'))
def ana(nme_list):
yaw_list_abs = np.abs(yaws_list)
ind_yaw_1 = yaw_list_abs <= 30
ind_yaw_2 = np.bitwise_and(yaw_list_abs > 30, yaw_list_abs <= 60)
ind_yaw_3 = yaw_list_abs > 60
nme_1 = nme_list[ind_yaw_1]
nme_2 = nme_list[ind_yaw_2]
nme_3 = nme_list[ind_yaw_3]
mean_nme_1 = np.mean(nme_1) * 100
mean_nme_2 = np.mean(nme_2) * 100
mean_nme_3 = np.mean(nme_3) * 100
# mean_nme_all = np.mean(nme_list) * 100
std_nme_1 = np.std(nme_1) * 100
std_nme_2 = np.std(nme_2) * 100
std_nme_3 = np.std(nme_3) * 100
# std_nme_all = np.std(nme_list) * 100
mean_all = [mean_nme_1, mean_nme_2, mean_nme_3]
mean = np.mean(mean_all)
std = np.std(mean_all)
s1 = '[ 0, 30]\tMean: \x1b[32m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_1, std_nme_1)
s2 = '[30, 60]\tMean: \x1b[32m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_2, std_nme_2)
s3 = '[60, 90]\tMean: \x1b[32m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_3, std_nme_3)
# s4 = '[ 0, 90]\tMean: \x1b[31m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_all, std_nme_all)
s5 = '[ 0, 90]\tMean: \x1b[31m{:.3f}\x1b[0m, Std: \x1b[31m{:.3f}\x1b[0m'.format(mean, std)
s = '\n'.join([s1, s2, s3, s5])
print(s)
return mean_nme_1, mean_nme_2, mean_nme_3, mean, std
def convert_to_ori(lms, i):
std_size = 120
sx, sy, ex, ey = roi_boxs[i]
scale_x = (ex - sx) / std_size
scale_y = (ey - sy) / std_size
lms[0, :] = lms[0, :] * scale_x + sx
lms[1, :] = lms[1, :] * scale_y + sy
return lms
def calc_nme(pts68_fit_all, option='ori'):
if option == 'ori':
pts68_all = pts68_all_ori
elif option == 're':
pts68_all = pts68_all_re
std_size = 120
nme_list = []
for i in range(len(roi_boxs)):
pts68_fit = pts68_fit_all[i]
pts68_gt = pts68_all[i]
sx, sy, ex, ey = roi_boxs[i]
scale_x = (ex - sx) / std_size
scale_y = (ey - sy) / std_size
pts68_fit[0, :] = pts68_fit[0, :] * scale_x + sx
pts68_fit[1, :] = pts68_fit[1, :] * scale_y + sy
# build bbox
minx, maxx = np.min(pts68_gt[0, :]), np.max(pts68_gt[0, :])
miny, maxy = np.min(pts68_gt[1, :]), np.max(pts68_gt[1, :])
llength = sqrt((maxx - minx) * (maxy - miny))
#
dis = pts68_fit - pts68_gt[:2, :]
dis = np.sqrt(np.sum(np.power(dis, 2), 0))
dis = np.mean(dis)
nme = dis / llength
nme_list.append(nme)
nme_list = np.array(nme_list, dtype=np.float32)
return nme_list
def main():
pass
if __name__ == '__main__':
main()
| 3,402 | 27.596639 | 97 | py |
3DDFA | 3DDFA-master/train.py | #!/usr/bin/env python3
# coding: utf-8
import os.path as osp
from pathlib import Path
import numpy as np
import argparse
import time
import logging
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import mobilenet_v1
import torch.backends.cudnn as cudnn
from utils.ddfa import DDFADataset, ToTensorGjz, NormalizeGjz
from utils.ddfa import str2bool, AverageMeter
from utils.io import mkdir
from vdc_loss import VDCLoss
from wpdc_loss import WPDCLoss
# global args (configuration)
args = None
lr = None
arch_choices = ['mobilenet_2', 'mobilenet_1', 'mobilenet_075', 'mobilenet_05', 'mobilenet_025']
def parse_args():
parser = argparse.ArgumentParser(description='3DMM Fitting')
parser.add_argument('-j', '--workers', default=6, type=int)
parser.add_argument('--epochs', default=40, type=int)
parser.add_argument('--start-epoch', default=1, type=int)
parser.add_argument('-b', '--batch-size', default=128, type=int)
parser.add_argument('-vb', '--val-batch-size', default=32, type=int)
parser.add_argument('--base-lr', '--learning-rate', default=0.001, type=float)
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float)
parser.add_argument('--print-freq', '-p', default=20, type=int)
parser.add_argument('--resume', default='', type=str, metavar='PATH')
parser.add_argument('--devices-id', default='0,1', type=str)
parser.add_argument('--filelists-train',
default='', type=str)
parser.add_argument('--filelists-val',
default='', type=str)
parser.add_argument('--root', default='')
parser.add_argument('--snapshot', default='', type=str)
parser.add_argument('--log-file', default='output.log', type=str)
parser.add_argument('--log-mode', default='w', type=str)
parser.add_argument('--size-average', default='true', type=str2bool)
parser.add_argument('--num-classes', default=62, type=int)
parser.add_argument('--arch', default='mobilenet_1', type=str,
choices=arch_choices)
parser.add_argument('--frozen', default='false', type=str2bool)
parser.add_argument('--milestones', default='15,25,30', type=str)
parser.add_argument('--task', default='all', type=str)
parser.add_argument('--test_initial', default='false', type=str2bool)
parser.add_argument('--warmup', default=-1, type=int)
parser.add_argument('--param-fp-train',
default='',
type=str)
parser.add_argument('--param-fp-val',
default='')
parser.add_argument('--opt-style', default='resample', type=str) # resample
parser.add_argument('--resample-num', default=132, type=int)
parser.add_argument('--loss', default='vdc', type=str)
global args
args = parser.parse_args()
# some other operations
args.devices_id = [int(d) for d in args.devices_id.split(',')]
args.milestones = [int(m) for m in args.milestones.split(',')]
snapshot_dir = osp.split(args.snapshot)[0]
mkdir(snapshot_dir)
def print_args(args):
for arg in vars(args):
s = arg + ': ' + str(getattr(args, arg))
logging.info(s)
def adjust_learning_rate(optimizer, epoch, milestones=None):
"""Sets the learning rate: milestone is a list/tuple"""
def to(epoch):
if epoch <= args.warmup:
return 1
elif args.warmup < epoch <= milestones[0]:
return 0
for i in range(1, len(milestones)):
if milestones[i - 1] < epoch <= milestones[i]:
return i
return len(milestones)
n = to(epoch)
global lr
lr = args.base_lr * (0.2 ** n)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_checkpoint(state, filename='checkpoint.pth.tar'):
torch.save(state, filename)
logging.info(f'Save checkpoint to {filename}')
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
model.train()
end = time.time()
# loader is batch style
for i, (input, target) in enumerate(train_loader):
target.requires_grad = False
target = target.cuda(non_blocking=True)
output = model(input)
data_time.update(time.time() - end)
if args.loss.lower() == 'vdc':
loss = criterion(output, target)
elif args.loss.lower() == 'wpdc':
loss = criterion(output, target)
elif args.loss.lower() == 'pdc':
loss = criterion(output, target)
else:
raise Exception(f'Unknown loss {args.loss}')
losses.update(loss.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# log
if i % args.print_freq == 0:
logging.info(f'Epoch: [{epoch}][{i}/{len(train_loader)}]\t'
f'LR: {lr:8f}\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
# f'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
f'Loss {losses.val:.4f} ({losses.avg:.4f})')
def validate(val_loader, model, criterion, epoch):
model.eval()
end = time.time()
with torch.no_grad():
losses = []
for i, (input, target) in enumerate(val_loader):
# compute output
target.requires_grad = False
target = target.cuda(non_blocking=True)
output = model(input)
loss = criterion(output, target)
losses.append(loss.item())
elapse = time.time() - end
loss = np.mean(losses)
logging.info(f'Val: [{epoch}][{len(val_loader)}]\t'
f'Loss {loss:.4f}\t'
f'Time {elapse:.3f}')
def main():
    parse_args()  # parse global args
# logging setup
logging.basicConfig(
format='[%(asctime)s] [p%(process)s] [%(pathname)s:%(lineno)d] [%(levelname)s] %(message)s',
level=logging.INFO,
handlers=[
logging.FileHandler(args.log_file, mode=args.log_mode),
logging.StreamHandler()
]
)
print_args(args) # print args
# step1: define the model structure
model = getattr(mobilenet_v1, args.arch)(num_classes=args.num_classes)
torch.cuda.set_device(args.devices_id[0]) # fix bug for `ERROR: all tensors must be on devices[0]`
model = nn.DataParallel(model, device_ids=args.devices_id).cuda() # -> GPU
# step2: optimization: loss and optimization method
# criterion = nn.MSELoss(size_average=args.size_average).cuda()
    if args.loss.lower() == 'wpdc':
        criterion = WPDCLoss(opt_style=args.opt_style).cuda()
logging.info('Use WPDC Loss')
elif args.loss.lower() == 'vdc':
criterion = VDCLoss(opt_style=args.opt_style).cuda()
logging.info('Use VDC Loss')
elif args.loss.lower() == 'pdc':
criterion = nn.MSELoss(size_average=args.size_average).cuda()
logging.info('Use PDC loss')
else:
raise Exception(f'Unknown Loss {args.loss}')
optimizer = torch.optim.SGD(model.parameters(),
lr=args.base_lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True)
# step 2.1 resume
if args.resume:
if Path(args.resume).is_file():
logging.info(f'=> loading checkpoint {args.resume}')
checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)['state_dict']
# checkpoint = torch.load(args.resume)['state_dict']
model.load_state_dict(checkpoint)
else:
logging.info(f'=> no checkpoint found at {args.resume}')
# step3: data
normalize = NormalizeGjz(mean=127.5, std=128) # may need optimization
train_dataset = DDFADataset(
root=args.root,
filelists=args.filelists_train,
param_fp=args.param_fp_train,
transform=transforms.Compose([ToTensorGjz(), normalize])
)
val_dataset = DDFADataset(
root=args.root,
filelists=args.filelists_val,
param_fp=args.param_fp_val,
transform=transforms.Compose([ToTensorGjz(), normalize])
)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.workers,
shuffle=True, pin_memory=True, drop_last=True)
val_loader = DataLoader(val_dataset, batch_size=args.val_batch_size, num_workers=args.workers,
shuffle=False, pin_memory=True)
# step4: run
cudnn.benchmark = True
if args.test_initial:
logging.info('Testing from initial')
validate(val_loader, model, criterion, args.start_epoch)
for epoch in range(args.start_epoch, args.epochs + 1):
# adjust learning rate
adjust_learning_rate(optimizer, epoch, args.milestones)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
filename = f'{args.snapshot}_checkpoint_epoch_{epoch}.pth.tar'
save_checkpoint(
{
'epoch': epoch,
'state_dict': model.state_dict(),
# 'optimizer': optimizer.state_dict()
},
filename
)
validate(val_loader, model, criterion, epoch)
if __name__ == '__main__':
main()
| 9,938 | 34.244681 | 105 | py |
3DDFA | 3DDFA-master/visualize.py | #!/usr/bin/env python3
# coding: utf-8
from benchmark import extract_param
from utils.ddfa import reconstruct_vertex
from utils.io import _dump, _load
import os.path as osp
from skimage import io
import matplotlib.pyplot as plt
from benchmark_aflw2000 import convert_to_ori
import scipy.io as sio
def aflw2000():
arch = 'mobilenet_1'
device_ids = [0]
checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
params = extract_param(
checkpoint_fp=checkpoint_fp,
root='test.data/AFLW2000-3D_crop',
filelists='test.data/AFLW2000-3D_crop.list',
arch=arch,
device_ids=device_ids,
batch_size=128)
_dump('res/params_aflw2000.npy', params)
def draw_landmarks():
filelists = 'test.data/AFLW2000-3D_crop.list'
root = 'AFLW-2000-3D/'
fns = open(filelists).read().strip().split('\n')
params = _load('res/params_aflw2000.npy')
for i in range(2000):
plt.close()
img_fp = osp.join(root, fns[i])
img = io.imread(img_fp)
lms = reconstruct_vertex(params[i], dense=False)
lms = convert_to_ori(lms, i)
# print(lms.shape)
fig = plt.figure(figsize=plt.figaspect(.5))
# fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(img)
alpha = 0.8
markersize = 4
lw = 1.5
color = 'w'
markeredgecolor = 'black'
nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]
for ind in range(len(nums) - 1):
l, r = nums[ind], nums[ind + 1]
ax.plot(lms[0, l:r], lms[1, l:r], color=color, lw=lw, alpha=alpha - 0.1)
ax.plot(lms[0, l:r], lms[1, l:r], marker='o', linestyle='None', markersize=markersize, color=color,
markeredgecolor=markeredgecolor, alpha=alpha)
ax.axis('off')
# 3D
ax = fig.add_subplot(1, 2, 2, projection='3d')
        lms[1] = img.shape[0] - lms[1]  # flip y using the image height (shape[0]); shape[1] is the width
lms[2] = -lms[2]
# print(lms)
ax.scatter(lms[0], lms[2], lms[1], c="cyan", alpha=1.0, edgecolor='b')
for ind in range(len(nums) - 1):
l, r = nums[ind], nums[ind + 1]
ax.plot3D(lms[0, l:r], lms[2, l:r], lms[1, l:r], color='blue')
ax.view_init(elev=5., azim=-95)
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
plt.tight_layout()
# plt.show()
wfp = f'res/AFLW-2000-3D/{osp.basename(img_fp)}'
plt.savefig(wfp, dpi=200)
def gen_3d_vertex():
filelists = 'test.data/AFLW2000-3D_crop.list'
root = 'AFLW-2000-3D/'
fns = open(filelists).read().strip().split('\n')
params = _load('res/params_aflw2000.npy')
sel = ['00427', '00439', '00475', '00477', '00497', '00514', '00562', '00623', '01045', '01095', '01104', '01506',
'01621', '02214', '02244', '03906', '04157']
sel = list(map(lambda x: f'image{x}.jpg', sel))
for i in range(2000):
fn = fns[i]
if fn in sel:
vertex = reconstruct_vertex(params[i], dense=True)
wfp = osp.join('res/AFLW-2000-3D_vertex/', fn.replace('.jpg', '.mat'))
print(wfp)
sio.savemat(wfp, {'vertex': vertex})
def main():
# step1: extract params
# aflw2000()
# step2: draw landmarks
# draw_landmarks()
# step3: visual 3d vertex
gen_3d_vertex()
if __name__ == '__main__':
main()
| 3,510 | 27.544715 | 118 | py |
3DDFA | 3DDFA-master/mobilenet_v1.py | #!/usr/bin/env python3
# coding: utf-8
from __future__ import division
"""
Creates a MobileNet Model as defined in:
Andrew G. Howard, Menglong Zhu, Bo Chen, et al. (2017).
MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications.
Copyright (c) Yang Lu, 2017
Modified By cleardusk
"""
import math
import torch.nn as nn
__all__ = ['mobilenet_2', 'mobilenet_1', 'mobilenet_075', 'mobilenet_05', 'mobilenet_025']
class DepthWiseBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, prelu=False):
super(DepthWiseBlock, self).__init__()
inplanes, planes = int(inplanes), int(planes)
self.conv_dw = nn.Conv2d(inplanes, inplanes, kernel_size=3, padding=1, stride=stride, groups=inplanes,
bias=False)
self.bn_dw = nn.BatchNorm2d(inplanes)
self.conv_sep = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_sep = nn.BatchNorm2d(planes)
if prelu:
self.relu = nn.PReLU()
else:
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv_dw(x)
out = self.bn_dw(out)
out = self.relu(out)
out = self.conv_sep(out)
out = self.bn_sep(out)
out = self.relu(out)
return out
class MobileNet(nn.Module):
def __init__(self, widen_factor=1.0, num_classes=1000, prelu=False, input_channel=3):
""" Constructor
Args:
widen_factor: config of widen_factor
num_classes: number of classes
"""
super(MobileNet, self).__init__()
block = DepthWiseBlock
self.conv1 = nn.Conv2d(input_channel, int(32 * widen_factor), kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(int(32 * widen_factor))
if prelu:
self.relu = nn.PReLU()
else:
self.relu = nn.ReLU(inplace=True)
self.dw2_1 = block(32 * widen_factor, 64 * widen_factor, prelu=prelu)
self.dw2_2 = block(64 * widen_factor, 128 * widen_factor, stride=2, prelu=prelu)
self.dw3_1 = block(128 * widen_factor, 128 * widen_factor, prelu=prelu)
self.dw3_2 = block(128 * widen_factor, 256 * widen_factor, stride=2, prelu=prelu)
self.dw4_1 = block(256 * widen_factor, 256 * widen_factor, prelu=prelu)
self.dw4_2 = block(256 * widen_factor, 512 * widen_factor, stride=2, prelu=prelu)
self.dw5_1 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_2 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_3 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_4 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_5 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_6 = block(512 * widen_factor, 1024 * widen_factor, stride=2, prelu=prelu)
self.dw6 = block(1024 * widen_factor, 1024 * widen_factor, prelu=prelu)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(int(1024 * widen_factor), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.dw2_1(x)
x = self.dw2_2(x)
x = self.dw3_1(x)
x = self.dw3_2(x)
x = self.dw4_1(x)
x = self.dw4_2(x)
x = self.dw5_1(x)
x = self.dw5_2(x)
x = self.dw5_3(x)
x = self.dw5_4(x)
x = self.dw5_5(x)
x = self.dw5_6(x)
x = self.dw6(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def mobilenet(widen_factor=1.0, num_classes=1000):
"""
Construct MobileNet.
widen_factor=1.0 for mobilenet_1
widen_factor=0.75 for mobilenet_075
widen_factor=0.5 for mobilenet_05
widen_factor=0.25 for mobilenet_025
"""
model = MobileNet(widen_factor=widen_factor, num_classes=num_classes)
return model
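# Minimal usage sketch (added for illustration, not part of the original repo):
# widen_factor scales every channel count, so the parameter count grows roughly
# quadratically with it. Left uncalled on purpose.
def _demo_widen_factor():
    for wf in (0.25, 0.5, 1.0):
        m = MobileNet(widen_factor=wf, num_classes=62)
        n_params = sum(p.numel() for p in m.parameters())
        print('widen_factor={}: {:,} params'.format(wf, n_params))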
def mobilenet_2(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=2.0, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_1(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=1.0, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_075(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.75, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_05(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.5, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_025(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.25, num_classes=num_classes, input_channel=input_channel)
return model
| 5,224 | 32.709677 | 110 | py |
3DDFA | 3DDFA-master/training/train.py | ../train.py | 11 | 11 | 11 | py |
3DDFA | 3DDFA-master/c++/convert_to_onnx.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import mobilenet_v1
def main():
# checkpoint_fp = 'weights/phase1_wpdc_vdc.pth.tar'
checkpoint_fp = 'weights/mb_1.p'
arch = 'mobilenet_1'
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression)
model_dict = model.state_dict()
    # because the model was trained with multiple GPUs, the 'module.' prefix has to be stripped
for k in checkpoint.keys():
kc = k.replace('module.', '')
if kc in model_dict.keys():
model_dict[kc] = checkpoint[k]
if kc in ['fc_param.bias', 'fc_param.weight']:
model_dict[kc.replace('_param', '')] = checkpoint[k]
model.load_state_dict(model_dict)
# conversion
batch_size = 1
dummy_input = torch.randn(batch_size, 3, 120, 120)
torch.onnx.export(model, dummy_input, checkpoint_fp.replace('.p', '.onnx'))
# torch.onnx.export(model, dummy_input, checkpoint_fp.replace('.pth.tar', '.onnx'))
if __name__ == '__main__':
main()
| 1,135 | 32.411765 | 100 | py |
3DDFA | 3DDFA-master/c++/mobilenet_v1.py | #!/usr/bin/env python3
# coding: utf-8
from __future__ import division
"""
Creates a MobileNet Model as defined in:
Andrew G. Howard, Menglong Zhu, Bo Chen, et al. (2017).
MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications.
Copyright (c) Yang Lu, 2017
Modified By cleardusk
"""
import math
import torch.nn as nn
__all__ = ['mobilenet_2', 'mobilenet_1', 'mobilenet_075', 'mobilenet_05', 'mobilenet_025']
class DepthWiseBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, prelu=False):
super(DepthWiseBlock, self).__init__()
inplanes, planes = int(inplanes), int(planes)
self.conv_dw = nn.Conv2d(inplanes, inplanes, kernel_size=3, padding=1, stride=stride, groups=inplanes,
bias=False)
self.bn_dw = nn.BatchNorm2d(inplanes)
self.conv_sep = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_sep = nn.BatchNorm2d(planes)
if prelu:
self.relu = nn.PReLU()
else:
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv_dw(x)
out = self.bn_dw(out)
out = self.relu(out)
out = self.conv_sep(out)
out = self.bn_sep(out)
out = self.relu(out)
return out
class MobileNet(nn.Module):
def __init__(self, widen_factor=1.0, num_classes=1000, prelu=False, input_channel=3):
""" Constructor
Args:
widen_factor: config of widen_factor
num_classes: number of classes
"""
super(MobileNet, self).__init__()
block = DepthWiseBlock
self.conv1 = nn.Conv2d(input_channel, int(32 * widen_factor), kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(int(32 * widen_factor))
if prelu:
self.relu = nn.PReLU()
else:
self.relu = nn.ReLU(inplace=True)
self.dw2_1 = block(32 * widen_factor, 64 * widen_factor, prelu=prelu)
self.dw2_2 = block(64 * widen_factor, 128 * widen_factor, stride=2, prelu=prelu)
self.dw3_1 = block(128 * widen_factor, 128 * widen_factor, prelu=prelu)
self.dw3_2 = block(128 * widen_factor, 256 * widen_factor, stride=2, prelu=prelu)
self.dw4_1 = block(256 * widen_factor, 256 * widen_factor, prelu=prelu)
self.dw4_2 = block(256 * widen_factor, 512 * widen_factor, stride=2, prelu=prelu)
self.dw5_1 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_2 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_3 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_4 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_5 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_6 = block(512 * widen_factor, 1024 * widen_factor, stride=2, prelu=prelu)
self.dw6 = block(1024 * widen_factor, 1024 * widen_factor, prelu=prelu)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(int(1024 * widen_factor), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.dw2_1(x)
x = self.dw2_2(x)
x = self.dw3_1(x)
x = self.dw3_2(x)
x = self.dw4_1(x)
x = self.dw4_2(x)
x = self.dw5_1(x)
x = self.dw5_2(x)
x = self.dw5_3(x)
x = self.dw5_4(x)
x = self.dw5_5(x)
x = self.dw5_6(x)
x = self.dw6(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def mobilenet(widen_factor=1.0, num_classes=1000):
"""
Construct MobileNet.
widen_factor=1.0 for mobilenet_1
widen_factor=0.75 for mobilenet_075
widen_factor=0.5 for mobilenet_05
widen_factor=0.25 for mobilenet_025
"""
model = MobileNet(widen_factor=widen_factor, num_classes=num_classes)
return model
def mobilenet_2(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=2.0, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_1(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=1.0, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_075(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.75, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_05(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.5, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_025(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.25, num_classes=num_classes, input_channel=input_channel)
return model
| 5,224 | 32.709677 | 110 | py |
3DDFA | 3DDFA-master/demo@obama/rendering_demo.py | #!/usr/bin/env python3
# coding: utf-8
"""
A demo for rendering mesh generated by `main.py`
"""
from rendering import cfg, _to_ctype, RenderPipeline
import scipy.io as sio
import imageio
import numpy as np
import matplotlib.pyplot as plt
def test():
# 1. first, using main.py to generate dense vertices, like emma_input_0.mat
fp = '../samples/emma_input_0.mat'
vertices = sio.loadmat(fp)['vertex'].T # 3xm
print(vertices.shape)
img = imageio.imread('../samples/emma_input.jpg').astype(np.float32) / 255.
# 2. render it
# triangles = sio.loadmat('tri_refine.mat')['tri'] # mx3
triangles = sio.loadmat('../visualize/tri.mat')['tri'].T - 1 # mx3
print(triangles.shape)
triangles = _to_ctype(triangles).astype(np.int32) # for type compatible
app = RenderPipeline(**cfg)
img_render = app(vertices, triangles, img)
plt.imshow(img_render)
plt.show()
def main():
test()
if __name__ == '__main__':
main()
| 974 | 23.375 | 79 | py |
3DDFA | 3DDFA-master/demo@obama/convert_imgs_to_video.py | #!/usr/bin/env python3
# coding: utf-8
import os
import os.path as osp
import sys
from glob import glob
import imageio
def main():
assert len(sys.argv) >= 2
d = sys.argv[1]
fps = glob(osp.join(d, '*.jpg'))
fps = sorted(fps, key=lambda x: int(x.split('/')[-1].replace('.jpg', '')))
imgs = []
for fp in fps:
img = imageio.imread(fp)
imgs.append(img)
if len(sys.argv) >= 3:
imageio.mimwrite(sys.argv[2], imgs, fps=24, macro_block_size=None)
else:
imageio.mimwrite(osp.basename(d) + '.mp4', imgs, fps=24, macro_block_size=None)
if __name__ == '__main__':
main()
| 634 | 19.483871 | 87 | py |
3DDFA | 3DDFA-master/demo@obama/rendering.py | #!/usr/bin/env python3
# coding: utf-8
import sys
sys.path.append('../')
import os
import os.path as osp
from glob import glob
from utils.lighting import RenderPipeline
import numpy as np
import scipy.io as sio
import imageio
cfg = {
'intensity_ambient': 0.3,
'color_ambient': (1, 1, 1),
'intensity_directional': 0.6,
'color_directional': (1, 1, 1),
'intensity_specular': 0.1,
'specular_exp': 5,
'light_pos': (0, 0, 5),
'view_pos': (0, 0, 5)
}
def _to_ctype(arr):
if not arr.flags.c_contiguous:
return arr.copy(order='C')
return arr
def obama_demo():
wd = 'obama_res@dense_py'
if not osp.exists(wd):
os.mkdir(wd)
app = RenderPipeline(**cfg)
img_fps = sorted(glob('obama/*.jpg'))
triangles = sio.loadmat('tri_refine.mat')['tri'] # mx3
triangles = _to_ctype(triangles).astype(np.int32) # for type compatible
for img_fp in img_fps[:]:
vertices = sio.loadmat(img_fp.replace('.jpg', '_0.mat'))['vertex'].T # mx3
img = imageio.imread(img_fp).astype(np.float32) / 255.
# end = time.clock()
img_render = app(vertices, triangles, img)
# print('Elapse: {:.1f}ms'.format((time.clock() - end) * 1000))
img_wfp = osp.join(wd, osp.basename(img_fp))
imageio.imwrite(img_wfp, img_render)
print('Writing to {}'.format(img_wfp))
if __name__ == '__main__':
obama_demo()
| 1,420 | 23.084746 | 83 | py |
3DDFA | 3DDFA-master/utils/inference.py | #!/usr/bin/env python3
# coding: utf-8
__author__ = 'cleardusk'
import numpy as np
from math import sqrt
import scipy.io as sio
import matplotlib.pyplot as plt
from .ddfa import reconstruct_vertex
def get_suffix(filename):
"""a.jpg -> jpg"""
pos = filename.rfind('.')
if pos == -1:
return ''
return filename[pos:]
def crop_img(img, roi_box):
h, w = img.shape[:2]
sx, sy, ex, ey = [int(round(_)) for _ in roi_box]
dh, dw = ey - sy, ex - sx
if len(img.shape) == 3:
res = np.zeros((dh, dw, 3), dtype=np.uint8)
else:
res = np.zeros((dh, dw), dtype=np.uint8)
if sx < 0:
sx, dsx = 0, -sx
else:
dsx = 0
if ex > w:
ex, dex = w, dw - (ex - w)
else:
dex = dw
if sy < 0:
sy, dsy = 0, -sy
else:
dsy = 0
if ey > h:
ey, dey = h, dh - (ey - h)
else:
dey = dh
res[dsy:dey, dsx:dex] = img[sy:ey, sx:ex]
return res
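# Minimal sketch (illustrative, not part of the original repo): an ROI that
# extends past the image border is zero-padded instead of raising, which is
# what the dsx/dex/dsy/dey bookkeeping above implements.
def _demo_crop_img():
    img = np.ones((100, 100, 3), dtype=np.uint8) * 255
    patch = crop_img(img, [-10, -10, 50, 50])  # 60x60 crop, 10px of zero padding at the top-left
    assert patch.shape == (60, 60, 3)
    assert patch[0, 0, 0] == 0 and patch[30, 30, 0] == 255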
def calc_hypotenuse(pts):
bbox = [min(pts[0, :]), min(pts[1, :]), max(pts[0, :]), max(pts[1, :])]
center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
radius = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 2
bbox = [center[0] - radius, center[1] - radius, center[0] + radius, center[1] + radius]
llength = sqrt((bbox[2] - bbox[0]) ** 2 + (bbox[3] - bbox[1]) ** 2)
return llength / 3
def parse_roi_box_from_landmark(pts):
"""calc roi box from landmark"""
bbox = [min(pts[0, :]), min(pts[1, :]), max(pts[0, :]), max(pts[1, :])]
center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
radius = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 2
bbox = [center[0] - radius, center[1] - radius, center[0] + radius, center[1] + radius]
llength = sqrt((bbox[2] - bbox[0]) ** 2 + (bbox[3] - bbox[1]) ** 2)
center_x = (bbox[2] + bbox[0]) / 2
center_y = (bbox[3] + bbox[1]) / 2
roi_box = [0] * 4
roi_box[0] = center_x - llength / 2
roi_box[1] = center_y - llength / 2
roi_box[2] = roi_box[0] + llength
roi_box[3] = roi_box[1] + llength
return roi_box
def parse_roi_box_from_bbox(bbox):
left, top, right, bottom = bbox
old_size = (right - left + bottom - top) / 2
center_x = right - (right - left) / 2.0
center_y = bottom - (bottom - top) / 2.0 + old_size * 0.14
size = int(old_size * 1.58)
roi_box = [0] * 4
roi_box[0] = center_x - size / 2
roi_box[1] = center_y - size / 2
roi_box[2] = roi_box[0] + size
roi_box[3] = roi_box[1] + size
return roi_box
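# Minimal sketch (illustrative, not part of the original repo): both helpers
# return a square ROI; the bbox variant shifts the center downward by
# 0.14 * old_size and enlarges the box by a factor of about 1.58.
def _demo_parse_roi_box():
    roi = parse_roi_box_from_bbox([0, 0, 100, 100])
    assert roi[2] - roi[0] == roi[3] - roi[1]  # square by construction
    print(roi)  # roughly [-29.0, -15.0, 129.0, 143.0]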
def dump_to_ply(vertex, tri, wfp):
header = """ply
format ascii 1.0
element vertex {}
property float x
property float y
property float z
element face {}
property list uchar int vertex_indices
end_header"""
n_vertex = vertex.shape[1]
n_face = tri.shape[1]
header = header.format(n_vertex, n_face)
with open(wfp, 'w') as f:
f.write(header + '\n')
for i in range(n_vertex):
x, y, z = vertex[:, i]
f.write('{:.4f} {:.4f} {:.4f}\n'.format(x, y, z))
for i in range(n_face):
idx1, idx2, idx3 = tri[:, i]
f.write('3 {} {} {}\n'.format(idx1 - 1, idx2 - 1, idx3 - 1))
    print('Dump to {}'.format(wfp))
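# Minimal sketch (illustrative; '/tmp/triangle.ply' is an arbitrary path): a
# single triangle showing the expected 3 x n layout of `vertex` and the
# 1-based indices of `tri`.
def _demo_dump_to_ply():
    vertex = np.array([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]])  # 3 x n_vertex (x, y, z rows)
    tri = np.array([[1], [2], [3]])  # 3 x n_face, 1-based
    dump_to_ply(vertex, tri, '/tmp/triangle.ply')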
def dump_vertex(vertex, wfp):
sio.savemat(wfp, {'vertex': vertex})
print('Dump to {}'.format(wfp))
def _predict_vertices(param, roi_bbox, dense, transform=True):
vertex = reconstruct_vertex(param, dense=dense)
sx, sy, ex, ey = roi_bbox
scale_x = (ex - sx) / 120
scale_y = (ey - sy) / 120
vertex[0, :] = vertex[0, :] * scale_x + sx
vertex[1, :] = vertex[1, :] * scale_y + sy
s = (scale_x + scale_y) / 2
vertex[2, :] *= s
return vertex
def predict_68pts(param, roi_box):
return _predict_vertices(param, roi_box, dense=False)
def predict_dense(param, roi_box):
return _predict_vertices(param, roi_box, dense=True)
def draw_landmarks(img, pts, style='fancy', wfp=None, show_flg=False, **kwargs):
"""Draw landmarks using matplotlib"""
height, width = img.shape[:2]
plt.figure(figsize=(12, height / width * 12))
plt.imshow(img[:, :, ::-1])
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.axis('off')
    if not isinstance(pts, (tuple, list)):
pts = [pts]
for i in range(len(pts)):
if style == 'simple':
plt.plot(pts[i][0, :], pts[i][1, :], 'o', markersize=4, color='g')
elif style == 'fancy':
alpha = 0.8
markersize = 4
lw = 1.5
color = kwargs.get('color', 'w')
markeredgecolor = kwargs.get('markeredgecolor', 'black')
nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]
# close eyes and mouths
plot_close = lambda i1, i2: plt.plot([pts[i][0, i1], pts[i][0, i2]], [pts[i][1, i1], pts[i][1, i2]],
color=color, lw=lw, alpha=alpha - 0.1)
plot_close(41, 36)
plot_close(47, 42)
plot_close(59, 48)
plot_close(67, 60)
for ind in range(len(nums) - 1):
l, r = nums[ind], nums[ind + 1]
plt.plot(pts[i][0, l:r], pts[i][1, l:r], color=color, lw=lw, alpha=alpha - 0.1)
plt.plot(pts[i][0, l:r], pts[i][1, l:r], marker='o', linestyle='None', markersize=markersize,
color=color,
markeredgecolor=markeredgecolor, alpha=alpha)
if wfp is not None:
plt.savefig(wfp, dpi=200)
print('Save visualization result to {}'.format(wfp))
if show_flg:
plt.show()
def get_colors(image, vertices):
[h, w, _] = image.shape
vertices[0, :] = np.minimum(np.maximum(vertices[0, :], 0), w - 1) # x
vertices[1, :] = np.minimum(np.maximum(vertices[1, :], 0), h - 1) # y
ind = np.round(vertices).astype(np.int32)
colors = image[ind[1, :], ind[0, :], :] # n x 3
return colors
def write_obj_with_colors(obj_name, vertices, triangles, colors):
triangles = triangles.copy() # meshlab start with 1
if obj_name.split('.')[-1] != 'obj':
obj_name = obj_name + '.obj'
# write obj
with open(obj_name, 'w') as f:
# write vertices & colors
for i in range(vertices.shape[1]):
s = 'v {:.4f} {:.4f} {:.4f} {} {} {}\n'.format(vertices[1, i], vertices[0, i], vertices[2, i], colors[i, 2],
colors[i, 1], colors[i, 0])
f.write(s)
# write f: ver ind/ uv ind
for i in range(triangles.shape[1]):
s = 'f {} {} {}\n'.format(triangles[0, i], triangles[1, i], triangles[2, i])
f.write(s)
def main():
pass
if __name__ == '__main__':
main()
| 6,805 | 28.463203 | 120 | py |
3DDFA | 3DDFA-master/utils/render.py | #!/usr/bin/env python3
# coding: utf-8
"""
Modified from https://raw.githubusercontent.com/YadiraF/PRNet/master/utils/render.py
"""
__author__ = 'cleardusk'
import numpy as np
from .cython import mesh_core_cython
from .params import pncc_code
def is_point_in_tri(point, tri_points):
''' Judge whether the point is in the triangle
Method:
http://blackpawn.com/texts/pointinpoly/
Args:
point: [u, v] or [x, y]
tri_points: three vertices(2d points) of a triangle. 2 coords x 3 vertices
Returns:
bool: true for in triangle
'''
tp = tri_points
# vectors
v0 = tp[:, 2] - tp[:, 0]
v1 = tp[:, 1] - tp[:, 0]
v2 = point - tp[:, 0]
# dot products
dot00 = np.dot(v0.T, v0)
dot01 = np.dot(v0.T, v1)
dot02 = np.dot(v0.T, v2)
dot11 = np.dot(v1.T, v1)
dot12 = np.dot(v1.T, v2)
# barycentric coordinates
if dot00 * dot11 - dot01 * dot01 == 0:
inverDeno = 0
else:
inverDeno = 1 / (dot00 * dot11 - dot01 * dot01)
u = (dot11 * dot02 - dot01 * dot12) * inverDeno
v = (dot00 * dot12 - dot01 * dot02) * inverDeno
# check if point in triangle
return (u >= 0) & (v >= 0) & (u + v < 1)
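# Minimal sketch (illustrative, not part of the original repo): the centroid
# of the unit triangle is inside, a far-away point is not.
def _demo_is_point_in_tri():
    tri_points = np.array([[0., 1., 0.],
                           [0., 0., 1.]])  # 2 coords x 3 vertices
    assert is_point_in_tri(np.array([1. / 3, 1. / 3]), tri_points)
    assert not is_point_in_tri(np.array([2., 2.]), tri_points)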
def render_colors(vertices, colors, tri, h, w, c=3):
""" render mesh by z buffer
Args:
vertices: 3 x nver
colors: 3 x nver
tri: 3 x ntri
h: height
w: width
"""
# initial
image = np.zeros((h, w, c))
depth_buffer = np.zeros([h, w]) - 999999.
    # triangle depth: approximate each triangle's depth by the average z of its vertices (v0, v1, v2), since the vertices are close to each other
tri_depth = (vertices[2, tri[0, :]] + vertices[2, tri[1, :]] + vertices[2, tri[2, :]]) / 3.
tri_tex = (colors[:, tri[0, :]] + colors[:, tri[1, :]] + colors[:, tri[2, :]]) / 3.
for i in range(tri.shape[1]):
tri_idx = tri[:, i] # 3 vertex indices
# the inner bounding box
umin = max(int(np.ceil(np.min(vertices[0, tri_idx]))), 0)
umax = min(int(np.floor(np.max(vertices[0, tri_idx]))), w - 1)
vmin = max(int(np.ceil(np.min(vertices[1, tri_idx]))), 0)
vmax = min(int(np.floor(np.max(vertices[1, tri_idx]))), h - 1)
if umax < umin or vmax < vmin:
continue
for u in range(umin, umax + 1):
for v in range(vmin, vmax + 1):
if tri_depth[i] > depth_buffer[v, u] and is_point_in_tri([u, v], vertices[:2, tri_idx]):
depth_buffer[v, u] = tri_depth[i]
image[v, u, :] = tri_tex[:, i]
return image
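# Minimal sketch (illustrative, not part of the original repo): rasterize a
# single red triangle into an 8x8 buffer with the pure-python z-buffer
# renderer above.
def _demo_render_colors():
    vertices = np.array([[1., 6., 1.],   # x
                         [1., 1., 6.],   # y
                         [0., 0., 0.]])  # z
    colors = np.array([[1., 1., 1.],     # r
                       [0., 0., 0.],     # g
                       [0., 0., 0.]])    # b
    tri = np.array([[0], [1], [2]])  # one triangle, 0-based vertex indices
    image = render_colors(vertices, colors, tri, 8, 8)
    assert image[2, 2, 0] == 1.  # a pixel inside the triangle is red
    assert image[7, 7, 0] == 0.  # a pixel outside stays background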
def get_depths_image(img, vertices_lst, tri):
h, w = img.shape[:2]
c = 1
depths_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
z = vertices[2, :]
z_min, z_max = min(z), max(z)
vertices[2, :] = (z - z_min) / (z_max - z_min)
z = vertices[2:, :]
depth_img = render_colors(vertices.T, z.T, tri.T, h, w, 1)
depths_img[depth_img > 0] = depth_img[depth_img > 0]
depths_img = depths_img.squeeze() * 255
return depths_img
def crender_colors(vertices, triangles, colors, h, w, c=3, BG=None):
""" render mesh with colors
Args:
vertices: [nver, 3]
triangles: [ntri, 3]
colors: [nver, 3]
h: height
w: width
c: channel
BG: background image
Returns:
        image: [h, w, c]. the rendered image.
"""
if BG is None:
image = np.zeros((h, w, c), dtype=np.float32)
else:
assert BG.shape[0] == h and BG.shape[1] == w and BG.shape[2] == c
image = BG.astype(np.float32).copy(order='C')
depth_buffer = np.zeros([h, w], dtype=np.float32, order='C') - 999999.
# to C order
vertices = vertices.astype(np.float32).copy(order='C')
triangles = triangles.astype(np.int32).copy(order='C')
colors = colors.astype(np.float32).copy(order='C')
mesh_core_cython.render_colors_core(
image, vertices, triangles,
colors,
depth_buffer,
vertices.shape[0], triangles.shape[0],
h, w, c
)
return image
def cget_depths_image(img, vertices_lst, tri):
"""cython version for depth image render"""
h, w = img.shape[:2]
c = 1
depths_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
z = vertices[2, :]
z_min, z_max = min(z), max(z)
vertices[2, :] = (z - z_min) / (z_max - z_min)
z = vertices[2:, :]
depth_img = crender_colors(vertices.T, tri.T, z.T, h, w, 1)
depths_img[depth_img > 0] = depth_img[depth_img > 0]
depths_img = depths_img.squeeze() * 255
return depths_img
def ncc(vertices):
## simple version
# ncc_vertices = np.zeros_like(vertices)
# x = vertices[0, :]
# y = vertices[1, :]
# z = vertices[2, :]
#
# ncc_vertices[0, :] = (x - min(x)) / (max(x) - min(x))
# ncc_vertices[1, :] = (y - min(y)) / (max(y) - min(y))
# ncc_vertices[2, :] = (z - min(z)) / (max(z) - min(z))
# matrix version
v_min = np.min(vertices, axis=1).reshape(-1, 1)
v_max = np.max(vertices, axis=1).reshape(-1, 1)
ncc_vertices = (vertices - v_min) / (v_max - v_min)
return ncc_vertices
def cpncc(img, vertices_lst, tri):
"""cython version for PNCC render: original paper"""
h, w = img.shape[:2]
c = 3
pnccs_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
pncc_img = crender_colors(vertices.T, tri.T, pncc_code.T, h, w, c)
pnccs_img[pncc_img > 0] = pncc_img[pncc_img > 0]
pnccs_img = pnccs_img.squeeze() * 255
return pnccs_img
def cpncc_v2(img, vertices_lst, tri):
"""cython version for PNCC render"""
h, w = img.shape[:2]
c = 3
pnccs_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
ncc_vertices = ncc(vertices)
pncc_img = crender_colors(vertices.T, tri.T, ncc_vertices.T, h, w, c)
pnccs_img[pncc_img > 0] = pncc_img[pncc_img > 0]
pnccs_img = pnccs_img.squeeze() * 255
return pnccs_img
def main():
pass
if __name__ == '__main__':
main()
| 6,290 | 27.084821 | 141 | py |
3DDFA | 3DDFA-master/utils/cv_plot.py | #!/usr/bin/env python3
# coding: utf-8
"""
Modified from: https://sourcegraph.com/github.com/YadiraF/PRNet@master/-/blob/utils/cv_plot.py
"""
import numpy as np
import cv2
from utils.inference import calc_hypotenuse
end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype=np.int32) - 1
def plot_kpt(image, kpt):
''' Draw 68 key points
Args:
image: the input image
kpt: (68, 3).
'''
image = image.copy()
kpt = np.round(kpt).astype(np.int32)
for i in range(kpt.shape[0]):
st = kpt[i, :2]
image = cv2.circle(image, (st[0], st[1]), 1, (0, 0, 255), 2)
if i in end_list:
continue
ed = kpt[i + 1, :2]
image = cv2.line(image, (st[0], st[1]), (ed[0], ed[1]), (255, 255, 255), 1)
return image
def build_camera_box(rear_size=90):
point_3d = []
rear_depth = 0
point_3d.append((-rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, rear_size, rear_depth))
point_3d.append((rear_size, rear_size, rear_depth))
point_3d.append((rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, -rear_size, rear_depth))
front_size = int(4 / 3 * rear_size)
front_depth = int(4 / 3 * rear_size)
point_3d.append((-front_size, -front_size, front_depth))
point_3d.append((-front_size, front_size, front_depth))
point_3d.append((front_size, front_size, front_depth))
point_3d.append((front_size, -front_size, front_depth))
point_3d.append((-front_size, -front_size, front_depth))
    point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)  # np.float was removed in NumPy >= 1.24
return point_3d
def plot_pose_box(image, Ps, pts68s, color=(40, 255, 0), line_width=2):
    ''' Draw a 3D box as annotation of pose. Ref: https://github.com/yinguobing/head-pose-estimation/blob/master/pose_estimator.py
    Args:
        image: the input image
        Ps: (3, 4) affine camera matrix, or a list of them
        pts68s: (2, 68) or (3, 68) landmarks, or a list of them
    '''
image = image.copy()
if not isinstance(pts68s, list):
pts68s = [pts68s]
if not isinstance(Ps, list):
Ps = [Ps]
for i in range(len(pts68s)):
pts68 = pts68s[i]
llength = calc_hypotenuse(pts68)
point_3d = build_camera_box(llength)
P = Ps[i]
# Map to 2d image points
point_3d_homo = np.hstack((point_3d, np.ones([point_3d.shape[0], 1]))) # n x 4
point_2d = point_3d_homo.dot(P.T)[:, :2]
point_2d[:, 1] = - point_2d[:, 1]
point_2d[:, :2] = point_2d[:, :2] - np.mean(point_2d[:4, :2], 0) + np.mean(pts68[:2, :27], 1)
point_2d = np.int32(point_2d.reshape(-1, 2))
# Draw all the lines
cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[1]), tuple(
point_2d[6]), color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[2]), tuple(
point_2d[7]), color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[3]), tuple(
point_2d[8]), color, line_width, cv2.LINE_AA)
return image
def main():
pass
if __name__ == '__main__':
main()
| 3,134 | 30.35 | 129 | py |
3DDFA | 3DDFA-master/utils/lighting.py | #!/usr/bin/env python3
# coding: utf-8
import sys
sys.path.append('../')
import numpy as np
from utils import render
from utils.cython import mesh_core_cython
_norm = lambda arr: arr / np.sqrt(np.sum(arr ** 2, axis=1))[:, None]
def norm_vertices(vertices):
vertices -= vertices.min(0)[None, :]
vertices /= vertices.max()
vertices *= 2
vertices -= vertices.max(0)[None, :] / 2
return vertices
def convert_type(obj):
    if isinstance(obj, (tuple, list)):
return np.array(obj, dtype=np.float32)[None, :]
return obj
class RenderPipeline(object):
def __init__(self, **kwargs):
self.intensity_ambient = convert_type(kwargs.get('intensity_ambient', 0.3))
self.intensity_directional = convert_type(kwargs.get('intensity_directional', 0.6))
self.intensity_specular = convert_type(kwargs.get('intensity_specular', 0.9))
self.specular_exp = kwargs.get('specular_exp', 5)
self.color_ambient = convert_type(kwargs.get('color_ambient', (1, 1, 1)))
self.color_directional = convert_type(kwargs.get('color_directional', (1, 1, 1)))
self.light_pos = convert_type(kwargs.get('light_pos', (0, 0, 1)))
self.view_pos = convert_type(kwargs.get('view_pos', (0, 0, 1)))
def update_light_pos(self, light_pos):
self.light_pos = convert_type(light_pos)
def __call__(self, vertices, triangles, background):
height, width = background.shape[:2]
# 1. compute triangle/face normals and vertex normals
# ## Old style: very slow
# normal = np.zeros((vertices.shape[0], 3), dtype=np.float32)
# # surface_count = np.zeros((vertices.shape[0], 1))
# for i in range(triangles.shape[0]):
# i1, i2, i3 = triangles[i, :]
# v1, v2, v3 = vertices[[i1, i2, i3], :]
# surface_normal = np.cross(v2 - v1, v3 - v1)
# normal[[i1, i2, i3], :] += surface_normal
# # surface_count[[i1, i2, i3], :] += 1
#
# # normal /= surface_count
# # normal /= np.linalg.norm(normal, axis=1, keepdims=True)
# normal = _norm(normal)
# Cython style
normal = np.zeros((vertices.shape[0], 3), dtype=np.float32)
mesh_core_cython.get_normal(normal, vertices, triangles, vertices.shape[0], triangles.shape[0])
# 2. lighting
color = np.zeros_like(vertices, dtype=np.float32)
# ambient component
if self.intensity_ambient > 0:
color += self.intensity_ambient * self.color_ambient
vertices_n = norm_vertices(vertices.copy())
if self.intensity_directional > 0:
# diffuse component
direction = _norm(self.light_pos - vertices_n)
cos = np.sum(normal * direction, axis=1)[:, None]
# cos = np.clip(cos, 0, 1)
# todo: check below
color += self.intensity_directional * (self.color_directional * np.clip(cos, 0, 1))
# specular component
if self.intensity_specular > 0:
v2v = _norm(self.view_pos - vertices_n)
reflection = 2 * cos * normal - direction
spe = np.sum((v2v * reflection) ** self.specular_exp, axis=1)[:, None]
spe = np.where(cos != 0, np.clip(spe, 0, 1), np.zeros_like(spe))
color += self.intensity_specular * self.color_directional * np.clip(spe, 0, 1)
color = np.clip(color, 0, 1)
# 2. rasterization, [0, 1]
render_img = render.crender_colors(vertices, triangles, color, height, width, BG=background)
render_img = (render_img * 255).astype(np.uint8)
return render_img
def main():
pass
if __name__ == '__main__':
main()
| 3,753 | 36.54 | 103 | py |
3DDFA | 3DDFA-master/utils/paf.py | #!/usr/bin/env python3
# coding: utf-8
import numpy as np
from .ddfa import _parse_param
from .params import u_filter, w_filter, w_exp_filter, std_size, param_mean, param_std
def reconstruct_paf_anchor(param, whitening=True):
if whitening:
param = param * param_std + param_mean
p, offset, alpha_shp, alpha_exp = _parse_param(param)
anchor = p @ (u_filter + w_filter @ alpha_shp + w_exp_filter @ alpha_exp).reshape(3, -1, order='F') + offset
anchor[1, :] = std_size + 1 - anchor[1, :]
return anchor[:2, :]
def gen_offsets(kernel_size):
    offsets = np.zeros((2, kernel_size * kernel_size), dtype=int)  # np.int was removed in NumPy >= 1.24
ind = 0
delta = (kernel_size - 1) // 2
for i in range(kernel_size):
y = i - delta
for j in range(kernel_size):
x = j - delta
offsets[0, ind] = x
offsets[1, ind] = y
ind += 1
return offsets
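# Minimal sketch (illustrative, not part of the original repo): a 3x3 kernel
# yields the 9 (x, y) offsets around the center, from (-1, -1) up to (1, 1).
def _demo_gen_offsets():
    offsets = gen_offsets(3)
    assert offsets.shape == (2, 9)
    assert (offsets[:, 0] == [-1, -1]).all() and (offsets[:, 8] == [1, 1]).all()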
def gen_img_paf(img_crop, param, kernel_size=3):
"""Generate PAF image
img_crop: 120x120
    kernel_size: kernel size for the sampling window; should be an odd number like 3 or 5
"""
anchor = reconstruct_paf_anchor(param)
    anchor = np.round(anchor).astype(int)
delta = (kernel_size - 1) // 2
anchor[anchor < delta] = delta
anchor[anchor >= std_size - delta - 1] = std_size - delta - 1
img_paf = np.zeros((64 * kernel_size, 64 * kernel_size, 3), dtype=np.uint8)
offsets = gen_offsets(kernel_size)
for i in range(kernel_size * kernel_size):
ox, oy = offsets[:, i]
index0 = anchor[0] + ox
index1 = anchor[1] + oy
p = img_crop[index1, index0].reshape(64, 64, 3).transpose(1, 0, 2)
img_paf[oy + delta::kernel_size, ox + delta::kernel_size] = p
return img_paf
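# Usage sketch (illustrative values, not from the original repo): given a
# 120x120 BGR crop and its whitened 62-dim 3DMM parameter vector, a call like
#   img_paf = gen_img_paf(img_crop, param, kernel_size=3)
# returns a (64*3) x (64*3) x 3 image built from 9 shifted 64x64 anchor grids.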
def main():
pass
if __name__ == '__main__':
main()
| 1,816 | 28.306452 | 112 | py |
3DDFA | 3DDFA-master/utils/__init__.py | 0 | 0 | 0 | py |
|
3DDFA | 3DDFA-master/utils/params.py | #!/usr/bin/env python3
# coding: utf-8
import os.path as osp
import numpy as np
from .io import _load
def make_abs_path(d):
return osp.join(osp.dirname(osp.realpath(__file__)), d)
d = make_abs_path('../train.configs')
keypoints = _load(osp.join(d, 'keypoints_sim.npy'))
w_shp = _load(osp.join(d, 'w_shp_sim.npy'))
w_exp = _load(osp.join(d, 'w_exp_sim.npy')) # simplified version
meta = _load(osp.join(d, 'param_whitening.pkl'))
# param_mean and param_std are used for re-whitening
param_mean = meta.get('param_mean')
param_std = meta.get('param_std')
u_shp = _load(osp.join(d, 'u_shp.npy'))
u_exp = _load(osp.join(d, 'u_exp.npy'))
u = u_shp + u_exp
w = np.concatenate((w_shp, w_exp), axis=1)
w_base = w[keypoints]
w_norm = np.linalg.norm(w, axis=0)
w_base_norm = np.linalg.norm(w_base, axis=0)
# for inference
dim = w_shp.shape[0] // 3
u_base = u[keypoints].reshape(-1, 1)
w_shp_base = w_shp[keypoints]
w_exp_base = w_exp[keypoints]
std_size = 120
# for paf (pac)
paf = _load(osp.join(d, 'Model_PAF.pkl'))
u_filter = paf.get('mu_filter')
w_filter = paf.get('w_filter')
w_exp_filter = paf.get('w_exp_filter')
# pncc code (mean shape)
pncc_code = _load(osp.join(d, 'pncc_code.npy'))
| 1,194 | 26.159091 | 65 | py |
3DDFA | 3DDFA-master/utils/io.py | #!/usr/bin/env python3
# coding: utf-8
import os
import numpy as np
import torch
import pickle
import scipy.io as sio
def mkdir(d):
    """mkdir -p equivalent (os.makedirs is portable, unlike the original os.system call)"""
    if not os.path.isdir(d) and not os.path.exists(d):
        os.makedirs(d)
def _get_suffix(filename):
"""a.jpg -> jpg"""
pos = filename.rfind('.')
if pos == -1:
return ''
return filename[pos + 1:]
def _load(fp):
suffix = _get_suffix(fp)
if suffix == 'npy':
return np.load(fp)
elif suffix == 'pkl':
return pickle.load(open(fp, 'rb'))
def _dump(wfp, obj):
suffix = _get_suffix(wfp)
if suffix == 'npy':
np.save(wfp, obj)
elif suffix == 'pkl':
pickle.dump(obj, open(wfp, 'wb'))
else:
raise Exception('Unknown Type: {}'.format(suffix))
def _load_tensor(fp, mode='cpu'):
if mode.lower() == 'cpu':
return torch.from_numpy(_load(fp))
elif mode.lower() == 'gpu':
return torch.from_numpy(_load(fp)).cuda()
def _tensor_to_cuda(x):
if x.is_cuda:
return x
else:
return x.cuda()
def _load_gpu(fp):
return torch.from_numpy(_load(fp)).cuda()
def load_bfm(model_path):
suffix = _get_suffix(model_path)
if suffix == 'mat':
C = sio.loadmat(model_path)
model = C['model_refine']
model = model[0, 0]
model_new = {}
w_shp = model['w'].astype(np.float32)
model_new['w_shp_sim'] = w_shp[:, :40]
w_exp = model['w_exp'].astype(np.float32)
model_new['w_exp_sim'] = w_exp[:, :10]
u_shp = model['mu_shape']
u_exp = model['mu_exp']
u = (u_shp + u_exp).astype(np.float32)
model_new['mu'] = u
model_new['tri'] = model['tri'].astype(np.int32) - 1
# flatten it, pay attention to index value
keypoints = model['keypoints'].astype(np.int32) - 1
keypoints = np.concatenate((3 * keypoints, 3 * keypoints + 1, 3 * keypoints + 2), axis=0)
model_new['keypoints'] = keypoints.T.flatten()
#
w = np.concatenate((w_shp, w_exp), axis=1)
w_base = w[keypoints]
w_norm = np.linalg.norm(w, axis=0)
w_base_norm = np.linalg.norm(w_base, axis=0)
dim = w_shp.shape[0] // 3
u_base = u[keypoints].reshape(-1, 1)
w_shp_base = w_shp[keypoints]
w_exp_base = w_exp[keypoints]
model_new['w_norm'] = w_norm
model_new['w_base_norm'] = w_base_norm
model_new['dim'] = dim
model_new['u_base'] = u_base
model_new['w_shp_base'] = w_shp_base
model_new['w_exp_base'] = w_exp_base
_dump(model_path.replace('.mat', '.pkl'), model_new)
return model_new
else:
return _load(model_path)
_load_cpu = _load
_numpy_to_tensor = lambda x: torch.from_numpy(x)
_tensor_to_numpy = lambda x: x.cpu()
_numpy_to_cuda = lambda x: _tensor_to_cuda(torch.from_numpy(x))
_cuda_to_tensor = lambda x: x.cpu()
_cuda_to_numpy = lambda x: x.cpu().numpy()
| 3,012 | 24.974138 | 97 | py |
3DDFA | 3DDFA-master/utils/estimate_pose.py | #!/usr/bin/env python3
# coding: utf-8
"""
Reference: https://github.com/YadiraF/PRNet/blob/master/utils/estimate_pose.py
"""
from math import cos, sin, atan2, asin, sqrt
import numpy as np
from .params import param_mean, param_std
def parse_pose(param):
param = param * param_std + param_mean
Ps = param[:12].reshape(3, -1) # camera matrix
# R = P[:, :3]
s, R, t3d = P2sRt(Ps)
P = np.concatenate((R, t3d.reshape(3, -1)), axis=1) # without scale
# P = Ps / s
pose = matrix2angle(R) # yaw, pitch, roll
# offset = p_[:, -1].reshape(3, 1)
return P, pose
def matrix2angle(R):
''' compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf
Args:
R: (3,3). rotation matrix
Returns:
x: yaw
y: pitch
z: roll
'''
# assert(isRotationMatrix(R))
if R[2, 0] != 1 and R[2, 0] != -1:
x = asin(R[2, 0])
y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x))
z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x))
else: # Gimbal lock
z = 0 # can be anything
if R[2, 0] == -1:
x = np.pi / 2
y = z + atan2(R[0, 1], R[0, 2])
else:
x = -np.pi / 2
y = -z + atan2(-R[0, 1], -R[0, 2])
return x, y, z
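# Minimal sketch (illustrative, not part of the original repo): the identity
# rotation decodes to zero angles.
def _demo_matrix2angle():
    x, y, z = matrix2angle(np.eye(3))
    assert abs(x) < 1e-8 and abs(y) < 1e-8 and abs(z) < 1e-8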
def P2sRt(P):
''' decompositing camera matrix P.
Args:
P: (3, 4). Affine Camera Matrix.
Returns:
s: scale factor.
R: (3, 3). rotation matrix.
t2d: (2,). 2d translation.
'''
t3d = P[:, 3]
R1 = P[0:1, :3]
R2 = P[1:2, :3]
s = (np.linalg.norm(R1) + np.linalg.norm(R2)) / 2.0
r1 = R1 / np.linalg.norm(R1)
r2 = R2 / np.linalg.norm(R2)
r3 = np.cross(r1, r2)
R = np.concatenate((r1, r2, r3), 0)
return s, R, t3d
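# Minimal sketch (illustrative, not part of the original repo): build a
# synthetic P = [s*R | t3d] and recover the three factors with P2sRt.
def _demo_P2sRt():
    R_true = np.eye(3)
    t_true = np.array([1., 2., 3.])
    P = np.concatenate((2.0 * R_true, t_true.reshape(3, 1)), axis=1)
    s, R, t3d = P2sRt(P)
    assert abs(s - 2.0) < 1e-8
    assert np.allclose(R, R_true) and np.allclose(t3d, t_true)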
def main():
pass
if __name__ == '__main__':
main()
| 1,870 | 22.3875 | 114 | py |
3DDFA | 3DDFA-master/utils/ddfa.py | #!/usr/bin/env python3
# coding: utf-8
import os.path as osp
from pathlib import Path
import numpy as np
import torch
import torch.utils.data as data
import cv2
import pickle
import argparse
from .io import _numpy_to_tensor, _load_cpu, _load_gpu
from .params import *
def _parse_param(param):
"""Work for both numpy and tensor"""
p_ = param[:12].reshape(3, -1)
p = p_[:, :3]
offset = p_[:, -1].reshape(3, 1)
alpha_shp = param[12:52].reshape(-1, 1)
alpha_exp = param[52:].reshape(-1, 1)
return p, offset, alpha_shp, alpha_exp
def reconstruct_vertex(param, whitening=True, dense=False, transform=True):
"""Whitening param -> 3d vertex, based on the 3dmm param: u_base, w_shp, w_exp
    dense: if True, return the dense vertices; otherwise return the 68 sparse landmarks. Dense or sparse vertices are
    transformed to image coordinate space, but without the alignment caused by face cropping.
transform: whether transform to image space
"""
if len(param) == 12:
param = np.concatenate((param, [0] * 50))
if whitening:
if len(param) == 62:
param = param * param_std + param_mean
else:
param = np.concatenate((param[:11], [0], param[11:]))
param = param * param_std + param_mean
p, offset, alpha_shp, alpha_exp = _parse_param(param)
if dense:
vertex = p @ (u + w_shp @ alpha_shp + w_exp @ alpha_exp).reshape(3, -1, order='F') + offset
if transform:
# transform to image coordinate space
vertex[1, :] = std_size + 1 - vertex[1, :]
else:
"""For 68 pts"""
vertex = p @ (u_base + w_shp_base @ alpha_shp + w_exp_base @ alpha_exp).reshape(3, -1, order='F') + offset
if transform:
# transform to image coordinate space
vertex[1, :] = std_size + 1 - vertex[1, :]
return vertex
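# Minimal sketch (assumes the 3DMM assets under train.configs/ are available,
# since u_base, w_shp_base, ... are loaded at import time by .params): an
# all-zero whitened parameter vector decodes to the 68 mean-shape landmarks.
def _demo_reconstruct_vertex():
    vertex = reconstruct_vertex(np.zeros(62), dense=False)
    assert vertex.shape == (3, 68)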
def img_loader(path):
return cv2.imread(path, cv2.IMREAD_COLOR)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ToTensorGjz(object):
def __call__(self, pic):
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float()
def __repr__(self):
return self.__class__.__name__ + '()'
class NormalizeGjz(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
tensor.sub_(self.mean).div_(self.std)
return tensor
class DDFADataset(data.Dataset):
    def __init__(self, root, filelists, param_fp, transform=None, **kwargs):
self.root = root
self.transform = transform
self.lines = Path(filelists).read_text().strip().split('\n')
self.params = _numpy_to_tensor(_load_cpu(param_fp))
self.img_loader = img_loader
def _target_loader(self, index):
target = self.params[index]
return target
def __getitem__(self, index):
path = osp.join(self.root, self.lines[index])
img = self.img_loader(path)
target = self._target_loader(index)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.lines)
class DDFATestDataset(data.Dataset):
def __init__(self, filelists, root='', transform=None):
self.root = root
self.transform = transform
self.lines = Path(filelists).read_text().strip().split('\n')
def __getitem__(self, index):
path = osp.join(self.root, self.lines[index])
img = img_loader(path)
if self.transform is not None:
img = self.transform(img)
return img
def __len__(self):
return len(self.lines)
| 4,316 | 26.673077 | 118 | py |
3DDFA | 3DDFA-master/utils/cython/setup.py | '''
python setup.py build_ext -i
to compile
'''
# setup.py
from distutils.core import setup, Extension
# from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy
setup(
name='mesh_core_cython',
cmdclass={'build_ext': build_ext},
ext_modules=[Extension("mesh_core_cython",
sources=["mesh_core_cython.pyx", "mesh_core.cpp"],
language='c++',
include_dirs=[numpy.get_include()])],
)
| 504 | 24.25 | 77 | py |
3DDFA | 3DDFA-master/utils/cython/__init__.py | 0 | 0 | 0 | py |
|
BioFLAIR | BioFLAIR-master/fine_tune.py | from flair.data import Corpus
from flair.datasets import ColumnCorpus
columns = {0: 'text', 1: 'pos', 3: 'ner'}
# this is the folder in which train, test and dev files reside
data_folder = 'data/ner/bc5dr'
# init a corpus using column format, data folder and the names of the train, dev and test files
corpus: Corpus = ColumnCorpus(data_folder, columns,
train_file='train.txt',
test_file='test.txt',
dev_file='dev.txt')
tag_type = 'ner'
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
from flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings, PooledFlairEmbeddings, ELMoEmbeddings
from typing import List
embedding_types: List[TokenEmbeddings] = [
PooledFlairEmbeddings('pubmed-forward'),
PooledFlairEmbeddings('pubmed-backward'),
ELMoEmbeddings('pubmed'),
]
embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)
from flair.models import SequenceTagger
tagger: SequenceTagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type,
use_crf=True)
# initialize trainer
from flair.trainers import ModelTrainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
# start training
trainer.train('ner/models/v1',
learning_rate=0.1,
mini_batch_size=32,
max_epochs=150)
| 1,590 | 36 | 118 | py |
BioFLAIR | BioFLAIR-master/pre_train.py | from flair.data import Dictionary
from flair.models import LanguageModel
from flair.trainers.language_model_trainer import LanguageModelTrainer, TextCorpus
from flair.embeddings import FlairEmbeddings
dictionary: Dictionary = Dictionary.load('chars')
#dictionary: Dictionary = language_model.dictionary
language_model = FlairEmbeddings('pubmed-forward').lm
# get your corpus, process forward and at the character level
is_forward_lm=True
corpus = TextCorpus('/content/corpus',
dictionary,
is_forward_lm,
character_level=True)
trainer = LanguageModelTrainer(language_model, corpus)
trainer.train('/content/language_model',
sequence_length=10,
mini_batch_size=10,
max_epochs=10)
| 783 | 33.086957 | 82 | py |
Squeezeformer | Squeezeformer-main/setup.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as fr:
requirements = fr.read().splitlines()
setuptools.setup(
name="squeezeformer",
packages=setuptools.find_packages(include=["src*"]),
install_requires=requirements,
extras_require={
#"tf2.3": ["tensorflow>=2.3.0,<2.4", "tensorflow-text>2.3.0,<2.4", "tensorflow-io>=0.16.0,<0.17"],
#"tf2.3-gpu": ["tensorflow-gpu>=2.3.0,<2.4", "tensorflow-text>=2.3.0,<2.4", "tensorflow-io>=0.16.0,<0.17"],
#"tf2.4": ["tensorflow>=2.4.0,<2.5", "tensorflow-text>=2.4.0,<2.5", "tensorflow-io>=0.17.0,<0.18"],
#"tf2.4-gpu": ["tensorflow-gpu>=2.4.0,<2.5", "tensorflow-text>=2.4.0,<2.5", "tensorflow-io>=0.17.0,<0.18"],
"tf2.5": ["tensorflow>=2.5.0,<2.6", "tensorflow-text>=2.5.0,<2.6", "tensorflow-io>=0.18.0,<0.19"],
"tf2.5-gpu": ["tensorflow-gpu>=2.5.0,<2.6", "tensorflow-text>=2.5.0,<2.6", "tensorflow-io>=0.18.0,<0.19"]
},
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Intended Audience :: Science/Research",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Libraries :: Python Modules"
],
python_requires='>=3.6',
)
| 2,026 | 43.065217 | 115 | py |
Squeezeformer | Squeezeformer-main/examples/squeezeformer/test.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from tqdm import tqdm
import argparse
from scipy.special import softmax
import datasets
import tensorflow as tf
from src.configs.config import Config
from src.datasets.asr_dataset import ASRSliceDataset
from src.featurizers.speech_featurizers import TFSpeechFeaturizer
from src.featurizers.text_featurizers import SentencePieceFeaturizer
from src.models.conformer import ConformerCtc
from src.utils import env_util, file_util
logger = env_util.setup_environment()
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
DEFAULT_YAML = os.path.join(os.path.abspath(os.path.dirname(__file__)), "config.yml")
tf.keras.backend.clear_session()
def parse_arguments():
parser = argparse.ArgumentParser(prog="Conformer Testing")
parser.add_argument("--config", type=str, default=DEFAULT_YAML, help="The file path of model configuration file")
parser.add_argument("--mxp", default=False, action="store_true", help="Enable mixed precision")
parser.add_argument("--device", type=int, default=0, help="Device's id to run test on")
parser.add_argument("--cpu", default=False, action="store_true", help="Whether to only use cpu")
parser.add_argument("--saved", type=str, default=None, help="Path to saved model")
parser.add_argument("--output", type=str, default=None, help="Result filepath")
# Dataset arguments
parser.add_argument("--bs", type=int, default=None, help="Test batch size")
parser.add_argument("--dataset_path", type=str, required=True, help="path to the tsv manifest files")
parser.add_argument("--dataset", type=str, default="test_other",
choices=["dev_clean", "dev_other", "test_clean", "test_other"], help="Testing dataset")
parser.add_argument("--input_padding", type=int, default=3700)
parser.add_argument("--label_padding", type=int, default=530)
# Architecture arguments
parser.add_argument("--fixed_arch", default=None, help="force fixed architecture")
# Decoding arguments
parser.add_argument("--beam_size", type=int, default=None, help="ctc beam size")
args = parser.parse_args()
return args
def parse_fixed_arch(args):
parsed_arch = args.fixed_arch.split('|')
i, rep = 0, 1
fixed_arch = []
while i < len(parsed_arch):
if parsed_arch[i].isnumeric():
rep = int(parsed_arch[i])
else:
block = parsed_arch[i].split(',')
assert len(block) == NUM_LAYERS_IN_BLOCK
for _ in range(rep):
fixed_arch.append(block)
rep = 1
i += 1
return fixed_arch
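# Example of the --fixed_arch grammar accepted above (illustrative values, not
# from the original repo): "2|f,f,s,s|c,c,f,f" expands the leading repeat count
# into [['f', 'f', 's', 's'], ['f', 'f', 's', 's'], ['c', 'c', 'f', 'f']],
# i.e. one block of NUM_LAYERS_IN_BLOCK (= 4) layer types per encoder block.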
args = parse_arguments()
config = Config(args.config)
NUM_BLOCKS = config.model_config['encoder_num_blocks']
NUM_LAYERS_IN_BLOCK = 4
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": args.mxp})
env_util.setup_devices([args.device], cpu=args.cpu)
speech_featurizer = TFSpeechFeaturizer(config.speech_config)
logger.info("Use SentencePiece ...")
text_featurizer = SentencePieceFeaturizer(config.decoder_config)
tf.random.set_seed(0)
# Parse fixed architecture
if args.fixed_arch is not None:
fixed_arch = parse_fixed_arch(args)
if len(fixed_arch) != NUM_BLOCKS:
        logger.warning(
            f"encoder_num_blocks={config.model_config['encoder_num_blocks']} is " \
            f"different from len(fixed_arch) = {len(fixed_arch)}." \
        )
        logger.warning(f"Changing `encoder_num_blocks` to {len(fixed_arch)}")
config.model_config['encoder_num_blocks'] = len(fixed_arch)
logger.info(f"Changing fixed arch: {fixed_arch}")
config.model_config['encoder_fixed_arch'] = fixed_arch
if args.dataset_path is not None:
dataset_path = os.path.join(args.dataset_path, f"{args.dataset}.tsv")
logger.info(f"dataset: {args.dataset} at {dataset_path}")
config.learning_config.test_dataset_config.data_paths = [dataset_path]
else:
raise ValueError("specify the manifest file path using --dataset_path")
test_dataset = ASRSliceDataset(
speech_featurizer=speech_featurizer,
text_featurizer=text_featurizer,
input_padding_length=args.input_padding,
label_padding_length=args.label_padding,
**vars(config.learning_config.test_dataset_config)
)
conformer = ConformerCtc(
**config.model_config,
vocabulary_size=text_featurizer.num_classes,
)
conformer.make(speech_featurizer.shape)
if args.saved:
conformer.load_weights(args.saved, by_name=True)
else:
logger.warning("Model is initialized randomly, please use --saved to assign checkpoint")
conformer.summary(line_length=100)
conformer.add_featurizers(speech_featurizer, text_featurizer)
batch_size = args.bs or config.learning_config.running_config.batch_size
test_data_loader = test_dataset.create(batch_size)
blank_id = text_featurizer.blank
true_decoded = []
pred_decoded = []
beam_decoded = []
#for batch in enumerate(test_data_loader):
for k, batch in tqdm(enumerate(test_data_loader)):
labels, labels_len = batch[1]['labels'], batch[1]['labels_length']
outputs = conformer(batch[0], training=False)
logits, logits_len = outputs['logits'], outputs['logits_length']
    probs = softmax(logits, axis=-1)  # normalize over the vocabulary axis; scipy's default axis=None would softmax over the entire tensor
if args.beam_size is not None:
beam = tf.nn.ctc_beam_search_decoder(
tf.transpose(logits, perm=[1, 0, 2]), logits_len, beam_width=args.beam_size, top_paths=1,
)
beam = tf.sparse.to_dense(beam[0][0]).numpy()
    for i, (p, l, label, ll) in enumerate(zip(probs, logits_len, labels, labels_len)):
        # p: length x characters
        pred = p[:l].argmax(-1)
        decoded_prediction = []
        previous = blank_id
        # CTC greedy decoding: collapse repeated tokens, then drop the blank tokens
        for token in pred:
            if (token != previous or previous == blank_id) and token != blank_id:
                decoded_prediction.append(token)
            previous = token
if len(decoded_prediction) == 0:
decoded = ""
else:
decoded = text_featurizer.iextract([decoded_prediction]).numpy()[0].decode('utf-8')
pred_decoded.append(decoded)
label_len = tf.math.reduce_sum(tf.cast(label != 0, tf.int32))
true_decoded.append(text_featurizer.iextract([label[:label_len]]).numpy()[0].decode('utf-8'))
if args.beam_size is not None:
b = beam[i]
            previous = blank_id
            # CTC greedy collapse on the beam output: merge repeats, drop blanks
            beam_prediction = []
            for token in b:
                if (token != previous or previous == blank_id) and token != blank_id:
                    beam_prediction.append(token)
                previous = token
if len(beam_prediction) == 0:
decoded = ""
else:
decoded = text_featurizer.iextract([beam_prediction]).numpy()[0].decode('utf-8')
beam_decoded.append(decoded)
wer_metric = datasets.load_metric("wer")
logger.info(f"Length decoded: {len(true_decoded)}")
logger.info(f"WER: {wer_metric.compute(predictions=pred_decoded, references=true_decoded)}")
if args.beam_size is not None:
logger.info(f"WER-beam: {wer_metric.compute(predictions=beam_decoded, references=true_decoded)}")
if args.output is not None:
with file_util.save_file(file_util.preprocess_paths(args.output)) as filepath:
overwrite = True
if tf.io.gfile.exists(filepath):
overwrite = input(f"Overwrite existing result file {filepath} ? (y/n): ").lower() == "y"
if overwrite:
logger.info(f"Saving result to {args.output} ...")
with open(filepath, "w") as openfile:
openfile.write("PATH\tDURATION\tGROUNDTRUTH\tGREEDY\tBEAMSEARCH\n")
progbar = tqdm(total=test_dataset.total_steps, unit="batch")
                for i, (groundtruth, greedy) in enumerate(zip(true_decoded, pred_decoded)):
                    beam_text = beam_decoded[i] if args.beam_size is not None else "N/A"
                    openfile.write(f"N/A\tN/A\t{groundtruth}\t{greedy}\t{beam_text}\n")
progbar.update(1)
progbar.close()
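
# Hypothetical invocation (script name, paths, and checkpoint are illustrative placeholders):
#   python eval.py --dataset test_other --dataset_path /data/manifests \
#       --saved checkpoints/squeezeformer.h5 --bs 8 --beam_size 16 --output results.tsv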
| 8,644 | 37.59375 | 117 | py |
Squeezeformer | Squeezeformer-main/src/__init__.py | 0 | 0 | 0 | py |
|
Squeezeformer | Squeezeformer-main/src/models/base_model.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import mixed_precision as mxp
from ..utils import file_util, env_util
class BaseModel(tf.keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metrics = {}
self.use_loss_scale = False
def save(
self,
filepath,
overwrite=True,
include_optimizer=True,
save_format=None,
signatures=None,
options=None,
save_traces=True,
):
with file_util.save_file(filepath) as path:
super().save(
filepath=path,
overwrite=overwrite,
include_optimizer=include_optimizer,
save_format=save_format,
signatures=signatures,
options=options,
save_traces=save_traces,
)
def save_weights(
self,
filepath,
overwrite=True,
save_format=None,
options=None,
):
with file_util.save_file(filepath) as path:
super().save_weights(
filepath=path,
overwrite=overwrite,
save_format=save_format,
options=options,
)
def load_weights(
self,
filepath,
by_name=False,
skip_mismatch=False,
options=None,
):
with file_util.read_file(filepath) as path:
super().load_weights(
filepath=path,
by_name=by_name,
skip_mismatch=skip_mismatch,
options=options,
)
@property
def metrics(self):
return self._metrics.values()
def add_metric(self, metric: tf.keras.metrics.Metric):
self._metrics[metric.name] = metric
def make(self, *args, **kwargs):
""" Custom function for building model (uses self.build so cannot overwrite that function) """
raise NotImplementedError()
def compile(self, loss, optimizer, run_eagerly=None, **kwargs):
if not env_util.has_devices("TPU"):
optimizer = mxp.experimental.LossScaleOptimizer(tf.keras.optimizers.get(optimizer), "dynamic")
self.use_loss_scale = True
loss_metric = tf.keras.metrics.Mean(name="loss", dtype=tf.float32)
self._metrics = {loss_metric.name: loss_metric}
super().compile(optimizer=optimizer, loss=loss, run_eagerly=run_eagerly, **kwargs)
# -------------------------------- STEP FUNCTIONS -------------------------------------
def gradient_step(self, inputs, y_true):
with tf.GradientTape() as tape:
y_pred = self(inputs, training=True)
loss = self.loss(y_true, y_pred)
if self.use_loss_scale:
scaled_loss = self.optimizer.get_scaled_loss(loss)
if self.use_loss_scale:
gradients = tape.gradient(scaled_loss, self.trainable_weights)
gradients = self.optimizer.get_unscaled_gradients(gradients)
else:
gradients = tape.gradient(loss, self.trainable_weights)
return loss, y_pred, gradients
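
    # Note on loss scaling (explanatory comment, not original code): with mixed precision,
    # the loss is multiplied by a dynamic scale S before differentiation so that small
    # float16 gradients do not underflow to zero; get_unscaled_gradients then divides the
    # gradients by S, so the applied update is mathematically unchanged.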
def train_step(self, batch):
"""
Args:
batch ([tf.Tensor]): a batch of training data
Returns:
            Dict[tf.Tensor]: a dict of training metrics keyed by metric name
"""
inputs, y_true = batch
loss, y_pred, gradients = self.gradient_step(inputs, y_true)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
self._metrics["loss"].update_state(loss)
if 'step_loss' in self._metrics:
self._metrics['step_loss'].update_state(loss)
if 'WER' in self._metrics:
self._metrics['WER'].update_state(y_true, y_pred)
if 'labels' in self._metrics:
self._metrics['labels'].update_state(y_true)
if 'logits' in self._metrics:
self._metrics['logits'].update_state(y_pred)
if 'logits_len' in self._metrics:
self._metrics['logits_len'].update_state(y_pred)
return {m.name: m.result() for m in self.metrics}
def test_step(self, batch):
"""
Args:
            batch ([tf.Tensor]): a batch of validation data
        Returns:
            Dict[tf.Tensor]: a dict of validation metrics keyed by metric name, prefixed with "val_"
"""
inputs, y_true = batch
y_pred = self(inputs, training=False)
loss = self.loss(y_true, y_pred)
self._metrics["loss"].update_state(loss)
if 'step_loss' in self._metrics:
self._metrics['step_loss'].update_state(loss)
if 'WER' in self._metrics:
self._metrics['WER'].update_state(y_true, y_pred)
if 'labels' in self._metrics:
self._metrics['labels'].update_state(y_true)
if 'logits' in self._metrics:
self._metrics['logits'].update_state(y_pred)
if 'logits_len' in self._metrics:
self._metrics['logits_len'].update_state(y_pred)
return {m.name: m.result() for m in self.metrics}
def predict_step(self, batch):
"""
Args:
batch ([tf.Tensor]): a batch of testing data
Returns:
            [tf.Tensor]: stacked tensor of shape [B, 3] where each row is the text [truth, greedy, beam_search]
"""
inputs, y_true = batch
labels = self.text_featurizer.iextract(y_true["labels"])
greedy_decoding = self.recognize(inputs)
if self.text_featurizer.decoder_config.beam_width == 0:
beam_search_decoding = tf.map_fn(lambda _: tf.convert_to_tensor("", dtype=tf.string), labels)
else:
beam_search_decoding = self.recognize_beam(inputs)
return tf.stack([labels, greedy_decoding, beam_search_decoding], axis=-1)
# -------------------------------- INFERENCE FUNCTIONS -------------------------------------
def recognize(self, *args, **kwargs):
""" Greedy decoding function that used in self.predict_step """
raise NotImplementedError()
def recognize_beam(self, *args, **kwargs):
""" Beam search decoding function that used in self.predict_step """
raise NotImplementedError()
| 6,880 | 35.026178 | 111 | py |
Squeezeformer | Squeezeformer-main/src/models/conformer_encoder.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from .submodules.glu import GLU
from .submodules.subsampling import Conv2dSubsampling
from .submodules.positional_encoding import PositionalEncoding
from .submodules.multihead_attention import MultiHeadAttention, RelPositionMultiHeadAttention
from .submodules.time_reduction import TimeReductionLayer
from ..utils import shape_util
logger = tf.get_logger()
class FFModule(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
dropout=0.0,
fc_factor=0.5,
adaptive_scale=False,
ff_expansion_rate=4,
name="ff_module",
**kwargs,
):
super(FFModule, self).__init__(name=name, **kwargs)
self.fc_factor = fc_factor
logger.info(f"fc factor set as {self.fc_factor}")
self.adaptive_scale = adaptive_scale
if not adaptive_scale:
logger.info("No scaling, use preLN")
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
else:
logger.info("Use scaling, no preLN")
self.scale = tf.Variable([1.] * input_dim, trainable=True, name=f'{name}_scale')
self.bias = tf.Variable([0.] * input_dim, trainable=True, name=f'{name}_bias')
ffn1_max = input_dim ** -0.5
ffn2_max = (ff_expansion_rate * input_dim) ** -0.5
self.ffn1 = tf.keras.layers.Dense(
ff_expansion_rate * input_dim, name=f"{name}_dense_1",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-ffn1_max, maxval=ffn1_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-ffn1_max, maxval=ffn1_max),
)
self.act = tf.keras.layers.Activation(tf.nn.swish, name=f"{name}_act")
self.do1 = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout_1")
self.ffn2 = tf.keras.layers.Dense(
input_dim, name=f"{name}_dense_2",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-ffn2_max, maxval=ffn2_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-ffn2_max, maxval=ffn2_max),
)
self.do2 = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout_2")
self.res_add = tf.keras.layers.Add(name=f"{name}_add")
def call(self, inputs, training=False, **kwargs):
if not self.adaptive_scale:
outputs = self.ln(inputs, training=training)
else:
scale = tf.reshape(self.scale, (1, 1, -1))
bias = tf.reshape(self.bias, (1, 1, -1))
outputs = inputs * scale + bias
outputs = self.ffn1(outputs, training=training)
outputs = self.act(outputs)
outputs = self.do1(outputs, training=training)
outputs = self.ffn2(outputs, training=training)
outputs = self.do2(outputs, training=training)
outputs = self.res_add([inputs, self.fc_factor * outputs])
return outputs
class MHSAModule(tf.keras.layers.Layer):
def __init__(
self,
head_size,
num_heads,
dropout=0.0,
mha_type="relmha",
adaptive_scale=False,
name="mhsa_module",
**kwargs,
):
super(MHSAModule, self).__init__(name=name, **kwargs)
self.adaptive_scale = adaptive_scale
input_dim = num_heads * head_size
if not adaptive_scale:
logger.info("No scaling, use preLN")
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
else:
logger.info("Use scaling, no preLN")
self.scale = tf.Variable([1.] * input_dim, trainable=True, name=f'{name}_scale')
self.bias = tf.Variable([0.] * input_dim, trainable=True, name=f'{name}_bias')
if mha_type == "relmha":
self.mha = RelPositionMultiHeadAttention(
name=f"{name}_mhsa",
head_size=head_size, num_heads=num_heads,
)
elif mha_type == "mha":
self.mha = MultiHeadAttention(
name=f"{name}_mhsa",
head_size=head_size, num_heads=num_heads,
)
else:
raise ValueError("mha_type must be either 'mha' or 'relmha'")
self.do = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout")
self.res_add = tf.keras.layers.Add(name=f"{name}_add")
self.mha_type = mha_type
def call(self, inputs, training=False, mask=None, pos=False, **kwargs):
if pos is False:
inputs, pos = inputs # pos is positional encoding
if not self.adaptive_scale:
outputs = self.ln(inputs, training=training)
else:
scale = tf.reshape(self.scale, (1, 1, -1))
bias = tf.reshape(self.bias, (1, 1, -1))
outputs = inputs * scale + bias
if self.mha_type == "relmha":
outputs = self.mha([outputs, outputs, outputs, pos], training=training, mask=mask)
else:
outputs = outputs + pos
outputs = self.mha([outputs, outputs, outputs], training=training, mask=mask)
outputs = self.do(outputs, training=training)
outputs = self.res_add([inputs, outputs])
return outputs
class ConvModule(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
kernel_size=31,
dropout=0.0,
depth_multiplier=1,
conv_expansion_rate=2,
conv_use_glu=False,
adaptive_scale=False,
name="conv_module",
**kwargs,
):
super(ConvModule, self).__init__(name=name, **kwargs)
self.adaptive_scale = adaptive_scale
if not adaptive_scale:
logger.info("No scaling, use preLN")
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
else:
logger.info("Use scaling, no preLN")
self.scale = tf.Variable([1.] * input_dim, trainable=True, name=f'{name}_scale')
self.bias = tf.Variable([0.] * input_dim, trainable=True, name=f'{name}_bias')
pw1_max = input_dim ** -0.5
dw_max = kernel_size ** -0.5
pw2_max = input_dim ** -0.5
self.pw_conv_1 = tf.keras.layers.Conv2D(
filters=conv_expansion_rate * input_dim, kernel_size=1, strides=1,
padding="valid", name=f"{name}_pw_conv_1",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-pw1_max, maxval=pw1_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-pw1_max, maxval=pw1_max),
)
if conv_use_glu:
logger.info("Using GLU for Conv")
self.act1 = GLU(name=f"{name}_act_1")
else:
logger.info("Replace GLU with swish for Conv")
self.act1 = tf.keras.layers.Activation(tf.nn.swish, name=f"{name}_act_1")
self.dw_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(kernel_size, 1), strides=1,
padding="same", name=f"{name}_dw_conv",
depth_multiplier=depth_multiplier,
depthwise_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
)
self.bn = tf.keras.layers.experimental.SyncBatchNormalization(
name=f"{name}_bn",
momentum=0.985,
)
self.act2 = tf.keras.layers.Activation(tf.nn.swish, name=f"{name}_act_2")
self.pw_conv_2 = tf.keras.layers.Conv2D(
filters=input_dim, kernel_size=1, strides=1,
padding="valid", name=f"{name}_pw_conv_2",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-pw2_max, maxval=pw2_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-pw2_max, maxval=pw2_max),
)
self.do = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout")
self.res_add = tf.keras.layers.Add(name=f"{name}_add")
def call(self, inputs, training=False, pad_mask=None, **kwargs):
if not self.adaptive_scale:
outputs = self.ln(inputs, training=training)
else:
scale = tf.reshape(self.scale, (1, 1, -1))
bias = tf.reshape(self.bias, (1, 1, -1))
outputs = inputs * scale + bias
B, T, E = shape_util.shape_list(outputs)
outputs = tf.reshape(outputs, [B, T, 1, E])
outputs = self.pw_conv_1(outputs, training=training)
outputs = self.act1(outputs)
pad_mask = tf.expand_dims(tf.expand_dims(pad_mask, -1), -1)
outputs = outputs * tf.cast(pad_mask, "float32")
outputs = self.dw_conv(outputs, training=training)
outputs = self.bn(outputs, training=training)
outputs = self.act2(outputs)
outputs = self.pw_conv_2(outputs, training=training)
outputs = tf.reshape(outputs, [B, T, E])
outputs = self.do(outputs, training=training)
outputs = self.res_add([inputs, outputs])
return outputs
class IdentityLayer(tf.keras.layers.Layer):
def call(self, inputs, *args, **kwargs):
return inputs
class MHSAFFModule(tf.keras.layers.Layer):
'''
    Wrapper class for an MHSA layer followed by an FF layer
'''
def __init__(
self,
input_dim,
head_size,
num_heads,
dropout=0.0,
mha_type="relmha",
fc_factor=0.5,
ff_expansion_rate=4,
adaptive_scale=False,
name="mhsaff_module",
**kwargs,
):
super(MHSAFFModule, self).__init__(name=name, **kwargs)
assert input_dim == head_size * num_heads
self.mhsa = MHSAModule(
mha_type=mha_type,
head_size=head_size,
num_heads=num_heads,
adaptive_scale=adaptive_scale,
dropout=dropout,
name=f"{name}_mhsa",
)
self.ln_mid = tf.keras.layers.LayerNormalization(name=f"{name}_ln_mid")
self.ff = FFModule(
input_dim=input_dim,
dropout=dropout,
fc_factor=fc_factor,
ff_expansion_rate=ff_expansion_rate,
adaptive_scale=adaptive_scale,
name=f"{name}_ff",
)
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
def call(self, inputs, training=False, *args, **kwargs):
outputs = self.mhsa(inputs, training=training, *args, **kwargs)
outputs = self.ln_mid(outputs, training=training)
outputs = self.ff(outputs, training=training, *args, **kwargs)
outputs = self.ln(outputs, training=training)
return outputs
class ConvFFModule(tf.keras.layers.Layer):
'''
    Wrapper class for a Conv layer followed by an FF layer
'''
def __init__(
self,
input_dim,
kernel_size=31,
dropout=0.0,
conv_expansion_rate=2,
conv_use_glu=False,
fc_factor=0.5,
ff_expansion_rate=4,
adaptive_scale=False,
name="convff_module",
**kwargs,
):
super(ConvFFModule, self).__init__(name=name, **kwargs)
self.conv = ConvModule(
input_dim=input_dim,
kernel_size=kernel_size,
conv_expansion_rate=conv_expansion_rate,
dropout=dropout,
conv_use_glu=conv_use_glu,
adaptive_scale=adaptive_scale,
name=f"{name}_conv",
)
self.ln_mid = tf.keras.layers.LayerNormalization(name=f"{name}_ln_mid")
self.ff = FFModule(
input_dim=input_dim, dropout=dropout,
fc_factor=fc_factor,
ff_expansion_rate=ff_expansion_rate,
adaptive_scale=adaptive_scale,
name=f"{name}_ff",
)
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
def call(self, inputs, training=False, *args, **kwargs):
outputs = self.conv(inputs, training=training, *args, **kwargs)
outputs = self.ln_mid(outputs, training=training)
outputs = self.ff(outputs, training=training, *args, **kwargs)
outputs = self.ln(outputs, training=training)
return outputs
class ConformerBlock(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
dropout=0.0,
fc_factor=0.5,
head_size=36,
num_heads=4,
mha_type="relmha",
kernel_size=31,
name="conformer_block",
fixed_arch=None,
conv_use_glu=False,
no_post_ln=False,
adaptive_scale=False,
**kwargs,
):
assert input_dim == num_heads * head_size
super(ConformerBlock, self).__init__(name=name, **kwargs)
def get_fixed_arch(arch_type, name):
logger.info(f'layer type: {arch_type}')
if arch_type == 'f':
return FFModule(
input_dim=input_dim,
dropout=dropout,
fc_factor=fc_factor,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 'm':
return MHSAModule(
mha_type=mha_type,
head_size=head_size,
num_heads=num_heads,
dropout=dropout,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 'c':
return ConvModule(
input_dim=input_dim,
kernel_size=kernel_size,
dropout=dropout,
conv_use_glu=conv_use_glu,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 'M':
return MHSAFFModule(
mha_type=mha_type,
head_size=head_size,
num_heads=num_heads,
dropout=dropout,
input_dim=input_dim,
fc_factor=fc_factor,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 'C':
return ConvFFModule(
input_dim=input_dim,
kernel_size=kernel_size,
conv_use_glu=conv_use_glu,
dropout=dropout,
fc_factor=fc_factor,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 's':
return IdentityLayer()
raise ValueError(f"fised architecture type '{arch_type}' is not defined")
####### Layer 1: MHSA ######
if fixed_arch is None:
arch_type = 'm'
else:
arch_type = fixed_arch[0]
self.layer1 = get_fixed_arch(arch_type, name+"_layer1")
####### Layer 2: FF ######
arch_type = 'f' if fixed_arch is None else fixed_arch[1]
self.layer2 = get_fixed_arch(arch_type, name+"_layer2")
####### Layer 3: CONV ######
arch_type = 'c' if fixed_arch is None else fixed_arch[2]
self.layer3 = get_fixed_arch(arch_type, name+"_layer3")
####### Layer 4: FF ######
arch_type = 'f' if fixed_arch is None else fixed_arch[3]
self.layer4 = get_fixed_arch(arch_type, name+"_layer4")
if not no_post_ln:
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
        else:  # we skip post-LN for Squeezeformer since it has already been applied in the MF or CF blocks
logger.info("Skipping post ln")
self.ln = None
def call(self, inputs, training=False, mask=None, pad_mask=None, **kwargs):
inputs, pos = inputs # pos is positional encoding
outputs = self.layer1(inputs, training=training, mask=mask, pos=pos, pad_mask=pad_mask, **kwargs)
outputs = self.layer2(outputs, training=training, mask=mask, pos=pos, pad_mask=pad_mask, **kwargs)
outputs = self.layer3(outputs, training=training, mask=mask, pos=pos, pad_mask=pad_mask, **kwargs)
outputs = self.layer4(outputs, training=training, mask=mask, pos=pos, pad_mask=pad_mask, **kwargs)
if self.ln is not None:
outputs = self.ln(outputs, training=training)
return outputs
class ConformerEncoder(tf.keras.Model):
def __init__(
self,
subsampling,
dmodel=144,
num_blocks=16,
mha_type="relmha",
head_size=36,
num_heads=4,
kernel_size=31,
fc_factor=0.5,
dropout=0.0,
name="conformer_encoder",
fixed_arch=None,
conv_use_glu=None,
time_reduce_idx=None,
time_recover_idx=None,
no_post_ln=False,
ds_subsample=False,
adaptive_scale=False,
**kwargs,
):
super(ConformerEncoder, self).__init__(name=name, **kwargs)
if time_reduce_idx is None:
self.time_reduce = None
else:
if time_recover_idx is None:
self.time_reduce = 'normal' # no recovery at the end
else:
self.time_reduce = 'recover' # recovery at the end
assert len(time_reduce_idx) == len(time_recover_idx)
self.reduce_idx = time_reduce_idx
self.recover_idx = time_recover_idx
self.reduce_stride = 2
self.dmodel = dmodel
self.xscale = dmodel ** 0.5
subsampling_name = subsampling.pop("type", "conv2d")
if subsampling_name == "vgg":
raise NotImplementedError("VGG subsampling is not supported")
elif subsampling_name == "conv2d":
subsampling_class = Conv2dSubsampling
else:
raise ValueError("subsampling must be either 'conv2d' or 'vgg'")
self.conv_subsampling = subsampling_class(
**subsampling, ds=ds_subsample, name=f"{name}_subsampling",
)
self.pre_ln = tf.keras.layers.LayerNormalization(name=f"{name}_preln")
self.pe = PositionalEncoding(dmodel, name=f"{name}_pe")
linear_max = 5120 ** -0.5 # TODO: parameterize this later
self.linear = tf.keras.layers.Dense(
dmodel, name=f"{name}_linear",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-linear_max, maxval=linear_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-linear_max, maxval=linear_max),
)
self.do = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout")
self.conformer_blocks = []
recover_dmodels = []
recover_head_sizes = []
self.pe_time_reduction = []
self.time_reduction_layers = []
self.time_recover_layers = []
for i in range(num_blocks):
logger.info(f"Initialize block {i}")
if self.time_reduce is not None and i in self.reduce_idx:
recover_dmodel = dmodel
recover_dmodels.append(recover_dmodel) # push dmodel to recover later
recover_head_sizes.append(head_size) # push head size to recover later
logger.info(f"Reducing to dmodel {dmodel}, head_size {head_size}")
self.time_reduction_layers.append(
TimeReductionLayer(
recover_dmodel,
dmodel,
stride=self.reduce_stride,
name=f"{name}_timereduce",
)
)
self.pe_time_reduction.append(PositionalEncoding(dmodel, name=f"{name}_pe2"))
if self.time_reduce == 'recover' and i in self.recover_idx:
dmodel = recover_dmodels[-1] # pop dmodel for recovery
head_size = recover_head_sizes[-1] # pop head size for recovery
logger.info(f"recovering to dmodel {dmodel}, head_size {head_size}")
self.time_recover_layers.append(tf.keras.layers.Dense(dmodel))
recover_dmodels = recover_dmodels[:-1]
recover_head_sizes = recover_head_sizes[:-1]
conformer_block = ConformerBlock(
input_dim=dmodel,
dropout=dropout,
fc_factor=fc_factor,
head_size=head_size,
num_heads=num_heads,
mha_type=mha_type,
kernel_size=kernel_size,
name=f"{name}_block_{i}",
fixed_arch=None if fixed_arch is None else fixed_arch[i],
no_post_ln=no_post_ln,
conv_use_glu=conv_use_glu,
adaptive_scale=adaptive_scale,
)
self.conformer_blocks.append(conformer_block)
def call(self, inputs, length, training=False, mask=None, **kwargs):
# input with shape [B, T, V1, V2]
outputs = self.conv_subsampling(inputs, training=training)
outputs = self.linear(outputs, training=training)
padding, kernel_size, stride, num_subsample = 1, 3, 2, 2 #TODO: set these in __init__
for _ in range(num_subsample):
length = tf.math.ceil((tf.cast(length, tf.float32) + (2 * padding) - (kernel_size - 1) - 1) / float(stride) + 1)
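        # illustrative example: with padding=1, kernel_size=3, stride=2, an input of length 100
        # becomes ceil((100 + 2 - 2 - 1) / 2 + 1) = 51 frames after one subsampling conv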
pad_mask = tf.sequence_mask(length, maxlen=tf.shape(outputs)[1])
mask = tf.expand_dims(pad_mask, 1)
mask = tf.repeat(mask, repeats=[tf.shape(mask)[-1]], axis=1)
mask = tf.math.logical_and(tf.transpose(mask, perm=[0, 2, 1]), mask)
pe = self.pe(outputs)
outputs = outputs * self.xscale
outputs = self.do(outputs, training=training)
pe_org, mask_org = pe, mask
recover_activations = []
index = 0 # index to point the queues for pe, recover activations, etc.
outputs = self.pre_ln(outputs, training=training)
for i, cblock in enumerate(self.conformer_blocks):
if self.time_reduce is not None and i in self.reduce_idx:
recover_activations.append((outputs, mask, pad_mask, pe))
outputs, mask, pad_mask = self.time_reduction_layers[index](
outputs, training=training, mask=mask, pad_mask=pad_mask, **kwargs,
)
pe = self.pe_time_reduction[index](outputs)
index += 1
if self.time_reduce == 'recover' and i in self.recover_idx:
index -= 1
recover_activation, mask, pad_mask, pe = recover_activations[index]
B, T, E = shape_util.shape_list(outputs)
outputs = tf.repeat(outputs, [self.reduce_stride] * T, axis=1)
B, T, E = shape_util.shape_list(recover_activation)
outputs = self.time_recover_layers[index](outputs[:, :T, :], training=training)
outputs = outputs + recover_activation
outputs = cblock([outputs, pe], training=training, mask=mask, pad_mask=pad_mask, **kwargs)
return outputs
| 23,551 | 39.191126 | 124 | py |
Squeezeformer | Squeezeformer-main/src/models/ctc.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
import numpy as np
import tensorflow as tf
from .base_model import BaseModel
from ..featurizers.speech_featurizers import TFSpeechFeaturizer
from ..featurizers.text_featurizers import TextFeaturizer
from ..utils import math_util, shape_util, data_util
from ..losses.ctc_loss import CtcLoss
logger = tf.get_logger()
class CtcModel(BaseModel):
def __init__(
self,
encoder: tf.keras.Model,
decoder: Union[tf.keras.Model, tf.keras.layers.Layer] = None,
augmentation: tf.keras.Model = None,
vocabulary_size: int = None,
**kwargs,
):
super().__init__(**kwargs)
self.encoder = encoder
if decoder is None:
assert vocabulary_size is not None, "vocabulary_size must be set"
self.decoder = tf.keras.layers.Dense(units=vocabulary_size, name=f"{self.name}_logits")
else:
self.decoder = decoder
self.augmentation = augmentation
self.time_reduction_factor = 1
def make(self, input_shape, batch_size=None):
inputs = tf.keras.Input(input_shape, batch_size=batch_size, dtype=tf.float32)
inputs_length = tf.keras.Input(shape=[], batch_size=batch_size, dtype=tf.int32)
self(
data_util.create_inputs(
inputs=inputs,
inputs_length=inputs_length
),
training=False
)
def compile(self, optimizer, blank=0, run_eagerly=None, **kwargs):
loss = CtcLoss(blank=blank)
super().compile(loss=loss, optimizer=optimizer, run_eagerly=run_eagerly, **kwargs)
def add_featurizers(
self,
speech_featurizer: TFSpeechFeaturizer,
text_featurizer: TextFeaturizer,
):
self.speech_featurizer = speech_featurizer
self.text_featurizer = text_featurizer
def call(self, inputs, training=False, **kwargs):
x, x_length = inputs["inputs"], inputs["inputs_length"]
if training and self.augmentation is not None:
x = self.augmentation(x, x_length)
logits = self.encoder(x, x_length, training=training, **kwargs)
logits = self.decoder(logits, training=training, **kwargs)
return data_util.create_logits(
logits=logits,
logits_length=math_util.get_reduced_length(x_length, self.time_reduction_factor)
)
# -------------------------------- GREEDY -------------------------------------
@tf.function
def recognize_from_logits(self, logits: tf.Tensor, lengths: tf.Tensor):
probs = tf.nn.softmax(logits)
        # blank is at the first index of `probs`, whereas `ctc_greedy_decoder` expects it at the last index.
        # therefore, we move the first column to the last column to be compatible with `ctc_greedy_decoder`
probs = tf.concat([probs[:, :, 1:], tf.expand_dims(probs[:, :, 0], -1)], axis=-1)
def _map(elems): return tf.numpy_function(self._perform_greedy, inp=[elems[0], elems[1]], Tout=tf.string)
return tf.map_fn(_map, (probs, lengths), fn_output_signature=tf.TensorSpec([], dtype=tf.string))
@tf.function
def recognize(self, inputs: Dict[str, tf.Tensor]):
logits = self(inputs, training=False)
probs = tf.nn.softmax(logits["logits"])
        # move the first index (blank token) to the last index
        # for compatibility with the ctc_decoders library
probs = tf.concat([probs[:, :, 1:], tf.expand_dims(probs[:, :, 0], -1)], axis=-1)
lengths = logits["logits_length"]
def map_fn(elem): return tf.numpy_function(self._perform_greedy, inp=[elem[0], elem[1]], Tout=tf.string)
return tf.map_fn(map_fn, [probs, lengths], fn_output_signature=tf.TensorSpec([], dtype=tf.string))
def _perform_greedy(self, probs: np.ndarray, length):
from ctc_decoders import ctc_greedy_decoder
decoded = ctc_greedy_decoder(probs[:length], vocabulary=self.text_featurizer.non_blank_tokens)
return tf.convert_to_tensor(decoded, dtype=tf.string)
# -------------------------------- BEAM SEARCH -------------------------------------
@tf.function
def recognize_beam(self, inputs: Dict[str, tf.Tensor], lm: bool = False):
logits = self(inputs, training=False)
probs = tf.nn.softmax(logits["logits"])
def map_fn(prob): return tf.numpy_function(self._perform_beam_search, inp=[prob, lm], Tout=tf.string)
return tf.map_fn(map_fn, probs, dtype=tf.string)
def _perform_beam_search(self, probs: np.ndarray, lm: bool = False):
from ctc_decoders import ctc_beam_search_decoder
decoded = ctc_beam_search_decoder(
probs_seq=probs,
vocabulary=self.text_featurizer.non_blank_tokens,
beam_size=self.text_featurizer.decoder_config.beam_width,
ext_scoring_func=self.text_featurizer.scorer if lm else None
)
decoded = decoded[0][-1]
return tf.convert_to_tensor(decoded, dtype=tf.string)
| 5,608 | 41.172932 | 113 | py |
Squeezeformer | Squeezeformer-main/src/models/conformer.py | import tensorflow as tf
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.framework import ops
from tensorflow.python.eager import def_function
from tensorflow.python.ops import math_ops
from .ctc import CtcModel
from .conformer_encoder import ConformerEncoder
from ..augmentations.augmentation import SpecAugmentation
from ..utils import math_util
from ..utils.training_utils import (
_minimum_control_deps,
reduce_per_replica,
write_scalar_summaries,
)
class ConformerCtc(CtcModel):
def __init__(
self,
vocabulary_size: int,
encoder_subsampling: dict,
encoder_dmodel: int = 144,
encoder_num_blocks: int = 16,
encoder_head_size: int = 36,
encoder_num_heads: int = 4,
encoder_mha_type: str = "relmha",
encoder_kernel_size: int = 32,
encoder_fc_factor: float = 0.5,
encoder_dropout: float = 0,
encoder_time_reduce_idx : list = None,
encoder_time_recover_idx : list = None,
encoder_conv_use_glu: bool = False,
encoder_ds_subsample: bool = False,
encoder_no_post_ln: bool = False,
encoder_adaptive_scale: bool = False,
encoder_fixed_arch: list = None,
augmentation_config=None,
name: str = "conformer",
**kwargs,
) -> object:
assert encoder_dmodel == encoder_num_heads * encoder_head_size
        if encoder_fixed_arch is not None and not isinstance(encoder_fixed_arch[0], list):
            encoder_fixed_arch = [encoder_fixed_arch] * encoder_num_blocks
super().__init__(
encoder=ConformerEncoder(
subsampling=encoder_subsampling,
dmodel=encoder_dmodel,
num_blocks=encoder_num_blocks,
head_size=encoder_head_size,
num_heads=encoder_num_heads,
mha_type=encoder_mha_type,
kernel_size=encoder_kernel_size,
fc_factor=encoder_fc_factor,
dropout=encoder_dropout,
time_reduce_idx=encoder_time_reduce_idx,
time_recover_idx=encoder_time_recover_idx,
conv_use_glu=encoder_conv_use_glu,
ds_subsample=encoder_ds_subsample,
no_post_ln=encoder_no_post_ln,
adaptive_scale=encoder_adaptive_scale,
fixed_arch=encoder_fixed_arch,
name=f"{name}_encoder",
),
decoder=tf.keras.layers.Conv1D(
filters=vocabulary_size, kernel_size=1,
strides=1, padding="same",
name=f"{name}_logits"
),
augmentation = SpecAugmentation(
num_freq_masks=augmentation_config['freq_masking']['num_masks'],
freq_mask_len=augmentation_config['freq_masking']['mask_factor'],
num_time_masks=augmentation_config['time_masking']['num_masks'],
time_mask_prop=augmentation_config['time_masking']['p_upperbound'],
name=f"{name}_specaug"
) if augmentation_config is not None else None,
vocabulary_size=vocabulary_size,
name=name,
**kwargs
)
self.time_reduction_factor = self.encoder.conv_subsampling.time_reduction_factor
self.dmodel = encoder_dmodel
# The following functions override the original function
# in order to gather the outputs from multiple TPU cores
def make_train_function(self):
if self.train_function is not None:
return self.train_function
def step_function(model, iterator):
"""Runs a single training step."""
def run_step(data):
outputs = model.train_step(data)
# Ensure counter is updated only if `train_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
model._train_counter.assign_add(1) # pylint: disable=protected-access
return outputs
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(outputs, self.distribute_strategy)
write_scalar_summaries(outputs, step=model._train_counter) # pylint: disable=protected-access
return outputs
if self._steps_per_execution.numpy().item() == 1:
def train_function(iterator):
"""Runs a training execution with one step."""
return step_function(self, iterator)
else:
def train_function(iterator):
"""Runs a training execution with multiple steps."""
for _ in math_ops.range(self._steps_per_execution):
outputs = step_function(self, iterator)
return outputs
if not self.run_eagerly:
train_function = def_function.function(
train_function, experimental_relax_shapes=True)
self.train_function = train_function
if self._cluster_coordinator:
self.train_function = lambda iterator: self._cluster_coordinator.schedule( # pylint: disable=g-long-lambda
train_function, args=(iterator,))
return self.train_function
def make_test_function(self):
if self.test_function is not None:
return self.test_function
def step_function(model, iterator):
"""Runs a single evaluation step."""
def run_step(data):
outputs = model.test_step(data)
# Ensure counter is updated only if `test_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
model._test_counter.assign_add(1) # pylint: disable=protected-access
return outputs
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(outputs, self.distribute_strategy)
return outputs
if self._steps_per_execution.numpy().item() == 1:
def test_function(iterator):
"""Runs an evaluation execution with one step."""
return step_function(self, iterator)
else:
def test_function(iterator):
"""Runs an evaluation execution with multiple steps."""
for _ in math_ops.range(self._steps_per_execution):
outputs = step_function(self, iterator)
return outputs
if not self.run_eagerly:
test_function = def_function.function(test_function, experimental_relax_shapes=True)
self.test_function = test_function
if self._cluster_coordinator:
self.test_function = lambda iterator: self._cluster_coordinator.schedule( # pylint: disable=g-long-lambda
test_function, args=(iterator,))
return self.test_function
class ConformerCtcAccumulate(ConformerCtc):
def __init__(self, n_gradients: int = 1, **kwargs) -> object:
super().__init__(**kwargs)
self.time_reduction_factor = self.encoder.conv_subsampling.time_reduction_factor
self.n_gradients = tf.constant(n_gradients, dtype=tf.int32, name="conformer/num_accumulated_gradients")
self.n_acum_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="conformer/accumulate_step")
def make(self, input_shape, batch_size=None):
super().make(input_shape, batch_size)
self.gradient_accumulation = [
tf.Variable(tf.zeros_like(v, dtype=tf.float32), trainable=False, name=f"{v.name}/cached_accumulated_gradient") for v in self.trainable_variables
]
def train_step(self, batch):
"""
Args:
batch ([tf.Tensor]): a batch of training data
Returns:
            Dict[tf.Tensor]: a dict of training metrics keyed by metric name
"""
self.n_acum_step.assign_add(1)
inputs, y_true = batch
loss, y_pred, gradients = self.gradient_step(inputs, y_true)
for i in range(len(self.gradient_accumulation)):
self.gradient_accumulation[i].assign_add(gradients[i] / tf.cast(self.n_gradients, tf.float32))
tf.cond(tf.equal(self.n_acum_step, self.n_gradients), self.apply_accu_gradients, lambda: None)
self._metrics["loss"].update_state(loss)
if 'WER' in self._metrics:
self._metrics['WER'].update_state(y_true, y_pred)
return {m.name: m.result() for m in self.metrics}
def apply_accu_gradients(self):
# Apply accumulated gradients
self.optimizer.apply_gradients(zip(self.gradient_accumulation,
self.trainable_variables))
# Reset
self.n_acum_step.assign(0)
for i in range(len(self.gradient_accumulation)):
self.gradient_accumulation[i].assign(
tf.zeros_like(self.trainable_variables[i], dtype=tf.float32)
)
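
    # Explanatory note (not original code): gradients are averaged over n_gradients batches
    # and applied once, so the effective batch size is batch_size * n_gradients while peak
    # activation memory stays at that of a single batch.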
| 9,029 | 39.493274 | 160 | py |
Squeezeformer | Squeezeformer-main/src/models/__init__.py | 0 | 0 | 0 | py |
|
Squeezeformer | Squeezeformer-main/src/models/submodules/multihead_attention.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import tensorflow as tf
from src.utils import shape_util
logger = tf.get_logger()
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(
self,
num_heads,
head_size,
output_size: int = None,
dropout: float = 0.0,
use_projection_bias: bool = True,
return_attn_coef: bool = False,
**kwargs,
):
super(MultiHeadAttention, self).__init__(**kwargs)
if output_size is not None and output_size < 1:
raise ValueError("output_size must be a positive number")
self.head_size = head_size
self.num_heads = num_heads
self.output_size = output_size
self.use_projection_bias = use_projection_bias
self.return_attn_coef = return_attn_coef
self.dropout = tf.keras.layers.Dropout(dropout, name="dropout")
        self._dropout_rate = dropout
def build(self, input_shape):
num_query_features = input_shape[0][-1]
num_key_features = input_shape[1][-1]
num_value_features = (
input_shape[2][-1] if len(input_shape) > 2 else num_key_features
)
output_size = (
self.output_size if self.output_size is not None else num_value_features
)
input_max = (self.num_heads * self.head_size) ** -0.5
self.query = tf.keras.layers.Dense(
self.num_heads * self.head_size, activation=None,
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
self.key = tf.keras.layers.Dense(
self.num_heads * self.head_size, activation=None,
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
self.value = tf.keras.layers.Dense(
self.num_heads * self.head_size, activation=None,
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
self.projection_kernel = self.add_weight(
name="projection_kernel",
shape=[self.num_heads, self.head_size, output_size],
initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
if self.use_projection_bias:
self.projection_bias = self.add_weight(
name="projection_bias",
shape=[output_size],
initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
else:
self.projection_bias = None
def call_qkv(self, query, key, value, training=False):
# verify shapes
if key.shape[-2] != value.shape[-2]:
            raise ValueError(
                "the number of elements in 'key' must be equal to "
                "the number of elements in 'value'"
            )
# Linear transformations
query = self.query(query)
B, T, E = shape_util.shape_list(query)
query = tf.reshape(query, [B, T, self.num_heads, self.head_size])
key = self.key(key)
B, T, E = shape_util.shape_list(key)
key = tf.reshape(key, [B, T, self.num_heads, self.head_size])
value = self.value(value)
B, T, E = shape_util.shape_list(value)
value = tf.reshape(value, [B, T, self.num_heads, self.head_size])
return query, key, value
def call_attention(self, query, key, value, logits, training=False, mask=None):
# mask = attention mask with shape [B, Tquery, Tkey] with 1 is for positions we want to attend, 0 for masked
if mask is not None:
if len(mask.shape) < 2:
raise ValueError("'mask' must have at least 2 dimensions")
if query.shape[-3] != mask.shape[-2]:
raise ValueError(
"mask's second to last dimension must be equal to "
"the number of elements in 'query'"
)
if key.shape[-3] != mask.shape[-1]:
raise ValueError(
"mask's last dimension must be equal to the number of elements in 'key'"
)
# apply mask
if mask is not None:
mask = tf.cast(mask, tf.float32)
# possibly expand on the head dimension so broadcasting works
if len(mask.shape) != len(logits.shape):
mask = tf.expand_dims(mask, -3)
logits += -10e9 * (1.0 - mask)
attn_coef = tf.nn.softmax(logits)
# attention dropout
attn_coef_dropout = self.dropout(attn_coef, training=training)
# attention * value
multihead_output = tf.einsum("...HNM,...MHI->...NHI", attn_coef_dropout, value)
# Run the outputs through another linear projection layer. Recombining heads
# is automatically done.
output = tf.einsum("...NHI,HIO->...NO", multihead_output, self.projection_kernel)
if self.projection_bias is not None:
output += self.projection_bias
return output, attn_coef
def call(self, inputs, training=False, mask=None, **kwargs):
query, key, value = inputs
query, key, value = self.call_qkv(query, key, value, training=training)
# Scale dot-product, doing the division to either query or key
# instead of their product saves some computation
depth = tf.constant(self.head_size, dtype=tf.float32)
query /= tf.sqrt(depth)
# Calculate dot product attention
logits = tf.einsum("...NHO,...MHO->...HNM", query, key)
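        # shape note (explanatory comment): query and key are [B, T, num_heads, head_size],
        # so this einsum yields attention logits of shape [B, num_heads, T_query, T_key]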
output, attn_coef = self.call_attention(query, key, value, logits,
training=training, mask=mask)
if self.return_attn_coef:
return output, attn_coef
else:
return output
def compute_output_shape(self, input_shape):
num_value_features = (
input_shape[2][-1] if len(input_shape) > 2 else input_shape[1][-1]
)
output_size = (
self.output_size if self.output_size is not None else num_value_features
)
output_shape = input_shape[0][:-1] + (output_size,)
if self.return_attn_coef:
num_query_elements = input_shape[0][-2]
num_key_elements = input_shape[1][-2]
attn_coef_shape = input_shape[0][:-2] + (
self.num_heads,
num_query_elements,
num_key_elements,
)
return output_shape, attn_coef_shape
else:
return output_shape
def get_config(self):
config = super().get_config()
config.update(
head_size=self.head_size,
num_heads=self.num_heads,
output_size=self.output_size,
            dropout=self._dropout_rate,
use_projection_bias=self.use_projection_bias,
return_attn_coef=self.return_attn_coef,
)
return config
class RelPositionMultiHeadAttention(MultiHeadAttention):
def __init__(self, kernel_sizes=None, strides=None, **kwargs):
super(RelPositionMultiHeadAttention, self).__init__(**kwargs)
def build(self, input_shape):
num_pos_features = input_shape[-1][-1]
input_max = (self.num_heads * self.head_size) ** -0.5
self.pos_kernel = self.add_weight(
name="pos_kernel",
shape=[self.num_heads, num_pos_features, self.head_size],
initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
self.pos_bias_u = self.add_weight(
name="pos_bias_u",
shape=[self.num_heads, self.head_size],
initializer=tf.keras.initializers.Zeros(),
)
self.pos_bias_v = self.add_weight(
name="pos_bias_v",
shape=[self.num_heads, self.head_size],
initializer=tf.keras.initializers.Zeros(),
)
super(RelPositionMultiHeadAttention, self).build(input_shape[:-1])
@staticmethod
def relative_shift(x):
x_shape = tf.shape(x)
x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
x = tf.reshape(x, [x_shape[0], x_shape[1], x_shape[3] + 1, x_shape[2]])
x = tf.reshape(x[:, :, 1:, :], x_shape)
return x
def call(self, inputs, training=False, mask=None, **kwargs):
query, key, value, pos = inputs
query, key, value = self.call_qkv(query, key, value, training=training)
pos = tf.einsum("...MI,HIO->...MHO", pos, self.pos_kernel)
query_with_u = query + self.pos_bias_u
query_with_v = query + self.pos_bias_v
logits_with_u = tf.einsum("...NHO,...MHO->...HNM", query_with_u, key)
logits_with_v = tf.einsum("...NHO,...MHO->...HNM", query_with_v, pos)
logits_with_v = self.relative_shift(logits_with_v)
logits = logits_with_u + logits_with_v[:, :, :, :tf.shape(logits_with_u)[3]]
depth = tf.constant(self.head_size, dtype=tf.float32)
logits /= tf.sqrt(depth)
output, attn_coef = self.call_attention(query, key, value, logits,
training=training, mask=mask)
if self.return_attn_coef:
return output, attn_coef
else:
return output
| 10,251 | 37.397004 | 116 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/time_reduction.py | import tensorflow as tf
from ...utils import shape_util
class TimeReductionLayer(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
output_dim,
kernel_size=5,
stride=2,
dropout=0.0,
name="time_reduction",
**kwargs,
):
super(TimeReductionLayer, self).__init__(name=name, **kwargs)
self.stride = stride
self.kernel_size = kernel_size
dw_max = kernel_size ** -0.5
pw_max = input_dim ** -0.5
self.dw_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(kernel_size, 1), strides=self.stride,
padding="valid", name=f"{name}_dw_conv",
depth_multiplier=1,
depthwise_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
)
self.pw_conv = tf.keras.layers.Conv2D(
filters=output_dim, kernel_size=1, strides=1,
padding="valid", name=f"{name}_pw_conv_2",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-pw_max, maxval=pw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-pw_max, maxval=pw_max),
)
def call(self, inputs, training=False, mask=None, pad_mask=None, **kwargs):
B, T, E = shape_util.shape_list(inputs)
outputs = tf.reshape(inputs, [B, T, 1, E])
_pad_mask = tf.expand_dims(tf.expand_dims(pad_mask, -1), -1)
outputs = outputs * tf.cast(_pad_mask, "float32")
padding = max(0, self.kernel_size - self.stride)
outputs = tf.pad(outputs, [[0, 0], [0, padding], [0, 0], [0, 0]])
outputs = self.dw_conv(outputs, training=training)
outputs = self.pw_conv(outputs, training=training)
B, T, _, E = shape_util.shape_list(outputs)
outputs = tf.reshape(outputs, [B, T, E])
mask = mask[:, ::self.stride, ::self.stride]
pad_mask = pad_mask[:, ::self.stride]
_, L = shape_util.shape_list(pad_mask)
outputs = tf.pad(outputs, [[0, 0], [0, L - T], [0, 0]])
return outputs, mask, pad_mask
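
    # Shape sketch (explanatory comment, hypothetical numbers): with stride 2 and kernel 5,
    # a [B, 100, E] input is downsampled to floor((100 + 3 - 5) / 2) + 1 = 50 frames, and the
    # output is padded back to the length of the strided pad_mask so downstream shapes match.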
| 2,287 | 42.169811 | 101 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/subsampling.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from src.utils import shape_util, math_util
logger = tf.get_logger()
class Conv2dSubsampling(tf.keras.layers.Layer):
def __init__(
self,
filters: int,
strides: int = 2,
kernel_size: int = 3,
ds: bool = False,
name="Conv2dSubsampling",
**kwargs,
):
super(Conv2dSubsampling, self).__init__(name=name, **kwargs)
self.strides = strides
self.kernel_size = kernel_size
        assert self.strides == 2 and self.kernel_size == 3  # strides and kernel size are fixed to these values for simplicity
conv1_max = kernel_size ** -1
conv2_max = (kernel_size ** 2 * filters) ** -0.5
self.conv1 = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size,
strides=strides, padding="valid", name=f"{name}_1",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-conv1_max, maxval=conv1_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-conv1_max, maxval=conv1_max),
)
self.ds = ds
if not ds:
logger.info("Subsampling with full conv")
self.conv2 = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size,
strides=strides, padding="valid", name=f"{name}_2",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-conv2_max, maxval=conv2_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-conv2_max, maxval=conv2_max),
)
self.time_reduction_factor = self.conv1.strides[0] + self.conv2.strides[0]
else:
logger.info("Subsampling with DS conv")
dw_max = (kernel_size ** 2) ** -0.5
pw_max = filters ** -0.5
self.dw_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(kernel_size, kernel_size), strides=strides,
padding="valid", name=f"{name}_2_dw",
depth_multiplier=1,
depthwise_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
)
self.pw_conv = tf.keras.layers.Conv2D(
filters=filters, kernel_size=1, strides=1,
padding="valid", name=f"{name}_2_pw",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-pw_max, maxval=pw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-pw_max, maxval=pw_max),
)
self.time_reduction_factor = self.conv1.strides[0] + self.dw_conv.strides[0]
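        # note (explanatory comment): two stride-2 convolutions reduce the time axis by a
        # factor of 2 * 2 = 4; since both strides are fixed to 2 here, the sum above happens
        # to equal the product, so time_reduction_factor is 4 either way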
def call(self, inputs, training=False, **kwargs):
_, L, H, _ = shape_util.shape_list(inputs)
assert H == 80
outputs = tf.pad(inputs, [[0, 0], [0, 1], [0, 1], [0, 0]])
outputs = self.conv1(outputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = tf.pad(outputs, [[0, 0], [0, 1], [0, 1], [0, 0]])
if not self.ds:
outputs = self.conv2(outputs, training=training)
else:
outputs = self.dw_conv(outputs, training=training)
outputs = self.pw_conv(outputs, training=training)
outputs = tf.nn.relu(outputs)
_, L, H, _ = shape_util.shape_list(outputs)
assert H == 20
return math_util.merge_two_last_dims(outputs)
def get_config(self):
conf = super(Conv2dSubsampling, self).get_config()
conf.update(self.conv1.get_config())
        if not self.ds:
            conf.update(self.conv2.get_config())
        else:
            conf.update(self.dw_conv.get_config())
            conf.update(self.pw_conv.get_config())
        return conf
| 4,228 | 43.989362 | 108 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/glu.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class GLU(tf.keras.layers.Layer):
def __init__(self,
axis=-1,
name="glu_activation",
**kwargs):
super(GLU, self).__init__(name=name, **kwargs)
self.axis = axis
def call(self, inputs, **kwargs):
a, b = tf.split(inputs, 2, axis=self.axis)
b = tf.nn.sigmoid(b)
return tf.multiply(a, b)
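
    # Example (illustrative): an input of shape [..., 2 * d] is split into halves a and b of
    # shape [..., d]; the gated output is a * sigmoid(b), halving the channel dimension.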
def get_config(self):
conf = super(GLU, self).get_config()
conf.update({"axis": self.axis})
return conf
| 1,132 | 31.371429 | 74 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/positional_encoding.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import tensorflow as tf
from src.utils.shape_util import shape_list
class PositionalEncoding(tf.keras.layers.Layer):
'''
    Same positional encoding method as the NeMo library
'''
def __init__(self, d_model, max_len=5000, name="positional_encoding_nemo", **kwargs):
super().__init__(trainable=False, name=name, **kwargs)
self.max_len = max_len
positions = tf.expand_dims(tf.range(self.max_len - 1, -max_len, -1.0, dtype=tf.float32), axis=1)
pos_length = tf.shape(positions)[0]
pe = np.zeros([pos_length, d_model], 'float32')
div_term = np.exp(
tf.range(0, d_model, 2, dtype=tf.float32) * -(math.log(10000.0) / d_model)
)
pe[:, 0::2] = np.sin(positions * div_term)
pe[:, 1::2] = np.cos(positions * div_term)
pe = tf.convert_to_tensor(pe)
self.pe = tf.expand_dims(pe, 0)
def call(self, inputs, **kwargs):
# inputs shape [B, T, V]
_, length, dmodel = shape_list(inputs)
center_pos = tf.shape(self.pe)[1] // 2
start_pos = center_pos - length + 1
end_pos = center_pos + length
pos_emb = self.pe[:, start_pos:end_pos]
return tf.cast(pos_emb, dtype=inputs.dtype)
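
    # Slicing sketch (explanatory comment): self.pe holds encodings for relative positions
    # max_len - 1 down to -(max_len - 1); for an input of length T, the slice
    # [center - T + 1 : center + T] returns the 2T - 1 encodings for offsets T - 1 ... -(T - 1),
    # as required by relative multi-head attention.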
def get_config(self):
conf = super().get_config()
return conf.update({"max_len": self.max_len})
| 1,965 | 38.32 | 104 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/__init__.py | 0 | 0 | 0 | py |
|
Squeezeformer | Squeezeformer-main/src/datasets/asr_dataset.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import abc
from typing import Union
import tqdm
import numpy as np
import tensorflow as tf
from ..featurizers.speech_featurizers import (
load_and_convert_to_wav,
read_raw_audio,
tf_read_raw_audio,
TFSpeechFeaturizer
)
from ..featurizers.text_featurizers import TextFeaturizer
from ..utils import feature_util, file_util, math_util, data_util
logger = tf.get_logger()
BUFFER_SIZE = 10000
AUTOTUNE = tf.data.experimental.AUTOTUNE
class BaseDataset(metaclass=abc.ABCMeta):
""" Based dataset for all models """
def __init__(
self,
data_paths: list,
cache: bool = False,
shuffle: bool = False,
buffer_size: int = BUFFER_SIZE,
indefinite: bool = False,
drop_remainder: bool = True,
stage: str = "train",
**kwargs,
):
self.data_paths = data_paths or []
if not isinstance(self.data_paths, list):
raise ValueError('data_paths must be a list of string paths')
self.cache = cache # whether to cache transformed dataset to memory
self.shuffle = shuffle # whether to shuffle tf.data.Dataset
if buffer_size <= 0 and shuffle:
raise ValueError("buffer_size must be positive when shuffle is on")
self.buffer_size = buffer_size # shuffle buffer size
self.stage = stage # for defining tfrecords files
self.drop_remainder = drop_remainder # whether to drop remainder for multi gpu training
        self.indefinite = indefinite  # whether to repeat the dataset indefinitely to avoid a final partial batch
self.total_steps = None # for better training visualization
@abc.abstractmethod
def parse(self, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def create(self, batch_size):
raise NotImplementedError()
class ASRDataset(BaseDataset):
""" Dataset for ASR using Generator """
def __init__(
self,
stage: str,
speech_featurizer: TFSpeechFeaturizer,
text_featurizer: TextFeaturizer,
data_paths: list,
cache: bool = False,
shuffle: bool = False,
indefinite: bool = False,
drop_remainder: bool = True,
buffer_size: int = BUFFER_SIZE,
input_padding_length: int = 3300,
label_padding_length: int = 530,
**kwargs,
):
super().__init__(
data_paths=data_paths,
cache=cache, shuffle=shuffle, stage=stage, buffer_size=buffer_size,
drop_remainder=drop_remainder, indefinite=indefinite
)
self.speech_featurizer = speech_featurizer
self.text_featurizer = text_featurizer
self.input_padding_length = input_padding_length
self.label_padding_length = label_padding_length
# -------------------------------- ENTRIES -------------------------------------
def read_entries(self):
if hasattr(self, "entries") and len(self.entries) > 0: return
self.entries = []
for file_path in self.data_paths:
logger.info(f"Reading {file_path} ...")
with tf.io.gfile.GFile(file_path, "r") as f:
temp_lines = f.read().splitlines()
# Skip the header of tsv file
self.entries += temp_lines[1:]
        # The file is "\t" separated
self.entries = [line.split("\t", 2) for line in self.entries]
for i, line in enumerate(self.entries):
self.entries[i][-1] = " ".join([str(x) for x in self.text_featurizer.extract(line[-1]).numpy()])
self.entries = np.array(self.entries)
if self.shuffle: np.random.shuffle(self.entries) # Mix transcripts.tsv
self.total_steps = len(self.entries)
# -------------------------------- LOAD AND PREPROCESS -------------------------------------
def generator(self):
for path, _, indices in self.entries:
audio = load_and_convert_to_wav(path).numpy()
yield bytes(path, "utf-8"), audio, bytes(indices, "utf-8")
def tf_preprocess(self, path: tf.Tensor, audio: tf.Tensor, indices: tf.Tensor):
with tf.device("/CPU:0"):
signal = tf_read_raw_audio(audio, self.speech_featurizer.sample_rate)
features = self.speech_featurizer.tf_extract(signal)
input_length = tf.cast(tf.shape(features)[0], tf.int32)
label = tf.strings.to_number(tf.strings.split(indices), out_type=tf.int32)
label_length = tf.cast(tf.shape(label)[0], tf.int32)
prediction = self.text_featurizer.prepand_blank(label)
prediction_length = tf.cast(tf.shape(prediction)[0], tf.int32)
return path, features, input_length, label, label_length, prediction, prediction_length
def parse(self, path: tf.Tensor, audio: tf.Tensor, indices: tf.Tensor):
"""
Returns:
path, features, input_lengths, labels, label_lengths, pred_inp
"""
data = self.tf_preprocess(path, audio, indices)
_, features, input_length, label, label_length, prediction, prediction_length = data
return (
data_util.create_inputs(
inputs=features,
inputs_length=input_length,
predictions=prediction,
predictions_length=prediction_length
),
data_util.create_labels(
labels=label,
labels_length=label_length
)
)
def process(self, dataset, batch_size):
dataset = dataset.map(self.parse, num_parallel_calls=AUTOTUNE)
self.total_steps = math_util.get_num_batches(self.total_steps, batch_size, drop_remainders=self.drop_remainder)
if self.cache:
dataset = dataset.cache()
if self.shuffle:
dataset = dataset.shuffle(self.buffer_size, reshuffle_each_iteration=True)
if self.indefinite and self.total_steps:
dataset = dataset.repeat()
dataset = dataset.padded_batch(
batch_size=batch_size,
padded_shapes=(
data_util.create_inputs(
inputs=tf.TensorShape([self.input_padding_length, 80, 1]),
inputs_length=tf.TensorShape([]),
predictions=tf.TensorShape([self.label_padding_length]),
predictions_length=tf.TensorShape([])
),
data_util.create_labels(
labels=tf.TensorShape([self.label_padding_length]),
labels_length=tf.TensorShape([])
),
),
padding_values=(
data_util.create_inputs(
inputs=0.0,
inputs_length=0,
predictions=self.text_featurizer.blank,
predictions_length=0
),
data_util.create_labels(
labels=self.text_featurizer.blank,
labels_length=0
)
),
drop_remainder=self.drop_remainder
)
# PREFETCH to improve speed of input length
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def create(self, batch_size: int):
self.read_entries()
        if not self.total_steps or self.total_steps == 0:
            logger.warning("Couldn't create dataset: no entries were found")
            return None
dataset = tf.data.Dataset.from_generator(
self.generator,
output_types=(tf.string, tf.string, tf.string),
output_shapes=(tf.TensorShape([]), tf.TensorShape([]), tf.TensorShape([]))
)
return self.process(dataset, batch_size)
class ASRSliceDataset(ASRDataset):
""" Dataset for ASR using Slice """
@staticmethod
def load(record: tf.Tensor):
def fn(path: bytes): return load_and_convert_to_wav(path.decode("utf-8")).numpy()
audio = tf.numpy_function(fn, inp=[record[0]], Tout=tf.string)
return record[0], audio, record[2]
def create(self, batch_size: int):
self.read_entries()
if not self.total_steps or self.total_steps == 0: return None
dataset = tf.data.Dataset.from_tensor_slices(self.entries)
dataset = dataset.map(self.load, num_parallel_calls=AUTOTUNE)
return self.process(dataset, batch_size)
def preprocess_dataset(self, tfrecord_path, shard_size=0, max_len=None):
self.read_entries()
if not self.total_steps or self.total_steps == 0: return None
logger.info(f"Preprocess dataset")
dataset = tf.data.Dataset.from_tensor_slices(self.entries)
dataset = dataset.map(self.load, num_parallel_calls=AUTOTUNE)
self.create_preprocessed_tfrecord(dataset, tfrecord_path, shard_size, max_len)
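# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hypothetical wiring of the slice dataset; the featurizer config
# and the .tsv path below are illustrative assumptions, not project defaults.
#
# speech_featurizer = TFSpeechFeaturizer({"sample_rate": 16000})
# text_featurizer = ...  # e.g. a SentencePieceFeaturizer from text_featurizers
# train_dataset = ASRSliceDataset(
#     stage="train",
#     speech_featurizer=speech_featurizer,
#     text_featurizer=text_featurizer,
#     data_paths=["/path/to/train.tsv"],  # rows: PATH \t DURATION \t TRANSCRIPT
#     shuffle=True,
# )
# tf_dataset = train_dataset.create(batch_size=8)  # batched, padded tf.data.Dataset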
| 9,392 | 37.338776 | 125 | py |
Squeezeformer | Squeezeformer-main/src/datasets/__init__.py | 0 | 0 | 0 | py |
|
Squeezeformer | Squeezeformer-main/src/augmentations/augmentation.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from ..utils import shape_util
class SpecAugmentation(tf.keras.Model):
def __init__(
self,
num_freq_masks=2,
freq_mask_len=27,
num_time_masks=5,
time_mask_prop=0.05,
name='specaug',
**kwargs,
):
super(SpecAugmentation, self).__init__(name=name, **kwargs)
self.num_freq_masks = num_freq_masks
self.freq_mask_len = freq_mask_len
self.num_time_masks = num_time_masks
self.time_mask_prop = time_mask_prop
def time_mask(self, inputs, inputs_len):
time_max = inputs_len
B, T, F = tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2]
t = tf.random.uniform(shape=tf.shape(time_max), minval=0, maxval=self.time_mask_prop)
t = tf.cast(tf.cast(time_max, tf.dtypes.float32) * t, 'int32')
t0 = tf.random.uniform(shape=tf.shape(time_max), minval=0, maxval=1)
t0 = tf.cast(tf.cast(time_max - t, tf.dtypes.float32) * t0, 'int32')
t = tf.repeat(tf.reshape(t, (-1, 1)), T, axis=1)
t0 = tf.repeat(tf.reshape(t0, (-1, 1)), T, axis=1)
indices = tf.repeat(tf.reshape(tf.range(T), (1, -1)), B, axis=0)
left_mask = tf.cast(tf.math.greater_equal(indices, t0), 'float32')
right_mask = tf.cast(tf.math.less(indices, t0 + t), 'float32')
mask = 1.0 - left_mask * right_mask
masked_inputs = inputs * tf.reshape(mask, (B, T, 1, 1))
return masked_inputs
def frequency_mask(self, inputs, inputs_len):
B, T, F = tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2]
f = tf.random.uniform(shape=tf.shape(inputs_len), minval=0, maxval=self.freq_mask_len, dtype='int32')
f0 = tf.random.uniform(shape=tf.shape(inputs_len), minval=0, maxval=1)
f0 = tf.cast(tf.cast(F - f, tf.dtypes.float32) * f0, 'int32')
f = tf.repeat(tf.reshape(f, (-1, 1)), F, axis=1)
f0 = tf.repeat(tf.reshape(f0, (-1, 1)), F, axis=1)
indices = tf.repeat(tf.reshape(tf.range(F), (1, -1)), B, axis=0)
left_mask = tf.cast(tf.math.greater_equal(indices, f0), 'float32')
right_mask = tf.cast(tf.math.less(indices, f0 + f), 'float32')
mask = 1.0 - left_mask * right_mask
masked_inputs = inputs * tf.reshape(mask, (B, 1, F, 1))
return masked_inputs
@tf.function
def call(self, inputs, inputs_len):
masked_inputs = inputs
for _ in range(self.num_time_masks):
masked_inputs = self.time_mask(masked_inputs, inputs_len)
for _ in range(self.num_freq_masks):
masked_inputs = self.frequency_mask(masked_inputs, inputs_len)
return masked_inputs
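# --- Usage sketch (editor's addition; shapes are illustrative assumptions) ---
# specaug = SpecAugmentation(num_freq_masks=2, freq_mask_len=27,
#                            num_time_masks=5, time_mask_prop=0.05)
# features = tf.random.normal([4, 300, 80, 1])      # [batch, time, freq, 1]
# features_len = tf.constant([300, 280, 250, 300])  # valid frames per sample
# masked = specaug(features, features_len)          # same shape, with masks applied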
| 3,294 | 38.698795 | 109 | py |
Squeezeformer | Squeezeformer-main/src/augmentations/__init__.py | 0 | 0 | 0 | py |
|
Squeezeformer | Squeezeformer-main/src/configs/config.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from ..utils import file_util
class DatasetConfig:
def __init__(self, config: dict = None):
if not config: config = {}
self.stage = config.pop("stage", None)
self.data_paths = file_util.preprocess_paths(config.pop("data_paths", None))
self.tfrecords_dir = file_util.preprocess_paths(config.pop("tfrecords_dir", None), isdir=True)
self.tfrecords_shards = config.pop("tfrecords_shards", 16)
self.shuffle = config.pop("shuffle", False)
self.cache = config.pop("cache", False)
self.drop_remainder = config.pop("drop_remainder", True)
self.buffer_size = config.pop("buffer_size", 10000)
for k, v in config.items(): setattr(self, k, v)
class RunningConfig:
def __init__(self, config: dict = None):
if not config: config = {}
self.batch_size = config.pop("batch_size", 1)
self.accumulation_steps = config.pop("accumulation_steps", 1)
self.num_epochs = config.pop("num_epochs", 20)
for k, v in config.items(): setattr(self, k, v)
class LearningConfig:
def __init__(self, config: dict = None):
if not config: config = {}
self.train_dataset_config = DatasetConfig(config.pop("train_dataset_config", {}))
self.eval_dataset_config = DatasetConfig(config.pop("eval_dataset_config", {}))
self.test_dataset_config = DatasetConfig(config.pop("test_dataset_config", {}))
self.optimizer_config = config.pop("optimizer_config", {})
self.running_config = RunningConfig(config.pop("running_config", {}))
for k, v in config.items(): setattr(self, k, v)
class Config:
""" User config class for training, testing or infering """
def __init__(self, data: Union[str, dict]):
config = data if isinstance(data, dict) else file_util.load_yaml(file_util.preprocess_paths(data))
self.speech_config = config.pop("speech_config", {})
self.decoder_config = config.pop("decoder_config", {})
self.model_config = config.pop("model_config", {})
self.learning_config = LearningConfig(config.pop("learning_config", {}))
for k, v in config.items(): setattr(self, k, v)
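# --- Usage sketch (editor's addition; the YAML path is hypothetical) ---
# config = Config("/path/to/config.yml")
# batch_size = config.learning_config.running_config.batch_size
# train_paths = config.learning_config.train_dataset_config.data_paths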
| 2,796 | 43.396825 | 106 | py |
Squeezeformer | Squeezeformer-main/src/configs/__init__.py | 0 | 0 | 0 | py |
|
Squeezeformer | Squeezeformer-main/src/featurizers/speech_featurizers.py | # Copyright 2020 Huy Le Nguyen (@usimarit) and Huy Phan (@pquochuy)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import abc
import math
from typing import Union
import numpy as np
import librosa
import soundfile as sf
import tensorflow as tf
import tensorflow_io as tfio
from ..utils import math_util, env_util
def load_and_convert_to_wav(path: str) -> tf.Tensor:
wave, rate = librosa.load(os.path.expanduser(path), sr=None, mono=True)
return tf.audio.encode_wav(tf.expand_dims(wave, axis=-1), sample_rate=rate)
def read_raw_audio(audio: Union[str, bytes, np.ndarray], sample_rate=16000) -> np.ndarray:
if isinstance(audio, str):
wave, _ = librosa.load(os.path.expanduser(audio), sr=sample_rate, mono=True)
elif isinstance(audio, bytes):
wave, sr = sf.read(io.BytesIO(audio))
if wave.ndim > 1: wave = np.mean(wave, axis=-1)
wave = np.asfortranarray(wave)
if sr != sample_rate: wave = librosa.resample(wave, sr, sample_rate)
elif isinstance(audio, np.ndarray):
        if audio.ndim > 1: raise ValueError("input audio must be single channel")
return audio
else:
raise ValueError("input audio must be either a path or bytes")
return wave
def tf_read_raw_audio(audio: tf.Tensor, sample_rate=16000) -> tf.Tensor:
wave, rate = tf.audio.decode_wav(audio, desired_channels=1, desired_samples=-1)
if not env_util.has_devices("TPU"):
resampled = tfio.audio.resample(wave, rate_in=tf.cast(rate, dtype=tf.int64), rate_out=sample_rate)
return tf.reshape(resampled, shape=[-1]) # reshape for using tf.signal
return tf.reshape(wave, shape=[-1]) # reshape for using tf.signal
def slice_signal(signal, window_size, stride=0.5) -> np.ndarray:
""" Return windows of the given signal by sweeping in stride fractions of window """
assert signal.ndim == 1, signal.ndim
n_samples = signal.shape[0]
offset = int(window_size * stride)
slices = []
for beg_i, end_i in zip(range(0, n_samples, offset),
range(window_size, n_samples + offset,
offset)):
slice_ = signal[beg_i:end_i]
if slice_.shape[0] < window_size:
slice_ = np.pad(
slice_, (0, window_size - slice_.shape[0]), 'constant', constant_values=0.0)
if slice_.shape[0] == window_size:
slices.append(slice_)
return np.array(slices, dtype=np.float32)
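# Editor's note -- a worked example of the windowing above (illustrative):
# with window_size=4 and stride=0.5 the window advances 2 samples at a time
# and the final partial window is zero-padded:
#   slice_signal(np.arange(10, dtype=np.float32), window_size=4, stride=0.5)
#   -> [[0 1 2 3], [2 3 4 5], [4 5 6 7], [6 7 8 9], [8 9 0 0]]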
def tf_merge_slices(slices: tf.Tensor) -> tf.Tensor:
# slices shape = [batch, window_size]
return tf.keras.backend.flatten(slices) # return shape = [-1, ]
def merge_slices(slices: np.ndarray) -> np.ndarray:
# slices shape = [batch, window_size]
return np.reshape(slices, [-1])
def tf_normalize_audio_features(audio_feature: tf.Tensor, per_frame=False) -> tf.Tensor:
"""
TF Mean and variance features normalization
Args:
audio_feature: tf.Tensor with shape [T, F]
Returns:
normalized audio features with shape [T, F]
"""
axis = 1 if per_frame else None
mean = tf.reduce_mean(audio_feature, axis=axis, keepdims=True)
std_dev = tf.math.sqrt(tf.math.reduce_variance(audio_feature, axis=axis, keepdims=True) + 1e-9)
return (audio_feature - mean) / std_dev
def tf_normalize_signal(signal: tf.Tensor) -> tf.Tensor:
"""
    TF Normalize signal to [-1, 1] range
Args:
signal: tf.Tensor with shape [None]
Returns:
normalized signal with shape [None]
"""
gain = 1.0 / (tf.reduce_max(tf.abs(signal), axis=-1) + 1e-9)
return signal * gain
def tf_preemphasis(signal: tf.Tensor, coeff=0.97):
"""
TF Pre-emphasis
Args:
signal: tf.Tensor with shape [None]
coeff: Float that indicates the preemphasis coefficient
Returns:
pre-emphasized signal with shape [None]
"""
if not coeff or coeff <= 0.0: return signal
s0 = tf.expand_dims(signal[0], axis=-1)
s1 = signal[1:] - coeff * signal[:-1]
return tf.concat([s0, s1], axis=-1)
def tf_depreemphasis(signal: tf.Tensor, coeff=0.97) -> tf.Tensor:
"""
TF Depreemphasis
Args:
signal: tf.Tensor with shape [B, None]
coeff: Float that indicates the preemphasis coefficient
Returns:
depre-emphasized signal with shape [B, None]
"""
if not coeff or coeff <= 0.0: return signal
def map_fn(elem):
x = tf.expand_dims(elem[0], axis=-1)
for n in range(1, elem.shape[0], 1):
current = coeff * x[n - 1] + elem[n]
x = tf.concat([x, [current]], axis=0)
return x
return tf.map_fn(map_fn, signal)
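# Editor's note: tf_preemphasis and tf_depreemphasis invert each other.
# Illustrative check (eager mode):
#   x = tf.constant([1.0, 2.0, 3.0])
#   y = tf_preemphasis(x, 0.97)            # -> [1.0, 1.03, 1.06]
#   tf_depreemphasis(y[tf.newaxis], 0.97)  # -> [[1.0, 2.0, 3.0]] (approx.)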
class TFSpeechFeaturizer(metaclass=abc.ABCMeta):
def __init__(self, speech_config: dict):
"""
speech_config = {
"sample_rate": int,
"frame_ms": int,
"stride_ms": int,
"num_feature_bins": int,
"feature_type": str,
"delta": bool,
"delta_delta": bool,
"pitch": bool,
"normalize_signal": bool,
"normalize_feature": bool,
"normalize_per_frame": bool
}
"""
# Samples
self.sample_rate = speech_config.get("sample_rate", 16000)
self.frame_length = int(self.sample_rate * (speech_config.get("frame_ms", 25) / 1000))
self.frame_step = int(self.sample_rate * (speech_config.get("stride_ms", 10) / 1000))
# Features
self.num_feature_bins = speech_config.get("num_feature_bins", 80)
self.feature_type = speech_config.get("feature_type", "log_mel_spectrogram")
self.preemphasis = speech_config.get("preemphasis", None)
self.top_db = speech_config.get("top_db", 80.0)
# Normalization
self.normalize_signal = speech_config.get("normalize_signal", True)
self.normalize_feature = speech_config.get("normalize_feature", True)
self.normalize_per_frame = speech_config.get("normalize_per_frame", False)
self.center = speech_config.get("center", True)
# Length
self.max_length = 0
@property
def shape(self) -> list:
length = self.max_length if self.max_length > 0 else None
return [length, self.num_feature_bins, 1]
@property
def nfft(self) -> int:
""" Number of FFT """
return 2 ** (self.frame_length - 1).bit_length()
def get_length_from_duration(self, duration):
nsamples = math.ceil(float(duration) * self.sample_rate)
if self.center: nsamples += self.nfft
return 1 + (nsamples - self.nfft) // self.frame_step # https://www.tensorflow.org/api_docs/python/tf/signal/frame
def update_length(self, length: int):
self.max_length = max(self.max_length, length)
def reset_length(self):
self.max_length = 0
def stft(self, signal):
if self.center: signal = tf.pad(signal, [[self.nfft // 2, self.nfft // 2]], mode="REFLECT")
window = tf.signal.hann_window(self.frame_length, periodic=True)
left_pad = (self.nfft - self.frame_length) // 2
right_pad = self.nfft - self.frame_length - left_pad
window = tf.pad(window, [[left_pad, right_pad]])
framed_signals = tf.signal.frame(signal, frame_length=self.nfft, frame_step=self.frame_step)
framed_signals *= window
return tf.square(tf.abs(tf.signal.rfft(framed_signals, [self.nfft])))
def power_to_db(self, S, amin=1e-10):
log_spec = 10.0 * math_util.log10(tf.maximum(amin, S))
log_spec -= 10.0 * math_util.log10(tf.maximum(amin, 1.0))
if self.top_db is not None:
if self.top_db < 0:
raise ValueError('top_db must be non-negative')
log_spec = tf.maximum(log_spec, tf.reduce_max(log_spec) - self.top_db)
return log_spec
def extract(self, signal: np.ndarray) -> np.ndarray:
signal = np.asfortranarray(signal)
features = self.tf_extract(tf.convert_to_tensor(signal, dtype=tf.float32))
return features.numpy()
def tf_extract(self, signal: tf.Tensor) -> tf.Tensor:
"""
Extract speech features from signals (for using in tflite)
Args:
signal: tf.Tensor with shape [None]
Returns:
features: tf.Tensor with shape [T, F, 1]
"""
if self.normalize_signal:
signal = tf_normalize_signal(signal)
signal = tf_preemphasis(signal, self.preemphasis)
if self.feature_type == "log_mel_spectrogram":
features = self.compute_log_mel_spectrogram(signal)
else:
raise ValueError("feature_type must be 'log_mel_spectrogram'")
features = tf.expand_dims(features, axis=-1)
if self.normalize_feature:
features = tf_normalize_audio_features(features, per_frame=self.normalize_per_frame)
return features
def compute_log_mel_spectrogram(self, signal):
spectrogram = self.stft(signal)
linear_to_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins=self.num_feature_bins,
num_spectrogram_bins=spectrogram.shape[-1],
sample_rate=self.sample_rate,
lower_edge_hertz=0.0, upper_edge_hertz=(self.sample_rate / 2)
)
mel_spectrogram = tf.tensordot(spectrogram, linear_to_weight_matrix, 1)
return self.power_to_db(mel_spectrogram)
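# --- Editor's demo (assumed, minimal config; not part of the original module) ---
if __name__ == "__main__":
    featurizer = TFSpeechFeaturizer({"sample_rate": 16000})
    one_second = tf.random.normal([16000])      # 1 s of fake audio
    features = featurizer.tf_extract(one_second)
    print(features.shape)                       # -> (101, 80, 1): [T, F, 1]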
| 9,943 | 36.104478 | 122 | py |
Squeezeformer | Squeezeformer-main/src/featurizers/text_featurizers.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import abc
import codecs
import unicodedata
from multiprocessing import cpu_count
import sentencepiece as sp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tds
from ..utils import file_util
ENGLISH_CHARACTERS = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"]
class TextFeaturizer(metaclass=abc.ABCMeta):
def __init__(self):
self.scorer = None
self.blank = None
self.tokens2indices = {}
self.tokens = []
self.num_classes = None
self.max_length = 0
@property
def shape(self) -> list:
return [self.max_length if self.max_length > 0 else None]
@property
def prepand_shape(self) -> list:
return [self.max_length + 1 if self.max_length > 0 else None]
def update_length(self, length: int):
self.max_length = max(self.max_length, length)
def reset_length(self):
self.max_length = 0
def preprocess_text(self, text):
text = unicodedata.normalize("NFC", text.lower())
return text.strip("\n") # remove trailing newline
def add_scorer(self, scorer: any = None):
""" Add scorer to this instance """
self.scorer = scorer
def normalize_indices(self, indices: tf.Tensor) -> tf.Tensor:
"""
Remove -1 in indices by replacing them with blanks
Args:
indices (tf.Tensor): shape any
Returns:
tf.Tensor: normalized indices with shape same as indices
"""
with tf.name_scope("normalize_indices"):
minus_one = -1 * tf.ones_like(indices, dtype=tf.int32)
blank_like = self.blank * tf.ones_like(indices, dtype=tf.int32)
return tf.where(indices == minus_one, blank_like, indices)
def prepand_blank(self, text: tf.Tensor) -> tf.Tensor:
""" Prepand blank index for transducer models """
return tf.concat([[self.blank], text], axis=0)
    @abc.abstractmethod
def extract(self, text):
raise NotImplementedError()
    @abc.abstractmethod
def iextract(self, indices):
raise NotImplementedError()
    @abc.abstractmethod
def indices2upoints(self, indices):
raise NotImplementedError()
class SentencePieceFeaturizer(TextFeaturizer):
"""
Extract text feature based on sentence piece package.
"""
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 1
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 2
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 3
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 0 # unused, by default
def __init__(self, decoder_config: dict, model=None):
super(SentencePieceFeaturizer, self).__init__()
self.vocabulary = decoder_config['vocabulary']
self.model = self.__load_model() if model is None else model
self.blank = 0 # treats blank as 0 (pad)
# vocab size
self.num_classes = self.model.get_piece_size()
self.__init_vocabulary()
def __load_model(self):
filename_prefix = os.path.splitext(self.vocabulary)[0]
processor = sp.SentencePieceProcessor()
processor.load(filename_prefix + ".model")
return processor
def __init_vocabulary(self):
self.tokens = []
for idx in range(1, self.num_classes):
self.tokens.append(self.model.decode_ids([idx]))
self.non_blank_tokens = self.tokens.copy()
self.tokens.insert(0, "")
self.upoints = tf.strings.unicode_decode(self.tokens, "UTF-8")
self.upoints = self.upoints.to_tensor() # [num_classes, max_subword_length]
@classmethod
def load_from_file(cls, decoder_config: dict, filename: str = None):
if filename is not None:
filename_prefix = os.path.splitext(file_util.preprocess_paths(filename))[0]
else:
filename_prefix = decoder_config.get("output_path_prefix", None)
processor = sp.SentencePieceProcessor()
processor.load(filename_prefix + ".model")
return cls(decoder_config, processor)
def extract(self, text: str) -> tf.Tensor:
"""
Convert string to a list of integers
# encode: text => id
sp.encode_as_pieces('This is a test') --> ['▁This', '▁is', '▁a', '▁t', 'est']
sp.encode_as_ids('This is a test') --> [209, 31, 9, 375, 586]
Args:
text: string (sequence of characters)
Returns:
sequence of ints in tf.Tensor
"""
text = self.preprocess_text(text)
text = text.strip() # remove trailing space
indices = self.model.encode_as_ids(text)
return tf.convert_to_tensor(indices, dtype=tf.int32)
def iextract(self, indices: tf.Tensor) -> tf.Tensor:
"""
Convert list of indices to string
# decode: id => text
sp.decode_pieces(['▁This', '▁is', '▁a', '▁t', 'est']) --> This is a test
sp.decode_ids([209, 31, 9, 375, 586]) --> This is a test
Args:
indices: tf.Tensor with dim [B, None]
Returns:
transcripts: tf.Tensor of dtype tf.string with dim [B]
"""
indices = self.normalize_indices(indices)
with tf.device("/CPU:0"): # string data is not supported on GPU
def decode(x):
if x[0] == self.blank: x = x[1:]
return self.model.decode_ids(x.tolist())
text = tf.map_fn(
lambda x: tf.numpy_function(decode, inp=[x], Tout=tf.string),
indices,
fn_output_signature=tf.TensorSpec([], dtype=tf.string)
)
return text
@tf.function(
input_signature=[
tf.TensorSpec([None], dtype=tf.int32)
]
)
def indices2upoints(self, indices: tf.Tensor) -> tf.Tensor:
"""
Transform Predicted Indices to Unicode Code Points (for using tflite)
Args:
indices: tf.Tensor of Classes in shape [None]
Returns:
unicode code points transcript with dtype tf.int32 and shape [None]
"""
with tf.name_scope("indices2upoints"):
indices = self.normalize_indices(indices)
upoints = tf.gather_nd(self.upoints, tf.expand_dims(indices, axis=-1))
return tf.gather_nd(upoints, tf.where(tf.not_equal(upoints, 0)))
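# --- Usage sketch (editor's addition; the model path is hypothetical) ---
# featurizer = SentencePieceFeaturizer({"vocabulary": "/path/to/sp.model"})
# ids = featurizer.extract("this is a test")          # tf.int32 subword ids
# text = featurizer.iextract(tf.expand_dims(ids, 0))  # back to a tf.string batch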
| 6,995 | 34.51269 | 91 | py |
Squeezeformer | Squeezeformer-main/src/featurizers/__init__.py | 0 | 0 | 0 | py |
|
Squeezeformer | Squeezeformer-main/src/utils/file_util.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import yaml
import tempfile
import contextlib
from typing import Union, List
import tensorflow as tf
def load_yaml(path):
# Fix yaml numbers https://stackoverflow.com/a/30462009/11037553
loader = yaml.SafeLoader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
with open(path, "r", encoding="utf-8") as file:
return yaml.load(file, Loader=loader)
def is_hdf5_filepath(filepath: str) -> bool:
return (filepath.endswith('.h5') or filepath.endswith('.keras') or filepath.endswith('.hdf5'))
def is_cloud_path(path: str) -> bool:
""" Check if the path is on cloud (which requires tf.io.gfile)
Args:
path (str): Path to directory or file
Returns:
bool: True if path is on cloud, False otherwise
"""
return bool(re.match(r"^[a-z]+://", path))
def preprocess_paths(paths: Union[List[str], str], isdir: bool = False) -> Union[List[str], str]:
""" Expand the path to the root "/" and makedirs
Args:
paths (Union[List, str]): A path or list of paths
Returns:
Union[List, str]: A processed path or list of paths, return None if it's not path
"""
if isinstance(paths, list):
paths = [path if is_cloud_path(path) else os.path.abspath(os.path.expanduser(path)) for path in paths]
for path in paths:
dirpath = path if isdir else os.path.dirname(path)
if not tf.io.gfile.exists(dirpath): tf.io.gfile.makedirs(dirpath)
return paths
if isinstance(paths, str):
paths = paths if is_cloud_path(paths) else os.path.abspath(os.path.expanduser(paths))
dirpath = paths if isdir else os.path.dirname(paths)
if not tf.io.gfile.exists(dirpath): tf.io.gfile.makedirs(dirpath)
return paths
return None
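# Editor's note (illustrative): preprocess_paths("~/exp/run1/config.yml")
# expands the user directory, creates ~/exp/run1 if it does not exist, and
# returns the absolute path; cloud paths such as "gs://bucket/config.yml"
# are left unexpanded and handled through tf.io.gfile.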
@contextlib.contextmanager
def save_file(filepath: str):
if is_cloud_path(filepath) and is_hdf5_filepath(filepath):
_, ext = os.path.splitext(filepath)
with tempfile.NamedTemporaryFile(suffix=ext) as tmp:
yield tmp.name
tf.io.gfile.copy(tmp.name, filepath, overwrite=True)
else:
yield filepath
@contextlib.contextmanager
def read_file(filepath: str):
if is_cloud_path(filepath) and is_hdf5_filepath(filepath):
_, ext = os.path.splitext(filepath)
with tempfile.NamedTemporaryFile(suffix=ext) as tmp:
tf.io.gfile.copy(filepath, tmp.name, overwrite=True)
yield tmp.name
else:
yield filepath
| 3,440 | 33.41 | 110 | py |
Squeezeformer | Squeezeformer-main/src/utils/layer_util.py |
# Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def get_rnn(rnn_type: str):
assert rnn_type in ["lstm", "gru", "rnn"]
if rnn_type == "lstm": return tf.keras.layers.LSTM
if rnn_type == "gru": return tf.keras.layers.GRU
return tf.keras.layers.SimpleRNN
def get_conv(conv_type):
assert conv_type in ["conv1d", "conv2d"]
if conv_type == "conv1d": return tf.keras.layers.Conv1D
return tf.keras.layers.Conv2D
| 1,002 | 32.433333 | 74 | py |
Squeezeformer | Squeezeformer-main/src/utils/shape_util.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def shape_list(x, out_type=tf.int32):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x, out_type=out_type)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def get_shape_invariants(tensor):
shapes = shape_list(tensor)
return tf.TensorShape([i if isinstance(i, int) else None for i in shapes])
def get_float_spec(tensor):
shape = get_shape_invariants(tensor)
return tf.TensorSpec(shape, dtype=tf.float32)
| 1,129 | 33.242424 | 78 | py |
Squeezeformer | Squeezeformer-main/src/utils/math_util.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import tensorflow as tf
from . import shape_util
def log10(x):
numerator = tf.math.log(x)
denominator = tf.math.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
def get_num_batches(nsamples, batch_size, drop_remainders=True):
if nsamples is None or batch_size is None: return None
if drop_remainders: return math.floor(float(nsamples) / float(batch_size))
return math.ceil(float(nsamples) / float(batch_size))
def nan_to_zero(input_tensor):
return tf.where(tf.math.is_nan(input_tensor), tf.zeros_like(input_tensor), input_tensor)
def bytes_to_string(array: np.ndarray, encoding: str = "utf-8"):
if array is None: return None
return [transcript.decode(encoding) for transcript in array]
def get_reduced_length(length, reduction_factor):
return tf.cast(tf.math.ceil(tf.divide(length, tf.cast(reduction_factor, dtype=length.dtype))), dtype=tf.int32)
def count_non_blank(tensor: tf.Tensor, blank: int or tf.Tensor = 0, axis=None):
return tf.reduce_sum(tf.where(tf.not_equal(tensor, blank), x=tf.ones_like(tensor), y=tf.zeros_like(tensor)), axis=axis)
def merge_two_last_dims(x):
b, _, f, c = shape_util.shape_list(x)
return tf.reshape(x, shape=[b, -1, f * c])
def merge_repeated(yseqs, blank=0):
result = tf.reshape(yseqs[0], [1])
U = shape_util.shape_list(yseqs)[0]
i = tf.constant(1, dtype=tf.int32)
def _cond(i, result, yseqs, U): return tf.less(i, U)
def _body(i, result, yseqs, U):
if yseqs[i] != result[-1]:
result = tf.concat([result, [yseqs[i]]], axis=-1)
return i + 1, result, yseqs, U
_, result, _, _ = tf.while_loop(
_cond,
_body,
loop_vars=[i, result, yseqs, U],
shape_invariants=(
tf.TensorShape([]),
tf.TensorShape([None]),
tf.TensorShape([None]),
tf.TensorShape([])
)
)
return tf.pad(result, [[U - shape_util.shape_list(result)[0], 0]], constant_values=blank)
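# Editor's note -- worked example (eager mode, illustrative):
#   merge_repeated(tf.constant([1, 1, 2, 2, 3]))  # -> [0, 0, 1, 2, 3]
# adjacent duplicates collapse to [1, 2, 3], then the result is left-padded
# with `blank` back to the original length.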
def find_max_length_prediction_tfarray(tfarray: tf.TensorArray) -> tf.Tensor:
with tf.name_scope("find_max_length_prediction_tfarray"):
index = tf.constant(0, dtype=tf.int32)
total = tfarray.size()
max_length = tf.constant(0, dtype=tf.int32)
def condition(index, _): return tf.less(index, total)
def body(index, max_length):
prediction = tfarray.read(index)
length = tf.shape(prediction)[0]
max_length = tf.where(tf.greater(length, max_length), length, max_length)
return index + 1, max_length
index, max_length = tf.while_loop(condition, body, loop_vars=[index, max_length], swap_memory=False)
return max_length
def pad_prediction_tfarray(tfarray: tf.TensorArray, blank: int or tf.Tensor) -> tf.TensorArray:
with tf.name_scope("pad_prediction_tfarray"):
index = tf.constant(0, dtype=tf.int32)
total = tfarray.size()
max_length = find_max_length_prediction_tfarray(tfarray) + 1
def condition(index, _): return tf.less(index, total)
def body(index, tfarray):
prediction = tfarray.read(index)
prediction = tf.pad(
prediction, paddings=[[0, max_length - tf.shape(prediction)[0]]],
mode="CONSTANT", constant_values=blank
)
tfarray = tfarray.write(index, prediction)
return index + 1, tfarray
index, tfarray = tf.while_loop(condition, body, loop_vars=[index, tfarray], swap_memory=False)
return tfarray
| 4,208 | 33.785124 | 123 | py |
Squeezeformer | Squeezeformer-main/src/utils/data_util.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tf.data.Dataset does not work well for namedtuple so we are using dict
import tensorflow as tf
def create_inputs(
inputs: tf.Tensor,
inputs_length: tf.Tensor,
predictions: tf.Tensor = None,
predictions_length: tf.Tensor = None,
) -> dict:
data = {
"inputs": inputs,
"inputs_length": inputs_length,
}
if predictions is not None:
data["predictions"] = predictions
if predictions_length is not None:
data["predictions_length"] = predictions_length
return data
def create_logits(logits: tf.Tensor, logits_length: tf.Tensor) -> dict:
return {
"logits": logits,
"logits_length": logits_length
}
def create_labels(labels: tf.Tensor, labels_length: tf.Tensor) -> dict:
return {
"labels": labels,
"labels_length": labels_length,
}
| 1,437 | 28.346939 | 74 | py |
Squeezeformer | Squeezeformer-main/src/utils/training_utils.py | import tensorflow as tf
from tensorflow.python.keras import backend
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import ops
from tensorflow.python.eager import context
from tensorflow.python.util import nest
from tensorflow.python.ops import variables
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import values as ds_values
def _minimum_control_deps(outputs):
"""Returns the minimum control dependencies to ensure step succeeded."""
if context.executing_eagerly():
return [] # Control dependencies not needed.
outputs = nest.flatten(outputs, expand_composites=True)
for out in outputs:
# Variables can't be control dependencies.
if not isinstance(out, variables.Variable):
return [out] # Return first Tensor or Op from outputs.
return [] # No viable Tensor or Op to use for control deps.
def reduce_per_replica(values, strategy):
"""Reduce PerReplica objects.
Args:
values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are
returned as-is.
strategy: `tf.distribute.Strategy` object.
Returns:
Structure of `Tensor`s.
"""
def _reduce(v):
"""Reduce a single `PerReplica` object."""
if _collective_all_reduce_multi_worker(strategy):
return _multi_worker_concat(v, strategy)
if not isinstance(v, ds_values.PerReplica):
return v
if _is_tpu_multi_host(strategy):
return _tpu_multi_host_concat(v, strategy)
else:
return concat(strategy.unwrap(v))
return nest.map_structure(_reduce, values)
def concat(tensors, axis=0):
    """Concats `tensor`s along `axis`."""
    if len(tensors[0].shape) == 0:
        return tf.math.add_n(tensors)
if isinstance(tensors[0], sparse_tensor.SparseTensor):
return sparse_ops.sparse_concat_v2(axis=axis, sp_inputs=tensors)
return array_ops.concat(tensors, axis=axis)
def _collective_all_reduce_multi_worker(strategy):
return (isinstance(strategy,
collective_all_reduce_strategy.CollectiveAllReduceStrategy)
) and strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access
def _is_scalar(x):
return isinstance(x, (ops.Tensor, variables.Variable)) and x.shape.rank == 0
def write_scalar_summaries(logs, step):
for name, value in logs.items():
if _is_scalar(value):
summary_ops_v2.scalar('batch_' + name, value, step=step)
def _is_tpu_multi_host(strategy):
return (backend.is_tpu_strategy(strategy) and
strategy.extended.num_hosts > 1)
def _tpu_multi_host_concat(v, strategy):
"""Correctly order TPU PerReplica objects."""
replicas = strategy.unwrap(v)
# When distributed datasets are created from Tensors / NumPy,
# TPUStrategy.experimental_distribute_dataset shards data in
# (Replica, Host) order, and TPUStrategy.unwrap returns it in
# (Host, Replica) order.
# TODO(b/150317897): Figure out long-term plan here.
num_replicas_per_host = strategy.extended.num_replicas_per_host
ordered_replicas = []
for replica_id in range(num_replicas_per_host):
ordered_replicas += replicas[replica_id::num_replicas_per_host]
return concat(ordered_replicas)
| 3,574 | 37.44086 | 97 | py |
Squeezeformer | Squeezeformer-main/src/utils/metric_util.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
from nltk.metrics import distance
import tensorflow as tf
from . import math_util
def execute_wer(decode, target):
decode = math_util.bytes_to_string(decode)
target = math_util.bytes_to_string(target)
dis = 0.0
length = 0.0
for dec, tar in zip(decode, target):
words = set(dec.split() + tar.split())
word2char = dict(zip(words, range(len(words))))
new_decode = [chr(word2char[w]) for w in dec.split()]
new_target = [chr(word2char[w]) for w in tar.split()]
dis += distance.edit_distance(''.join(new_decode), ''.join(new_target))
length += len(tar.split())
return tf.convert_to_tensor(dis, tf.float32), tf.convert_to_tensor(length, tf.float32)
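# Editor's note: execute_wer maps every distinct word to a unique character so
# that nltk's character-level edit distance computes a word-level distance,
# e.g. "a b c" vs "a x c" becomes two 3-character strings differing in one
# position, giving an edit distance of 1 (one substituted word).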
def wer(decode: tf.Tensor, target: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Word Error Rate
Args:
        decode (tf.Tensor): tensor of prediction texts
        target (tf.Tensor): tensor of groundtruth texts
Returns:
tuple: a tuple of tf.Tensor of (edit distances, number of words) of each text
"""
return tf.numpy_function(execute_wer, inp=[decode, target], Tout=[tf.float32, tf.float32])
def execute_cer(decode, target):
decode = math_util.bytes_to_string(decode)
target = math_util.bytes_to_string(target)
dis = 0
length = 0
for dec, tar in zip(decode, target):
dis += distance.edit_distance(dec, tar)
length += len(tar)
return tf.convert_to_tensor(dis, tf.float32), tf.convert_to_tensor(length, tf.float32)
def cer(decode: tf.Tensor, target: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Character Error Rate
Args:
        decode (tf.Tensor): tensor of prediction texts
        target (tf.Tensor): tensor of groundtruth texts
Returns:
tuple: a tuple of tf.Tensor of (edit distances, number of characters) of each text
"""
return tf.numpy_function(execute_cer, inp=[decode, target], Tout=[tf.float32, tf.float32])
def tf_cer(decode: tf.Tensor, target: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Tensorflwo Charactor Error rate
Args:
decoder (tf.Tensor): tensor shape [B]
target (tf.Tensor): tensor shape [B]
Returns:
tuple: a tuple of tf.Tensor of (edit distances, number of characters) of each text
"""
decode = tf.strings.bytes_split(decode) # [B, N]
target = tf.strings.bytes_split(target) # [B, M]
distances = tf.edit_distance(decode.to_sparse(), target.to_sparse(), normalize=False) # [B]
lengths = tf.cast(target.row_lengths(axis=1), dtype=tf.float32) # [B]
return tf.reduce_sum(distances), tf.reduce_sum(lengths)
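# --- Editor's demo (illustrative inputs; not part of the original module) ---
if __name__ == "__main__":
    hypotheses = tf.constant(["hello", "word"])
    references = tf.constant(["hallo", "world"])
    distance_sum, length_sum = tf_cer(hypotheses, references)
    print(distance_sum.numpy(), length_sum.numpy())  # -> 2.0 10.0 (CER = 0.2)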
| 3,240 | 34.615385 | 96 | py |
Squeezeformer | Squeezeformer-main/src/utils/feature_util.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def float_feature(list_of_floats):
return tf.train.Feature(float_list=tf.train.FloatList(value=list_of_floats))
def int64_feature(list_of_ints):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list_of_ints))
def bytestring_feature(list_of_bytestrings):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=list_of_bytestrings))
| 979 | 34 | 85 | py |
Squeezeformer | Squeezeformer-main/src/utils/app_util.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tqdm import tqdm
import tensorflow as tf
from .metric_util import wer, cer
from ..metrics.error_rates import ErrorRate
from .file_util import read_file
logger = tf.get_logger()
def evaluate_results(filepath: str):
logger.info(f"Evaluating result from {filepath} ...")
metrics = {
"greedy_wer": ErrorRate(wer, name="greedy_wer", dtype=tf.float32),
"greedy_cer": ErrorRate(cer, name="greedy_cer", dtype=tf.float32),
"beamsearch_wer": ErrorRate(wer, name="beamsearch_wer", dtype=tf.float32),
"beamsearch_cer": ErrorRate(cer, name="beamsearch_cer", dtype=tf.float32)
}
with read_file(filepath) as path:
with open(path, "r", encoding="utf-8") as openfile:
lines = openfile.read().splitlines()
lines = lines[1:] # skip header
for eachline in tqdm(lines):
_, _, groundtruth, greedy, beamsearch = eachline.split("\t")
groundtruth = tf.convert_to_tensor([groundtruth], dtype=tf.string)
greedy = tf.convert_to_tensor([greedy], dtype=tf.string)
beamsearch = tf.convert_to_tensor([beamsearch], dtype=tf.string)
metrics["greedy_wer"].update_state(decode=greedy, target=groundtruth)
metrics["greedy_cer"].update_state(decode=greedy, target=groundtruth)
metrics["beamsearch_wer"].update_state(decode=beamsearch, target=groundtruth)
metrics["beamsearch_cer"].update_state(decode=beamsearch, target=groundtruth)
for key, value in metrics.items():
logger.info(f"{key}: {value.result().numpy()}")
| 2,138 | 43.5625 | 85 | py |
Squeezeformer | Squeezeformer-main/src/utils/__init__.py | 0 | 0 | 0 | py |
|
Squeezeformer | Squeezeformer-main/src/utils/env_util.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Union, List
import warnings
import tensorflow as tf
logger = tf.get_logger()
def setup_environment():
""" Setting tensorflow running environment """
warnings.simplefilter("ignore")
logger.setLevel(logging.INFO)
return logger
def setup_devices(devices: List[int], cpu: bool = False):
"""Setting visible devices
Args:
devices (list): list of visible devices' indices
"""
if cpu:
cpus = tf.config.list_physical_devices("CPU")
tf.config.set_visible_devices(cpus, "CPU")
tf.config.set_visible_devices([], "GPU")
logger.info(f"Run on {len(cpus)} Physical CPUs")
else:
gpus = tf.config.list_physical_devices("GPU")
if gpus:
visible_gpus = [gpus[i] for i in devices]
tf.config.set_visible_devices(visible_gpus, "GPU")
logger.info(f"Run on {len(visible_gpus)} Physical GPUs")
def setup_tpu(tpu_address=None):
if tpu_address is None:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
else:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="grpc://" + tpu_address)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
logger.info(f"All TPUs: {tf.config.list_logical_devices('TPU')}")
return tf.distribute.experimental.TPUStrategy(resolver)
def setup_strategy(devices: List[int], tpu_address: str = None):
"""Setting mirrored strategy for training
Args:
devices (list): list of visible devices' indices
tpu_address (str): an optional custom tpu address
Returns:
tf.distribute.Strategy: TPUStrategy for training on tpus or MirroredStrategy for training on gpus
"""
try:
return setup_tpu(tpu_address)
except (ValueError, tf.errors.NotFoundError) as e:
        logger.warning(e)
setup_devices(devices)
return tf.distribute.MirroredStrategy()
def has_devices(devices: Union[List[str], str]):
if isinstance(devices, list):
return all([len(tf.config.list_logical_devices(d)) != 0 for d in devices])
return len(tf.config.list_logical_devices(devices)) != 0
| 2,814 | 32.915663 | 105 | py |
Squeezeformer | Squeezeformer-main/src/utils/logging_util.py | import wandb
import tensorflow as tf
import numpy as np
from numpy import linalg as la
from . import env_util
logger = env_util.setup_environment()
class StepLossMetric(tf.keras.metrics.Metric):
def __init__(self, name='step_loss', **kwargs):
super(StepLossMetric, self).__init__(name=name, **kwargs)
self.loss = tf.zeros(())
def update_state(self, loss):
self.loss = loss
def result(self):
return self.loss
def reset_states(self):
self.loss = tf.zeros(())
class LoggingCallback(tf.keras.callbacks.Callback):
def __init__(
self,
optimizer,
model,
):
super(LoggingCallback, self).__init__()
self.optimizer = optimizer
self.model = model
def on_epoch_end(self, epoch, logs=None):
logger.info("saving checkpoint")
iterations = self.optimizer.iterations
lr = self.optimizer.learning_rate(iterations)
logger.info(f"[LR Logger] Epoch: {epoch}, lr: {lr}")
wandb.log({"epoch": epoch, "lr": lr, "iterations": iterations.numpy()})
| 1,090 | 24.97619 | 79 | py |
Squeezeformer | Squeezeformer-main/src/losses/ctc_loss.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class CtcLoss(tf.keras.losses.Loss):
def __init__(self, blank=0, name=None):
super(CtcLoss, self).__init__(reduction=tf.keras.losses.Reduction.NONE, name=name)
self.blank = blank
def call(self, y_true, y_pred):
loss = ctc_loss(
y_pred=y_pred["logits"],
input_length=y_pred["logits_length"],
y_true=y_true["labels"],
label_length=y_true["labels_length"],
blank=self.blank,
name=self.name
)
return tf.nn.compute_average_loss(loss)
@tf.function
def ctc_loss(y_true, y_pred, input_length, label_length, blank, name=None):
return tf.nn.ctc_loss(
labels=tf.cast(y_true, tf.int32),
logit_length=tf.cast(input_length, tf.int32),
logits=tf.cast(y_pred, tf.float32),
label_length=tf.cast(label_length, tf.int32),
logits_time_major=False,
blank_index=blank,
name=name
)
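# --- Usage sketch (editor's addition; shapes are illustrative assumptions) ---
# loss_fn = CtcLoss(blank=0)
# y_pred = {"logits": logits,             # [B, T, num_classes]
#           "logits_length": logit_lens}  # [B] encoder output lengths
# y_true = {"labels": labels,             # [B, U] label indices
#           "labels_length": label_lens}  # [B] label lengths
# loss = loss_fn(y_true, y_pred)          # scalar, averaged over the batch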
| 1,560 | 34.477273 | 90 | py |
Squeezeformer | Squeezeformer-main/src/losses/__init__.py | 0 | 0 | 0 | py |
|
Squeezeformer | Squeezeformer-main/scripts/create_librispeech_trans.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import argparse
import librosa
from tqdm.auto import tqdm
import unicodedata
from src.utils.file_util import preprocess_paths
parser = argparse.ArgumentParser(prog="Setup LibriSpeech Transcripts")
parser.add_argument("--dir", "-d", type=str, default=None, help="Directory of dataset")
parser.add_argument("output", type=str, default=None, help="The output .tsv transcript file path")
args = parser.parse_args()
assert args.dir and args.output
args.dir = preprocess_paths(args.dir, isdir=True)
args.output = preprocess_paths(args.output)
transcripts = []
text_files = glob.glob(os.path.join(args.dir, "**", "*.txt"), recursive=True)
for text_file in tqdm(text_files, desc="[Loading]"):
current_dir = os.path.dirname(text_file)
with open(text_file, "r", encoding="utf-8") as txt:
lines = txt.read().splitlines()
for line in lines:
line = line.split(" ", maxsplit=1)
audio_file = os.path.join(current_dir, line[0] + ".flac")
y, sr = librosa.load(audio_file, sr=None)
duration = librosa.get_duration(y, sr)
text = unicodedata.normalize("NFC", line[1].lower())
transcripts.append(f"{audio_file}\t{duration}\t{text}\n")
with open(args.output, "w", encoding="utf-8") as out:
out.write("PATH\tDURATION\tTRANSCRIPT\n")
for line in tqdm(transcripts, desc="[Writing]"):
out.write(line)
| 1,982 | 33.789474 | 98 | py |
Squeezeformer | Squeezeformer-main/scripts/create_librispeech_trans_all.py | import os
import csv
import subprocess
import argparse
def arg_parse():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='all', choices=['all', 'test-only'])
parser.add_argument('--dataset_dir', type=str, required=True)
parser.add_argument('--output_dir', type=str, required=True)
args = parser.parse_args()
return args
args = arg_parse()
for n in ['dev', 'test']:
for m in ['clean', 'other']:
outname = f'{n}_{m}.tsv'
inname = f'{n}-{m}'
print(f'processing {inname}')
subprocess_args = [
'python', 'create_librispeech_trans.py', os.path.join(args.output_dir, outname),
'--dir', os.path.join(args.dataset_dir, inname)
]
subprocess.call(subprocess_args)
if args.mode == 'all':
train_set_names = [
('train-clean-100', 'train_clean_100.tsv'),
('train-clean-360', 'train_clean_360.tsv'),
('train-other-500', 'train_other_500.tsv'),
]
for inname, outname in train_set_names:
print(f'processing {inname}')
subprocess_args = [
'python', 'create_librispeech_trans.py', os.path.join(args.output_dir, outname),
'--dir', os.path.join(args.dataset_dir, inname)
]
subprocess.call(subprocess_args)
lines = ["PATH\tDURATION\tTRANSCRIPT\n"]
tsv_names = [x[-1] for x in train_set_names]
for tsv_name in tsv_names:
infile = os.path.join(args.output_dir, tsv_name)
with open(infile) as file:
tsv_file = csv.reader(file, delimiter="\t")
for i, line in enumerate(tsv_file):
if i == 0: continue
audio_file, duration, text = line
lines.append(f"{audio_file}\t{duration}\t{text}\n")
output_file = os.path.join(args.output_dir, 'train_all.tsv')
with open(output_file, "w", encoding="utf-8") as out:
for line in lines:
out.write(line)
| 1,978 | 30.919355 | 92 | py |
asari | asari-master/asari/api.py | import pathlib
import onnxruntime as rt
from asari.preprocess import tokenize
class Sonar:
def __init__(self):
pipeline_file = pathlib.Path(__file__).parent / "data" / "pipeline.onnx"
self.sess = rt.InferenceSession(str(pipeline_file))
self.input_name = self.sess.get_inputs()[0].name
self.prob_name = self.sess.get_outputs()[1].name
def ping(self, text: str):
tokenized = tokenize(text)
proba = self.sess.run([self.prob_name], {self.input_name: [tokenized]})[0][0]
res = {
"text": text,
"top_class": max(proba, key=lambda k: proba[k]),
"classes": [
{"class_name": class_name, "confidence": confidence} for class_name, confidence in proba.items()
],
}
return res
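# --- Usage sketch (editor's addition) ---
# sonar = Sonar()
# sonar.ping("広告多すぎる♡")
# # -> {"text": ..., "top_class": <most likely class>, "classes": [...]}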
| 812 | 30.269231 | 112 | py |
asari | asari-master/asari/__init__.py | 0 | 0 | 0 | py |
|
asari | asari-master/asari/train.py | """
Train a baseline model.
"""
import argparse
import json
import pathlib
import numpy as np
from skl2onnx import to_onnx
from sklearn.calibration import CalibratedClassifierCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from asari.api import Sonar
from asari.preprocess import tokenize
def load_jsonl(filename):
texts, labels = [], []
with open(filename, encoding="utf-8") as f:
for line in f:
item = json.loads(line)
texts.append(item["text"])
labels.append(item["label"])
return texts, labels
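# Editor's note: each dataset line is expected to be a JSON object with
# "text" and "label" keys, e.g. (illustrative):
#   {"text": "広告多すぎる♡", "label": "negative"}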
def main(args):
print("Loading dataset...")
X, y = load_jsonl(args.dataset)
X = [tokenize(x) for x in X]
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=42)
print("Fitting...")
pipe = Pipeline(
[
("vectorizer", TfidfVectorizer(ngram_range=(1, 2))),
("classifier", CalibratedClassifierCV(LinearSVC())),
]
)
pipe.fit(x_train, y_train)
print("Saving...")
seps = {
TfidfVectorizer: {
"separators": [
" ",
],
}
}
onx = to_onnx(pipe, np.array(x_train)[1:], options=seps)
with open(args.pipeline, "wb") as f:
f.write(onx.SerializeToString())
print("Predicting...")
y_pred = pipe.predict(x_test)
print(classification_report(y_test, y_pred, digits=4))
print(pipe.predict_proba([tokenize("広告多すぎる♡")]))
sonar = Sonar()
y_pred = [sonar.ping(x)["top_class"] for x in x_test]
print(classification_report(y_test, y_pred, digits=4))
if __name__ == "__main__":
DATA_DIR = pathlib.Path(__file__).parent.parent / "data"
SAVE_DIR = pathlib.Path(__file__).parent / "data"
parser = argparse.ArgumentParser(description="Training a classifier")
parser.add_argument("--dataset", default=DATA_DIR / "dataset.jsonl", help="dataset")
parser.add_argument("--pipeline", default=SAVE_DIR / "pipeline.onnx", help="pipeline file")
parser.add_argument("--test_size", type=float, default=0.1, help="test data size")
args = parser.parse_args()
main(args)
| 2,340 | 29.402597 | 104 | py |
asari | asari-master/asari/preprocess.py | from janome.tokenizer import Tokenizer
t = Tokenizer(wakati=True)
def tokenize(text: str) -> str:
return " ".join(t.tokenize(text))
| 139 | 16.5 | 38 | py |
asari | asari-master/tests/test_api.py | import unittest
from pprint import pprint
from asari.api import Sonar
class TestAPI(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.text = "広告多すぎる♡"
def test_ping(self):
sonar = Sonar()
res = sonar.ping(self.text)
pprint(res)
self.assertIn("text", res)
self.assertIn("top_class", res)
self.assertIn("classes", res)
self.assertIsInstance(res["text"], str)
self.assertIsInstance(res["top_class"], str)
self.assertIsInstance(res["classes"], list)
for d in res["classes"]:
self.assertIn("class_name", d)
self.assertIn("confidence", d)
self.assertIsInstance(d["class_name"], str)
self.assertIsInstance(d["confidence"], float)
| 783 | 28.037037 | 57 | py |
asari | asari-master/tests/__init__.py | 0 | 0 | 0 | py |
|
pyjsd | pyjsd-main/distributions.py | # -*- coding: utf-8 -*-
import numpy as __np
import scipy.stats as __stats
# distributions for use in JSD function as the theoretical (assumed) distributions
norm={
"cdf": lambda params,x: __stats.norm.cdf(x,loc=params[0],scale=params[1]),
"likelihood": lambda params,data: -__np.sum(__stats.norm.logpdf(data,loc=params[0],scale=params[1])),
"params": [1,1],
}
logn={
"cdf": lambda params,x: __stats.lognorm.cdf(x,params[1],loc=0,scale=__np.exp(params[0])),
"likelihood": lambda params,data: -__np.sum(__stats.lognorm.logpdf(data,params[1],loc=0,scale=__np.exp(params[0]))),
"params": [1,1],
}
gamma={
"cdf": lambda params,x: __stats.gamma.cdf(x,params[0],scale=params[1]),
"likelihood": lambda params,data: -__np.sum(__stats.gamma.logpdf(data,params[0],scale=params[1])),
"params": [3,3],
}
weibull={
"cdf": lambda params,x: __stats.weibull_min.cdf(x,params[0],scale=params[1]),
"likelihood": lambda params,data: -__np.sum(__stats.weibull_min.logpdf(data,params[0],scale=params[1])),
"params": [3,3],
}
qgauss={
"cdf": lambda params,x: __stats.t.cdf(x,params[0]-1,scale=params[1]/__np.sqrt(params[0]-1)),
"likelihood": lambda params,data: -__np.sum(__stats.t.logpdf(data,params[0]-1,scale=params[1]/__np.sqrt(params[0]-1))),
"params": [4,1],
}
beta={
"cdf": lambda params,x: __stats.beta.cdf(x,params[0],params[1]),
"likelihood": lambda params,data: -__np.sum(__stats.beta.logpdf(data,params[0],params[1])),
"params": [3,3],
}
exp={
"cdf": lambda params,x: __stats.expon.cdf(x,scale=params[0]),
"likelihood": lambda params,data: -__np.sum(__stats.expon.logpdf(data,scale=params[0])),
"params": [1],
}
pareto={
"cdf": lambda params,x: __stats.pareto.cdf(x,params[0]),
"likelihood": lambda params,data: -__np.sum(__stats.pareto.logpdf(data,params[0])),
"params": [1],
}
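# Each entry above exposes the same three-field interface consumed by jsd.JSD:
#   dist["cdf"](params, x)           -> theoretical CDF evaluated at x
#   dist["likelihood"](params, data) -> negative log-likelihood of the data
#   dist["params"]                   -> starting values for the MLE search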
| 1,867 | 38.744681 | 123 | py |
pyjsd | pyjsd-main/jsd.py | # -*- coding: utf-8 -*-
import numpy as __np
from scipy.optimize import minimize as __minimize
# Estimation of the empirical cdf
def __EmpiricalCDF(bins,data):
empiricalHistogram=__np.histogram(data,bins=bins)[0]
empiricalCH=__np.cumsum(empiricalHistogram)
return empiricalCH/empiricalCH[-1]
# Maximum likelihood estimation of the parameters
def __MLE(distLike,distParams,data):
return __minimize(distLike,distParams,args=(data),
method="nelder-mead").x
# estimate JSD of a data given assumed distribution
def __JSD(data,empiricalDist,theorDist,returnParams=True):
# run MLE
distParams=__MLE(theorDist["likelihood"],theorDist["params"],data)
# setup empirical and theoretical (assumed) distribution
cdfBins=__np.linspace(empiricalDist["start"],
empiricalDist["stop"],
num=empiricalDist["bins"])
cdf1=__EmpiricalCDF(cdfBins,data)
cdf2=theorDist["cdf"](distParams,cdfBins[1:])
# estimate JSD
mcdf=0.5*(cdf1+cdf2)
with __np.errstate(divide="ignore",invalid="ignore"):
term1=cdf1*__np.log(cdf1/mcdf)
term2=cdf2*__np.log(cdf2/mcdf)
# 0*log(x)=0 (even if x is zero)
term1[cdf1==0]=0
term2[cdf2==0]=0
normalization=__np.sum(cdf1)+__np.sum(cdf2)
jsd=__np.sqrt(__np.sum(term1+term2)/(normalization*__np.log(2)))
if returnParams:
return jsd,distParams
return jsd
# estimate assumed distribution parameters, JSD score, JSD confidence intervals
def JSD(data,empiricalDist,theorDist,bootstrap):
# estimate JSD of the original data
jsdEstimate,distParams=__JSD(data,empiricalDist,theorDist)
# estimate confidence interval of the JSD using bootstrap methods
jsdConfidence=None
if bootstrap["iterations"]>0:
if bootstrap["blockSize"]<=1:
# ordinary bootstrap
tmpJSD=[]
for rep in range(bootstrap["iterations"]):
resample=__np.random.choice(data,size=len(data))
tmpJSD+=[__JSD(resample,empiricalDist,theorDist,returnParams=False),]
jsdConfidence=__np.percentile(tmpJSD,bootstrap["percentiles"])
else:
# moving block bootstrap
origLen=len(data)
data=__np.append(data,data[:bootstrap["blockSize"]-1])
getBlocks=origLen//bootstrap["blockSize"]+1
tmpJSD=[]
for rep in range(bootstrap["iterations"]):
selectedBlocks=__np.random.choice(range(origLen),size=getBlocks)
                resample=__np.concatenate([data[sb:sb+bootstrap["blockSize"]] for sb in selectedBlocks])
                # trim the concatenated blocks back to the original sample length
                resample=resample[:origLen]
                tmpJSD+=[__JSD(resample,empiricalDist,theorDist,returnParams=False),]
            jsdConfidence=__np.percentile(tmpJSD,bootstrap["percentiles"])
return {
"parameterEstimates": distParams,
"jsdEstimate": jsdEstimate,
"jsdConfidence": jsdConfidence,
}
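# Hedged usage sketch (assumes the package is importable as `pyjsd`; the dict
# keys follow the functions above, but all concrete numbers are illustrative):
#   import numpy as np
#   from pyjsd import JSD
#   from pyjsd import distributions
#   data = np.random.lognormal(mean=0.0, sigma=1.0, size=1000)
#   empiricalDist = {"start": 0.0, "stop": float(data.max()), "bins": 100}
#   bootstrap = {"iterations": 200, "blockSize": 1, "percentiles": [2.5, 97.5]}
#   out = JSD(data, empiricalDist, distributions.logn, bootstrap)
#   out["jsdEstimate"], out["jsdConfidence"]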
| 2,989 | 39.958904 | 86 | py |
pyjsd | pyjsd-main/__init__.py | # -*- coding: utf-8 -*-
from .jsd import JSD
__all__=["JSD"]
| 63 | 9.666667 | 23 | py |
ReBATE | ReBATE-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
def calculate_version():
initpy = open('rebate/_version.py').read().split('\n')
version = list(filter(lambda x: '__version__' in x, initpy))[0].split('\'')[1]
return version
package_version = calculate_version()
setup(
name='ReBATE',
version=package_version,
author='Pete Schmitt, and Ryan J. Urbanowicz',
author_email='[email protected]',
packages=find_packages(),
url='https://github.com/EpistasisLab/ReBATE',
license='License :: OSI Approved :: MIT License',
description=('Relief-based feature selection algorithms'),
long_description='''
A Cython optimized Python implementation of ReBATE, a suite of Relief-based feature selection algorithms.
Contact
=============
If you have any questions or comments about ReBATE, please feel free to contact us via e-mail: [email protected]
This project is hosted at https://github.com/EpistasisLab/ReBATE
''',
zip_safe=True,
install_requires=['numpy', 'scipy', 'scikit-learn'],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities'
],
keywords=['data mining', 'feature selection', 'feature importance', 'machine learning', 'data analysis', 'data engineering', 'data science'],
include_package_data=True,
)
| 1,792 | 36.354167 | 145 | py |
ReBATE | ReBATE-master/rebate/setup_surf.py | """
Copyright (c) 2016 Peter R. Schmitt and Ryan J. Urbanowicz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
exts = [Extension("surf", ["rebate/SURF.pyx"],)]
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = exts,
)
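# Hedged build sketch (run from the repository root, since the source path is
# given as rebate/SURF.pyx):
#   python rebate/setup_surf.py build_ext --inplace
# which compiles the Cython source into an importable `surf` extension module.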
| 1,323 | 40.375 | 74 | py |
ReBATE | ReBATE-master/rebate/Common.py | """
Copyright (c) 2016 Peter R. Schmitt and Ryan J. Urbanowicz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import math
import pandas as pd
import sys
# Thu Apr 6 13:15:17 CDT 2017
###############################################################################
def getVariables(header, x, y, options):
    """Get all the needed variables into a dictionary.
    More are added in overallDataType()."""
pname = options['phenotypename']
var = {'NumAttributes' : len(header),
'phenoTypeList' : list(set(y)) }
if(len(var['phenoTypeList']) <= options['discretelimit']):
var['discretePhenotype'] = True
var['phenSD'] = 0
else:
var['discretePhenotype'] = False
var['phenSD'] = np.std(y, ddof=1)
var['discreteLimit'] = options['discretelimit']
var['labelMissingData'] = options['missingdata']
var['phenoTypeName'] = pname
#var['phenoTypeLoc'] = options['classloc']
var['numNeighbors'] = options['neighbors']
var['mdcnt'] = np.isnan(x).sum()
var['datalen'] = len(x)
return var
###############################################################################
def getAttributeInfo(header, x, var, options):
"""Get attribute as tuple into Dictionary"""
attr = dict()
c = d = 0
limit = options['discretelimit']
w = x.transpose()
#for h in header:
for idx in range(len(w)):
h = header[idx]
z = w[idx]
z = z[np.logical_not(np.isnan(z))] # remove missing data before
zulen = len(np.unique(z)) # calculating unique set
if(zulen <= limit):
attr[h] = ('discrete', 0, 0, 0, 0)
d += 1
else:
mx = np.max(z)
mn = np.min(z)
sd = np.std(z)
attr[h] = ('continuous', mx, mn, mx - mn, sd)
c += 1
overallDataType(attr,var,options) # add datatype of data and endpoints
var['dpct'] = (float(d) / (d + c) * 100, d)
var['cpct'] = (float(c) / (d + c) * 100, c)
return attr
###############################################################################
# Is the data entirely discrete, continuous, or mixed?
# Is the class type discrete, continuous or multiclass?
# This will help with future directions. Adding this to the variables
# dictionary. This is called from within getAttributeInfo()
def overallDataType(attr, var, options):
""" adds overall datatype of the data and class to var dictionary """
    D = False; C = False # set tmp booleans
for key in attr.keys():
if(key == 'dataType' or key == 'phenoType'): continue
if(attr[key][0] == 'discrete'):
D = True
if(attr[key][0] == 'continuous'):
C = True
if(var['discretePhenotype'] and len(var['phenoTypeList']) > 2):
pheno = 'multiclass'
elif(var['discretePhenotype']):
pheno = 'binary'
else:
pheno = 'continuous'
if(D and C):
dataType = 'mixed'
elif(D and not C):
dataType = 'discrete'
elif(C and not D):
dataType = 'continuous'
var['dataType'] = dataType
var['classType'] = pheno
###############################################################################
def getDistances(x, attr, var, cidx, didx, cheader):
    """ This creates the distance array for discrete, continuous, or mixed
        data with no missing data """
from scipy.spatial.distance import pdist, squareform
#--------------------------------------------------------------------------
def pre_normalize(x):
idx = 0
for i in cheader:
cmin = attr[i][2]
diff = attr[i][3]
x[:,idx] -= cmin
x[:,idx] /= diff
idx += 1
return x
#--------------------------------------------------------------------------
dtype = var['dataType']
numattr = var['NumAttributes']
if(dtype == 'discrete'):
return squareform(pdist(x,metric='hamming'))
if(dtype == 'mixed'):
d_dist = squareform(pdist(x[:,didx],metric='hamming'))
xc = pre_normalize(x[:,cidx])
c_dist = squareform(pdist(xc,metric='cityblock'))
return np.add(d_dist, c_dist) / numattr
else: #(dtype == 'continuous'):
return squareform(pdist(pre_normalize(x),metric='cityblock'))
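# For mixed data the returned pairwise distance works out to
#   D(i,j) = ( hamming_fraction(d_i, d_j) + sum_k |c_ik - c_jk| / range_k ) / NumAttributes
# since scipy's 'hamming' metric already returns the fraction of mismatching
# discrete attributes and pre_normalize rescales each continuous attribute by
# its observed range.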
###############################################################################
# return mask for discrete(0)/continuous(1) attributes and their indices
# return array of max/min diffs of attributes.
# added for cython routines
def dtypeArray(header, attr, var):
import numpy as np
attrtype = []
attrdiff = []
pname = var['phenoTypeName']
for key in header:
#if(key == pname): continue
if(attr[key][0] == 'continuous'):
attrtype.append(1)
else:
attrtype.append(0)
attrdiff.append(attr[key][3]) # build array of max-min diffs
attrtype = np.array(attrtype)
cidx = np.where(attrtype == 1)[0] # grab indices for split_data()
cidx = np.ascontiguousarray(cidx, dtype=np.int32)
didx = np.where(attrtype == 0)[0] # where returns a tuple
didx = np.ascontiguousarray(didx, dtype=np.int32)
attrdiff = np.array(attrdiff)
attrdiff = np.ascontiguousarray(attrdiff, dtype=np.double)
return attrdiff, cidx, didx
###############################################################################
def printf(format, *args):
sys.stdout.write(format % args)
| 6,508 | 34.375 | 79 | py |
ReBATE | ReBATE-master/rebate/mmDistance.py | # Wed Aug 24 14:51:56 EDT 2016
"""
Copyright (c) 2016 Peter R. Schmitt and Ryan J. Urbanowicz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import numpy as np
from numpy import isnan, where, append, unique, delete, empty
###############################################################################
def getDistances(xc, xd, var, cdiffs):
""" This creates a distance array for mixed types of data with or
without missing data """
distArray = []
datalen = var['datalen']
missing = int(var['mdcnt'])
# get indices of missing data per record
if(missing > 0):
cindices = list()
dindices = list()
for i in range(datalen):
cindices.append(where(isnan(xc[i]))[0])
dindices.append(where(isnan(xd[i]))[0])
for index in range(datalen):
if(missing > 0):
row = getrow_missing(xc, xd, cdiffs, index, cindices, dindices)
else:
row = getrow_mixed(xc, xd, cdiffs, index)
row = list(row)
distArray.append(row)
return distArray
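# Note: distArray is ragged -- row i holds only the distances from instance i
# to instances 0..i-1, i.e. the strict lower triangle of the distance matrix.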
###############################################################################
def getrow_missing(xc, xd, cdiffs, index, cindices, dindices):
row = empty(0,dtype=np.double)
cinst1 = xc[index]
dinst1 = xd[index]
can = cindices[index]
dan = dindices[index]
for j in range(index):
dist = 0
dinst2 = xd[j]
cinst2 = xc[j]
# continuous
cbn = cindices[j]
idx = unique(append(can,cbn)) # create unique list
c1 = delete(cinst1,idx) # remove elements by idx
c2 = delete(cinst2,idx)
cdf = delete(cdiffs,idx)
# discrete
dbn = dindices[j]
idx = unique(append(dan,dbn))
d1 = delete(dinst1,idx)
d2 = delete(dinst2,idx)
# discrete first
dist += len(d1[d1 != d2])
# now continuous
dist += np.sum(np.absolute(np.subtract(c1,c2)) / cdf)
row = append(row,dist)
return row
##############################################################################
# if data is clean this will not be used
def getrow_mixed(xc, xd, cdiffs, index):
row = empty(0,dtype=np.double)
d1 = xd[index]
c1 = xc[index]
for j in range(index):
dist = 0
d2 = xd[j]
c2 = xc[j]
# discrete first
dist += len(d1[d1 != d2])
# now continuous
dist += np.sum(np.absolute(np.subtract(c1,c2)) / cdiffs)
row = append(row,dist)
return row
##############################################################################
| 3,628 | 31.401786 | 79 | py |
ReBATE | ReBATE-master/rebate/_version.py | # -*- coding: utf-8 -*-
"""
scikit-rebate was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Pete Schmitt ([email protected])
- Ryan J. Urbanowicz ([email protected])
- Weixuan Fu ([email protected])
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__version__ = '0.2'
| 1,375 | 48.142857 | 103 | py |
ReBATE | ReBATE-master/rebate/IO.py | # Thu Jul 22 14:41 EDT 2016
"""
Copyright (c) 2016 Peter R. Schmitt and Ryan J. Urbanowicz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
###############################################################################
# Functions: getArguments, read_data, create_subset, createScoresFile
###############################################################################
import argparse
import time as tm
import numpy as np
import pandas as pd
import sys
import os
def getArguments():
"""get all command line arguments here"""
options = dict()
parser = argparse.ArgumentParser(description = \
"Run ReliefF/SURF/SURF*/MultiSURF*/MultiSURF on your data")
parser.add_argument("-a", "--algorithm", type=str, \
help="relieff, surf, surfstar, multisurfstar, multisurf (default=relieff)")
parser.add_argument("-c", "--classname", type=str, \
help="name of Class/Phenotype column (default=Class)")
parser.add_argument("-D", "--debug", \
help="lots and lots of output", action="store_true")
parser.add_argument("-d", "--discretelimit", type=int, \
help="max unique values in attributes/class to be considered \
discrete (default=10)")
parser.add_argument("-f", "--filename", type=str, \
help="name of training data file (REQUIRED)")
parser.add_argument("-k", "--knearestneighbors", type=int, \
help="k number of neighbors for ReliefF to use (default=100)")
parser.add_argument("-m", "--missingdata", type=str, \
help="missing data designator or string (default=NA)")
parser.add_argument("-o", "--outputdir", type=str, \
help="directory path to write scores file (default=data file directory)")
parser.add_argument("-T", "--topattr", type=int, \
help="Create new data file with top number attributes (integer argument)")
parser.add_argument("-t", "--turflimit", type=int, \
help="percent_drop (default=0: turf OFF )")
parser.add_argument("-v", "--verbose", \
help="use output verbosity", action="store_true")
parser.add_argument("-x", "--testdata", type=str, \
        help="test data file, used in conjunction with --topattr to create new \
test data file with top number of attributes")
args = parser.parse_args()
# ------------------------------------------ #
if(args.filename == None):
print("filename required!")
sys.exit()
else:
options['filename'] = args.filename
options['basename'] = os.path.basename(args.filename)
options['dir_path'] = os.path.dirname(args.filename)
# ------------------------------------------ #
if(args.testdata == None):
options['testdata'] = None
else:
options['testdata'] = args.testdata
options['test_basename'] = os.path.basename(args.testdata)
options['test_dir_path'] = os.path.dirname(args.testdata)
# ------------------------------------------ #
if(args.classname == None):
phenoTypeName = "Class"
else:
phenoTypeName = args.classname
options['phenotypename'] = phenoTypeName
# ------------------------------------------ #
if(args.discretelimit == None):
discretelimit = 10
else:
discretelimit = args.discretelimit
options['discretelimit'] = discretelimit
# ------------------------------------------ #
if(args.knearestneighbors == None):
neighbors = 100
else:
neighbors = args.knearestneighbors
options['neighbors'] = neighbors
# ------------------------------------------ #
if(args.missingdata == None):
mdata = 'NA'
else:
mdata = args.missingdata
options['missingdata'] = mdata
# ------------------------------------------ #
if(args.algorithm == None):
algorithm = 'relieff'
else:
algorithm = args.algorithm
options['algorithm'] = algorithm
# ------------------------------------------ #
if(args.turflimit == None):
turf = '0'
else:
turf = args.turflimit
options['turfpct'] = turf
# ------------------------------------------ #
if(args.verbose):
V = True
else:
V = False
options['verbose'] = V
# ------------------------------------------ #
if(args.debug):
D = True
else:
D = False
options['debug'] = D
# ------------------------------------------ #
if(args.topattr == None):
topattr = 0
else:
topattr = args.topattr
options['topattr'] = topattr
# ------------------------------------------ #
if(args.outputdir == None):
outputdir = '.'
else:
outputdir = args.outputdir
options['outputdir'] = outputdir
# ------------------------------------------ #
return options
###############################################################################
def test_testdata(header, testdata, options):
""" ensure the test data has the same attributes
and class as the training data"""
theader, tdata = read_data(testdata, options)
for i in header:
if(i not in tdata.columns):
print("Features must match between training and test data")
sys.exit(3)
for i in tdata.columns:
if(i not in header):
print("Features must match between training and test data")
sys.exit(3)
return theader, tdata
###############################################################################
def create_subset(header, x, y, options, ordered_attr):
    """ creates a subset of the top attributes of the training data file"""
V = options['verbose']
top = []
topidx = [] # index of columns in x to extract
outfile = options['basename']
path = options['outputdir']
dir_path = options['dir_path']
if(path == '.' and dir_path != ''):
path = options['dir_path']
outfile = path + '/top_' + str(options['topattr']) + '_attrs-' + outfile
for i in range(options['topattr']):
topidx.append(header.index(ordered_attr[i]))
top.append(ordered_attr[i])
top.append(options['phenotypename'])
newx = x[:,topidx]
newy = y.reshape(len(y),1)
npdata = np.append(newx,newy,axis=1)
newdata = pd.DataFrame(npdata,columns=top)
fh = open(outfile, 'w')
newdata.to_csv(fh, sep='\t', index=False)
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " Created new data file: " + outfile)
sys.stdout.flush()
###############################################################################
def create_test_subset(tdata, options, ordered_attr):
"""creates the same subset of top attributes of the testdata file as the
training data"""
V = options['verbose']
top = []
outfile = options['test_basename']
path = options['outputdir']
dir_path = options['test_dir_path']
if(path == '.' and dir_path != ''):
path = options['test_dir_path']
outfile = path + '/top_' + str(options['topattr']) + '_attrs-' + outfile
for i in range(options['topattr']):
top.append(ordered_attr[i])
top.append(options['phenotypename'])
newdata = tdata[top]
fh = open(outfile, 'w')
newdata.to_csv(fh, sep='\t', index=False)
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " Created new test data file: " + outfile)
sys.stdout.flush()
###############################################################################
def createScoresFile(header,var,scores,options,prog_start,turfpct,table,lost):
from operator import itemgetter
V = options['verbose']
input_file = options['basename']
algorithm = options['algorithm']
path = options['outputdir']
dir_path = options['dir_path']
if(path == '.' and dir_path != ''):
path = options['dir_path']
tab = '\t'; nl = '\n'
top = []
if('turf' not in algorithm):
table = []
for i in range(var['NumAttributes']):
table.append((header[i], scores[i]))
table = sorted(table,key=itemgetter(1), reverse=True)
if('relieff' in algorithm):
values = str(var['discreteLimit']) + '-' + str(var['numNeighbors'])
elif('surf' in algorithm):
values = str(var['discreteLimit'])
if('turf' in algorithm):
values += '-' + str(turfpct)
outfile = path + '/' + algorithm + '-scores-' + values + '-' + input_file
fh = open(outfile, 'w')
fh.write(algorithm + ' Analysis Completed with REBATE\n')
fh.write('Run Time (sec): ' + str(tm.time() - prog_start) + '\n')
fh.write('=== SCORES ===\n')
n = 1
if('turf' in algorithm):
for col, val in table:
top.append(col)
val = '{0:.16f}'.format(val)
fh.write(col + tab + str(val) + tab + str(n) + nl)
n += 1
reduction = 0.01 * (max(scores) - min(scores))
m = last = 0
for w in sorted(lost, key=lost.get, reverse=True):
if(last != lost[w]):
last = lost[w]
m += 1
top.append(w)
score = min(scores) - reduction * m
score = '{0:.16f}'.format(score)
            fh.write(w + tab + score + tab + lost[w] * '*' + nl)
else: # NOT TURF
for col, val in table:
top.append(col)
val = '{0:.16f}'.format(val)
fh.write(col + tab + str(val) + tab + str(n) + nl)
n += 1
fh.close()
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " Created scores file: " + outfile)
sys.stdout.flush()
return top
###############################################################################
def printf(format, *args):
sys.stdout.write(format % args)
###############################################################################
def np_read_data(fname, options):
"""Read in data file into a numpy array (data) and a header
returns header, data in that order."""
import csv
start = tm.time()
V = options['verbose']
md = options['missingdata']
#---- determine delimiter -----------#
fh = open(fname)
line = fh.readline().rstrip()
fh.close()
sniffer = csv.Sniffer()
dialect = sniffer.sniff(line)
delim = dialect.delimiter
#-------------------------------------#
# reading into numpy array
data = np.genfromtxt(fname, missing_values=md, skip_header=1,
dtype=np.double, delimiter=delim)
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + fname + ": data input elapsed time(sec) = "
+ str(tm.time() - start))
sys.stdout.flush()
#delim = '"' + delim + '"'
header = line.split(delim)
return header, data
###############################################################################
def np_read_data_tst(fname, options):
"""Read in data file into a numpy array (data) and a header
returns header, data in that order. Only used for unit testing! Uses a smaller subset of full dataset"""
import csv
np.random.seed(5249083)
start = tm.time()
V = options['verbose']
md = options['missingdata']
#---- determine delimiter -----------#
fh = open(fname)
line = fh.readline().rstrip()
fh.close()
sniffer = csv.Sniffer()
dialect = sniffer.sniff(line)
delim = dialect.delimiter
#-------------------------------------#
# reading into numpy array
data_raw = np.genfromtxt(fname, missing_values=md, skip_header=1,
dtype=np.double, delimiter=delim)
data_raw = pd.DataFrame(data=data_raw)
data_raw = data_raw.sample(frac=0.25)
#print(len(data_raw))
    data_raw = data_raw.to_numpy()  # .as_matrix() was removed in pandas 1.0
#sampleSize = int(0.25 * len(data))
#data = np.random.choice(data,sampleSize)
#print(sampleSize)
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + fname + ": data input elapsed time(sec) = "
+ str(tm.time() - start))
sys.stdout.flush()
#delim = '"' + delim + '"'
header = line.split(delim)
return header, data_raw
###############################################################################
def getxy(header, data, options):
""" returns contiguous x numpy matrix of data and y numpy array of class
and also removes phenotype name from headers to match the columns in
the x matrix"""
pname = options['phenotypename']
pindex = header.index(pname)
y = data[:, pindex]
y = np.ascontiguousarray(y, dtype=np.double)
x = np.delete(data,pindex,axis=1)
x = np.ascontiguousarray(x, dtype=np.double)
options['classloc'] = pindex
del header[pindex] # remove phenotype/class name from header
return x, y
| 13,846 | 35.729443 | 111 | py |
ReBATE | ReBATE-master/rebate/Turf.py | # Tue Aug 16 13:26:42 EDT 2016
"""
Copyright (c) 2016 Peter R. Schmitt and Ryan J. Urbanowicz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import time as tm
import datetime as dt
###############################################################################
def runTurf(header, x, y, attr, var, distArray, pct, iterations, fun, options, cmn):
from operator import itemgetter
import numpy as np
    lost = dict() #Dictionary mapping each filtered-out header name to the TuRF iteration in which it was removed
start = tm.time()
save_x = x
V = options['verbose']
if(V): print('Under TURF Control...')
#--------------------------------------------------------------------------
def create_newdata(header, x):
dlist = []
cnt = 0
if(V):
print('Reducing attributes by ' + str(options['turfpct']) + '%')
sys.stdout.flush()
        #Go through the table with features sorted by decreasing score; once we hit keepcnt, we start adding to lost.
for a in table:
if(cnt >= keepcnt):
lost[a[0]] = iteration + 1
i = header.index(a[0])
dlist.append(i) #store position of each feature removed in dlist.
cnt += 1
#update header and dataset to reflect removal of lowest scoring features.
header = np.delete(header,dlist).tolist() #remove orphans from header
x = np.delete(x,dlist,axis=1) #remove orphaned attributes from data
x = np.ascontiguousarray(x, dtype=np.double)
if(V):
print('Getting new variables, attributes and distance array')
sys.stdout.flush()
#Redo data survey (which may save time in downstream distance array calculation (depending on dataset)
var = cmn.getVariables(header, x, y, options)
attr = cmn.getAttributeInfo(header, x, var, options)
cheader = []
for i in header:
if attr[i][0] == 'continuous':
cheader.append(i)
if(V):
print("--------------- Parameters ---------------")
print("datatype: " + var['dataType'])
print("attributes: " + str(var['NumAttributes']))
if(var['dataType'] == 'mixed'):
print(" continuous: " + str(var['cpct'][1]))
print(" discrete: " + str(var['dpct'][1]))
if(var['mdcnt'] > 0):
print("missing: " + str(var['mdcnt']))
print("--------------------------------------------")
sys.stdout.flush()
begin = tm.time()
diffs, cidx, didx = cmn.dtypeArray(header, attr, var)
#Calculate distance array based on present feature types and data missingness.
if(var['mdcnt'] > 0):
import mmDistance as md
distArray = md.getDistances(x[:,cidx], x[:,didx], var, diffs[cidx])
disttype = "missing"
else:
distArray = cmn.getDistances(x, attr, var, cidx, didx, cheader)
disttype = "discrete/continuous/mixed"
if(V):
print(disttype + " distance array elapsed time(sec) = "
+ str(tm.time()-begin))
sys.stdout.flush()
return header, x, attr, var, distArray, lost
#--------------------------------------------------------------------------
print("Total Iterations: " + str(iterations))
#Main TuRF loop--------------------
for iteration in range(iterations):
numattr = var['NumAttributes']
if(V):
print ("============================================")
print ("Iteration: " + str(iteration+1))
print ("Attributes: " + str(numattr))
sys.stdout.flush()
table = []
#Run the selected core Relief-based algorithm scoring method.
Scores = fun(header,x,y,attr,var,distArray,options)
if(V):
print('Building scores table...')
sys.stdout.flush()
for j in range(var['NumAttributes']):
table.append([header[j], Scores[j]])
table = sorted(table,key=itemgetter(1), reverse=True)
if(iteration + 1 < iterations):
#Calculate features to preserve in the next score update.
keepcnt = int(numattr - numattr * pct)
if keepcnt == numattr: #Special case (Ensure at least one feature filtered out in an iteration)
keepcnt -= 1
#Store data subset.
header,x,attr,var,distArray,lost = create_newdata(header, x)
if(V):
print('Turf finished! Overall time: ' + str(tm.time() - start))
sys.stdout.flush()
return Scores,save_x,var,lost,table
###############################################################################
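# Iteration-count sketch: rebate.py calls runTurf with pct = turfpct/100 and
# iterations = int(1/pct), so e.g. -t 50 gives 2 scoring passes and -t 10
# gives 10; each pass keeps int(numattr - numattr*pct) of the best-scored
# features for the next round.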
| 5,880 | 39.840278 | 124 | py |
ReBATE | ReBATE-master/rebate/setup_relieff.py | """
Copyright (c) 2016 Peter R. Schmitt and Ryan J. Urbanowicz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("relieff", sources=["rebate/ReliefF.pyx"],
include_dirs=[numpy.get_include()])],
)
| 1,410 | 41.757576 | 74 | py |
ReBATE | ReBATE-master/rebate/rebate.py | #!/usr/bin/env python
# REBATE CLI
# Thu Apr 6 13:15:38 CDT 2017
"""
Copyright (c) 2016 Peter R. Schmitt and Ryan J. Urbanowicz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
###############################################################################
import time as tm
import sys
import os
import IO as io
import Common as cmn
###############################################################################
prog_start = tm.time()
Scores = table = lost = 0
#-----------------------------------------------------------------------------#
#
# Get arguments from command line ---------------------------------------------
options = io.getArguments()
V = options['verbose']
turfpct = int(options['turfpct'])
algorithm = options['algorithm']
if(algorithm != 'relieff' and algorithm != 'surf' and algorithm != 'surfstar' and algorithm != 'multisurfstar' and algorithm != 'multisurf'):
print("algorithm " + algorithm + " is not available")
print("Use relieff, surf, surfstar, multisurfstar, or multisurf")
sys.exit(1)
if(V):
print("-------------- Python Version --------------")
print(sys.version)
print("--------------------------------------------")
# Read data into header and numpy matrix --------------------------------------
input_file = options['filename']
if(os.path.exists(input_file)):
header, data = io.np_read_data(input_file,options)
else:
print("File " + input_file + " does NOT exist!")
sys.exit(1)
#-----------------------------------------------------------------------------#
# get x (data) and y (class) into contiguous numpy arrays
# remove phenotype name from header
x, y = io.getxy(header, data, options)
#-----------------------------------------------------------------------------#
# if there is test data, test it for compatibility
# Test data is only imported for constructing a new test dataset with the same selected feature subset. It is not used to test a model, and not trained on in feature scoring.
if(options['testdata'] != None):
testdata = options['testdata']
if(os.path.exists(testdata)):
theader, tdata = io.test_testdata(header, testdata, options)
else:
print("File " + testdata + " does NOT exist!")
sys.exit(2)
#-----------------------------------------------------------------------------#
# collect variables
# var is a dictionary of the following variables:
# NumAttributes, discreteLimit, discretePhenotype, labelMissingData
# phenSD, phenotypeList, phenoTypeName, phenoTypeLoc
# dataType (overall data type of data)
# classType (datatype of Class or phenotype)
#
var = cmn.getVariables(header, x, y, options)
#sys.exit(99)
#-----------------------------------------------------------------------------#
# collect attribute information
# attributes is a dictionary of tuples:
# attributes['ATTRIBUTE'] = ('continuous/discrete', MAX/None, MIN,None)
#
attr = cmn.getAttributeInfo(header, x, var, options)
#
# create header list (cheader) with just the headers that have continuous data
cheader = []
for i in header:
if attr[i][0] == 'continuous':
cheader.append(i)
if(V):
print("--------------- Parameters ---------------")
print("datafile: " + options['basename'])
print("datatype: " + var['dataType'])
print("attributes: " + str(var['NumAttributes']))
if(var['dataType'] == 'mixed'):
print(" continuous: " + str(var['cpct'][1]))
print(" discrete: " + str(var['dpct'][1]))
print("instances: " + str(var['datalen']))
print("missing: " + str(var['mdcnt']))
print("classtype: " + var['classType'])
if(var['classType'] == 'multiclass'):
yset = var['phenoTypeList']
print(" classes: " + str(len(yset)))
print("datatype: " + var['dataType'])
print("classname: " + var['phenoTypeName'])
print("algorithm: " + options['algorithm'])
print("--------------------------------------------")
sys.stdout.flush()
#-----------------------------------------------------------------------------#
# create distance array and remove intermediate data
# if missing and/or mixed data use the mixedDistance function
#
begin = tm.time()
diffs, cidx, didx = cmn.dtypeArray(header, attr, var)
if(var['mdcnt'] > 0):
import mmDistance as md
distArray = md.getDistances(x[:,cidx], x[:,didx], var, diffs[cidx])
disttype = "missing"
else:
distArray = cmn.getDistances(x, attr, var, cidx, didx, cheader)
disttype = "discrete/continuous/mixed"
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + disttype + " distance array time(sec) = "
+ str(tm.time()-begin))
sys.stdout.flush()
#-----------------------------------------------------------------------------#
# get Scores based on algorithm selected (-a and -t)
#
if(turfpct > 0): # Use TURF
import Turf as T
pct = float(turfpct)/100.0
iterations = int(1/float(pct))
if(algorithm == 'relieff'):
import relieff as R
print('Running Relieff + TuRF')
fun = R.runReliefF
if(algorithm == 'surf' or algorithm == 'surfstar'):
import surf as S
print('Running SURF or SURF* + TuRF')
fun = S.runSURF
if(algorithm == 'multisurf' or algorithm == 'multisurfstar'):
import multisurf as MS
print('Running MultiSURF or MultiSURF* + TuRF')
fun = MS.runMultiSURF
Scores,x,var,lost,table = T.runTurf(header,x,y,attr,var,distArray,pct,iterations,fun,options,cmn)
options['algorithm'] = algorithm + "-turf"
elif(algorithm == 'relieff'):
import relieff as R
print('Running Relieff')
Scores = R.runReliefF(header,x,y,attr,var,distArray,options)
elif(algorithm == 'surf' or algorithm =='surfstar'):
import surf as S
print('Running SURF or SURF*')
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
elif(algorithm == 'multisurf' or algorithm == 'multisurfstar'):
    import multisurf as MS  # imported unconditionally so binary-class data is scored too
    print('Running MultiSURF or MultiSURF*')
    Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
#-----------------------------------------------------------------------------#
# create new data files of some number of top scored attributes
ordered_attr = io.createScoresFile(header, var, Scores, options,
prog_start, turfpct, table, lost)
if(options['topattr'] > 0):
io.create_subset(header, x, y, options, ordered_attr)
if(options['testdata'] != None):
io.create_test_subset(tdata, options, ordered_attr)
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " Overall program time(sec) = " + str(tm.time() - prog_start))
###############################################################################
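# Hedged CLI sketch (flags as declared in IO.getArguments; the file and
# directory names below are illustrative assumptions):
#   python rebate.py -f train.tsv -a multisurf -c Class -T 10 -v \
#       -x test.tsv -o results/
# This scores every feature with MultiSURF, writes a multisurf-scores-* file
# under results/, and emits top-10-attribute subsets of both the training and
# test data files.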
| 7,824 | 40.184211 | 172 | py |
ReBATE | ReBATE-master/rebate/__init__.py | # -*- coding: utf-8 -*-
"""
scikit-rebate was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Pete Schmitt ([email protected])
- Ryan J. Urbanowicz ([email protected])
- Weixuan Fu ([email protected])
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| 1,355 | 49.222222 | 103 | py |