# Copyright (c) 2018 NVIDIA Corporation
# -*- coding: utf-8 -*-
import random
import numpy as np
import tensorflow as tf
import os
from enum import Enum
from open_seq2seq.data.data_layer import DataLayer
from open_seq2seq.data.utils import load_pre_existing_vocabulary, pad_vocab_to_eight
from open_seq2seq.data.text2text.t2t import _read_and_batch_from_files
from open_seq2seq.data.lm.lmutils import Dictionary, Corpus, IMDBCorpus, SSTCorpus
class WKTDataLayer(DataLayer):
'''
WKTDataLayer does the necessary pre-processing to make the WikiText datasets
ready to be fed into the model. We use the ``word_tokenize`` method
available in the ``nltk`` package.
You can download the datasets here:
https://www.salesforce.com/products/einstein/ai-research/the-wikitext-dependency-language-modeling-dataset/
bptt: backpropagation through time - the length of the sequences used for training
rand_start: whether to start from a random starting index in [0, bptt)
'''
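# A minimal, hypothetical config sketch for this layer (keys come from
# get_required_params()/get_optional_params() below; the values are illustrative
# only, and 'batch_size'/'mode' are assumed to come from the base DataLayer params):
# params = {
#     'data_root': 'wikitext-2-raw/',
#     'processed_data_folder': 'wkt-processed_data',
#     'bptt': 96,          # length of each training sequence
#     'repeat': True,
#     'rand_start': True,
# }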
@staticmethod
def get_required_params():
return dict(DataLayer.get_required_params(), **{
'repeat': bool,
'bptt': int,
})
@staticmethod
def get_optional_params():
return dict(DataLayer.get_optional_params(), **{
'data_root': str,
'rand_start': bool,
'small': bool,
'use_targets': bool,
'delimiter': str,
'map_parallel_calls': int,
'prefetch_buffer_size': int,
'pad_lengths_to_eight': bool,
'pad_vocab_to_eight': bool,
'seed_tokens': str,
'shuffle_buffer_size': int,
'processed_data_folder': str,
})
def __init__(self, params, model, num_workers=1, worker_id=0):
super(WKTDataLayer, self).__init__(params, model,
num_workers, worker_id)
self._processed_data_folder = self.params.get('processed_data_folder', 'wkt-processed_data')
self._data_root = self.params.get('data_root', None)
self.corp = Corpus(self._data_root, self._processed_data_folder)
seed_tokens = self.params.get('seed_tokens', 'The').split()
self.end_token = self.corp.dictionary.word2idx[self.corp.dictionary.EOS]
self.params['seed_tokens'] = [self.corp.dictionary.word2idx[seed_token] for seed_token in seed_tokens]
if self.params['mode'] == 'infer':
self.corp.content = self.params['seed_tokens']
if self.params['mode'] == 'train':
self.batch_size = self.params['batch_size']
self.corp.content = self.corp.train
elif self.params['mode'] == 'eval':
self.batch_size = self.params['batch_size']
self.corp.content = self.corp.valid
else:
if len(self.corp.content) < self.params['batch_size']:
self.batch_size = len(self.corp.content)
else:
self.batch_size = self.params['batch_size']
self.vocab_file = (self._processed_data_folder, 'vocab.txt')
self.bptt = self.params['bptt']
self.rand_start = self.params.get('rand_start', False)
self._map_parallel_calls = self.params.get('map_parallel_calls', 8)
self._pad_lengths_to_eight = self.params.get('pad_lengths_to_eight', False)
self._prefetch_buffer_size = self.params.get('prefetch_buffer_size',
tf.contrib.data.AUTOTUNE)
self._shuffle_buffer_size = self.params.get('shuffle_buffer_size', -1)
self._num_workers = num_workers
self._worker_id = worker_id
self.delimiter = self.params.get("delimiter", " ")
self._small = self.params.get("small", False)
self.start = 0
# load source and target vocabularies to RAM
if self._small:
if self.params['mode'] == 'eval':
self.corp.content = self.corp.content[:200]
else:
self.corp.content = self.corp.content[:9004]
if self.params.get('pad_vocab_to_eight', False):
self.corp.content = pad_vocab_to_eight(self.corp.content)
self.dataset_size = len(self.corp.content)
self.vocab_size = len(self.corp.dictionary.idx2word)
self._input_tensors = {}
def gen(self):
while True:
if self.rand_start:
self.start = random.randint(0, self.bptt - 1)
n_samples = (self.dataset_size - self.start - 1) // self.bptt
for i in range(n_samples):
begin = self.start + i * self.bptt
yield (self.corp.content[begin : begin + self.bptt], self.corp.content[begin + 1 : begin + self.bptt + 1])
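# For illustration: with bptt = 3 and corp.content = [w0, w1, w2, w3, w4, w5, w6],
# gen() yields ([w0, w1, w2], [w1, w2, w3]) and then ([w3, w4, w5], [w4, w5, w6]);
# each target sequence is the source sequence shifted one position ahead.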
def gen_infer(self):
while True:
for seed in self.corp.content:
yield ([seed], [seed])
def build_graph(self):
if self.params['mode'] == 'train' or self.params['mode'] == 'eval':
gen = self.gen
batch_shape = self.bptt
else:
gen = self.gen_infer
batch_shape = 1
_src_tgt_dataset = tf.data.Dataset.from_generator(gen, (tf.int32, tf.int32),
(tf.TensorShape([batch_shape]), tf.TensorShape([batch_shape])))
if self._num_workers > 1:
_src_tgt_dataset = _src_tgt_dataset\
.shard(num_shards=self._num_workers, index=self._worker_id)
if self.params['shuffle']:
bf_size = self.get_size_in_samples() if self._shuffle_buffer_size == -1 \
else self._shuffle_buffer_size
_src_tgt_dataset = _src_tgt_dataset.shuffle(buffer_size=bf_size)
else:
_src_tgt_dataset = _src_tgt_dataset
if self.params['repeat']:
_src_tgt_dataset = _src_tgt_dataset.repeat()
_src_tgt_dataset = _src_tgt_dataset.map(lambda x, y: ((x, tf.size(x)), (y, tf.size(y))),
num_parallel_calls=self._map_parallel_calls)
self.batched_dataset = _src_tgt_dataset.batch(self.batch_size)
self._iterator = self.batched_dataset.make_initializable_iterator()
if self.params['mode'] == 'train' or self.params['mode'] == 'eval':
t1, t2 = self.iterator.get_next()
x, x_length = t1[0], t1[1]
y, y_length = t2[0], t2[1]
self._input_tensors['source_tensors'] = [x, x_length]
self._input_tensors['target_tensors'] = [y, y_length]
else: # infer mode: only source tensors are needed
t1, _ = self.iterator.get_next()
self._input_tensors['source_tensors'] = [t1[0], t1[1]]
def get_size_in_samples(self):
if self.params['mode'] == 'train' or self.params['mode'] == 'eval':
return (self.dataset_size - self.start) // self.bptt
return len(self.corp.content)
@property
def iterator(self):
return self._iterator
@property
def input_tensors(self):
return self._input_tensors
class TextClassificationDataLayer(DataLayer):
'''
The base class to process data for text classification tasks.
If the data has already been processed, it should load the processed
data instead of re-processing it.
'''
@staticmethod
def get_required_params():
return dict(DataLayer.get_required_params(), **{
'lm_vocab_file': str,
'shuffle': bool,
'repeat': bool,
'max_length': int,
'processed_data_folder': str,
})
@staticmethod
def get_optional_params():
return dict(DataLayer.get_optional_params(), **{
'rand_start': bool,
'small': bool,
'use_targets': bool,
'delimiter': str,
'map_parallel_calls': int,
'prefetch_buffer_size': int,
'pad_lengths_to_eight': bool,
'pad_vocab_to_eight': bool,
'shuffle_buffer_size': int,
'data_root': str,
'binary': bool,
'num_classes': int,
'get_stats': bool,
})
def __init__(self, params, model, num_workers=1, worker_id=0):
super(TextClassificationDataLayer, self).__init__(params, model,
num_workers, worker_id)
self._data_root = self.params.get('data_root', None)
self._binary = self.params.get('binary', True)
self._get_stats = self.params.get('get_stats', False)
self._lm_vocab_file = self.params['lm_vocab_file']
self._map_parallel_calls = self.params.get('map_parallel_calls', 8)
self._pad_lengths_to_eight = self.params.get('pad_lengths_to_eight', False)
self._prefetch_buffer_size = self.params.get('prefetch_buffer_size',
tf.contrib.data.AUTOTUNE)
self._shuffle_buffer_size = self.params.get('shuffle_buffer_size', -1)
self._num_workers = num_workers
self._worker_id = worker_id
self._small = self.params.get("small", False)
self._max_length = self.params['max_length']
self.delimiter = self.params.get("delimiter", " ")
self.EOS_ID = -1
self.batch_size = self.params['batch_size']
if self._pad_lengths_to_eight and not (self._max_length % 8 == 0):
raise ValueError("If padding to 8 in data layer, then "
"max_length should be multiple of 8")
self._input_tensors = {}
def gen(self):
while True:
for review, raw_rating in self.corp.content:
if len(review) > self._max_length:
review = review[-self._max_length:]
rating = np.zeros(self.num_classes)
rating[raw_rating] = 1
yield (review, rating)
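# For illustration: with num_classes = 2 and raw_rating = 1, the yielded rating is
# the one-hot vector [0., 1.]; reviews longer than max_length keep only their
# last max_length token ids.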
def build_graph(self):
_src_tgt_dataset = tf.data.Dataset.from_generator(self.gen,
(tf.int32, tf.int32),
(tf.TensorShape([None]), tf.TensorShape([self.num_classes])))
if self._num_workers > 1:
_src_tgt_dataset = _src_tgt_dataset\
.shard(num_shards=self._num_workers, index=self._worker_id)
if self.params['shuffle']:
bf_size = self.get_size_in_samples() if self._shuffle_buffer_size == -1 \
else self._shuffle_buffer_size
_src_tgt_dataset = _src_tgt_dataset.shuffle(buffer_size=bf_size)
if self.params['repeat']:
_src_tgt_dataset = _src_tgt_dataset.repeat()
_src_tgt_dataset = _src_tgt_dataset.map(lambda x, y: ((x, tf.size(x)), (y, tf.size(y))),
num_parallel_calls=self._map_parallel_calls)
self.batched_dataset = _src_tgt_dataset.padded_batch(
self.batch_size,
padded_shapes=((tf.TensorShape([None]),
tf.TensorShape([])),
(tf.TensorShape([None]),
tf.TensorShape([]))),
padding_values=(
(self.EOS_ID, 0),
(self.EOS_ID, 0))).prefetch(buffer_size=self._prefetch_buffer_size)
self._iterator = self.batched_dataset.make_initializable_iterator()
t1, t2 = self.iterator.get_next()
x, x_length = t1[0], t1[1]
y, y_length = t2[0], t2[1]
self._input_tensors['source_tensors'] = [x, x_length]
self._input_tensors['target_tensors'] = [y, y_length]
def get_size_in_samples(self):
return self.dataset_size
@property
def iterator(self):
return self._iterator
@property
def input_tensors(self):
return self._input_tensors
class IMDBDataLayer(TextClassificationDataLayer):
'''
Data layer to process the raw IMDB data, which can be downloaded here:
http://ai.stanford.edu/~amaas/data/sentiment/
'''
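# The raw data under data_root is expected in the aclImdb layout read by
# IMDBCorpus.tokenize_folder below: train/pos/*.txt, train/neg/*.txt,
# test/pos/*.txt, test/neg/*.txt, where each file name ends in _<star rating>.txt
# (the star rating is only used when 'binary' is False).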
def __init__(self, params, model, num_workers=1, worker_id=0):
super(IMDBDataLayer, self).__init__(params, model, num_workers, worker_id)
self._processed_data_folder = self.params['processed_data_folder']
if self._binary:
self.num_classes = 2
else:
self.num_classes = 10
self.corp = IMDBCorpus(self._data_root,
self._processed_data_folder,
self._lm_vocab_file,
self._binary,
get_stats=self._get_stats)
if self.params['mode'] == 'train':
self.corp.content = self.corp.train
elif self.params['mode'] == 'eval':
self.corp.content = self.corp.valid
else:
self.corp.content = self.corp.test
if self._small:
if self.params['mode'] == 'eval':
self.corp.content = self.corp.content[:self.batch_size * 2]
else:
self.corp.content = self.corp.content[:self.batch_size * 4]
self.dataset_size = len(self.corp.content)
self.vocab_size = len(self.corp.dictionary.idx2word)
self.EOS_ID = self.corp.dictionary.word2idx[self.corp.dictionary.EOS]
self.end_token = self.corp.dictionary.word2idx[self.corp.dictionary.EOS]
class SSTDataLayer(TextClassificationDataLayer):
'''
Data layer to process the raw SST (Stanford Sentiment Treebank) data.
Read about the dataset here:
https://nlp.stanford.edu/sentiment/
Download the preprocessed version that can be used for this DataLayer here:
https://github.com/NVIDIA/sentiment-discovery/tree/master/data/binary_sst
'''
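# data_root is expected to contain train.csv, val.csv and test.csv (see
# SSTCorpus.tokenize_file below), each with a 'sentence' column holding the raw
# text and a 'label' column holding the 0/1 sentiment label, e.g. (illustrative row):
#   sentence,label
#   "an utterly charming movie .",1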
def __init__(self, params, model, num_workers=1, worker_id=0):
super(SSTDataLayer, self).__init__(params, model, num_workers, worker_id)
self._processed_data_folder = self.params['processed_data_folder']
self.corp = SSTCorpus(self._data_root,
self._processed_data_folder,
self._lm_vocab_file,
get_stats=self._get_stats)
if self.params['mode'] == 'train':
self.corp.content = self.corp.train
elif self.params['mode'] == 'eval':
self.corp.content = self.corp.valid
else:
self.corp.content = self.corp.test
self.num_classes = 2
self.dataset_size = len(self.corp.content)
self.vocab_size = len(self.corp.dictionary.idx2word)
self.EOS_ID = self.corp.dictionary.word2idx[self.corp.dictionary.EOS]
self.end_token = self.corp.dictionary.word2idx[self.corp.dictionary.EOS]
# ==== end of file: open_seq2seq/data/lm/lmdata.py (OpenSeq2Seq-master) ====
# ==== open_seq2seq/data/lm/__init__.py (OpenSeq2Seq-master) is an empty file ====
# -*- coding: utf-8 -*-
from collections import Counter
import glob
import os
import pathlib
import random
import re
import shutil
from nltk.tokenize import word_tokenize
import numpy as np
import pandas as pd
class Dictionary(object):
'''
Adapted from salesforce's repo:
https://github.com/salesforce/awd-lstm-lm/blob/master/data.py
'''
def __init__(self, limit=3, vocab_link=None): # do we need limit?
self.word2idx = {}
self.idx2word = []
self.counter = Counter()
self.UNK = '<unk>'
self.EOS = '<eos>'
if vocab_link and os.path.isfile(vocab_link):
self.load_vocab(vocab_link)
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
token_id = self.word2idx[word]
self.counter[token_id] += 1
return self.word2idx[word]
def load_vocab(self, vocab_link):
vocab_file = open(vocab_link, 'r')
lines = vocab_file.readlines()
n = int(lines[-1].strip())
self.idx2word = [0 for _ in range(n)]
for line in lines[:-1]:
parts = line.strip().split('\t')
token_id, word, count = int(parts[0]), parts[1], int(parts[2])
self.word2idx[word] = token_id
self.idx2word[token_id] = word
self.counter[token_id] = count
if not self.UNK in self.word2idx:
self.add_word(self.UNK)
if not self.EOS in self.word2idx:
self.add_word(self.EOS)
def __len__(self):
return len(self.idx2word)
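# Minimal usage sketch (hypothetical words and ids):
#   d = Dictionary()
#   d.add_word('<unk>')    # -> 0
#   d.add_word('the')      # -> 1
#   d.word2idx['the']      # -> 1
#   d.idx2word[1]          # -> 'the'
#   len(d)                 # -> 2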
def check_exist(proc_path):
filenames = ['train.ids', 'valid.ids', 'test.ids']
paths = [os.path.join(proc_path, name) for name in filenames]
paths.append(proc_path)
for name in paths:
if not os.path.exists(name):
return False
return True
def list2str(lst):
return '\t'.join([str(num) for num in lst])
def unzip(data):
tmp = [list(t) for t in zip(*data)]
return (tmp[0], tmp[1])
class Corpus(object):
def __init__(self, raw_path, proc_path, change_contraction=True, limit=3):
pathlib.Path(proc_path).mkdir(exist_ok=True)
self.limit = limit
self.dictionary = Dictionary(limit)
self.vocab_link = 'vocab.txt'
exists = check_exist(proc_path)
self.change_contraction = change_contraction
if not exists:
print('Creating corpus from raw data ...')
if raw_path and 'raw' in raw_path:
self._change_names(raw_path)
if not raw_path:
raise ValueError("data_root [directory to the original data] must be specified")
self.preprocess(raw_path, proc_path)
self.create_dictionary(proc_path, os.path.join(proc_path, 'train.txt'))
self.dictionary = Dictionary(limit)
self.dictionary.load_vocab(os.path.join(proc_path, self.vocab_link))
self.train = self.tokenize(proc_path, proc_path, 'train.txt')
self.valid = self.tokenize(proc_path, proc_path, 'valid.txt')
self.test = self.tokenize(proc_path, proc_path, 'test.txt')
else:
self.load_corpus(proc_path)
def _change_names(self, raw_path):
if os.path.isfile(os.path.join(raw_path, 'wiki.train.raw')):
os.rename(os.path.join(raw_path, 'wiki.train.raw'), os.path.join(raw_path, 'train.txt'))
os.rename(os.path.join(raw_path, 'wiki.valid.raw'), os.path.join(raw_path, 'valid.txt'))
os.rename(os.path.join(raw_path, 'wiki.test.raw'), os.path.join(raw_path, 'test.txt'))
def preprocess(self, raw_path, proc_path):
for filename in ['train.txt', 'valid.txt', 'test.txt']:
in_ = open(os.path.join(raw_path, filename), 'r')
out = open(os.path.join(proc_path, filename), 'w')
for line in in_:
line = re.sub('@-@', '-', line)
line = re.sub('-', ' - ', line)
line = re.sub('etc .', 'etc.', line)
if self.change_contraction:
line = re.sub("n 't", " n't", line)
tokens = []
for token in line.split():
tokens.append(token.strip())
out.write(' '.join(tokens) + '\n')
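# For illustration: a raw WikiText line such as "state @-@ of @-@ the @-@ art ."
# is rewritten to "state - of - the - art ." (the '@-@' markers become plain
# spaced hyphens and whitespace is collapsed) before being written out.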
def create_dictionary(self, proc_path, filename):
'''
Add words to the dictionary only if they appear in the train file
'''
self.dictionary.add_word(self.dictionary.UNK)
with open(filename, 'r') as f:
f.readline()
for line in f:
words = line.split() + [self.dictionary.EOS]
for word in words:
self.dictionary.add_word(word)
with open(os.path.join(proc_path, self.vocab_link), 'w') as f:
f.write('\t'.join(['0', self.dictionary.UNK, '0']) + '\n')
idx = 1
for token_id, count in self.dictionary.counter.most_common():
if count < self.limit:
f.write(str(idx) + '\n')
return
f.write('\t'.join([str(idx),
self.dictionary.idx2word[token_id],
str(count)]) + '\n')
idx += 1
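# The vocab file written above is tab-separated, one "<id>\t<word>\t<count>" line
# per kept word ('<unk>' at id 0, then words in decreasing count order), and ends
# with a line holding only the final index (the vocabulary size) once a word's
# count drops below 'limit'; Dictionary.load_vocab() parses exactly this format.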
def tokenize(self, raw_path, proc_path, filename):
unk_id = self.dictionary.word2idx[self.dictionary.UNK]
out = open(os.path.join(proc_path, filename[:-3] + 'ids'), 'w')
with open(os.path.join(raw_path, filename), 'r') as f:
ids = []
for line in f:
words = line.split() + [self.dictionary.EOS]
for word in words:
ids.append(self.dictionary.word2idx.get(word, unk_id))
out.write(list2str(ids))
out.close()
return np.asarray(ids)
def load_ids(self, filename):
ids = open(filename, 'r').read().strip().split('\t')
return np.asarray([int(i) for i in ids])
def list2str(self, lst):
return '\t'.join([str(num) for num in lst])
def load_corpus(self, proc_path):
print('Loading corpus from processed data ...')
self.dictionary.load_vocab(os.path.join(proc_path, self.vocab_link))
self.train = self.load_ids(os.path.join(proc_path, 'train.ids'))
self.valid = self.load_ids(os.path.join(proc_path, 'valid.ids'))
self.test = self.load_ids(os.path.join(proc_path, 'test.ids'))
class IMDBCorpus(object):
def __init__(self, raw_path, proc_path, lm_vocab_link, binary=True, get_stats=False):
exists = check_exist(proc_path)
pathlib.Path(proc_path).mkdir(exist_ok=True)
self.dictionary = Dictionary(vocab_link=lm_vocab_link)
self.binary = binary
self.raw_path = raw_path
self.proc_path = proc_path
self._get_stats = get_stats
if not exists:
print('Creating corpus from raw data ...')
if not raw_path:
raise ValueError("data_root [directory to the original data] must be specified")
self.preprocess()
else:
self.load_corpus(proc_path)
def check_oov(self, txt):
txt = txt.lower()
txt = re.sub('thats', "that's", txt)
txt = re.sub('wouldnt', "wouldn't", txt)
txt = re.sub('couldnt', "couldn't", txt)
txt = re.sub('cant', "can't", txt)
txt = re.sub('dont', "don't", txt)
txt = re.sub("didnt", "didn't", txt)
txt = re.sub("isnt", "isn't", txt)
txt = re.sub("wasnt", "wasn't", txt)
return word_tokenize(txt)
def tokenize(self, txt):
txt = re.sub('<br />', ' ', txt)
txt = re.sub('', ' ', txt)
txt = re.sub('', ' ', txt)
txt = re.sub('-', ' - ', txt)
txt = re.sub('\.', ' . ', txt)
txt = re.sub('\+', ' + ', txt)
txt = re.sub('\*', ' * ', txt)
txt = re.sub('/', ' / ', txt)
txt = re.sub('`', "'", txt)
txt = re.sub(' ms \.', " ms.", txt)
txt = re.sub('Ms \.', "Ms.", txt)
words = []
for token in word_tokenize(txt):
if not token in self.dictionary.word2idx:
if token.startswith("'"):
words.append("'")
token = token[1:]
if not token in self.dictionary.word2idx:
tokens = self.check_oov(token)
words.extend(tokens)
else:
words.append(token)
else:
words.append(token)
txt = ' '.join(words)
txt = re.sub("''", '"', txt)
txt = re.sub("' '", '"', txt)
txt = re.sub("``", '"', txt)
txt = re.sub('etc \.', 'etc. ', txt)
txt = re.sub(' etc ', ' etc. ', txt)
return txt
def tokenize_folder(self, mode, token_file, rating_file):
review_outfile = open(token_file, 'w')
rating_outfile = open(rating_file, 'w')
for sent in ['pos', 'neg']:
files = glob.glob(os.path.join(self.raw_path, mode, sent, '*.txt'))
for file in files:
in_file = open(file, 'r')
txt = self.tokenize(in_file.read())
review_outfile.write(txt + "\n")
if self.binary:
if sent == 'pos':
rating = "1"
else:
rating = "0"
else:
idx = file.rfind("_")
rating = str(int(file[idx + 1:-4]) - 1)
rating_outfile.write(rating + '\n')
in_file.close()
def txt2ids(self, mode, token_file, rating_file):
if self._get_stats:
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
rating_lines = open(rating_file, 'r').readlines()
ratings = [int(line.strip()) for line in rating_lines]
reviews = []
unk_id = self.dictionary.word2idx[self.dictionary.UNK]
unseen = []
all_tokens = 0
all_unseen = 0
for line in open(token_file, 'r'):
tokens = line.strip().split()
reviews.append([self.dictionary.word2idx.get(token, unk_id) for token in tokens])
if self._get_stats:
for token in tokens:
all_tokens += 1
if not token in self.dictionary.word2idx:
unseen.append(token)
all_unseen += 1
if self._get_stats:
counter = Counter(unseen)
out = open(os.path.join(self.proc_path, mode + '_unseen.txt'), 'w')
for key, count in counter.most_common():
out.write(key + '\t' + str(count) + '\n')
lengths = np.asarray([len(review) for review in reviews])
stat_file = open(os.path.join(self.proc_path, 'statistics.txt'), 'w')
stat_file.write(mode + '\n')
short_lengths = [l for l in lengths if l <= 256]
stat_file.write('\t'.join(['Min', 'Max', 'Mean', 'Median', 'STD', 'Total', '<=256']) + '\n')
stats = [np.min(lengths), np.max(lengths), np.mean(lengths), np.median(lengths), np.std(lengths), len(lengths), len(short_lengths)]
stat_file.write('\t'.join([str(t) for t in stats]) + '\n')
stat_file.write('Total {} unseen out of {} all tokens. Probability {}.\n'.
format(all_unseen, all_tokens, all_unseen / all_tokens))
plt.hist(lengths, bins=20)
plt.savefig(os.path.join(self.proc_path, mode + '_hist.png'))
plt.hist(short_lengths, bins=20)
plt.savefig(os.path.join(self.proc_path, mode + '_short_hist.png'))
return list(zip(reviews, ratings))
def preprocess_folder(self, mode):
token_file = os.path.join(self.proc_path, mode + '.tok')
rating_file = os.path.join(self.proc_path, mode + '.inter.rat')
self.tokenize_folder(mode, token_file, rating_file)
return self.txt2ids(mode, token_file, rating_file)
def partition(self, data, val_count=1000):
random.shuffle(data)
return data[val_count:], data[:val_count]
def ids2file(self):
for mode in ['train', 'valid', 'test']:
data = getattr(self, mode)
review_out = open(os.path.join(self.proc_path, mode + '.ids'), 'w')
rating_out = open(os.path.join(self.proc_path, mode + '.rat'), 'w')
for review, rating in data:
review_out.write(list2str(review) + '\n')
rating_out.write(str(rating) + '\n')
def preprocess(self):
os.makedirs(self.proc_path, exist_ok=True)
train = self.preprocess_folder('train')
self.train, self.valid = self.partition(train)
self.test = self.preprocess_folder('test')
self.ids2file()
def load_ids(self, mode):
review_lines = open(os.path.join(self.proc_path, mode + '.ids')).readlines()
rating_lines = open(os.path.join(self.proc_path, mode + '.rat')).readlines()
ratings = [int(line.strip()) for line in rating_lines]
reviews = [[int(i) for i in line.strip().split('\t')] for line in review_lines]
return list(zip(reviews, ratings))
def load_corpus(self, proc_path):
print('Loading corpus from processed data ...')
self.train = self.load_ids('train')
self.valid = self.load_ids('valid')
self.test = self.load_ids('test')
class SSTCorpus(object):
def __init__(self, raw_path, proc_path, lm_vocab_link, get_stats=False):
exists = check_exist(proc_path)
pathlib.Path(proc_path).mkdir(exist_ok=True)
self.dictionary = Dictionary(vocab_link=lm_vocab_link)
self.raw_path = raw_path
self.proc_path = proc_path
self._get_stats = get_stats
if not exists:
print('Creating corpus from raw data ...')
if not raw_path:
raise ValueError("data_root [directory to the original data] must be specified")
self.preprocess()
else:
self.load_corpus(proc_path)
def check_oov(self, txt):
txt = txt.lower()
txt = re.sub('thats', "that's", txt)
txt = re.sub('wouldnt', "wouldn't", txt)
txt = re.sub('couldnt', "couldn't", txt)
txt = re.sub('cant', "can't", txt)
txt = re.sub('dont', "don't", txt)
txt = re.sub("didnt", "didn't", txt)
txt = re.sub("isnt", "isn't", txt)
txt = re.sub("wasnt", "wasn't", txt)
return word_tokenize(txt)
def tokenize(self, txt):
txt = re.sub('-', ' - ', txt)
txt = re.sub('\+', ' + ', txt)
txt = re.sub('\*', ' * ', txt)
txt = re.sub('/', ' / ', txt)
txt = re.sub('`', "'", txt)
words = []
for token in word_tokenize(txt):
if not token in self.dictionary.word2idx:
if token.startswith("'"):
words.append("'")
token = token[1:]
if not token in self.dictionary.word2idx:
tokens = self.check_oov(token)
words.extend(tokens)
else:
words.append(token)
else:
words.append(token)
txt = ' '.join(words)
txt = re.sub("''", '"', txt)
txt = re.sub("' '", '"', txt)
txt = re.sub("``", '"', txt)
txt = re.sub('etc \.', 'etc. ', txt)
txt = re.sub(' etc ', ' etc. ', txt)
return txt
def tokenize_file(self, mode):
data = pd.read_csv(os.path.join(self.raw_path, mode + '.csv'))
if mode == 'val':
mode = 'valid'
review_file = open(os.path.join(self.proc_path, mode + '.tok'), 'w')
rating_file = open(os.path.join(self.proc_path, mode + '.rat'), 'w')
for _, row in data.iterrows():
review = self.tokenize(row['sentence'])
review_file.write(review + '\n')
rating_file.write(str(row['label']) + '\n')
def txt2ids(self, mode):
if self._get_stats:
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
reviews = []
unk_id = self.dictionary.word2idx[self.dictionary.UNK]
unseen = []
all_tokens = 0
all_unseen = 0
rating_lines = open(os.path.join(self.proc_path, mode + '.rat'), 'r').readlines()
ratings = [int(line.strip()) for line in rating_lines]
for line in open(os.path.join(self.proc_path, mode + '.tok'), 'r'):
tokens = line.strip().split()
reviews.append([self.dictionary.word2idx.get(token, unk_id) for token in tokens])
if self._get_stats:
for token in tokens:
all_tokens += 1
if not token in self.dictionary.word2idx:
unseen.append(token)
all_unseen += 1
if self._get_stats:
counter = Counter(unseen)
out = open(os.path.join(self.proc_path, mode + '_unseen.txt'), 'w')
for key, count in counter.most_common():
out.write(key + '\t' + str(count) + '\n')
lengths = np.asarray([len(review) for review in reviews])
stat_file = open(os.path.join(self.proc_path, 'statistics.txt'), 'a')
stat_file.write(mode + '\n')
short_lengths = [l for l in lengths if l <= 96]
stat_file.write('\t'.join(['Min', 'Max', 'Mean', 'Median', 'STD', 'Total', '<=96']) + '\n')
stats = [np.min(lengths), np.max(lengths), np.mean(lengths), np.median(lengths), np.std(lengths), len(lengths), len(short_lengths)]
stat_file.write('\t'.join([str(t) for t in stats]) + '\n')
stat_file.write('Total {} unseen out of {} all tokens. Probability {}.\n'.
format(all_unseen, all_tokens, all_unseen / all_tokens))
plt.hist(lengths, bins=20)
plt.savefig(os.path.join(self.proc_path, mode + '_hist.png'))
plt.hist(short_lengths, bins=20)
plt.savefig(os.path.join(self.proc_path, mode + '_short_hist.png'))
return list(zip(reviews, ratings))
def preprocess_file(self, mode):
self.tokenize_file(mode)
if mode == 'val':
mode = 'valid'
return self.txt2ids(mode)
def ids2file(self):
for mode in ['train', 'valid', 'test']:
data = getattr(self, mode)
review_out = open(os.path.join(self.proc_path, mode + '.ids'), 'w')
rating_out = open(os.path.join(self.proc_path, mode + '.rat'), 'w')
for review, rating in data:
review_out.write(list2str(review) + '\n')
rating_out.write(str(rating) + '\n')
def preprocess(self):
os.makedirs(self.proc_path, exist_ok=True)
self.train = self.preprocess_file('train')
self.valid = self.preprocess_file('val')
self.test = self.preprocess_file('test')
self.ids2file()
def load_ids(self, mode):
review_lines = open(os.path.join(self.proc_path, mode + '.ids')).readlines()
rating_lines = open(os.path.join(self.proc_path, mode + '.rat')).readlines()
ratings = [int(line.strip()) for line in rating_lines]
reviews = [[int(i) for i in line.strip().split('\t')] for line in review_lines]
return list(zip(reviews, ratings))
def load_corpus(self, proc_path):
print('Loading corpus from processed data ...')
self.train = self.load_ids('train')
self.valid = self.load_ids('valid')
self.test = self.load_ids('test')
# SSTCorpus('/home/chipn/data/binary_sst', 'sst-processed-data-wkt2' , '/home/chipn/dev/OpenSeq2Seq/wkt2-processed-data/vocab.txt')
# SSTCorpus('/home/chipn/data/binary_sst', 'sst-processed-data-wkt103' , '/home/chipn/dev/OpenSeq2Seq/wkt103-processed-data/vocab.txt')
# IMDBCorpus('/home/chipn/data/aclImdb', 'imdb-processed-data-wkt103' , '/home/chipn/dev/OpenSeq2Seq/wkt103-processed-data/vocab.txt')
# IMDBCorpus('/home/chipn/data/aclImdb', 'imdb-processed-data-wkt2' , '/home/chipn/dev/OpenSeq2Seq/wkt2-processed-data/vocab.txt')
# ==== end of file: open_seq2seq/data/lm/lmutils.py (OpenSeq2Seq-master) ====
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
import os
from enum import Enum
from open_seq2seq.data.data_layer import DataLayer
from open_seq2seq.data.utils import load_pre_existing_vocabulary, pad_vocab_to_eight
from open_seq2seq.data.text2text.t2t import _read_and_batch_from_files
from open_seq2seq.data.text2text.tokenizer import PAD_ID
class SpecialTextTokens(Enum):
PAD_ID = 0 # special padding token
EOS_ID = 1 # special end of sentence token
S_ID = 2 # special start of sentence token
UNK_ID = 3 # out-of-vocabulary tokens will map there
OUT_OF_BUCKET = 1234567890
END_OF_CHOICE = -100
@staticmethod
def to_string(s_token):
if s_token == SpecialTextTokens.UNK_ID.value:
return '<UNK>'
elif s_token == SpecialTextTokens.S_ID.value:
return '<S>'
elif s_token == SpecialTextTokens.EOS_ID.value:
return '</S>'
elif s_token == SpecialTextTokens.PAD_ID.value:
return '<PAD>'
else:
raise ValueError("Unknown Value in SpecialTokens")
class ParallelTextDataLayer(DataLayer):
@staticmethod
def get_required_params():
return dict(DataLayer.get_required_params(), **{
'source_file': str,
'src_vocab_file': str,
'tgt_vocab_file': str,
'max_length': int,
'shuffle': bool,
'repeat': bool,
})
@staticmethod
def get_optional_params():
return dict(DataLayer.get_optional_params(), **{
'use_targets': bool,
'delimiter': str,
'target_file': str,
'map_parallel_calls': int,
'prefetch_buffer_size': int,
'pad_lengths_to_eight': bool,
'pad_vocab_to_eight': bool,
'shuffle_buffer_size': int,
'special_tokens_already_in_vocab': bool,
'use_start_token': bool,
})
def __init__(self, params, model, num_workers=1, worker_id=0):
super(ParallelTextDataLayer, self).__init__(params, model,
num_workers, worker_id)
self._batch_size = self.params['batch_size']
self.source_file = self.params['source_file']
self._use_targets = self.params.get('use_targets', True)
if not self._use_targets:
self.target_file = self.source_file
if 'target_file' in self.params:
print("WARNING: target file was specified but was "
"ignored by data layer because 'use_targets'=False")
else:
self.target_file = self.params['target_file']
self.src_vocab_file = self.params['src_vocab_file']
self.tgt_vocab_file = self.params['tgt_vocab_file']
self.max_len = self.params['max_length']
self._delimiter = self.params.get('delimiter', ' ')
self._map_parallel_calls = self.params.get('map_parallel_calls', 8)
self._pad_lengths_to_eight = self.params.get('pad_lengths_to_eight', False)
self._prefetch_buffer_size = self.params.get('prefetch_buffer_size',
tf.contrib.data.AUTOTUNE)
self._shuffle_buffer_size = self.params.get('shuffle_buffer_size', -1)
self._num_workers = num_workers
self._worker_id = worker_id
self._use_start_token = self.params.get('use_start_token', True)
if self._pad_lengths_to_eight and not (self.params['max_length'] % 8 == 0):
raise ValueError("If padding to 8 in data layer, then "
"max_length should be multiple of 8")
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
self.dataset_size = file_len(self.source_file)
special_tokens_already_in_vocab = self.params.get('special_tokens_already_in_vocab', True)
# load source and target vocabularies to RAM
self.src_seq2idx = load_pre_existing_vocabulary(
self.src_vocab_file, min_idx=0 if special_tokens_already_in_vocab
else SpecialTextTokens.UNK_ID.value + 1)
self.tgt_seq2idx = load_pre_existing_vocabulary(
self.tgt_vocab_file, min_idx=0 if special_tokens_already_in_vocab
else SpecialTextTokens.UNK_ID.value + 1)
if not special_tokens_already_in_vocab:
# manually add special tokens
# unknown symbol
self.src_seq2idx[
SpecialTextTokens.to_string(SpecialTextTokens.UNK_ID.value)] = \
SpecialTextTokens.UNK_ID.value
self.tgt_seq2idx[
SpecialTextTokens.to_string(SpecialTextTokens.UNK_ID.value)] = \
SpecialTextTokens.UNK_ID.value
# sentence start
self.src_seq2idx[
SpecialTextTokens.to_string(SpecialTextTokens.S_ID.value)] = \
SpecialTextTokens.S_ID.value
self.tgt_seq2idx[
SpecialTextTokens.to_string(SpecialTextTokens.S_ID.value)] = \
SpecialTextTokens.S_ID.value
# sentence end
self.src_seq2idx[
SpecialTextTokens.to_string(SpecialTextTokens.EOS_ID.value)] = \
SpecialTextTokens.EOS_ID.value
self.tgt_seq2idx[
SpecialTextTokens.to_string(SpecialTextTokens.EOS_ID.value)] = \
SpecialTextTokens.EOS_ID.value
# padding
self.src_seq2idx[
SpecialTextTokens.to_string(SpecialTextTokens.PAD_ID.value)] = \
SpecialTextTokens.PAD_ID.value
self.tgt_seq2idx[
SpecialTextTokens.to_string(SpecialTextTokens.PAD_ID.value)] = \
SpecialTextTokens.PAD_ID.value
if self.params.get('pad_vocab_to_eight', False):
self.src_seq2idx = pad_vocab_to_eight(self.src_seq2idx)
self.tgt_seq2idx = pad_vocab_to_eight(self.tgt_seq2idx)
self.src_idx2seq = {idx: w for w, idx in self.src_seq2idx.items()}
self.tgt_idx2seq = {idx: w for w, idx in self.tgt_seq2idx.items()}
self.params['src_vocab_size'] = len(self.src_seq2idx)
self.params['tgt_vocab_size'] = len(self.tgt_seq2idx)
self.params['target_seq2idx'] = self.tgt_seq2idx
self.params['source_seq2idx'] = self.src_seq2idx
self.params['target_idx2seq'] = self.tgt_idx2seq
self.params['source_idx2seq'] = self.src_idx2seq
self._input_tensors = {}
def _pad2eight(self, lst, do_pad_eight):
if len(lst) % 8 == 0 or not do_pad_eight:
return lst
else:
return lst + [SpecialTextTokens.PAD_ID.value] * (8 - len(lst) % 8)
def _src_token_to_id(self, line):
tokens = line.decode("utf-8").split(self._delimiter) #line.numpy().decode
if self._use_start_token:
return np.array(self._pad2eight([SpecialTextTokens.S_ID.value] + \
[self.src_seq2idx.get(token, SpecialTextTokens.UNK_ID.value) for token in tokens[:self.max_len-2]] + \
[SpecialTextTokens.EOS_ID.value], self._pad_lengths_to_eight), dtype="int32")
else:
return np.array(self._pad2eight([self.src_seq2idx.get(token, SpecialTextTokens.UNK_ID.value) for token in
tokens[:self.max_len - 2]] + \
[SpecialTextTokens.EOS_ID.value], self._pad_lengths_to_eight), dtype="int32")
def _tgt_token_to_id(self, line):
tokens = line.decode("utf-8").split(self._delimiter) #line.numpy().decode
if self._use_start_token:
return np.array(self._pad2eight([SpecialTextTokens.S_ID.value] + \
[self.tgt_seq2idx.get(token, SpecialTextTokens.UNK_ID.value) for token in tokens[:self.max_len-2]] + \
[SpecialTextTokens.EOS_ID.value], self._pad_lengths_to_eight), dtype="int32")
else:
return np.array(self._pad2eight([self.tgt_seq2idx.get(token, SpecialTextTokens.UNK_ID.value) for token in
tokens[:self.max_len - 2]] + \
[SpecialTextTokens.EOS_ID.value], self._pad_lengths_to_eight), dtype="int32")
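# For illustration (hypothetical ids): with use_start_token=True the source line
# "hello world" becomes [S_ID, id('hello'), id('world'), EOS_ID] = [2, ..., ..., 1];
# out-of-vocabulary tokens map to UNK_ID (3), and when pad_lengths_to_eight is set
# the sequence is right-padded with PAD_ID (0) to a multiple of eight.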
def build_graph(self):
with tf.device('/cpu:0'):
_sources = tf.data.TextLineDataset(self.source_file)
_targets = tf.data.TextLineDataset(self.target_file)
if self._num_workers > 1:
#_src_tgt_dataset = _src_tgt_dataset\
# .shard(num_shards=self._num_workers, index=self._worker_id)
_sources = _sources.shard(num_shards=self._num_workers,
index=self._worker_id)
_targets = _targets.shard(num_shards=self._num_workers,
index=self._worker_id)
_sources = _sources.map(lambda line: tf.py_func(func=self._src_token_to_id, inp=[line],
Tout=[tf.int32], stateful=False),
num_parallel_calls=self._map_parallel_calls) \
.map(lambda tokens: (tokens, tf.size(tokens)),
num_parallel_calls=self._map_parallel_calls)
_targets = _targets.map(lambda line: tf.py_func(func=self._tgt_token_to_id, inp=[line],
Tout=[tf.int32], stateful=False),
num_parallel_calls=self._map_parallel_calls) \
.map(lambda tokens: (tokens, tf.size(tokens)),
num_parallel_calls=self._map_parallel_calls)
_src_tgt_dataset = tf.data.Dataset.zip((_sources, _targets)).filter(
lambda t1, t2: tf.logical_and(tf.less_equal(t1[1], self.max_len),
tf.less_equal(t2[1], self.max_len))
).cache()
if self.params['shuffle']:
bf_size = self.get_size_in_samples() if self._shuffle_buffer_size == -1 \
else self._shuffle_buffer_size
_src_tgt_dataset = _src_tgt_dataset.shuffle(buffer_size=bf_size)
else:
_src_tgt_dataset = _src_tgt_dataset
if self.params['repeat']:
_src_tgt_dataset = _src_tgt_dataset.repeat()
self.batched_dataset = _src_tgt_dataset.padded_batch(
self._batch_size,
padded_shapes=((tf.TensorShape([None]),
tf.TensorShape([])),
(tf.TensorShape([None]),
tf.TensorShape([]))),
padding_values=(
(SpecialTextTokens.PAD_ID.value,
0),
(SpecialTextTokens.PAD_ID.value,
0))).prefetch(buffer_size=self._prefetch_buffer_size)
self._iterator = self.batched_dataset.make_initializable_iterator()
if self.params['mode'] == 'train' or self.params['mode'] == 'eval':
t1, t2 = self.iterator.get_next()
x, x_length = t1[0], t1[1]
y, y_length = t2[0], t2[1]
self._input_tensors['source_tensors'] = [x, x_length]
self._input_tensors['target_tensors'] = [y, y_length]
else:
t1, _ = self.iterator.get_next()
self._input_tensors['source_tensors'] = [t1[0], t1[1]]
def create_interactive_placeholders(self):
self._text = tf.placeholder(dtype=tf.int32, shape=[self._batch_size, None])
self._text_length = tf.placeholder(dtype=tf.int32, shape=[self._batch_size])
self._input_tensors = {}
self._input_tensors['source_tensors'] = [self._text, self._text_length]
def create_feed_dict(self, model_in):
""" Creates the feed dict for interactive infer
Args:
model_in (str): the string to be translated. Should be in bpe format.
Returns:
feed_dict (dict): Dictionary with values for the placeholders.
"""
text = []
text_length = []
for line in model_in:
line = self._src_token_to_id(line)
text.append(line)
text_length.append(line.shape[0])
max_len = np.max(text_length)
for i,line in enumerate(text):
line = np.pad(
line, ((0, max_len-len(line))),
"constant", constant_values=SpecialTextTokens.PAD_ID.value
)
text[i] = line
text = np.reshape(text, [self._batch_size, -1])
text_length = np.reshape(text_length, [self._batch_size])
feed_dict = {
self._text: text,
self._text_length: text_length
}
return feed_dict
def get_size_in_samples(self):
return self.dataset_size
@property
def iterator(self):
return self._iterator
@property
def input_tensors(self):
return self._input_tensors
class TransformerDataLayer(DataLayer):
"""Wraps Transformers data pipeline into the form for OpenSeq2Seq"""
@staticmethod
def get_required_params():
return dict(DataLayer.get_required_params(), **{
'data_dir': str,
'file_pattern': str,
'src_vocab_file': str,
'batch_size': int,
'max_length': int,
'shuffle': bool,
"delimiter": str,
})
@staticmethod
def get_optional_params():
return dict(DataLayer.get_optional_params(), **{
'repeat': int,
'num_cpu_cores': int,
'tgt_vocab_file': str,
'pad_data_to_eight': bool,
'batch_in_tokens': bool,
})
def __init__(self, params, model, num_workers=1, worker_id=0):
super(TransformerDataLayer, self).__init__(params, model,
num_workers, worker_id)
self.src_vocab_file = self.params['src_vocab_file']
# if tgt vocab isn't specified - assume common vocab file
self.tgt_vocab_file = self.params.get('tgt_vocab_file', self.src_vocab_file)
# load source and target vocabularies to RAM
# pre-processed vocab starts from PAD, EOS
self.src_seq2idx = load_pre_existing_vocabulary(
self.src_vocab_file,
min_idx=PAD_ID)
self.tgt_seq2idx = load_pre_existing_vocabulary(
self.tgt_vocab_file,
min_idx=PAD_ID)
self.src_idx2seq = {idx: w for w, idx in self.src_seq2idx.items()}
self.tgt_idx2seq = {idx: w for w, idx in self.tgt_seq2idx.items()}
self.params['src_vocab_size'] = len(self.src_seq2idx)
self.params['tgt_vocab_size'] = len(self.tgt_seq2idx)
self.params['target_seq2idx'] = self.tgt_seq2idx
self.params['source_seq2idx'] = self.src_seq2idx
self.params['target_idx2seq'] = self.tgt_idx2seq
self.params['source_idx2seq'] = self.src_idx2seq
self._num_workers = num_workers
self._worker_id = worker_id
self._input_tensors = {}
self._iterator = None
self.batched_dataset = None
def build_graph(self):
file_pattern = os.path.join(self.params['data_dir'],
self.params['file_pattern'])
self.batched_dataset = _read_and_batch_from_files(
file_pattern=file_pattern,
batch_size=self.params['batch_size'],
max_length=self.params['max_length'],
num_cpu_cores=self.params.get('num_cpu_cores', 2),
shuffle=self.params['shuffle'],
repeat=self.params['repeat'],
num_workers=self._num_workers,
worker_id=self._worker_id,
batch_in_tokens=self.params.get('batch_in_tokens', True),
pad2eight=self.params.get('pad_data_to_eight', False))
self._iterator = self.batched_dataset.make_initializable_iterator()
x, y = self.iterator.get_next()
len_x = tf.count_nonzero(x, axis=1, dtype=tf.int32)
len_y = tf.count_nonzero(y, axis=1, dtype=tf.int32)
if self.params['mode'] == 'train' or self.params['mode'] == 'eval':
self._input_tensors['source_tensors'] = [x, len_x]
self._input_tensors['target_tensors'] = [y, len_y]
else:
self._input_tensors['source_tensors'] = [x, len_x]
@property
def iterator(self):
return self._iterator
@property
def input_tensors(self):
return self._input_tensors
# ==== end of file: open_seq2seq/data/text2text/text2text.py (OpenSeq2Seq-master) ====
# Copyright (c) 2017 NVIDIA Corporation
"""
This file takes output of the inference stage produced using
TransformerDataLayer and converts it to simple tokenized text
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import argparse
import tokenizer
def main():
with open(FLAGS.input_file, 'r') as in_file:
def trim(token):
return token[1:-1]
print("******Reading from file: {}".format(FLAGS.input_file))
with open(FLAGS.output_file, 'w') as out_file:
print("******Writing to file: {}".format(FLAGS.output_file))
for line in in_file:
# merge and split by _
escaped_tokens = "".join([trim(t) for t in line.strip().split(" ")])
escaped_tokens = escaped_tokens.split("_")
# unescape
unescaped_tokens = []
for token in escaped_tokens:
if token:
unescaped_tokens.append(tokenizer.unescape_token(token))
# join and write
out_file.write(tokenizer.join_tokens_to_string(unescaped_tokens)+'\n')
print("******All done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file", "-if", type=str, default="",
help="output of the inference stage produced using model with "
"TransformerDataLayer",
metavar="<IF>")
parser.add_argument(
"--output_file", "-of", type=str, default="tokenized_output.txt",
help="where to save output",
metavar="<OF>")
FLAGS, _ = parser.parse_known_args()
main()
# ==== end of file: open_seq2seq/data/text2text/parse_output.py (OpenSeq2Seq-master) ====
# ==== open_seq2seq/data/text2text/__init__.py (OpenSeq2Seq-master) is an empty file ====
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines Subtokenizer class to encode and decode strings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import sys
import unicodedata
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
PAD = "<pad>"
PAD_ID = 0
EOS = "<EOS>"
EOS_ID = 1
RESERVED_TOKENS = [PAD, EOS]
# Set of characters that will be used in the function _escape_token() (see func
# docstring for more details).
# This set is added to the alphabet list to ensure that all escaped tokens can
# be encoded.
_ESCAPE_CHARS = set(u"\\_u;0123456789")
# Regex for the function _unescape_token(), the inverse of _escape_token().
# This is used to find "\u", "\\", and "\###;" substrings in the token.
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
_UNDEFINED_UNICODE = u"\u3013"
# Set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in xrange(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
# min_count is the minimum number of times a subtoken must appear in the data
# before it is added to the vocabulary. The value is found using binary
# search to obtain the target vocabulary size.
_MIN_MIN_COUNT = 1 # min value to use when binary searching for min_count
_MAX_MIN_COUNT = 1000 # max value to use when binary searching for min_count
class Subtokenizer(object):
"""Encodes and decodes strings to/from integer IDs."""
def __init__(self, vocab_file, reserved_tokens=None):
"""Initializes class, creating a vocab file if data_files is provided."""
tf.logging.info("Initializing Subtokenizer from file %s." % vocab_file)
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens)
self.alphabet = _generate_alphabet_dict(self.subtoken_list)
self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list)
self.max_subtoken_length = 0
for subtoken in self.subtoken_list:
self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken))
# Create cache to speed up subtokenization
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size
@staticmethod
def init_from_files(
vocab_file, files, target_vocab_size, threshold, min_count=None,
file_byte_limit=1e6, reserved_tokens=None):
"""Create subtoken vocabulary based on files, and save vocab to file.
Args:
vocab_file: String name of vocab file to store subtoken vocabulary.
files: List of file paths that will be used to generate vocabulary.
target_vocab_size: target vocabulary size to generate.
threshold: int threshold of vocabulary size to accept.
min_count: int minimum count to use for generating the vocabulary. The min
count is the minimum number of times a subtoken should appear in the
files before it is added to the vocabulary. If set to none, this value
is found using binary search.
file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that
will be drawn from the files.
reserved_tokens: List of string tokens that are guaranteed to be at the
beginning of the subtoken vocabulary list.
Returns:
Subtokenizer object
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
if tf.gfile.Exists(vocab_file):
tf.logging.info("Vocab file already exists (%s)" % vocab_file)
else:
tf.logging.info("Begin steps to create subtoken vocabulary...")
token_counts = _count_tokens(files, file_byte_limit)
alphabet = _generate_alphabet_dict(token_counts)
subtoken_list = _generate_subtokens_with_target_vocab_size(
token_counts, alphabet, target_vocab_size, threshold, min_count,
reserved_tokens)
tf.logging.info("Generated vocabulary with %d subtokens." %
len(subtoken_list))
_save_vocab_file(vocab_file, subtoken_list)
return Subtokenizer(vocab_file)
def encode(self, raw_string, add_eos=False):
"""Encodes a string into a list of int subtoken ids."""
ret = []
tokens = _split_string_to_tokens(_native_to_unicode(raw_string))
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
if add_eos:
ret.append(EOS_ID)
return ret
def _token_to_subtoken_ids(self, token):
"""Encode a single token into a list of subtoken ids."""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = _split_token_to_subtokens(
_escape_token(token, self.alphabet), self.subtoken_to_id_dict,
self.max_subtoken_length)
ret = [self.subtoken_to_id_dict[subtoken_id] for subtoken_id in ret]
self._cache[cache_location] = (token, ret)
return ret
def decode(self, subtokens):
"""Converts list of int subtokens ids into a string."""
if isinstance(subtokens, np.ndarray):
# Note that list(subtokens) converts subtokens to a python list, but the
# items remain as np.int32. This converts both the array and its items.
subtokens = subtokens.tolist()
if not subtokens:
return ""
assert isinstance(subtokens, list) and isinstance(subtokens[0], int), (
"Subtokens argument passed into decode() must be a list of integers.")
return _unicode_to_native(
join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))
def _subtoken_ids_to_tokens(self, subtokens):
"""Convert list of int subtoken ids to a list of string tokens."""
escaped_tokens = "".join([
self.subtoken_list[s] for s in subtokens
if s < len(self.subtoken_list)])
escaped_tokens = escaped_tokens.split("_")
# All tokens in the vocabulary list have been escaped (see _escape_token())
# so each token must be unescaped when decoding.
ret = []
for token in escaped_tokens:
if token:
ret.append(unescape_token(token))
return ret
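# Minimal usage sketch (assumes a subtoken vocab file already exists, e.g. one
# produced by Subtokenizer.init_from_files):
#   subtokenizer = Subtokenizer('vocab.subtokens')
#   ids = subtokenizer.encode(u"Hello world", add_eos=True)
#   text = subtokenizer.decode(ids)   # -> u"Hello world"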
def _save_vocab_file(vocab_file, subtoken_list):
"""Save subtokens to file."""
with tf.gfile.Open(vocab_file, mode="w") as f:
for subtoken in subtoken_list:
f.write("'%s'\n" % _unicode_to_native(subtoken))
def _load_vocab_file(vocab_file, reserved_tokens=None):
"""Load vocabulary while ensuring reserved tokens are at the top."""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
subtoken_list = []
with tf.gfile.Open(vocab_file, mode="r") as f:
for line in f:
subtoken = _native_to_unicode(line.strip())
subtoken = subtoken[1:-1] # Remove surrounding single-quotes
if subtoken in reserved_tokens:
continue
subtoken_list.append(_native_to_unicode(subtoken))
return reserved_tokens + subtoken_list
def _native_to_unicode(s):
"""Convert string to unicode (required in Python 2)."""
if six.PY2:
return s if isinstance(s, unicode) else s.decode("utf-8")
else:
return s
def _unicode_to_native(s):
"""Convert string from unicode to native format (required in Python 2)."""
if six.PY2:
return s.encode("utf-8") if isinstance(s, unicode) else s
else:
return s
def _split_string_to_tokens(text):
"""Splits text to a list of string tokens."""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
for pos in xrange(1, len(text)):
if is_alnum[pos] != is_alnum[pos - 1]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
ret.append(token)
token_start = pos
final_token = text[token_start:]
ret.append(final_token)
return ret
def join_tokens_to_string(tokens):
"""Join a list of string tokens into a single string."""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
def _escape_token(token, alphabet):
r"""Replace characters that aren't in the alphabet and append "_" to token.
Apply three transformations to the token:
1. Replace underline character "_" with "\u", and backslash "\" with "\\".
2. Replace characters outside of the alphabet with "\###;", where ### is the
character's Unicode code point.
3. Appends "_" to mark the end of a token.
Args:
token: unicode string to be escaped
alphabet: list of all known characters
Returns:
escaped string
"""
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_"
def unescape_token(token):
r"""Replaces escaped characters in the token with their unescaped versions.
Applies inverse transformations as _escape_token():
1. Replace "\u" with "_", and "\\" with "\".
2. Replace "\###;" with the unicode character the ### refers to.
Args:
token: escaped string
Returns:
unescaped string
"""
def match(m):
r"""Returns replacement string for matched object.
Matched objects contain one of the strings that matches the regex pattern:
r"\\u|\\\\|\\([0-9]+);"
The strings can be '\u', '\\', or '\###;' (### is any digit number).
m.group(0) refers to the entire matched string ('\u', '\\', or '\###;').
m.group(1) refers to the first parenthesized subgroup ('###').
m.group(0) exists for all match objects, while m.group(1) exists only for
the string '\###;'.
This function looks to see if m.group(1) exists. If it doesn't, then the
matched string must be '\u' or '\\' . In this case, the corresponding
replacement ('_' and '\') are returned. Note that in python, a single
backslash is written as '\\', and double backslash as '\\\\'.
If m.group(1) exists, then use the integer in m.group(1) to return a
unicode character.
Args:
m: match object
Returns:
String to replace matched object with.
"""
# Check if the matched strings are '\u' or '\\'.
if m.group(1) is None:
return u"_" if m.group(0) == u"\\u" else u"\\"
# If m.group(1) exists, try and return unicode character.
try:
return six.unichr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return _UNDEFINED_UNICODE
# Use match function to replace escaped substrings in the token.
return _UNESCAPE_REGEX.sub(match, token)
def _count_tokens(files, file_byte_limit=1e6):
"""Return token counts of words in the files.
Samples file_byte_limit bytes from each file, and counts the words that appear
in the samples. The samples are semi-evenly distributed across the file.
Args:
files: List of filepaths
file_byte_limit: Max number of bytes that will be read from each file.
Returns:
Dictionary mapping tokens to the number of times they appear in the sampled
lines from the files.
"""
token_counts = collections.defaultdict(int)
for filepath in files:
with tf.gfile.Open(filepath, mode="r") as reader:
file_byte_budget = file_byte_limit
counter = 0
lines_to_skip = int(reader.size() / (file_byte_budget * 2))
for line in reader:
if counter < lines_to_skip:
counter += 1
else:
if file_byte_budget < 0:
break
line = line.strip()
file_byte_budget -= len(line)
counter = 0
# Add words to token counts
for token in _split_string_to_tokens(_native_to_unicode(line)):
token_counts[token] += 1
return token_counts
def _list_to_index_dict(lst):
"""Create dictionary mapping list items to their indices in the list."""
return {item: n for n, item in enumerate(lst)}
def _split_token_to_subtokens(token, subtoken_dict, max_subtoken_length):
"""Splits a token into subtokens defined in the subtoken dict."""
ret = []
start = 0
token_len = len(token)
while start < token_len:
# Find the longest subtoken, so iterate backwards.
for end in xrange(min(token_len, start + max_subtoken_length), start, -1):
subtoken = token[start:end]
if subtoken in subtoken_dict:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
raise ValueError("Was unable to split token \"%s\" into subtokens." %
token)
return ret
def _generate_subtokens_with_target_vocab_size(
token_counts, alphabet, target_size, threshold, min_count=None,
reserved_tokens=None):
"""Generate subtoken vocabulary close to the target size."""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
if min_count is not None:
tf.logging.info("Using min_count=%d to generate vocab with target size %d" %
(min_count, target_size))
return _generate_subtokens(
token_counts, alphabet, min_count, reserved_tokens=reserved_tokens)
def bisect(min_val, max_val):
"""Recursive function to binary search for subtoken vocabulary."""
cur_count = (min_val + max_val) // 2
tf.logging.info("Binary search: trying min_count=%d (%d %d)" %
(cur_count, min_val, max_val))
subtoken_list = _generate_subtokens(
token_counts, alphabet, cur_count, reserved_tokens=reserved_tokens)
val = len(subtoken_list)
tf.logging.info("Binary search: min_count=%d resulted in %d tokens" %
(cur_count, val))
within_threshold = abs(val - target_size) < threshold
if within_threshold or min_val >= max_val or cur_count < 2:
return subtoken_list
if val > target_size:
other_subtoken_list = bisect(cur_count + 1, max_val)
else:
other_subtoken_list = bisect(min_val, cur_count - 1)
# Return vocabulary dictionary with the closest number of tokens.
other_val = len(other_subtoken_list)
if abs(other_val - target_size) < abs(val - target_size):
return other_subtoken_list
return subtoken_list
tf.logging.info("Finding best min_count to get target size of %d" %
target_size)
return bisect(_MIN_MIN_COUNT, _MAX_MIN_COUNT)
def _generate_alphabet_dict(iterable, reserved_tokens=None):
"""Create set of characters that appear in any element in the iterable."""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
alphabet = {c for token in iterable for c in token}
alphabet |= {c for token in reserved_tokens for c in token}
alphabet |= _ESCAPE_CHARS # Add escape characters to alphabet set.
return alphabet
def _count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length):
"""Count number of times subtokens appear, and generate new subtokens.
Args:
token_counts: dict mapping tokens to the number of times they appear in the
original files.
alphabet: list of allowed characters. Used to escape the tokens, which
guarantees that all tokens can be split into subtokens.
subtoken_dict: dict mapping subtokens to ids.
max_subtoken_length: maximum length of subtoken in subtoken_dict.
Returns:
A defaultdict mapping subtokens to the number of times they appear in the
tokens. The dict may contain new subtokens.
"""
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
token = _escape_token(token, alphabet)
subtokens = _split_token_to_subtokens(
token, subtoken_dict, max_subtoken_length)
# Generate new subtokens by taking substrings from token.
start = 0
for subtoken in subtokens:
for end in xrange(start + 1, len(token) + 1):
new_subtoken = token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
return subtoken_counts
def _filter_and_bucket_subtokens(subtoken_counts, min_count):
"""Return a bucketed list of subtokens that are filtered by count.
Args:
subtoken_counts: defaultdict mapping subtokens to their counts
min_count: int count used to filter subtokens
Returns:
List of subtoken sets, where subtokens in set i have the same length=i.
"""
# Create list of buckets, where subtokens in bucket i have length i.
subtoken_buckets = []
for subtoken, count in six.iteritems(subtoken_counts):
if count < min_count: # Filter out subtokens that don't appear enough
continue
while len(subtoken_buckets) <= len(subtoken):
subtoken_buckets.append(set())
subtoken_buckets[len(subtoken)].add(subtoken)
return subtoken_buckets
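# Illustrative sketch (not part of the original module): how subtokens are
# bucketed by length after count filtering. The counts below are invented.
def _example_filter_and_bucket_subtokens():
  """Hypothetical example of _filter_and_bucket_subtokens."""
  counts = collections.defaultdict(int, {"a": 10, "ab": 7, "abc": 2, "b": 5})
  # With min_count=5, "abc" is dropped; the survivors land in length buckets:
  # buckets[1] == {"a", "b"}, buckets[2] == {"ab"}
  return _filter_and_bucket_subtokens(counts, min_count=5)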
def _gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens=None):
"""Generate candidate subtokens ordered by count, and new max subtoken length.
Add subtokens to the candidate list in order of length (longest subtokens
first). When a subtoken is added, the counts of each of its prefixes are
decreased. Prefixes that don't appear much outside the subtoken are not added
to the candidate list.
For example:
subtoken being added to candidate list: 'translate'
subtoken_counts: {'translate':10, 't':40, 'tr':16, 'tra':12, ...}
min_count: 5
When 'translate' is added, subtoken_counts is updated to:
{'translate':0, 't':30, 'tr':6, 'tra': 2, ...}
The subtoken 'tra' will not be added to the candidate list, because it appears
twice (less than min_count) outside of 'translate'.
Args:
subtoken_counts: defaultdict mapping str subtokens to int counts
min_count: int minimum count requirement for subtokens
alphabet: set of characters. Each character is added to the subtoken list to
guarantee that all tokens can be encoded.
reserved_tokens: list of tokens that will be added to the beginning of the
returned subtoken list.
Returns:
List of candidate subtokens in decreasing count order, and maximum subtoken
length
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
# Create a list of (count, subtoken) for each candidate subtoken.
subtoken_candidates = []
# Use bucketed list to iterate through subtokens in order of length.
# subtoken_buckets[i] = set(subtokens), where each subtoken has length i.
subtoken_buckets = _filter_and_bucket_subtokens(subtoken_counts, min_count)
max_subtoken_length = len(subtoken_buckets) - 1
# Go through the list in reverse order to consider longer subtokens first.
for subtoken_len in xrange(max_subtoken_length, 0, -1):
for subtoken in subtoken_buckets[subtoken_len]:
count = subtoken_counts[subtoken]
# Possible if this subtoken is a prefix of another token.
if count < min_count:
continue
# Ignore alphabet/reserved tokens, which will be added manually later.
if subtoken not in alphabet and subtoken not in reserved_tokens:
subtoken_candidates.append((count, subtoken))
# Decrement count of the subtoken's prefixes (if a longer subtoken is
# added, its prefixes lose priority to be added).
for end in xrange(1, subtoken_len):
subtoken_counts[subtoken[:end]] -= count
# Add alphabet subtokens (guarantees that all strings are encodable).
subtoken_candidates.extend((subtoken_counts.get(a, 0), a) for a in alphabet)
# Order subtoken candidates by decreasing count.
subtoken_list = [t for _, t in sorted(subtoken_candidates, reverse=True)]
# Add reserved tokens to beginning of the list.
subtoken_list = reserved_tokens + subtoken_list
return subtoken_list, max_subtoken_length
def _generate_subtokens(
token_counts, alphabet, min_count, num_iterations=4,
reserved_tokens=None):
"""Create a list of subtokens in decreasing order of frequency.
Args:
token_counts: dict mapping str tokens -> int count
alphabet: set of characters
min_count: int minimum number of times a subtoken must appear before it is
added to the vocabulary.
num_iterations: int number of iterations to generate new tokens.
reserved_tokens: list of tokens that will be added to the beginning of the
returned subtoken list.
Returns:
Sorted list of subtokens (most frequent first)
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
# Use alphabet set to create initial list of subtokens
subtoken_list = reserved_tokens + list(alphabet)
max_subtoken_length = 1
# On each iteration, segment all words using the subtokens defined in
# subtoken_dict, count how often the resulting subtokens appear, and update
# the dictionary with subtokens w/ high enough counts.
for i in xrange(num_iterations):
tf.logging.info("\tGenerating subtokens: iteration %d" % i)
# Generate new subtoken->id dictionary using the new subtoken list.
subtoken_dict = _list_to_index_dict(subtoken_list)
# Create dict mapping subtoken->count, with additional subtokens created
# from substrings taken from the tokens.
subtoken_counts = _count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length)
# Generate new list of subtokens sorted by subtoken count.
subtoken_list, max_subtoken_length = _gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens)
tf.logging.info("\tVocab size: %d" % len(subtoken_list))
return subtoken_list
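# Illustrative sketch (not part of the original module): driving the iterative
# subtoken generation above on toy token counts. The counts are invented;
# RESERVED_TOKENS and the escaping helpers come from this module.
def _example_generate_subtokens():
  """Hypothetical example of building a tiny subtoken vocabulary."""
  token_counts = {"low": 5, "lower": 2, "newest": 6, "widest": 3}
  alphabet = _generate_alphabet_dict(token_counts)
  # Returns subtokens sorted by decreasing count, with reserved tokens first.
  return _generate_subtokens(token_counts, alphabet, min_count=2)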
| OpenSeq2Seq-master | open_seq2seq/data/text2text/tokenizer.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.test_utils.create_reversed_examples import create_data, \
remove_data
class ParallelTextDataLayerTests(tf.test.TestCase):
def setUp(self):
create_data(train_corpus_size=1000, data_path="tmp1")
batch_size = 2
self.params = {
'src_vocab_file': "tmp1/vocab/source.txt",
'tgt_vocab_file': "tmp1/vocab/target.txt",
'source_file': "tmp1/train/source.txt",
'target_file': "tmp1/train/target.txt",
'shuffle': True,
'batch_size': batch_size,
'max_length': 56,
'repeat': False,
'delimiter': ' ',
'map_parallel_calls': 1,
'prefetch_buffer_size': 1,
'mode': 'train',
}
def tearDown(self):
remove_data(data_path='tmp1')
def test_init_test4(self):
dl = ParallelTextDataLayer(params=self.params, model=None)
dl.build_graph()
print(len(dl.src_seq2idx))
print(len(dl.tgt_seq2idx))
with self.test_session(use_gpu=True) as sess:
sess.run(dl.iterator.initializer)
et = sess.run(dl.input_tensors)
self.assertIn('source_tensors', et)
self.assertIn('target_tensors', et)
self.assertEqual(et['source_tensors'][0].shape[0],
self.params['batch_size'])
self.assertLessEqual(et['source_tensors'][0].shape[1],
self.params['max_length'])
self.assertEqual(et['source_tensors'][1].shape[0],
self.params['batch_size'])
self.assertEqual(et['target_tensors'][0].shape[0],
self.params['batch_size'])
self.assertLessEqual(et['target_tensors'][0].shape[1],
self.params['max_length'])
self.assertEqual(et['target_tensors'][1].shape[0],
self.params['batch_size'])
def test_init_test2(self):
self.params['mode'] = "infer" # in this case we do not yield targets
self.params['shuffle'] = False # no shuffling in infer mode
dl = ParallelTextDataLayer(params=self.params, model=None)
dl.build_graph()
print(len(dl.src_seq2idx))
print(len(dl.tgt_seq2idx))
with self.test_session(use_gpu=True) as sess:
sess.run(dl.iterator.initializer)
et = sess.run(dl.input_tensors)
self.assertIn('source_tensors', et)
self.assertEqual(et['source_tensors'][0].shape[0],
self.params['batch_size'])
self.assertLessEqual(et['source_tensors'][0].shape[1],
self.params['max_length'])
self.assertEqual(et['source_tensors'][1].shape[0],
self.params['batch_size'])
def test_pad8(self):
self.params['shuffle'] = False # disable shuffling to keep the test deterministic
self.params['pad_lengths_to_eight'] = True
dl = ParallelTextDataLayer(params=self.params, model=None)
dl.build_graph()
print(len(dl.src_seq2idx))
print(len(dl.tgt_seq2idx))
print(dl.src_seq2idx)
print(dl.src_idx2seq)
for i in range(len(dl.src_seq2idx)):
self.assertIn(i, dl.src_idx2seq)
with self.test_session(use_gpu=True) as sess:
sess.run(dl.iterator.initializer)
et = sess.run(dl.input_tensors)
self.assertIn('source_tensors', et)
self.assertIn('target_tensors', et)
self.assertEqual(et['source_tensors'][0].shape[0],
self.params['batch_size'])
self.assertTrue(et['source_tensors'][0].shape[1] % 8 == 0)
self.assertEqual(et['source_tensors'][1].shape[0],
self.params['batch_size'])
self.assertEqual(et['target_tensors'][0].shape[0],
self.params['batch_size'])
self.assertTrue(et['target_tensors'][0].shape[1] % 8 == 0)
self.assertEqual(et['target_tensors'][1].shape[0],
self.params['batch_size'])
if __name__ == '__main__':
tf.test.main()
| OpenSeq2Seq-master | open_seq2seq/data/text2text/text2text_test.py |
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipeline for the transformer model to read, filter, and batch examples.
Two things to note in the pipeline:
1. Batching scheme
The examples encoded in the TFRecord files contain data in the format:
{"inputs": [variable length array of integers],
"targets": [variable length array of integers]}
Where integers in the arrays refer to tokens in the English and German vocab
file (named `vocab.ende.32768`).
Prior to batching, elements in the dataset are grouped by length (max between
"inputs" and "targets" length). Each group is then batched such that:
group_batch_size * length <= batch_size.
Another way to view batch_size is the maximum number of tokens in each batch.
Once batched, each element in the dataset will have the shape:
{"inputs": [group_batch_size, padded_input_length],
"targets": [group_batch_size, padded_target_length]}
Lengths are padded to the longest "inputs" or "targets" sequence in the batch
(padded_input_length and padded_target_length can be different).
This batching scheme decreases the fraction of padding tokens per training
batch, thus improving the training speed significantly.
2. Shuffling
While training, the dataset is shuffled in two places in the code. The first
is the list of training files. Second, while reading records using
`parallel_interleave`, the `sloppy` argument is used to generate randomness
in the order of the examples.
3. Modified slightly to fit OpenSeq2Seq needs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Use the number of training files as the shuffle buffer.
_FILE_SHUFFLE_BUFFER = 100
# Buffer size for reading records from a TFRecord file. Each training file is
# 7.2 MB, so 8 MB allows an entire file to be kept in memory.
_READ_RECORD_BUFFER = 8 * 1000 * 1000
# Example grouping constants. Defines length boundaries for each group.
# These values are the defaults used in Tensor2Tensor.
_MIN_BOUNDARY = 8
_BOUNDARY_SCALE = 1.1
def _load_records(filename):
"""Read file and return a dataset of tf.Examples."""
return tf.data.TFRecordDataset(filename, buffer_size=_READ_RECORD_BUFFER)
def _parse_example(serialized_example, pad_2_eight=False):
"""Return inputs and targets Tensors from a serialized tf.Example."""
data_fields = {
"inputs": tf.VarLenFeature(tf.int64),
"targets": tf.VarLenFeature(tf.int64)
}
parsed = tf.parse_single_example(serialized_example, data_fields)
inputs = tf.sparse_tensor_to_dense(parsed["inputs"])
targets = tf.sparse_tensor_to_dense(parsed["targets"])
if pad_2_eight:
inputs = tf.cond(
tf.equal(tf.shape(inputs)[0] % 8, 0),
true_fn=lambda: inputs,
false_fn=lambda: tf.pad(inputs,
paddings=[[0, 8 - tf.shape(inputs)[0] % 8]])
)
targets = tf.cond(
tf.equal(tf.shape(targets)[0] % 8, 0),
true_fn=lambda: targets,
false_fn=lambda: tf.pad(targets,
paddings=[[0, 8 - tf.shape(targets)[0] % 8]])
)
return inputs, targets
def _filter_max_length(example, max_length=256):
"""Indicates whether the example's length is lower than the maximum length."""
return tf.logical_and(tf.size(example[0]) <= max_length,
tf.size(example[1]) <= max_length)
def _get_example_length(example):
"""Returns the maximum length between the example inputs and targets."""
length = tf.maximum(tf.shape(example[0])[0], tf.shape(example[1])[0])
return length
def _create_min_max_boundaries(
max_length, min_boundary=_MIN_BOUNDARY, boundary_scale=_BOUNDARY_SCALE):
"""Create min and max boundary lists up to max_length.
For example, when max_length=24, min_boundary=4 and boundary_scale=2, the
returned values will be:
buckets_min = [0, 4, 8, 16]
buckets_max = [4, 8, 16, 25]
Args:
max_length: The maximum length of example in dataset.
min_boundary: Minimum length in boundary.
boundary_scale: Amount to scale consecutive boundaries in the list.
Returns:
min and max boundary lists
"""
# Create bucket boundaries list by scaling the previous boundary or adding 1
# (to ensure increasing boundary sizes).
bucket_boundaries = []
x = min_boundary
while x < max_length:
bucket_boundaries.append(x)
x = max(x + 1, int(x * boundary_scale))
# Create min and max boundary lists from the initial list.
buckets_min = [0] + bucket_boundaries
buckets_max = bucket_boundaries + [max_length + 1]
return buckets_min, buckets_max
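# Illustrative sketch (not part of the original module): the boundaries
# produced for the docstring example above.
def _example_create_min_max_boundaries():
  """Hypothetical check of _create_min_max_boundaries."""
  buckets_min, buckets_max = _create_min_max_boundaries(
      max_length=24, min_boundary=4, boundary_scale=2)
  # buckets_min == [0, 4, 8, 16]; buckets_max == [4, 8, 16, 25]
  return buckets_min, buckets_max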
def _batch_examples(dataset, batch_size, max_length, pad_2_eight=True):
"""Group examples by similar lengths, and return batched dataset.
Each batch of similar-length examples are padded to the same length, and may
have different number of elements in each batch, such that:
group_batch_size * padded_length <= batch_size.
This decreases the number of padding tokens per batch, which improves the
training speed.
Args:
dataset: Dataset of unbatched examples.
batch_size: Max number of tokens per batch of examples.
max_length: Max number of tokens in an example input or target sequence.
pad_2_eight: If True, pad sequence lengths and per-bucket batch sizes up to
multiples of 8 (for HMMA / Tensor Core efficiency).
Returns:
Dataset of batched examples with similar lengths.
"""
# Get min and max boundary lists for each example. These are used to calculate
# the `bucket_id`, which is the index at which:
# buckets_min[bucket_id] <= len(example) < buckets_max[bucket_id]
# Note that using both min and max lists improves the performance.
buckets_min, buckets_max = _create_min_max_boundaries(max_length)
# Create list of batch sizes for each bucket_id, so that
# bucket_batch_size[bucket_id] * buckets_max[bucket_id] <= batch_size
if pad_2_eight: # pad to 8 for HMMA
bucket_batch_sizes = [
batch_size // x if batch_size // x % 8 == 0 else
batch_size // x + (8 - batch_size // x % 8)
for x in buckets_max
]
else:
bucket_batch_sizes = [batch_size // x for x in buckets_max]
# bucket_id will be a tensor, so convert this list to a tensor as well.
bucket_batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)
def example_to_bucket_id(example_input, example_target):
"""Return int64 bucket id for this example, calculated based on length."""
seq_length = _get_example_length((example_input, example_target))
# TODO: investigate whether removing code branching improves performance.
conditions_c = tf.logical_and(
tf.less_equal(buckets_min, seq_length),
tf.less(seq_length, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions_c))
return bucket_id
def window_size_fn(bucket_id):
"""Return number of examples to be grouped when given a bucket id."""
return bucket_batch_sizes[bucket_id]
def batching_fn(bucket_id, grouped_dataset):
"""Batch and add padding to a dataset of elements with similar lengths."""
bucket_batch_size = window_size_fn(bucket_id)
# Batch the dataset and add padding so that all input sequences in the
# examples have the same length, and all target sequences have the same
# lengths as well. Resulting lengths of inputs and targets can differ.
return grouped_dataset.padded_batch(bucket_batch_size, ([None], [None]))
return dataset.apply(
tf.contrib.data.group_by_window( # pylint: disable=no-member
key_func=example_to_bucket_id,
reduce_func=batching_fn,
window_size=None,
window_size_func=window_size_fn
)
)
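# Illustrative sketch (not part of the original module): how the per-bucket
# batch sizes used above are derived from a token budget, with and without
# rounding the batch dimension up to a multiple of 8. The numbers are examples.
def _example_bucket_batch_sizes(batch_size=1024, max_length=64):
  """Hypothetical recomputation of the bucket batch sizes in _batch_examples."""
  _, buckets_max = _create_min_max_boundaries(max_length)
  plain = [batch_size // x for x in buckets_max]
  padded = [b if b % 8 == 0 else b + (8 - b % 8) for b in plain]
  return plain, padded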
def _read_and_batch_from_files(
file_pattern, batch_size, max_length, num_cpu_cores, shuffle, repeat,
num_workers, worker_id, batch_in_tokens, pad2eight=True):
"""Create dataset where each item is a dict of "inputs" and "targets".
Args:
file_pattern: String used to match the input TFRecord files.
batch_size: Maximum number of tokens per batch of examples
max_length: Maximum number of tokens per example
num_cpu_cores: Number of cpu cores for parallel input processing.
shuffle: If true, randomizes order of elements.
repeat: Number of times to repeat the dataset. If None, the dataset is
repeated forever.
num_workers: Number of workers or number of Horovod workers
worker_id: Worker id or Horovod rank
batch_in_tokens: whether batch_size is measured in tokens or in sentence
pairs. Batching in tokens is more efficient as it reduces the amount of
padding. Batching in sentences should be used in inference mode since the
order of sentences is important.
pad2eight: if True, it will pad both dimensions to be divisible by 8
Returns:
tf.data.Dataset object containing examples loaded from the files.
"""
dataset = tf.data.Dataset.list_files(file_pattern)
if num_workers > 1:
dataset = dataset.shard(num_shards=num_workers, index=worker_id)
if shuffle:
# Shuffle filenames
dataset = dataset.shuffle(buffer_size=_FILE_SHUFFLE_BUFFER)
# Read files and interleave results. When training, the order of the examples
# will be non-deterministic.
dataset = dataset.apply(
tf.contrib.data.parallel_interleave( # pylint: disable=no-member
_load_records, sloppy=shuffle, cycle_length=num_cpu_cores))
# Parse each tf.Example into a dictionary
# TODO: Look into prefetch_input_elements for performance optimization.
dataset = dataset.map(lambda x: _parse_example(x, pad_2_eight=pad2eight),
num_parallel_calls=num_cpu_cores)
# Remove examples where the input or target length exceeds the maximum length.
dataset = dataset.filter(lambda x, y: _filter_max_length((x, y), max_length))
if batch_in_tokens:
# Batch such that each batch has examples of similar length.
dataset = _batch_examples(dataset, batch_size, max_length,
pad_2_eight=pad2eight)
else:
# Examples can have different lengths
dataset = dataset.padded_batch(batch_size, ([None], [None]))
dataset = dataset.repeat(repeat)
# Prefetch the next element to improve speed of input pipeline.
dataset = dataset.prefetch(1)
return dataset
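# Illustrative sketch (not part of the original module): a typical call to the
# input pipeline above. The file pattern and the sizes are hypothetical
# placeholders, not values taken from any config in this repository.
def _example_read_and_batch_from_files():
  """Hypothetical usage of _read_and_batch_from_files."""
  return _read_and_batch_from_files(
      file_pattern="/tmp/translate_ende/wmt32k-train-*",  # hypothetical path
      batch_size=4096,   # max tokens per batch when batch_in_tokens is True
      max_length=256,
      num_cpu_cores=4,
      shuffle=True,
      repeat=None,       # repeat forever
      num_workers=1,
      worker_id=0,
      batch_in_tokens=True,
      pad2eight=True)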
| OpenSeq2Seq-master | open_seq2seq/data/text2text/t2t.py |
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and preprocess WMT17 ende training and evaluation datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
import tarfile
import urllib
import six
import tensorflow as tf
import urllib.request
import tokenizer
# Data sources for training/evaluating the transformer translation model.
# If any of the training sources are changed, then either:
# 1) use the flag `--search` to find the best min count or
# 2) update the _TRAIN_DATA_MIN_COUNT constant.
# min_count is the minimum number of times a token must appear in the data
# before it is added to the vocabulary. "Best min count" refers to the value
# that generates a vocabulary set that is closest in size to _TARGET_VOCAB_SIZE.
_TRAIN_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/"
"training-parallel-nc-v12.tgz",
"input": "news-commentary-v12.de-en.en",
"target": "news-commentary-v12.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
"input": "commoncrawl.de-en.en",
"target": "commoncrawl.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
"input": "europarl-v7.de-en.en",
"target": "europarl-v7.de-en.de",
},
]
# Use pre-defined minimum count to generate subtoken vocabulary.
_TRAIN_DATA_MIN_COUNT = 6
_EVAL_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/dev.tgz",
"input": "newstest2013.en",
"target": "newstest2013.de",
}
]
_TEST_DATA_SOURCES = [
{
"url": "https://nlp.stanford.edu/projects/nmt/data/wmt14.en-de/newstest2014.en",
"input": "newstest2014.en",
"target": "newstest2014.en",
}
]
# Vocabulary constants
_TARGET_VOCAB_SIZE = 32768 # Number of subtokens in the vocabulary list.
_TARGET_THRESHOLD = 327 # Accept vocabulary if size is within this threshold
_VOCAB_FILE = "vocab.ende.%d" % _TARGET_VOCAB_SIZE
# Strings to include in the generated files.
_PREFIX = "wmt32k"
_TRAIN_TAG = "train"
_EVAL_TAG = "dev" # Following WMT and Tensor2Tensor conventions, in which the
# evaluation datasets are tagged as "dev" for development.
_TEST_TAG = "test"
# Number of files to split train and evaluation data
_TRAIN_SHARDS = 100
_EVAL_SHARDS = 1
_TEST_SHARDS = 1
def find_file(path, filename, max_depth=5):
"""Returns full filepath if the file is in path or a subdirectory."""
for root, dirs, files in os.walk(path):
if filename in files:
return os.path.join(root, filename)
# Don't search past max_depth
depth = root[len(path) + 1:].count(os.sep)
if depth > max_depth:
del dirs[:] # Clear dirs
return None
###############################################################################
# Download and extraction functions
###############################################################################
def get_raw_files(raw_dir, data_source):
"""Return raw files from source. Downloads/extracts if needed.
Args:
raw_dir: string directory to store raw files
data_source: dictionary with
{"url": url of compressed dataset containing input and target files
"input": file with data in input language
"target": file with data in target language}
Returns:
dictionary with
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
"""
raw_files = {
"inputs": [],
"targets": [],
} # keys
for d in data_source:
input_file, target_file = download_and_extract(
raw_dir, d["url"], d["input"], d["target"])
raw_files["inputs"].append(input_file)
raw_files["targets"].append(target_file)
return raw_files
def download_report_hook(count, block_size, total_size):
"""Report hook for download progress.
Args:
count: current block number
block_size: block size
total_size: total size
"""
percent = int(count * block_size * 100 / total_size)
print("\r%d%%" % percent + " completed", end="\r")
def download_from_url(path, url):
"""Download content from a url.
Args:
path: string directory where file will be downloaded
url: string url
Returns:
Full path to downloaded file
"""
filename = url.split("/")[-1]
found_file = find_file(path, filename, max_depth=0)
if found_file is None:
filename = os.path.join(path, filename)
tf.logging.info("Downloading from %s to %s." % (url, filename))
inprogress_filepath = filename + ".incomplete"
inprogress_filepath, _ = urllib.request.urlretrieve(
url, inprogress_filepath, reporthook=download_report_hook)
# Print newline to clear the carriage return from the download progress.
print()
tf.gfile.Rename(inprogress_filepath, filename)
return filename
else:
tf.logging.info("Already downloaded: %s (at %s)." % (url, found_file))
return found_file
def download_and_extract(path, url, input_filename, target_filename):
"""Extract files from downloaded compressed archive file.
Args:
path: string directory where the files will be downloaded
url: url containing the compressed input and target files
input_filename: name of file containing data in source language
target_filename: name of file containing data in target language
Returns:
Full paths to extracted input and target files.
Raises:
OSError: if the download/extraction fails.
"""
# Check if extracted files already exist in path
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
tf.logging.info("Already downloaded and extracted %s." % url)
return input_file, target_file
# Download archive file if it doesn't already exist.
compressed_file = download_from_url(path, url)
# Extract compressed files
tf.logging.info("Extracting %s." % compressed_file)
with tarfile.open(compressed_file, "r:gz") as corpus_tar:
corpus_tar.extractall(path)
# Return filepaths of the requested files.
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
return input_file, target_file
raise OSError("Download/extraction failed for url %s to path %s" %
(url, path))
def txt_line_iterator(path):
"""Iterate through lines of file."""
with tf.gfile.Open(path) as f:
for line in f:
yield line.strip()
def compile_files(raw_dir, raw_files, tag):
"""Compile raw files into a single file for each language.
Args:
raw_dir: Directory containing downloaded raw files.
raw_files: Dict containing filenames of input and target data.
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
tag: String to append to the compiled filename.
Returns:
Full path of compiled input and target files.
"""
tf.logging.info("Compiling files with tag %s." % tag)
filename = "%s-%s" % (_PREFIX, tag)
input_compiled_file = os.path.join(raw_dir, filename + ".lang1")
target_compiled_file = os.path.join(raw_dir, filename + ".lang2")
with tf.gfile.Open(input_compiled_file, mode="w") as input_writer:
with tf.gfile.Open(target_compiled_file, mode="w") as target_writer:
for i in range(len(raw_files["inputs"])):
input_file = raw_files["inputs"][i]
target_file = raw_files["targets"][i]
tf.logging.info("Reading files %s and %s." % (input_file, target_file))
write_file(input_writer, input_file)
write_file(target_writer, target_file)
return input_compiled_file, target_compiled_file
def write_file(writer, filename):
"""Write all of lines from file using the writer."""
for line in txt_line_iterator(filename):
writer.write(line)
writer.write("\n")
###############################################################################
# Data preprocessing
###############################################################################
def encode_and_save_files(
subtokenizer, data_dir, raw_files, tag, total_shards):
"""Save data from files as encoded Examples in TFrecord format.
Args:
subtokenizer: Subtokenizer object that will be used to encode the strings.
data_dir: The directory in which to write the examples
raw_files: A tuple of (input, target) data files. Each line in the input and
the corresponding line in target file will be saved in a tf.Example.
tag: String that will be added onto the file names.
total_shards: Number of files to divide the data into.
Returns:
List of all files produced.
"""
# Create a file for each shard.
filepaths = [shard_filename(data_dir, tag, n + 1, total_shards)
for n in range(total_shards)]
if all_exist(filepaths):
tf.logging.info("Files with tag %s already exist." % tag)
return filepaths
tf.logging.info("Saving files with tag %s." % tag)
input_file = raw_files[0]
target_file = raw_files[1]
# Write examples to each shard in round robin order.
tmp_filepaths = [fname + ".incomplete" for fname in filepaths]
writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filepaths]
counter, shard = 0, 0
for counter, (input_line, target_line) in enumerate(zip(
txt_line_iterator(input_file), txt_line_iterator(target_file))):
if counter > 0 and counter % 100000 == 0:
tf.logging.info("\tSaving case %d." % counter)
example = dict_to_example(
{"inputs": subtokenizer.encode(input_line, add_eos=True),
"targets": subtokenizer.encode(target_line, add_eos=True)})
writers[shard].write(example.SerializeToString())
shard = (shard + 1) % total_shards
for writer in writers:
writer.close()
for tmp_name, final_name in zip(tmp_filepaths, filepaths):
tf.gfile.Rename(tmp_name, final_name)
tf.logging.info("Saved %d Examples", counter)
return filepaths
def shard_filename(path, tag, shard_num, total_shards):
"""Create filename for data shard."""
return os.path.join(
path, "%s-%s-%.5d-of-%.5d" % (_PREFIX, tag, shard_num, total_shards))
def shuffle_records(fname):
"""Shuffle records in a single file."""
tf.logging.info("Shuffling records in file %s" % fname)
# Rename file prior to shuffling
tmp_fname = fname + ".unshuffled"
tf.gfile.Rename(fname, tmp_fname)
reader = tf.python_io.tf_record_iterator(tmp_fname)
records = []
for record in reader:
records.append(record)
if len(records) % 100000 == 0:
tf.logging.info("\tRead: %d", len(records))
random.shuffle(records)
# Write shuffled records to original file name
with tf.python_io.TFRecordWriter(fname) as w:
for count, record in enumerate(records):
w.write(record)
if count > 0 and count % 100000 == 0:
tf.logging.info("\tWriting record: %d" % count)
tf.gfile.Remove(tmp_fname)
def dict_to_example(dictionary):
"""Converts a dictionary of string->int to a tf.Example."""
features = {}
for k, v in six.iteritems(dictionary):
features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
return tf.train.Example(features=tf.train.Features(feature=features))
def all_exist(filepaths):
"""Returns true if all files in the list exist."""
for fname in filepaths:
if not tf.gfile.Exists(fname):
return False
return True
def make_dir(path):
if not tf.gfile.Exists(path):
tf.logging.info("Creating directory %s" % path)
tf.gfile.MakeDirs(path)
def main(unused_argv):
"""Obtain training and evaluation data for the Transformer model."""
tf.logging.set_verbosity(tf.logging.INFO)
make_dir(FLAGS.raw_dir)
make_dir(FLAGS.data_dir)
# Get paths of download/extracted training and evaluation files.
tf.logging.info("Step 1/4: Downloading data from source")
train_files = get_raw_files(FLAGS.raw_dir, _TRAIN_DATA_SOURCES)
eval_files = get_raw_files(FLAGS.raw_dir, _EVAL_DATA_SOURCES)
test_files = get_raw_files(FLAGS.raw_dir, _TEST_DATA_SOURCES)
# Create subtokenizer based on the training files.
tf.logging.info("Step 2/4: Creating subtokenizer and building vocabulary")
train_files_flat = train_files["inputs"] + train_files["targets"]
vocab_file = os.path.join(FLAGS.data_dir, _VOCAB_FILE)
subtokenizer = tokenizer.Subtokenizer.init_from_files(
vocab_file, train_files_flat, _TARGET_VOCAB_SIZE, _TARGET_THRESHOLD,
min_count=None if FLAGS.search else _TRAIN_DATA_MIN_COUNT)
tf.logging.info("Step 3/4: Compiling training and evaluation data")
compiled_train_files = compile_files(FLAGS.raw_dir, train_files, _TRAIN_TAG)
compiled_eval_files = compile_files(FLAGS.raw_dir, eval_files, _EVAL_TAG)
compiled_test_files = compile_files(FLAGS.raw_dir, test_files, _TEST_TAG)
# Tokenize and save data as Examples in the TFRecord format.
tf.logging.info("Step 4/4: Preprocessing and saving data")
train_tfrecord_files = encode_and_save_files(
subtokenizer, FLAGS.data_dir, compiled_train_files, _TRAIN_TAG,
_TRAIN_SHARDS)
encode_and_save_files(
subtokenizer, FLAGS.data_dir, compiled_eval_files, _EVAL_TAG,
_EVAL_SHARDS)
encode_and_save_files(
subtokenizer, FLAGS.data_dir, compiled_test_files, _TEST_TAG,
_TEST_SHARDS)
for fname in train_tfrecord_files:
shuffle_records(fname)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", "-dd", type=str, default="/tmp/translate_ende",
help="[default: %(default)s] Directory for where the "
"translate_ende_wmt32k dataset is saved.",
metavar="<DD>")
parser.add_argument(
"--raw_dir", "-rd", type=str, default="/tmp/translate_ende_raw",
help="[default: %(default)s] Path where the raw data will be downloaded "
"and extracted.",
metavar="<RD>")
parser.add_argument(
"--search", action="store_true",
help="If set, use binary search to find the vocabulary set with size"
"closest to the target size (%d)." % _TARGET_VOCAB_SIZE)
FLAGS, unparsed = parser.parse_known_args()
main(sys.argv) | OpenSeq2Seq-master | open_seq2seq/data/text2text/process_data.py |
OpenSeq2Seq-master | open_seq2seq/data/text2speech/__init__.py |
|
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
import librosa
import librosa.filters
import resampy as rs
def get_speech_features_from_file(
filename,
num_features,
features_type='magnitude',
n_fft=1024,
hop_length=None,
mag_power=2,
feature_normalize=False,
mean=0.,
std=1.,
trim=False,
data_min=1e-5,
return_raw_audio=False,
return_audio_duration=False,
augmentation=None,
mel_basis=None
):
""" Helper function to retrieve spectrograms from wav files
Args:
filename (string): WAVE filename.
num_features (int): number of speech features in frequency domain.
features_type (string): 'magnitude' or 'mel'.
n_fft (int): size of analysis window in samples.
hop_length (int): stride of analysis window in samples.
mag_power (int): power to raise magnitude spectrograms (prior to dot product
with mel basis)
1 for energy spectrograms
2 for power spectrograms
feature_normalize (bool): whether to normalize the data with mean and std
mean (float): if normalize is enabled, the mean to normalize to
std (float): if normalize is enabled, the deviation to normalize to
trim (bool): Whether to trim silence via librosa or not
data_min (float): min clip value prior to taking the log.
Returns:
np.array: np.array of audio features with shape=[num_time_steps,
num_features].
"""
# load audio signal
signal, fs = librosa.core.load(filename, sr=None)
if hop_length is None:
hop_length = int(n_fft / 4)
if trim:
signal, _ = librosa.effects.trim(
signal,
frame_length=int(n_fft/2),
hop_length=int(hop_length/2)
)
if augmentation is not None:
if 'pitch_shift_steps' in augmentation:
pitch_shift_steps = (2.0 * augmentation['pitch_shift_steps'] * \
np.random.rand()) - augmentation['pitch_shift_steps']
signal = librosa.effects.pitch_shift(signal, fs, pitch_shift_steps)
if augmentation['time_stretch_ratio'] > 0:
# time stretch
stretch_amount = 1.0 + (2.0 * np.random.rand() - 1.0) * \
augmentation['time_stretch_ratio']
signal = rs.resample(
signal,
fs,
int(fs * stretch_amount),
filter='kaiser_fast',
)
# noise
noise_level_db = np.random.randint(
low=augmentation['noise_level_min'],
high=augmentation['noise_level_max']
)
signal += np.random.randn(signal.shape[0]) * \
10.0 ** (noise_level_db / 20.0)
speech_features = get_speech_features(
signal, fs, num_features, features_type, n_fft,
hop_length, mag_power, feature_normalize, mean, std, data_min, mel_basis
)
if return_raw_audio:
return signal, speech_features
elif return_audio_duration:
return speech_features, len(signal) * 1.0 / fs
else:
return speech_features
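# Illustrative sketch (not part of the original module): extracting 80-band
# log-mel features from a wav file with the helper above. The file path is a
# hypothetical placeholder.
def _example_get_speech_features_from_file():
  """Hypothetical usage of get_speech_features_from_file."""
  features = get_speech_features_from_file(
      "sample.wav",        # hypothetical path
      num_features=80,
      features_type='mel',
      n_fft=1024,
      mag_power=2,
      trim=False,
      data_min=1e-5)
  # features has shape [num_time_steps, 80]
  return features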
def get_speech_features(
signal,
fs,
num_features,
features_type='magnitude',
n_fft=1024,
hop_length=256,
mag_power=2,
feature_normalize=False,
mean=0.,
std=1.,
data_min=1e-5,
mel_basis=None
):
""" Helper function to retrieve spectrograms from loaded wav
Args:
signal: signal loaded with librosa.
fs (int): sampling frequency in Hz.
num_features (int): number of speech features in frequency domain.
features_type (string): 'magnitude' or 'mel'.
n_fft (int): size of analysis window in samples.
hop_length (int): stride of analysis window in samples.
mag_power (int): power to raise magnitude spectrograms (prior to dot product
with mel basis)
1 for energy spectrograms
2 for power spectrograms
feature_normalize(bool): whether to normalize the data with mean and std
mean(float): if normalize is enabled, the mean to normalize to
std(float): if normalize is enabled, the deviation to normalize to
data_min (float): min clip value prior to taking the log.
Returns:
np.array: np.array of audio features with shape=[num_time_steps,
num_features].
"""
if isinstance(data_min, dict):
data_min_mel = data_min["mel"]
data_min_mag = data_min["magnitude"]
else:
data_min_mel = data_min_mag = data_min
if isinstance(num_features, dict):
num_features_mel = num_features["mel"]
num_features_mag = num_features["magnitude"]
else:
num_features_mel = num_features_mag = num_features
complex_spec = librosa.stft(y=signal, n_fft=n_fft, hop_length=hop_length)
mag, _ = librosa.magphase(complex_spec, power=mag_power)
if features_type == 'magnitude' or features_type == "both":
features = np.log(np.clip(mag, a_min=data_min_mag, a_max=None)).T
assert num_features_mag <= n_fft // 2 + 1, \
"num_features for spectrogram should be <= (fs * window_size // 2 + 1)"
# cut high frequency part
features = features[:, :num_features_mag]
if 'mel' in features_type or features_type == "both":
if features_type == "both":
mag_features = features
if mel_basis is None:
htk = True
norm = None
if 'slaney' in features_type:
htk = False
norm = 1
mel_basis = librosa.filters.mel(
sr=fs,
n_fft=n_fft,
n_mels=num_features_mel,
htk=htk,
norm=norm
)
features = np.dot(mel_basis, mag)
features = np.log(np.clip(features, a_min=data_min_mel, a_max=None)).T
if feature_normalize:
features = normalize(features, mean, std)
if features_type == "both":
return [features, mag_features]
return features
def get_mel(
log_mag_spec,
fs=22050,
n_fft=1024,
n_mels=80,
power=2.,
feature_normalize=False,
mean=0,
std=1,
mel_basis=None,
data_min=1e-5,
htk=True,
norm=None
):
"""
Method to get mel spectrograms from magnitude spectrograms
Args:
log_mag_spec (np.array): log of the magnitude spec
fs (int): sampling frequency in Hz
n_fft (int): size of fft window in samples
n_mels (int): number of mel features
power (float): power of the mag spectrogram
feature_normalize (bool): whether the mag spec was normalized
mean (float): normalization param of mag spec
std (float): normalization param of mag spec
mel_basis (np.array): optional pre-computed mel basis to save computational
time if passed. If not passed, it will call librosa to construct one
data_min (float): min clip value prior to taking the log.
htk (bool): whether to compute the mel spec with the htk or slaney algorithm
norm: Should be None for htk, and 1 for slaney
Returns:
np.array: mel_spec with shape [time, n_mels]
"""
if mel_basis is None:
mel_basis = librosa.filters.mel(
fs,
n_fft,
n_mels=n_mels,
htk=htk,
norm=norm
)
log_mag_spec = log_mag_spec * power
mag_spec = np.exp(log_mag_spec)
mel_spec = np.dot(mag_spec, mel_basis.T)
mel_spec = np.log(np.clip(mel_spec, a_min=data_min, a_max=None))
if feature_normalize:
mel_spec = normalize(mel_spec, mean, std)
return mel_spec
def inverse_mel(
log_mel_spec,
fs=22050,
n_fft=1024,
n_mels=80,
power=2.,
feature_normalize=False,
mean=0,
std=1,
mel_basis=None,
htk=True,
norm=None
):
"""
Reconstructs magnitude spectrogram from a mel spectrogram by multiplying it
with the transposed mel basis.
Args:
log_mel_spec (np.array): log of the mel spec
fs (int): sampling frequency in Hz
n_fft (int): size of fft window in samples
n_mels (int): number of mel features
power (float): power of the mag spectrogram that was used to generate the
mel spec
feature_normalize (bool): whether the mel spec was normalized
mean (float): normalization param of mel spec
std (float): normalization param of mel spec
mel_basis (np.array): optional pre-computed mel basis to save computational
time if passed. If not passed, it will call librosa to construct one
htk (bool): whether to compute the mel spec with the htk or slaney algorithm
norm: Should be None for htk, and 1 for slaney
Returns:
np.array: mag_spec with shape [time, n_fft/2 + 1]
"""
if mel_basis is None:
mel_basis = librosa.filters.mel(
fs,
n_fft,
n_mels=n_mels,
htk=htk,
norm=norm
)
if feature_normalize:
log_mel_spec = denormalize(log_mel_spec, mean, std)
mel_spec = np.exp(log_mel_spec)
mag_spec = np.dot(mel_spec, mel_basis)
mag_spec = np.power(mag_spec, 1. / power)
return mag_spec
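# Illustrative sketch (not part of the original module): an approximate
# roundtrip between get_mel and inverse_mel. The input spectrogram is random
# and used only to show the expected shapes.
def _example_mel_roundtrip():
  """Hypothetical roundtrip between get_mel and inverse_mel."""
  log_mag = np.log(np.random.rand(100, 513) + 1e-5)  # [time, n_fft / 2 + 1]
  log_mel = get_mel(log_mag, fs=22050, n_fft=1024, n_mels=80)   # [time, 80]
  mag = inverse_mel(log_mel, fs=22050, n_fft=1024, n_mels=80)   # [time, 513]
  # mag only approximates exp(log_mag), since the mel projection is lossy.
  return mag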
def normalize(features, mean, std):
"""
Normalizes features with the specified mean and std
"""
return (features - mean) / std
def denormalize(features, mean, std):
"""
Denormalizes features with the specified mean and std
"""
return features * std + mean
| OpenSeq2Seq-master | open_seq2seq/data/text2speech/speech_utils.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import os
import six
import librosa
import numpy as np
import tensorflow as tf
import pandas as pd
from six import string_types
from open_seq2seq.data.data_layer import DataLayer
from open_seq2seq.data.utils import load_pre_existing_vocabulary
from .speech_utils import get_speech_features_from_file,\
inverse_mel, normalize, denormalize
class Text2SpeechDataLayer(DataLayer):
"""
Text-to-speech data layer class
"""
@staticmethod
def get_required_params():
return dict(
DataLayer.get_required_params(), **{
'dataset_location': str,
'dataset': ['LJ', 'MAILABS'],
'num_audio_features': None,
'output_type': ['magnitude', 'mel', 'both'],
'vocab_file': str,
'dataset_files': list,
'feature_normalize': bool,
}
)
@staticmethod
def get_optional_params():
return dict(
DataLayer.get_optional_params(), **{
'pad_to': int,
'mag_power': int,
'pad_EOS': bool,
'pad_value': float,
'feature_normalize_mean': float,
'feature_normalize_std': float,
'trim': bool,
'data_min': None,
'duration_min': int,
'duration_max': int,
'mel_type': ['slaney', 'htk'],
"exp_mag": bool,
'style_input': [None, 'wav'],
'n_samples_train': int,
'n_samples_eval': int,
'n_fft': int,
'fmax': float,
'max_normalization': bool,
'use_cache': bool
}
)
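  # Illustrative sketch (not part of the original class): a hypothetical params
  # dict for this data layer. All paths and sizes are placeholders, and the
  # base DataLayer keys ('mode', 'batch_size', etc.) are assumed here.
  #
  #   params = {
  #       'dataset': 'LJ',
  #       'dataset_location': '/data/LJSpeech-1.1',              # hypothetical
  #       'dataset_files': ['/data/LJSpeech-1.1/metadata.csv'],  # hypothetical
  #       'vocab_file': '/data/tts_vocab.txt',                   # hypothetical
  #       'num_audio_features': 80,
  #       'output_type': 'mel',
  #       'feature_normalize': False,
  #       'batch_size': 32,
  #       'shuffle': True,
  #       'mode': 'train',
  #       'dtype': tf.float32,
  #   }
  #   layer = Text2SpeechDataLayer(params, model=None)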
def __init__(self, params, model, num_workers=None, worker_id=None):
"""Text-to-speech data layer constructor.
See parent class for arguments description.
Config parameters:
* **dataset** (str) --- The dataset to use. Currently 'LJ' (LJSpeech 1.1) and
'MAILABS' are supported.
* **num_audio_features** (int or dict) --- number of audio features to
extract; a dict with 'mel' and 'magnitude' keys when output_type is "both".
* **output_type** (str) --- could be "magnitude", "mel", or "both".
* **vocab_file** (str) --- path to vocabulary file.
* **dataset_files** (list) --- list with paths to all dataset .csv files.
File is assumed to be separated by "|".
* **dataset_location** (string) --- string with path to directory where wavs
are stored.
* **feature_normalize** (bool) --- whether to normalize the data with a
preset mean and std
* **feature_normalize_mean** (float) --- mean used when feature_normalize is
enabled. Defaults to 0.
* **feature_normalize_std** (float) --- standard deviation used when
feature_normalize is enabled. Defaults to 1.
* **mag_power** (int) --- the power to which the magnitude spectrogram is
raised:
1 for energy spectrogram
2 for power spectrogram
Defaults to 2.
* **pad_EOS** (bool) --- whether to apply EOS tokens to both the text and
the speech signal. Will pad at least 1 token regardless of pad_to value.
Defaults to True.
* **pad_value** (float) --- The value we pad the spectrogram with. Defaults
to np.log(data_min).
* **pad_to** (int) --- we pad such that the resulting datapoint is a
multiple of pad_to.
Defaults to 8.
* **trim** (bool) --- Whether to trim silence via librosa or not. Defaults
to False.
* **data_min** (float) --- min clip value prior to taking the log. Defaults
to 1e-5. Please change to 1e-2 if using htk mels.
* **duration_min** (int) --- Minimum duration in steps for speech signal.
All signals less than this will be cut from the training set. Defaults to
0.
* **duration_max** (int) --- Maximum duration in steps for speech signal.
All signals greater than this will be cut from the training set. Defaults
to 4000.
* **mel_type** (str) --- One of ['slaney', 'htk']. Decides which algorithm to
use to compute mel specs.
Defaults to htk.
* **style_input** (str) --- Can be either None or "wav". Must be set to "wav"
for GST. Defaults to None.
* **n_samples_train** (int) --- number of the shortest examples to use for training.
* **n_samples_eval** (int) --- number of the shortest examples to use for evaluation.
* **n_fft** (int) --- FFT window size.
* **fmax** (float) --- highest frequency to use.
* **max_normalization** (bool) --- whether to divide the final audio signal
by its absolute maximum.
* **use_cache** (bool) --- whether to use cache.
"""
super(Text2SpeechDataLayer, self).__init__(
params,
model,
num_workers,
worker_id
)
self.use_cache = self.params.get('use_cache', False)
self._cache = {}
names = ['wav_filename', 'raw_transcript', 'transcript']
sep = '\x7c'
header = None
if self.params["dataset"] == "LJ":
self._sampling_rate = 22050
self._n_fft = self.params.get("n_fft", 1024)
elif self.params["dataset"] == "MAILABS":
self._sampling_rate = 16000
self._n_fft = 800
# Character level vocab
self.params['char2idx'] = load_pre_existing_vocabulary(
self.params['vocab_file'],
min_idx=3,
read_chars=True,
)
# Add the pad, start, and end chars
self.params['char2idx']['<p>'] = 0
self.params['char2idx']['<s>'] = 1
self.params['char2idx']['</s>'] = 2
self.params['idx2char'] = {i: w for w, i in self.params['char2idx'].items()}
self.params['src_vocab_size'] = len(self.params['char2idx'])
self.max_normalization = self.params.get('max_normalization', False)
n_feats = self.params['num_audio_features']
if "both" in self.params["output_type"]:
self._both = True
if self.params["feature_normalize"]:
raise ValueError(
"feature normalize is not currently enabled for both mode"
)
if not isinstance(n_feats, dict):
raise ValueError(
"num_audio_features must be a dictionary for both mode"
)
else:
if ("mel" not in n_feats and
"magnitude" not in n_feats):
raise ValueError(
"num_audio_features must contain mel and magnitude keys"
)
elif (not isinstance(n_feats["mel"], int) or
not isinstance(n_feats["magnitude"], int)):
raise ValueError(
"num_audio_features must be a int"
)
n_mels = n_feats['mel']
data_min = self.params.get("data_min", None)
if data_min is not None:
if not isinstance(data_min, dict):
raise ValueError(
"data_min must be a dictionary for both mode"
)
else:
if "mel" not in data_min and "magnitude" not in data_min:
raise ValueError(
"data_min must contain mel and magnitude keys"
)
elif (not isinstance(data_min["mel"], float) or
not isinstance(data_min["magnitude"], float)):
raise ValueError(
"data_min must be a float"
)
self._exp_mag = self.params.get("exp_mag", True)
else:
if not isinstance(n_feats, int):
raise ValueError(
"num_audio_features must be a float for mel or magnitude mode"
)
if not isinstance(self.params.get("data_min",1.0), float):
raise ValueError(
"data_min must be a float for mel or magnitude mode"
)
self._both = False
self._exp_mag = False
n_mels = n_feats
self._mel = "mel" in self.params["output_type"]
if self._mel or self._both:
htk = True
norm = None
if self.params.get('mel_type', 'htk') == 'slaney':
htk = False
norm = 1
self._mel_basis = librosa.filters.mel(
sr=self._sampling_rate,
n_fft=self._n_fft,
n_mels=n_mels,
htk=htk,
norm=norm,
fmax=self.params.get('fmax', None)
)
else:
self._mel_basis = None
if self.params["interactive"]:
return
# Load csv files
self._files = None
for csvs in params['dataset_files']:
files = pd.read_csv(
csvs,
encoding='utf-8',
sep=sep,
header=header,
names=names,
quoting=3
)
if self._files is None:
self._files = files
else:
self._files = self._files.append(files)
if self.params['mode'] == 'train' and 'n_samples_train' in self.params:
indices = self._files['transcript'].str.len().sort_values().index
self._files = self._files.reindex(indices)
n_samples = self.params.get('n_samples_train')
print('Using just the {} shortest samples'.format(n_samples))
self._files = self._files.iloc[:n_samples]
if self.params['mode'] == 'eval':
indices = self._files['transcript'].str.len().sort_values().index
self._files = self._files.reindex(indices)
if 'n_samples_eval' in self.params:
n_samples = self.params['n_samples_eval']
self._files = self._files.iloc[:n_samples]
if (self.params['mode'] != 'infer'
or self.params.get("style_input", None) == "wav"):
cols = ['wav_filename', 'transcript']
else:
cols = 'transcript'
all_files = self._files.loc[:, cols].values
self._files = self.split_data(all_files)
self._size = self.get_size_in_samples()
self._dataset = None
self._iterator = None
self._input_tensors = None
def split_data(self, data):
if self.params['mode'] != 'train' and self._num_workers is not None:
size = len(data)
start = size // self._num_workers * self._worker_id
if self._worker_id == self._num_workers - 1:
end = size
else:
end = size // self._num_workers * (self._worker_id + 1)
return data[start:end]
return data
@property
def iterator(self):
return self._iterator
def build_graph(self):
"""Builds data reading graph."""
with tf.device('/cpu:0'):
self._dataset = tf.data.Dataset.from_tensor_slices(self._files)
if self.params['shuffle']:
self._dataset = self._dataset.shuffle(self._size)
self._dataset = self._dataset.repeat()
if self._both:
num_audio_features = self.params['num_audio_features']['mel']
num_audio_features += self.params['num_audio_features']['magnitude']
else:
num_audio_features = self.params['num_audio_features']
if (self.params['mode'] != 'infer'
or self.params.get("style_input", None) == "wav"):
self._dataset = self._dataset.map(
lambda line: tf.py_func(
self._parse_audio_transcript_element,
[line],
[tf.int32, tf.int32, self.params['dtype'], self.params['dtype'],\
tf.int32],
stateful=False,
),
num_parallel_calls=8,
)
if (self.params.get("duration_max", None) or
self.params.get("duration_max", None)):
self._dataset = self._dataset.filter(
lambda txt, txt_len, spec, stop, spec_len:
tf.logical_and(
tf.less_equal(
spec_len,
self.params.get("duration_max", 4000)
),
tf.greater_equal(
spec_len,
self.params.get("duration_min", 0)
)
)
)
if self._both:
default_pad_value = 0.
else:
default_pad_value = np.log(self.params.get("data_min", 1e-5))
pad_value = self.params.get("pad_value", default_pad_value)
if self.params["feature_normalize"]:
pad_value = self._normalize(pad_value)
self._dataset = self._dataset.padded_batch(
self.params['batch_size'],
padded_shapes=(
[None], 1, [None, num_audio_features], [None], 1
),
padding_values=(
0, 0, tf.cast(pad_value, dtype=self.params['dtype']),
tf.cast(1., dtype=self.params['dtype']), 0
)
)
else:
self._dataset = self._dataset.map(
lambda line: tf.py_func(
self._parse_transcript_element,
[line],
[tf.int32, tf.int32],
stateful=False,
),
num_parallel_calls=8,
)
self._dataset = self._dataset.padded_batch(
self.params['batch_size'], padded_shapes=([None], 1)
)
self._iterator = self._dataset.prefetch(tf.contrib.data.AUTOTUNE)\
.make_initializable_iterator()
if (self.params['mode'] != 'infer'
or self.params.get("style_input", None) == "wav"):
text, text_length, spec, stop_token_target, spec_length = self._iterator\
.get_next()
# need to explicitly set batch size dimension
# (it is employed in the model)
spec.set_shape(
[self.params['batch_size'], None, num_audio_features]
)
stop_token_target.set_shape([self.params['batch_size'], None])
spec_length = tf.reshape(spec_length, [self.params['batch_size']])
else:
text, text_length = self._iterator.get_next()
text.set_shape([self.params['batch_size'], None])
text_length = tf.reshape(text_length, [self.params['batch_size']])
self._input_tensors = {}
self._input_tensors["source_tensors"] = [text, text_length]
if self.params.get("style_input", None) == "wav":
# mag - not supported currently
if not self._mel and not self._both:
raise ValueError(
"GST is currently only supported on mel and both output modes.")
# mel
mel_spec = spec
if self._both:
mel_spec, _ = tf.split(
mel_spec,
[self.params['num_audio_features']['mel'],
self.params['num_audio_features']['magnitude']],
axis=2
)
self._input_tensors["source_tensors"].extend([mel_spec, spec_length])
# both
if self.params['mode'] != 'infer':
self._input_tensors['target_tensors'] = [
spec, stop_token_target, spec_length
]
def _parse_audio_transcript_element(self, element):
"""Parses tf.data element from TextLineDataset into audio and text.
Args:
element: tf.data element from TextLineDataset.
Returns:
tuple: text_input text as `np.array` of ids, text_input length,
target audio features as `np.array`, stop token targets as `np.array`,
length of target sequence.
"""
audio_filename, transcript = element
transcript = transcript.lower()
if six.PY2:
audio_filename = unicode(audio_filename, "utf-8")
transcript = unicode(transcript, "utf-8")
elif not isinstance(transcript, string_types):
audio_filename = str(audio_filename, "utf-8")
transcript = str(transcript, "utf-8")
text_input = np.array(
[self.params['char2idx'][c] for c in transcript]
)
pad_to = self.params.get('pad_to', 8)
if self.params.get("pad_EOS", True):
num_pad = pad_to - ((len(text_input) + 2) % pad_to)
text_input = np.pad(
text_input, ((1, 1)),
"constant",
constant_values=(
(self.params['char2idx']["<s>"], self.params['char2idx']["</s>"])
)
)
text_input = np.pad(
text_input, ((0, num_pad)),
"constant",
constant_values=self.params['char2idx']["<p>"]
)
# Mainly used for GST
if "wavs" in audio_filename:
file_path = os.path.join(
self.params['dataset_location'], audio_filename + ".wav"
)
# Default path for LJ and MAILABS
else:
file_path = os.path.join(
self.params['dataset_location'], "wavs", audio_filename + ".wav"
)
if self._mel:
features_type = "mel_htk"
if self.params.get('mel_type', 'htk') == 'slaney':
features_type = "mel_slaney"
else:
features_type = self.params['output_type']
if self.use_cache and audio_filename in self._cache:
spectrogram = self._cache[audio_filename]
else:
spectrogram = get_speech_features_from_file(
file_path,
self.params['num_audio_features'],
features_type=features_type,
n_fft=self._n_fft,
mag_power=self.params.get('mag_power', 2),
feature_normalize=self.params["feature_normalize"],
mean=self.params.get("feature_normalize_mean", 0.),
std=self.params.get("feature_normalize_std", 1.),
trim=self.params.get("trim", False),
data_min=self.params.get("data_min", 1e-5),
mel_basis=self._mel_basis
)
if self.use_cache:
self._cache[audio_filename] = spectrogram
if self._both:
mel_spectrogram, spectrogram = spectrogram
if self._exp_mag:
spectrogram = np.exp(spectrogram)
stop_token_target = np.zeros(
[len(spectrogram)], dtype=self.params['dtype'].as_numpy_dtype()
)
if self.params.get("pad_EOS", True):
num_pad = pad_to - ((len(spectrogram) + 1) % pad_to) + 1
data_min = self.params.get("data_min", 1e-5)
if isinstance(data_min, dict):
pad_value_mel = self.params.get("pad_value", np.log(data_min["mel"]))
if self._exp_mag:
pad_value_mag = self.params.get("pad_value", data_min["magnitude"])
else:
pad_value_mag = self.params.get("pad_value", np.log(data_min["magnitude"]))
else:
pad_value = self.params.get("pad_value", np.log(data_min))
if self.params["feature_normalize"]:
pad_value = self._normalize(pad_value)
pad_value_mel = pad_value_mag = pad_value
if self._both:
mel_spectrogram = np.pad(
mel_spectrogram,
# ((8, num_pad), (0, 0)),
((0, num_pad), (0, 0)),
"constant",
constant_values=pad_value_mel
)
spectrogram = np.pad(
spectrogram,
# ((8, num_pad), (0, 0)),
((0, num_pad), (0, 0)),
"constant",
constant_values=pad_value_mag
)
spectrogram = np.concatenate((mel_spectrogram, spectrogram), axis=1)
else:
spectrogram = np.pad(
spectrogram,
# ((8, num_pad), (0, 0)),
((0, num_pad), (0, 0)),
"constant",
constant_values=pad_value
)
stop_token_target = np.pad(
stop_token_target, ((0, num_pad)), "constant", constant_values=1
)
else:
stop_token_target[-1] = 1.
assert len(text_input) % pad_to == 0
assert len(spectrogram) % pad_to == 0
return np.int32(text_input), \
np.int32([len(text_input)]), \
spectrogram.astype(self.params['dtype'].as_numpy_dtype()), \
stop_token_target.astype(self.params['dtype'].as_numpy_dtype()), \
np.int32([len(spectrogram)])
def _parse_transcript_element(self, transcript):
"""Parses text from file and returns array of text features.
Args:
transcript: the string to parse.
Returns:
tuple: target text as `np.array` of ids, target text length.
"""
if six.PY2:
transcript = unicode(transcript, "utf-8")
elif not isinstance(transcript, string_types):
transcript = str(transcript, "utf-8")
transcript = transcript.lower()
text_input = np.array(
        [self.params['char2idx'].get(c, 3) for c in transcript]
)
pad_to = self.params.get('pad_to', 8)
if self.params.get("pad_EOS", True):
num_pad = pad_to - ((len(text_input) + 2) % pad_to)
text_input = np.pad(
text_input, ((1, 1)),
"constant",
constant_values=(
(self.params['char2idx']["<s>"], self.params['char2idx']["</s>"])
)
)
text_input = np.pad(
text_input, ((0, num_pad)),
"constant",
constant_values=self.params['char2idx']["<p>"]
)
return np.int32(text_input), \
np.int32([len(text_input)])
def parse_text_output(self, text):
text = "".join([self.params['idx2char'][k] for k in text])
return text
def create_interactive_placeholders(self):
self._text = tf.placeholder(
dtype=tf.int32,
shape=[self.params["batch_size"], None]
)
self._text_length = tf.placeholder(
dtype=tf.int32,
shape=[self.params["batch_size"]]
)
self._input_tensors = {}
self._input_tensors["source_tensors"] = [self._text, self._text_length]
def create_feed_dict(self, model_in):
""" Creates the feed dict for interactive infer
Args:
model_in (str): The string to be spoken.
Returns:
feed_dict (dict): Dictionary with values for the placeholders.
"""
text = []
text_length = []
for line in model_in:
if not isinstance(line, string_types):
        raise ValueError(
            "Text2Speech's interactive inference mode only supports string. "
            "Got {}".format(type(line))
        )
text_a, text_length_a = self._parse_transcript_element(line)
text.append(text_a)
text_length.append(text_length_a)
max_len = np.max(text_length)
for i, line in enumerate(text):
line = np.pad(
line, ((0, max_len-len(line))),
"constant", constant_values=self.params['char2idx']["<p>"]
)
text[i] = line
text = np.reshape(text, [self.params["batch_size"], -1])
text_length = np.reshape(text_length, [self.params["batch_size"]])
feed_dict = {
self._text: text,
self._text_length: text_length,
}
return feed_dict
@property
def input_tensors(self):
return self._input_tensors
@property
def sampling_rate(self):
return self._sampling_rate
@property
def n_fft(self):
return self._n_fft
def get_size_in_samples(self):
"""Returns the number of audio files."""
return len(self._files)
def get_magnitude_spec(self, spectrogram, is_mel=False):
"""Returns an energy magnitude spectrogram. The processing depends on the
data layer params.
Args:
spectrogram: output spec from model
Returns:
mag_spec: mag spec
"""
spectrogram = spectrogram.astype(float)
if self._mel or (is_mel and self._both):
htk = True
norm = None
if self.params.get('mel_type', 'htk') == 'slaney':
htk = False
norm = 1
n_feats = self.params['num_audio_features']
if self._both:
n_feats = n_feats["mel"]
return inverse_mel(
spectrogram,
fs=self._sampling_rate,
n_fft=self._n_fft,
n_mels=n_feats,
power=self.params.get('mag_power', 2),
feature_normalize=self.params["feature_normalize"],
mean=self.params.get("feature_normalize_mean", 0.),
std=self.params.get("feature_normalize_std", 1.),
mel_basis=self._mel_basis,
htk=htk,
norm=norm
)
# Else it is a mag spec
else:
if self.params["feature_normalize"]:
spectrogram = self._denormalize(spectrogram)
n_feats = self.params['num_audio_features']
data_min = self.params.get("data_min", 1e-5)
if self._both:
n_feats = n_feats["magnitude"]
if isinstance(data_min, dict):
data_min = data_min["magnitude"]
if not self._exp_mag:
data_min = np.log(data_min)
else:
data_min = np.log(data_min)
# Ensure that num_features is consistent with n_fft
if n_feats < self._n_fft // 2 + 1:
num_pad = (self._n_fft // 2 + 1) - spectrogram.shape[1]
spectrogram = np.pad(
spectrogram,
((0, 0), (0, num_pad)),
"constant",
constant_values=data_min
)
mag_spec = spectrogram * 1.0 / self.params.get('mag_power', 2)
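      # Assuming the stored features are log(|S|**mag_power), as produced by
      # the text2speech feature extraction, dividing by mag_power and (when
      # applicable) exponentiating below recovers the linear magnitude |S|.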
if not self._both and not self._exp_mag:
mag_spec = np.exp(mag_spec)
return mag_spec
def _normalize(self, spectrogram):
return normalize(
spectrogram,
mean=self.params.get("feature_normalize_mean", 0.),
std=self.params.get("feature_normalize_std", 1.)
)
def _denormalize(self, spectrogram):
return denormalize(
spectrogram,
mean=self.params.get("feature_normalize_mean", 0.),
std=self.params.get("feature_normalize_std", 1.)
)
| OpenSeq2Seq-master | open_seq2seq/data/text2speech/text2speech.py |
# Copyright (c) 2018 NVIDIA Corporation
import os
import six
import numpy as np
import tensorflow as tf
import pandas as pd
from open_seq2seq.data.data_layer import DataLayer
from open_seq2seq.data.text2speech.speech_utils import \
get_speech_features_from_file
class WavenetDataLayer(DataLayer):
""" Text to speech data layer class for Wavenet """
@staticmethod
def get_required_params():
return dict(
DataLayer.get_required_params(), **{
"num_audio_features": int,
"dataset_files": list
}
)
@staticmethod
def get_optional_params():
return dict(
DataLayer.get_optional_params(), **{
"dataset_location": str
}
)
def __init__(self, params, model, num_workers=None, worker_id=None):
"""
Wavenet data layer constructor.
See parent class for arguments description.
Config parameters:
* **num_audio_features** (int) --- number of spectrogram audio features
* **dataset_files** (list) --- list with paths to all dataset .csv files
* **dataset_location** (str) --- string with path to directory where wavs
are stored
"""
super(WavenetDataLayer, self).__init__(
params,
model,
num_workers,
worker_id
)
if self.params.get("dataset_location", None) is None:
raise ValueError(
"dataset_location must be specified when using LJSpeech"
)
names = ["wav_filename", "raw_transcript", "transcript"]
sep = "\x7c"
header = None
self.sampling_rate = 22050
self.n_fft = 1024
self._files = None
for csvs in params["dataset_files"]:
files = pd.read_csv(
csvs,
encoding="utf-8",
sep=sep,
header=header,
names=names,
quoting=3
)
if self._files is None:
self._files = files
else:
self._files = self._files.append(files)
cols = "wav_filename"
if self._files is not None:
all_files = self._files.loc[:, cols].values
self._files = self.split_data(all_files)
self._size = self.get_size_in_samples()
self._dataset = None
self._iterator = None
self._input_tensors = None
@property
def input_tensors(self):
return self._input_tensors
def get_size_in_samples(self):
if self._files is not None:
return len(self._files)
else:
return 0
def split_data(self, data):
if self.params['mode'] != 'train' and self._num_workers is not None:
size = len(data)
start = size // self._num_workers * self._worker_id
if self._worker_id == self._num_workers - 1:
end = size
else:
end = size // self._num_workers * (self._worker_id + 1)
return data[start:end]
return data
@property
def iterator(self):
return self._iterator
def _parse_audio_element(self, element):
"""Parses tf.data element from TextLineDataset into audio."""
audio_filename = element
if six.PY2:
audio_filename = unicode(audio_filename, "utf-8")
else:
audio_filename = str(audio_filename, "utf-8")
file_path = os.path.join(
self.params["dataset_location"],
audio_filename + ".wav"
)
audio, spectrogram = get_speech_features_from_file(
file_path,
self.params["num_audio_features"],
features_type="mel",
data_min=1e-5,
return_raw_audio=True
)
spectrogram = np.pad(
spectrogram,
((0, 1), (0, 0)),
"constant",
constant_values=1e-5
)
assert len(audio) < len(spectrogram)*256, \
"audio len: {}, spec*256 len: {}".format(len(audio), \
len(spectrogram)*256)
num_pad = len(spectrogram)*256 - len(audio)
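    # 256 audio samples per spectrogram frame is assumed here (the STFT hop
    # length used for this dataset), so padding the waveform to
    # len(spectrogram) * 256 keeps it aligned with the frame-repeated
    # spectrogram built below.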
audio = np.pad(
audio,
(0, num_pad),
"constant",
constant_values=0
)
# upsample the spectrogram to match source length by repeating each value
spectrogram = np.repeat(spectrogram, 256, axis=0)
return audio.astype(self.params["dtype"].as_numpy_dtype()), \
np.int32([len(audio)]), \
spectrogram.astype(self.params["dtype"].as_numpy_dtype()), \
np.int32([len(spectrogram)])
def _parse_spectrogram_element(self, element):
audio, au_length, spectrogram, spec_length = \
self._parse_audio_element(element)
return spectrogram, spec_length
def create_interactive_placeholders(self):
self._source = tf.placeholder(
dtype=self.params["dtype"],
shape=[self.params["batch_size"], None]
)
self._src_length = tf.placeholder(
dtype=tf.int32,
shape=[self.params["batch_size"]]
)
self._spec = tf.placeholder(
dtype=self.params["dtype"],
shape=[self.params["batch_size"], None,
self.params["num_audio_features"]]
)
self._spec_length = tf.placeholder(
dtype=tf.int32,
shape=[self.params["batch_size"]]
)
self._spec_offset = tf.placeholder(
dtype=tf.int32,
shape=()
)
self._input_tensors = {}
self._input_tensors["source_tensors"] = [
self._source, self._src_length, self._spec, self._spec_length,
self._spec_offset
]
def create_feed_dict(self, model_in):
"""
Creates the feed dict for interactive infer using a spectrogram
Args:
model_in: tuple containing source audio, length of the source, \
conditioning spectrogram, length of the spectrogram, index of \
receptive field window
"""
source, src_length, spec, spec_length, spec_offset = model_in
return {
self._source: source,
self._src_length: src_length,
self._spec: spec,
self._spec_length: spec_length,
self._spec_offset: spec_offset
}
def build_graph(self):
""" builds data reading graph """
self._dataset = tf.data.Dataset.from_tensor_slices(self._files)
if self.params["shuffle"]:
self._dataset = self._dataset.shuffle(self._size)
self._dataset = self._dataset.repeat()
num_audio_features = self.params["num_audio_features"]
if self.params["mode"] != "infer":
self._dataset = self._dataset.map(
lambda line: tf.py_func(
self._parse_audio_element,
[line],
[self.params["dtype"], tf.int32, self.params["dtype"], tf.int32],
stateful=False
),
num_parallel_calls=8
)
self._dataset = self._dataset.padded_batch(
self.params["batch_size"],
padded_shapes=([None], 1, [None, num_audio_features], 1)
)
else:
raise ValueError("Non-interactive infer is not supported")
self._iterator = self._dataset.prefetch(tf.contrib.data.AUTOTUNE) \
.make_initializable_iterator()
if self.params["mode"] != "infer":
source, src_length, spec, spec_length = self._iterator.get_next()
spec.set_shape([self.params["batch_size"], None, num_audio_features])
spec_length = tf.reshape(spec_length, [self.params["batch_size"]])
source.set_shape([self.params["batch_size"], None])
src_length = tf.reshape(src_length, [self.params["batch_size"]])
self._input_tensors = {}
self._input_tensors["source_tensors"] = [
source, src_length, spec, spec_length
]
self._input_tensors["target_tensors"] = [source, src_length]
else:
raise ValueError("Non-interactive infer is not supported")
| OpenSeq2Seq-master | open_seq2seq/data/text2speech/text2speech_wavenet.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import math
import os
import numpy as np
import numpy.testing as npt
import scipy.io.wavfile as wave
import tensorflow as tf
from six.moves import range
from .speech_utils import get_speech_features, get_speech_features_from_file, \
augment_audio_signal
class SpeechUtilsTests(tf.test.TestCase):
def test_augment_audio_signal(self):
filename = 'open_seq2seq/test_utils/toy_speech_data/wav_files/46gc040q.wav'
freq_s, signal = wave.read(filename)
signal = signal.astype(np.float32)
augmentation = {
'speed_perturbation_ratio': 0.2,
'noise_level_min': -90,
'noise_level_max': -46,
}
# just checking length requirements here for now
for _ in range(100):
signal_augm = augment_audio_signal(signal, freq_s, augmentation)
self.assertLessEqual(signal.shape[0] * 0.8, signal_augm.shape[0])
self.assertGreaterEqual(signal.shape[0] * 1.2, signal_augm.shape[0])
augmentation = {
'speed_perturbation_ratio': 0.5,
'noise_level_min': -90,
'noise_level_max': -46,
}
# just checking length requirements here for now
for _ in range(100):
signal_augm = augment_audio_signal(signal, freq_s, augmentation)
self.assertLessEqual(signal.shape[0] * 0.5, signal_augm.shape[0])
self.assertGreaterEqual(signal.shape[0] * 1.5, signal_augm.shape[0])
def test_get_speech_features_from_file(self):
dirname = 'open_seq2seq/test_utils/toy_speech_data/wav_files/'
for name in ['46gc040q.wav', '206o0103.wav', '48rc041b.wav']:
filename = os.path.join(dirname, name)
for num_features in [161, 120]:
for window_stride in [10e-3, 5e-3, 40e-3]:
for window_size in [20e-3, 30e-3]:
for features_type in ['spectrogram', 'mfcc', 'logfbank']:
freq_s, signal = wave.read(filename)
n_window_size = int(freq_s * window_size)
n_window_stride = int(freq_s * window_stride)
length = 1 + (signal.shape[0] - n_window_size)// n_window_stride
if length % 8 != 0:
length += 8 - length % 8
right_shape = (length, num_features)
params = {}
params['num_audio_features'] = num_features
params['input_type'] = features_type
params['window_size'] = window_size
params['window_stride'] = window_stride
params['sample_freq'] = 16000
input_features, _ = get_speech_features_from_file(
filename,
params
)
self.assertTrue(input_features.shape[0] % 8 == 0)
self.assertTupleEqual(right_shape, input_features.shape)
self.assertAlmostEqual(np.mean(input_features), 0.0, places=6)
self.assertAlmostEqual(np.std(input_features), 1.0, places=6)
# only for spectrogram
with self.assertRaises(AssertionError):
params = {}
params['num_audio_features'] = n_window_size // 2 + 2
params['input_type'] = 'spectrogram'
params['window_size'] = window_size
params['window_stride'] = window_stride
params['sample_freq'] = 16000
get_speech_features_from_file(
filename,
params
)
def test_get_speech_features_from_file_augmentation(self):
augmentation = {
'speed_perturbation_ratio': 0.0,
'noise_level_min': -90,
'noise_level_max': -46,
}
filename = 'open_seq2seq/test_utils/toy_speech_data/wav_files/46gc040q.wav'
num_features = 161
params = {}
params['sample_freq'] = 16000
params['num_audio_features'] = num_features
input_features_clean, _ = get_speech_features_from_file(
filename, params
)
params['augmentation'] = augmentation
input_features_augm, _ = get_speech_features_from_file(
filename, params
)
# just checking that result is different with and without augmentation
self.assertTrue(np.all(np.not_equal(input_features_clean,
input_features_augm)))
augmentation = {
'speed_perturbation_ratio': 0.2,
'noise_level_min': -90,
'noise_level_max': -46,
}
params['augmentation'] = augmentation
input_features_augm, _ = get_speech_features_from_file(
filename, params
)
self.assertNotEqual(
input_features_clean.shape[0],
input_features_augm.shape[0],
)
self.assertEqual(
input_features_clean.shape[1],
input_features_augm.shape[1],
)
def tst_get_speech_features_with_sine(self):
freq_s = 16000.0
t_s = np.arange(0, 0.5, 1.0 / freq_s)
signal = np.sin(2 * np.pi * 4000 * t_s)
features, _ = get_speech_features(signal, freq_s, 161)
npt.assert_allclose(
np.abs(features - features[0]),
np.zeros_like(features),
atol=1e-6,
)
for i in range(80):
npt.assert_allclose(features[:, 79 - i], features[:, 81 + i], atol=1e-6)
self.assertGreater(features[0, 80 - i], features[0, 80 - i - 1])
if __name__ == '__main__':
tf.test.main()
| OpenSeq2Seq-master | open_seq2seq/data/speech2text/speech_utils_test.py |
import os
import six
import numpy as np
import tensorflow as tf
import pandas as pd
import librosa
from open_seq2seq.data.data_layer import DataLayer
from open_seq2seq.data.text2speech.speech_utils import \
get_speech_features_from_file
class SpeechCommandsDataLayer(DataLayer):
@staticmethod
def get_required_params():
return dict(DataLayer.get_required_params(), ** {
"dataset_files": list,
"dataset_location": str,
"num_audio_features": int,
"audio_length": int,
"num_labels": int,
"model_format": str
})
@staticmethod
def get_optional_params():
return dict(DataLayer.get_optional_params(), **{
"cache_data": bool,
"augment_data": bool
})
def split_data(self, data):
if self.params["mode"] != "train" and self._num_workers is not None:
size = len(data)
start = size // self._num_workers * self._worker_id
if self._worker_id == self._num_workers - 1:
end = size
else:
end = size // self._num_workers * (self._worker_id + 1)
return data[start:end]
return data
@property
def input_tensors(self):
return self._input_tensors
@property
def iterator(self):
return self._iterator
def get_size_in_samples(self):
if self._files is not None:
return len(self._files)
else:
return 0
def __init__(self, params, model, num_workers=None, worker_id=None):
"""
ResNet Speech Commands data layer constructor.
Config parameters:
* **dataset_files** (list) --- list with paths to all dataset .csv files
* **dataset_location** (str) --- string with path to directory where .wavs
are stored
* **num_audio_features** (int) --- number of spectrogram audio features and
image length
* **audio_length** (int) --- cropping length of spectrogram and image width
* **num_labels** (int) --- number of classes in dataset
* **model_format** (str) --- determines input format, should be one of
"jasper" or "resnet"
* **cache_data** (bool) --- cache the training data in the first epoch
* **augment_data** (bool) --- add time stretch and noise to training data
"""
super(SpeechCommandsDataLayer, self).__init__(params, model, num_workers, worker_id)
if self.params["mode"] == "infer":
raise ValueError("Inference is not supported on SpeechCommandsDataLayer")
self._files = None
for file in self.params["dataset_files"]:
csv_file = pd.read_csv(
os.path.join(self.params["dataset_location"], file),
encoding="utf-8",
sep=",",
header=None,
names=["label", "wav_filename"],
dtype=str
)
if self._files is None:
self._files = csv_file
else:
        # DataFrame.append returns a new frame, so the result must be
        # reassigned or all files after the first would be dropped
        self._files = self._files.append(csv_file)
cols = ["label", "wav_filename"]
if self._files is not None:
all_files = self._files.loc[:, cols].values
self._files = self.split_data(all_files)
self._size = self.get_size_in_samples()
self._iterator = None
self._input_tensors = None
def preprocess_image(self, image):
"""Crops or pads a spectrogram into a fixed dimension square image
"""
num_audio_features = self.params["num_audio_features"]
audio_length = self.params["audio_length"]
if image.shape[0] > audio_length: # randomly slice
offset = np.random.randint(0, image.shape[0] - audio_length + 1)
image = image[offset:offset + audio_length, :]
else: # symmetrically pad with zeros
pad_left = (audio_length - image.shape[0]) // 2
pad_right = (audio_length - image.shape[0]) // 2
if (audio_length - image.shape[0]) % 2 == 1:
pad_right += 1
image = np.pad(
image,
((pad_left, pad_right), (0, 0)),
"constant"
)
assert image.shape == (audio_length, num_audio_features)
# add dummy dimension
if self.params["model_format"] == "jasper": # for batch norm
image = np.expand_dims(image, 1)
else: # for channel
image = np.expand_dims(image, -1)
return image
def parse_element(self, element):
"""Reads an audio file and returns the augmented spectrogram image
"""
label, audio_filename = element
if six.PY2:
audio_filename = unicode(audio_filename, "utf-8")
else:
audio_filename = str(audio_filename, "utf-8")
file_path = os.path.join(
self.params["dataset_location"],
audio_filename
)
if self.params["mode"] == "train" and self.params.get("augment_data", False):
augmentation = {
"pitch_shift_steps": 2,
"time_stretch_ratio": 0.2,
"noise_level_min": -90,
"noise_level_max": -46,
}
else:
augmentation = None
spectrogram = get_speech_features_from_file(
file_path,
self.params["num_audio_features"],
features_type="mel",
data_min=1e-5,
augmentation=augmentation
)
image = self.preprocess_image(spectrogram)
return image.astype(self.params["dtype"].as_numpy_dtype()), \
np.int32(self.params["num_audio_features"]), np.int32(label)
def build_graph(self):
dataset = tf.data.Dataset.from_tensor_slices(self._files)
cache_data = self.params.get("cache_data", False)
if not cache_data:
if self.params["shuffle"]:
dataset = dataset.shuffle(self._size)
dataset = dataset.map(
lambda line: tf.py_func(
self.parse_element,
[line],
[self.params["dtype"], tf.int32, tf.int32],
stateful=False
),
num_parallel_calls=8
)
if cache_data:
dataset = dataset.cache()
if self.params["shuffle"]:
dataset = dataset.shuffle(self._size)
if self.params["repeat"]:
dataset = dataset.repeat()
dataset = dataset.batch(self.params["batch_size"])
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
self._iterator = dataset.make_initializable_iterator()
inputs, lengths, labels = self._iterator.get_next()
if self.params["model_format"] == "jasper":
inputs.set_shape([
self.params["batch_size"],
self.params["audio_length"],
1,
self.params["num_audio_features"],
]) # B T 1 C
lengths.set_shape([self.params["batch_size"]])
source_tensors = [inputs, lengths]
else:
inputs.set_shape([
self.params["batch_size"],
self.params["num_audio_features"],
self.params["num_audio_features"],
1
]) # B W L C
source_tensors = [inputs]
labels = tf.one_hot(labels, self.params["num_labels"])
labels.set_shape([self.params["batch_size"], self.params["num_labels"]])
self._input_tensors = {
"source_tensors": source_tensors,
"target_tensors": [labels]
    }
 | OpenSeq2Seq-master | open_seq2seq/data/speech2text/speech_commands.py |
 | OpenSeq2Seq-master | open_seq2seq/data/speech2text/__init__.py |
# Produces labelled .csv files containing balanced samples of classes
# and their labels for the chosen dataset
import os
import random
import librosa
import numpy as np
# choose one of three datasets
# 1) v1-12: V1 dataset with 12 classes, including unknown and silence
# 2) v1-30: V1 dataset with 30 classes, without unknown and silence
# 3) v2: V2 dataset with 35 classes
DATASET = "v1-12"
if DATASET == "v1-12":
classes = ["yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go", "unknown", "silence"]
elif DATASET == "v1-30":
classes = ["yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go", "bed", "bird", "cat", "dog", "eight", "five", "four", "happy", "house", "marvin", "nine", "one", "seven", "sheila", "six", "three", "tree", "two", "wow", "zero"]
elif DATASET == "v2":
classes = ["backward", "bed", "bird", "cat", "dog", "down", "eight", "five", "follow", "forward", "four", "go", "happy", "house", "learn", "left", "marvin", "nine", "no", "off", "on", "one", "right", "seven", "sheila", "six", "stop", "three", "tree", "two", "up", "visual", "wow", "yes", "zero"]
else:
print("Dataset not supported")
exit()
root_dir = "../../../data"
if "v1" in DATASET:
root_dir = os.path.join(root_dir, "speech_commands_v0.01")
else:
root_dir = os.path.join(root_dir, "speech_commands_v0.02")
eval_batch = 16
train_split = 0.8
test_split = val_split = (1 - train_split) / 2
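# With train_split = 0.8 this yields test_split = val_split = 0.1, i.e. an
# 80/10/10 split per class, sized from the smallest class (see below).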
data_list = []
min_samples_per_class = None
max_samples_per_class = 5000
# build a list of all available samples
for idx, label in enumerate(classes):
class_list = []
if label == "unknown":
unknowns = ["bed", "bird", "cat", "dog", "eight", "five", "four", "happy", "house", "marvin", "nine", "one", "seven", "sheila", "six", "three", "tree", "two", "wow", "zero"]
for unknown in unknowns:
folder = os.path.join(root_dir, unknown)
for file in os.listdir(folder):
file_path = "{}/{}".format(unknown, file)
class_list.append(file_path)
elif label == "silence":
silence_path = os.path.join(root_dir, "silence")
if not os.path.exists(silence_path):
os.mkdir(silence_path)
silence_stride = 2000
sampling_rate = 16000
folder = os.path.join(root_dir, "_background_noise_")
for file in os.listdir(folder):
if ".wav" in file:
load_path = os.path.join(folder, file)
y, sr = librosa.load(load_path)
for i in range(0, len(y) - sampling_rate, silence_stride):
file_path = "silence/{}_{}.wav".format(file[:-4], i)
y_slice = y[i:i + sampling_rate]
librosa.output.write_wav(os.path.join(root_dir, file_path), y_slice, sr)
class_list.append(file_path)
else:
folder = os.path.join(root_dir, label)
for file in os.listdir(folder):
file_path = "{}/{}".format(label, file)
class_list.append(file_path)
if min_samples_per_class is None or len(class_list) < min_samples_per_class:
min_samples_per_class = len(class_list)
random.shuffle(class_list)
data_list.append(class_list)
# sample and write to files
test_part = int(test_split * min_samples_per_class)
test_part += eval_batch - (test_part % eval_batch)
val_part = int(val_split * min_samples_per_class)
val_part += eval_batch - (val_part % eval_batch)
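# Both parts are rounded up to a multiple of eval_batch so evaluation batches
# divide evenly; note that an already-aligned value still gains one extra
# batch of eval_batch samples.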
train_samples = []
test_samples = []
val_samples = []
for i, class_list in enumerate(data_list):
# take test and validation samples out
for sample in class_list[:test_part]:
test_samples.append("{},{}".format(i, sample))
for sample in class_list[test_part:test_part + val_part]:
val_samples.append("{},{}".format(i, sample))
samples = class_list[test_part + val_part:]
length = len(samples)
while len(class_list) < max_samples_per_class:
l = np.random.randint(0, length)
class_list.append(samples[l])
for sample in class_list[test_part + val_part:max_samples_per_class]:
train_samples.append("{},{}".format(i, sample))
train_file = open(os.path.join(root_dir, DATASET + "-train.txt"), "w")
for line in train_samples:
train_file.write(line + "\n")
test_file = open(os.path.join(root_dir, DATASET + "-test.txt"), "w")
for line in test_samples:
test_file.write(line + "\n")
val_file = open(os.path.join(root_dir, DATASET + "-val.txt"), "w")
for line in val_samples:
val_file.write(line + "\n")
| OpenSeq2Seq-master | open_seq2seq/data/speech2text/speech_commands_preprocessing.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import math
import os
import h5py
import numpy as np
import resampy as rs
import scipy.io.wavfile as wave
BACKENDS = []
try:
import python_speech_features as psf
BACKENDS.append('psf')
except ImportError:
pass
try:
import librosa
BACKENDS.append('librosa')
except ImportError:
pass
WINDOWS_FNS = {"hanning": np.hanning, "hamming": np.hamming, "none": None}
class PreprocessOnTheFlyException(Exception):
""" Exception that is thrown to not load preprocessed features from disk;
recompute on-the-fly.
This saves disk space (if you're experimenting with data input
formats/preprocessing) but can be slower.
The slowdown is especially apparent for small, fast NNs."""
pass
class RegenerateCacheException(Exception):
""" Exception that is thrown to force recomputation of (preprocessed) features
"""
pass
def load_features(path, data_format):
""" Function to load (preprocessed) features from disk
Args:
:param path: the path where the features are stored
:param data_format: the format in which the features are stored
:return: tuple of (features, duration)
"""
if data_format == 'hdf5':
with h5py.File(path + '.hdf5', "r") as hf5_file:
features = hf5_file["features"][:]
duration = hf5_file["features"].attrs["duration"]
elif data_format == 'npy':
features, duration = np.load(path + '.npy')
elif data_format == 'npz':
data = np.load(path + '.npz')
features = data['features']
duration = data['duration']
else:
raise ValueError("Invalid data format for caching: ", data_format, "!\n",
"options: hdf5, npy, npz")
return features, duration
def save_features(features, duration, path, data_format, verbose=False):
""" Function to save (preprocessed) features to disk
Args:
:param features: features
:param duration: metadata: duration in seconds of audio file
:param path: path to store the data
:param data_format: format to store the data in ('npy',
'npz',
'hdf5')
"""
if verbose: print("Saving to: ", path)
if data_format == 'hdf5':
with h5py.File(path + '.hdf5', "w") as hf5_file:
dset = hf5_file.create_dataset("features", data=features)
dset.attrs["duration"] = duration
elif data_format == 'npy':
np.save(path + '.npy', [features, duration])
elif data_format == 'npz':
np.savez(path + '.npz', features=features, duration=duration)
else:
raise ValueError("Invalid data format for caching: ", data_format, "!\n",
"options: hdf5, npy, npz")
def get_preprocessed_data_path(filename, params):
""" Function to convert the audio path into the path to the preprocessed
version of this audio
Args:
:param filename: WAVE filename
:param params: dictionary containing preprocessing parameters
:return: path to new file (without extension). The path is
generated from the relevant preprocessing parameters.
"""
if isinstance(filename, bytes): # convert binary string to normal string
filename = filename.decode('ascii')
filename = os.path.realpath(filename) # decode symbolic links
## filter relevant parameters # TODO is there a cleaner way of doing this?
# print(list(params.keys()))
ignored_params = ["cache_features", "cache_format", "cache_regenerate",
"vocab_file", "dataset_files", "shuffle", "batch_size",
"max_duration",
"mode", "interactive", "autoregressive", "char2idx",
"tgt_vocab_size", "idx2char", "dtype"]
def fix_kv(text):
""" Helper function to shorten length of filenames to get around
filesystem path length limitations"""
text = str(text)
text = text.replace("speed_perturbation_ratio", "sp") \
.replace("noise_level_min", "nlmin", ) \
.replace("noise_level_max", "nlmax") \
.replace("add_derivatives", "d") \
.replace("add_second_derivatives", "dd")
return text
# generate the identifier by simply concatenating preprocessing key-value
# pairs as strings.
preprocess_id = "-".join(
[fix_kv(k) + "_" + fix_kv(v) for k, v in params.items() if
k not in ignored_params])
preprocessed_dir = os.path.dirname(filename).replace("wav",
"preprocessed-" +
preprocess_id)
preprocessed_path = os.path.join(preprocessed_dir,
os.path.basename(filename).replace(".wav",
""))
# create dir if it doesn't exist yet
if not os.path.exists(preprocessed_dir):
os.makedirs(preprocessed_dir)
return preprocessed_path
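# For example (hypothetical path), .../LibriSpeech/wav/sample1.wav is mapped
# to .../LibriSpeech/preprocessed-<param_id>/sample1, where <param_id> is the
# concatenation of the non-ignored preprocessing key_value pairs above and the
# file extension is added later by save_features according to the cache format.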
def get_speech_features_from_file(filename, params):
"""Function to get a numpy array of features, from an audio file.
  if params['cache_features'] == True, try to load preprocessed data from
  disk, or store it after preprocessing.
else, perform preprocessing on-the-fly.
Args:
filename (string): WAVE filename.
params (dict): the following parameters
num_features (int): number of speech features in frequency domain.
features_type (string): 'mfcc' or 'spectrogram'.
window_size (float): size of analysis window in milli-seconds.
window_stride (float): stride of analysis window in milli-seconds.
augmentation (dict, optional): dictionary of augmentation parameters. See
:func:`augment_audio_signal` for specification and example.
window (str): window function to apply
dither (float): weight of Gaussian noise to apply to input signal for
dithering/preventing quantization noise
num_fft (int): size of fft window to use if features require fft,
defaults to smallest power of 2 larger than window size
norm_per_feature (bool): if True, the output features will be normalized
(whitened) individually. if False, a global mean/std over all features
will be used for normalization
Returns:
np.array: np.array of audio features with shape=[num_time_steps,
num_features].
"""
cache_features = params.get('cache_features', False)
cache_format = params.get('cache_format', 'hdf5')
cache_regenerate = params.get('cache_regenerate', False)
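  # Control flow in short: PreprocessOnTheFlyException skips the cache
  # entirely when caching is disabled; a missing cache file or
  # RegenerateCacheException falls through to recomputing the features and
  # saving them to disk.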
try:
if not cache_features:
raise PreprocessOnTheFlyException(
"on-the-fly preprocessing enforced with 'cache_features'==True")
if cache_regenerate:
raise RegenerateCacheException("regenerating cache...")
preprocessed_data_path = get_preprocessed_data_path(filename, params)
features, duration = load_features(preprocessed_data_path,
data_format=cache_format)
except PreprocessOnTheFlyException:
sample_freq, signal = wave.read(filename)
# check sample rate
if sample_freq != params['sample_freq']:
raise ValueError(
("The sampling frequency set in params {} does not match the "
"frequency {} read from file {}").format(params['sample_freq'],
sample_freq, filename)
)
features, duration = get_speech_features(signal, sample_freq, params)
except (OSError, FileNotFoundError, RegenerateCacheException):
sample_freq, signal = wave.read(filename)
# check sample rate
if sample_freq != params['sample_freq']:
raise ValueError(
("The sampling frequency set in params {} does not match the "
"frequency {} read from file {}").format(params['sample_freq'],
sample_freq, filename)
)
features, duration = get_speech_features(signal, sample_freq, params)
preprocessed_data_path = get_preprocessed_data_path(filename, params)
save_features(features, duration, preprocessed_data_path,
data_format=cache_format)
return features, duration
def normalize_signal(signal, gain=None):
"""
Normalize float32 signal to [-1, 1] range
"""
if gain is None:
gain = 1.0 / (np.max(np.abs(signal)) + 1e-5)
return signal * gain
def augment_audio_signal(signal_float, sample_freq, augmentation):
"""Function that performs audio signal augmentation.
Args:
signal_float (np.array): np.array containing raw audio signal.
sample_freq (float): frames per second.
augmentation (dict, optional): None or dictionary of augmentation parameters.
If not None, has to have 'speed_perturbation_ratio',
'noise_level_min', or 'noise_level_max' fields, e.g.::
augmentation={
'speed_perturbation_ratio': 0.2,
'noise_level_min': -90,
'noise_level_max': -46,
}
        'speed_perturbation_ratio' can either be a list of possible speed
        perturbation factors or a float. If it is a float, the stretch factor
        is sampled uniformly from
        U[1 - speed_perturbation_ratio, 1 + speed_perturbation_ratio].
Returns:
np.array: np.array with augmented audio signal.
"""
if 'speed_perturbation_ratio' in augmentation:
stretch_amount = -1
if isinstance(augmentation['speed_perturbation_ratio'], list):
stretch_amount = np.random.choice(augmentation['speed_perturbation_ratio'])
elif augmentation['speed_perturbation_ratio'] > 0:
# time stretch (might be slow)
stretch_amount = 1.0 + (2.0 * np.random.rand() - 1.0) * \
augmentation['speed_perturbation_ratio']
if stretch_amount > 0:
signal_float = rs.resample(
signal_float,
sample_freq,
int(sample_freq * stretch_amount),
filter='kaiser_best',
)
# noise
if 'noise_level_min' in augmentation and 'noise_level_max' in augmentation:
noise_level_db = np.random.randint(low=augmentation['noise_level_min'],
high=augmentation['noise_level_max'])
signal_float += np.random.randn(signal_float.shape[0]) * \
10.0 ** (noise_level_db / 20.0)
return signal_float
def preemphasis(signal, coeff=0.97):
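  # First-order high-pass filter: y[0] = x[0], y[t] = x[t] - coeff * x[t-1],
  # which emphasizes high frequencies before the spectral analysis below.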
return np.append(signal[0], signal[1:] - coeff * signal[:-1])
def get_speech_features(signal, sample_freq, params):
"""
Get speech features using either librosa (recommended) or
python_speech_features
Args:
signal (np.array): np.array containing raw audio signal
sample_freq (float): sample rate of the signal
params (dict): parameters of pre-processing
Returns:
np.array: np.array of audio features with shape=[num_time_steps,
num_features].
audio_duration (float): duration of the signal in seconds
"""
backend = params.get('backend', 'psf')
features_type = params.get('input_type', 'spectrogram')
num_features = params['num_audio_features']
window_size = params.get('window_size', 20e-3)
window_stride = params.get('window_stride', 10e-3)
augmentation = params.get('augmentation', None)
if backend == 'librosa':
window_fn = WINDOWS_FNS[params.get('window', "hanning")]
dither = params.get('dither', 0.0)
num_fft = params.get('num_fft', None)
norm_per_feature = params.get('norm_per_feature', False)
mel_basis = params.get('mel_basis', None)
gain = params.get('gain')
mean = params.get('features_mean')
std_dev = params.get('features_std_dev')
features, duration = get_speech_features_librosa(
signal, sample_freq, num_features, features_type,
window_size, window_stride, augmentation, window_fn=window_fn,
dither=dither, norm_per_feature=norm_per_feature, num_fft=num_fft,
mel_basis=mel_basis, gain=gain, mean=mean, std_dev=std_dev
)
else:
pad_to = params.get('pad_to', 8)
features, duration = get_speech_features_psf(
signal, sample_freq, num_features, pad_to, features_type,
window_size, window_stride, augmentation
)
return features, duration
def get_speech_features_librosa(signal, sample_freq, num_features,
features_type='spectrogram',
window_size=20e-3,
window_stride=10e-3,
augmentation=None,
window_fn=np.hanning,
num_fft=None,
dither=0.0,
norm_per_feature=False,
mel_basis=None,
gain=None,
mean=None,
std_dev=None):
"""Function to convert raw audio signal to numpy array of features.
Backend: librosa
Args:
signal (np.array): np.array containing raw audio signal.
sample_freq (float): frames per second.
num_features (int): number of speech features in frequency domain.
features_type (string): 'mfcc' or 'spectrogram'.
window_size (float): size of analysis window in milli-seconds.
window_stride (float): stride of analysis window in milli-seconds.
augmentation (dict, optional): dictionary of augmentation parameters. See
:func:`augment_audio_signal` for specification and example.
Returns:
np.array: np.array of audio features with shape=[num_time_steps,
num_features].
audio_duration (float): duration of the signal in seconds
"""
signal = normalize_signal(signal.astype(np.float32), gain)
if augmentation:
signal = augment_audio_signal(signal, sample_freq, augmentation)
audio_duration = len(signal) * 1.0 / sample_freq
n_window_size = int(sample_freq * window_size)
n_window_stride = int(sample_freq * window_stride)
num_fft = num_fft or 2**math.ceil(math.log2(window_size*sample_freq))
if dither > 0:
signal += dither*np.random.randn(*signal.shape)
if features_type == 'spectrogram':
# ignore 1/n_fft multiplier, since there is a post-normalization
powspec = np.square(np.abs(librosa.core.stft(
signal, n_fft=n_window_size,
hop_length=n_window_stride, win_length=n_window_size, center=True,
window=window_fn)))
# remove small bins
powspec[powspec <= 1e-30] = 1e-30
features = 10 * np.log10(powspec.T)
assert num_features <= n_window_size // 2 + 1, \
"num_features for spectrogram should be <= (sample_freq * window_size // 2 + 1)"
# cut high frequency part
features = features[:, :num_features]
elif features_type == 'mfcc':
signal = preemphasis(signal, coeff=0.97)
S = np.square(
np.abs(
librosa.core.stft(signal, n_fft=num_fft,
hop_length=int(window_stride * sample_freq),
win_length=int(window_size * sample_freq),
center=True, window=window_fn
)
)
)
features = librosa.feature.mfcc(sr=sample_freq, S=S,
n_mfcc=num_features, n_mels=2*num_features).T
elif features_type == 'logfbank':
    signal = preemphasis(signal, coeff=0.97)
S = np.abs(librosa.core.stft(signal, n_fft=num_fft,
hop_length=int(window_stride * sample_freq),
win_length=int(window_size * sample_freq),
center=True, window=window_fn))**2.0
if mel_basis is None:
# Build a Mel filter
mel_basis = librosa.filters.mel(sample_freq, num_fft, n_mels=num_features,
fmin=0, fmax=int(sample_freq/2))
features = np.log(np.dot(mel_basis, S) + 1e-20).T
else:
raise ValueError('Unknown features type: {}'.format(features_type))
norm_axis = 0 if norm_per_feature else None
if mean is None:
mean = np.mean(features, axis=norm_axis)
if std_dev is None:
std_dev = np.std(features, axis=norm_axis)
features = (features - mean) / std_dev
if augmentation:
n_freq_mask = augmentation.get('n_freq_mask', 0)
n_time_mask = augmentation.get('n_time_mask', 0)
width_freq_mask = augmentation.get('width_freq_mask', 10)
width_time_mask = augmentation.get('width_time_mask', 50)
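    # SpecAugment-style masking: zero out n_freq_mask random frequency bands
    # (each at most width_freq_mask bins wide) and n_time_mask random time
    # bands (each at most width_time_mask frames long).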
for idx in range(n_freq_mask):
freq_band = np.random.randint(width_freq_mask + 1)
freq_base = np.random.randint(0, features.shape[1] - freq_band)
features[:, freq_base:freq_base+freq_band] = 0
for idx in range(n_time_mask):
time_band = np.random.randint(width_time_mask + 1)
if features.shape[0] - time_band > 0:
time_base = np.random.randint(features.shape[0] - time_band)
features[time_base:time_base+time_band, :] = 0
# now it is safe to pad
# if pad_to > 0:
# if features.shape[0] % pad_to != 0:
# pad_size = pad_to - features.shape[0] % pad_to
# if pad_size != 0:
# features = np.pad(features, ((0,pad_size), (0,0)), mode='constant')
return features, audio_duration
def get_speech_features_psf(signal, sample_freq, num_features,
pad_to=8,
features_type='spectrogram',
window_size=20e-3,
window_stride=10e-3,
augmentation=None):
"""Function to convert raw audio signal to numpy array of features.
Backend: python_speech_features
Args:
signal (np.array): np.array containing raw audio signal.
sample_freq (float): frames per second.
num_features (int): number of speech features in frequency domain.
pad_to (int): if specified, the length will be padded to become divisible
by ``pad_to`` parameter.
features_type (string): 'mfcc' or 'spectrogram'.
window_size (float): size of analysis window in milli-seconds.
window_stride (float): stride of analysis window in milli-seconds.
augmentation (dict, optional): dictionary of augmentation parameters. See
:func:`augment_audio_signal` for specification and example.
Returns:
np.array: np.array of audio features with shape=[num_time_steps,
num_features].
audio_duration (float): duration of the signal in seconds
"""
if augmentation is not None:
signal = augment_audio_signal(signal.astype(np.float32),
sample_freq, augmentation)
signal = (normalize_signal(signal.astype(np.float32)) * 32767.0).astype(
np.int16)
audio_duration = len(signal) * 1.0 / sample_freq
n_window_size = int(sample_freq * window_size)
n_window_stride = int(sample_freq * window_stride)
# making sure length of the audio is divisible by 8 (fp16 optimization)
length = 1 + int(math.ceil(
(1.0 * signal.shape[0] - n_window_size) / n_window_stride
))
if pad_to > 0:
if length % pad_to != 0:
pad_size = (pad_to - length % pad_to) * n_window_stride
signal = np.pad(signal, (0, pad_size), mode='constant')
if features_type == 'spectrogram':
frames = psf.sigproc.framesig(sig=signal,
frame_len=n_window_size,
frame_step=n_window_stride,
winfunc=np.hanning)
# features = np.log1p(psf.sigproc.powspec(frames, NFFT=N_window_size))
features = psf.sigproc.logpowspec(frames, NFFT=n_window_size)
assert num_features <= n_window_size // 2 + 1, \
"num_features for spectrogram should be <= (sample_freq * window_size // 2 + 1)"
# cut high frequency part
features = features[:, :num_features]
elif features_type == 'mfcc':
features = psf.mfcc(signal=signal,
samplerate=sample_freq,
winlen=window_size,
winstep=window_stride,
numcep=num_features,
nfilt=2 * num_features,
nfft=512,
lowfreq=0, highfreq=None,
preemph=0.97,
ceplifter=2 * num_features,
appendEnergy=False)
elif features_type == 'logfbank':
features = psf.logfbank(signal=signal,
samplerate=sample_freq,
winlen=window_size,
winstep=window_stride,
nfilt=num_features,
nfft=512,
lowfreq=0, highfreq=sample_freq / 2,
preemph=0.97)
else:
raise ValueError('Unknown features type: {}'.format(features_type))
if pad_to > 0:
assert features.shape[0] % pad_to == 0
mean = np.mean(features)
std_dev = np.std(features)
features = (features - mean) / std_dev
return features, audio_duration
| OpenSeq2Seq-master | open_seq2seq/data/speech2text/speech_utils.py |
# Copyright (c) 2018 NVIDIA Corporation
"""Data Layer for Speech-to-Text models"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import tensorflow as tf
import six
import math
import librosa
from six import string_types
from six.moves import range
from open_seq2seq.data.data_layer import DataLayer
from open_seq2seq.data.utils import load_pre_existing_vocabulary
from .speech_utils import get_speech_features_from_file, get_speech_features
import sentencepiece as spm
# numpy.fft MKL bug: https://github.com/IntelPython/mkl_fft/issues/11
if hasattr(np.fft, 'restore_all'):
np.fft.restore_all()
class Speech2TextDataLayer(DataLayer):
"""Speech-to-text data layer class."""
@staticmethod
def get_required_params():
return dict(DataLayer.get_required_params(), **{
'num_audio_features': int,
'input_type': ['spectrogram', 'mfcc', 'logfbank'],
'vocab_file': str,
'dataset_files': list,
})
@staticmethod
def get_optional_params():
return dict(DataLayer.get_optional_params(), **{
'backend': ['psf', 'librosa'],
'augmentation': dict,
'pad_to': int,
'max_duration': float,
'min_duration': float,
'bpe': bool,
'autoregressive': bool,
'syn_enable': bool,
'syn_subdirs': list,
'window_size': float,
'window_stride': float,
'dither': float,
'norm_per_feature': bool,
'window': ['hanning', 'hamming', 'none'],
'num_fft': int,
'precompute_mel_basis': bool,
'sample_freq': int,
'gain': float,
'features_mean': np.ndarray,
'features_std_dev': np.ndarray,
})
def __init__(self, params, model, num_workers, worker_id):
"""Speech-to-text data layer constructor.
See parent class for arguments description.
Config parameters:
* **backend** (str) --- audio pre-processing backend
('psf' [default] or librosa [recommended]).
* **num_audio_features** (int) --- number of audio features to extract.
* **input_type** (str) --- could be either "spectrogram" or "mfcc".
* **vocab_file** (str) --- path to vocabulary file or sentencepiece model.
* **dataset_files** (list) --- list with paths to all dataset .csv files.
* **augmentation** (dict) --- optional dictionary with data augmentation
parameters. Can contain "speed_perturbation_ratio", "noise_level_min" and
"noise_level_max" parameters, e.g.::
{
'speed_perturbation_ratio': 0.05,
'noise_level_min': -90,
'noise_level_max': -60,
}
For additional details on these parameters see
:func:`data.speech2text.speech_utils.augment_audio_signal` function.
* **pad_to** (int) --- align audio sequence length to pad_to value.
* **max_duration** (float) --- drop all samples longer than
**max_duration** (seconds)
* **min_duration** (float) --- drop all samples shorter than
**min_duration** (seconds)
* **bpe** (bool) --- use BPE encodings
* **autoregressive** (bool) --- boolean indicating whether the model is
autoregressive.
* **syn_enable** (bool) --- boolean indicating whether the model is
using synthetic data.
* **syn_subdirs** (list) --- must be defined if using synthetic mode.
      Contains a list of subdirectories that hold the synthetic wav files.
* **window_size** (float) --- window's duration (in seconds)
* **window_stride** (float) --- window's stride (in seconds)
* **dither** (float) --- weight of Gaussian noise to apply to input signal
for dithering/preventing quantization noise
* **num_fft** (int) --- size of fft window to use if features require fft,
defaults to smallest power of 2 larger than window size
* **norm_per_feature** (bool) --- if True, the output features will be
normalized (whitened) individually. if False, a global mean/std over all
features will be used for normalization.
* **window** (str) --- window function to apply before FFT
('hanning', 'hamming', 'none')
* **num_fft** (int) --- optional FFT size
* **precompute_mel_basis** (bool) --- compute and store mel basis. If False,
it will compute it for every get_speech_features call. Default: False
* **sample_freq** (int) --- required for precompute_mel_basis
"""
super(Speech2TextDataLayer, self).__init__(params, model,
num_workers, worker_id)
self.params['autoregressive'] = self.params.get('autoregressive', False)
self.autoregressive = self.params['autoregressive']
self.params['bpe'] = self.params.get('bpe', False)
if self.params['bpe']:
self.sp = spm.SentencePieceProcessor()
self.sp.Load(self.params['vocab_file'])
self.params['tgt_vocab_size'] = len(self.sp) + 1
else:
self.params['char2idx'] = load_pre_existing_vocabulary(
self.params['vocab_file'], read_chars=True,
)
if not self.autoregressive:
# add one for implied blank token
self.params['tgt_vocab_size'] = len(self.params['char2idx']) + 1
else:
num_chars_orig = len(self.params['char2idx'])
self.params['tgt_vocab_size'] = num_chars_orig + 2
self.start_index = num_chars_orig
self.end_index = num_chars_orig + 1
self.params['char2idx']['<S>'] = self.start_index
self.params['char2idx']['</S>'] = self.end_index
self.target_pad_value = self.end_index
self.params['idx2char'] = {i: w for w,
i in self.params['char2idx'].items()}
self.target_pad_value = 0
self._files = None
if self.params["interactive"]:
return
for csv in params['dataset_files']:
files = pd.read_csv(csv, encoding='utf-8')
if self._files is None:
self._files = files
else:
self._files = self._files.append(files)
if self.params['mode'] != 'infer':
cols = ['wav_filename', 'transcript']
else:
cols = 'wav_filename'
self.all_files = self._files.loc[:, cols].values
self._files = self.split_data(self.all_files)
self._size = self.get_size_in_samples()
self._dataset = None
self._iterator = None
self._input_tensors = None
self.params['min_duration'] = self.params.get('min_duration', -1.0)
self.params['max_duration'] = self.params.get('max_duration', -1.0)
self.params['window_size'] = self.params.get('window_size', 20e-3)
self.params['window_stride'] = self.params.get('window_stride', 10e-3)
self.params['sample_freq'] = self.params.get('sample_freq', 16000)
mel_basis = None
if (self.params.get("precompute_mel_basis", False) and
self.params["input_type"] == "logfbank"):
num_fft = (
self.params.get("num_fft", None) or
2**math.ceil(math.log2(
self.params['window_size']*self.params['sample_freq'])
)
)
mel_basis = librosa.filters.mel(
self.params['sample_freq'],
num_fft,
n_mels=self.params['num_audio_features'],
fmin=0,
fmax=int(self.params['sample_freq']/2)
)
self.params['mel_basis'] = mel_basis
if 'n_freq_mask' in self.params.get('augmentation', {}):
width_freq_mask = self.params['augmentation'].get('width_freq_mask', 10)
if width_freq_mask > self.params['num_audio_features']:
raise ValueError(
"'width_freq_mask'={} should be smaller ".format(width_freq_mask)+
"than 'num_audio_features'={}".format(
self.params['num_audio_features']
)
)
if 'time_stretch_ratio' in self.params.get('augmentation', {}):
print("WARNING: Please update time_stretch_ratio to speed_perturbation_ratio")
self.params['augmentation']['speed_perturbation_ratio'] = self.params['augmentation']['time_stretch_ratio']
def split_data(self, data):
if self.params['mode'] != 'train' and self._num_workers is not None:
size = len(data)
start = size // self._num_workers * self._worker_id
if self._worker_id == self._num_workers - 1:
end = size
else:
end = size // self._num_workers * (self._worker_id + 1)
return data[start:end]
else:
return data
@property
def iterator(self):
"""Underlying tf.data iterator."""
return self._iterator
  def build_graph(self):
    """Builds data processing graph using ``tf.data`` API."""
    with tf.device('/cpu:0'):
if self.params['mode'] != 'infer':
self._dataset = tf.data.Dataset.from_tensor_slices(self._files)
if self.params['shuffle']:
self._dataset = self._dataset.shuffle(self._size)
self._dataset = self._dataset.repeat()
self._dataset = self._dataset.prefetch(tf.contrib.data.AUTOTUNE)
self._dataset = self._dataset.map(
lambda line: tf.py_func(
self._parse_audio_transcript_element,
[line],
[self.params['dtype'], tf.int32, tf.int32, tf.int32, tf.float32],
stateful=False,
),
num_parallel_calls=8,
)
if self.params['max_duration'] > 0:
self._dataset = self._dataset.filter(
lambda x, x_len, y, y_len, duration:
tf.less_equal(duration, self.params['max_duration'])
)
if self.params['min_duration'] > 0:
self._dataset = self._dataset.filter(
lambda x, x_len, y, y_len, duration:
tf.greater_equal(duration, self.params['min_duration'])
)
self._dataset = self._dataset.map(
lambda x, x_len, y, y_len, duration:
[x, x_len, y, y_len],
num_parallel_calls=8,
)
self._dataset = self._dataset.padded_batch(
self.params['batch_size'],
padded_shapes=([None, self.params['num_audio_features']],
1, [None], 1),
padding_values=(
tf.cast(0, self.params['dtype']), 0, self.target_pad_value, 0),
)
else:
indices = self.split_data(
np.array(list(map(str, range(len(self.all_files)))))
)
self._dataset = tf.data.Dataset.from_tensor_slices(
np.hstack((indices[:, np.newaxis], self._files[:, np.newaxis]))
)
self._dataset = self._dataset.repeat()
self._dataset = self._dataset.prefetch(tf.contrib.data.AUTOTUNE)
self._dataset = self._dataset.map(
lambda line: tf.py_func(
self._parse_audio_element,
[line],
[self.params['dtype'], tf.int32, tf.int32, tf.float32],
stateful=False,
),
num_parallel_calls=8,
)
if self.params['max_duration'] > 0:
self._dataset = self._dataset.filter(
lambda x, x_len, idx, duration:
tf.less_equal(duration, self.params['max_duration'])
)
if self.params['min_duration'] > 0:
self._dataset = self._dataset.filter(
                lambda x, x_len, idx, duration:
tf.greater_equal(duration, self.params['min_duration'])
)
self._dataset = self._dataset.map(
lambda x, x_len, idx, duration:
[x, x_len, idx],
num_parallel_calls=16,
)
self._dataset = self._dataset.padded_batch(
self.params['batch_size'],
padded_shapes=([None, self.params['num_audio_features']], 1, 1)
)
self._iterator = self._dataset.prefetch(tf.contrib.data.AUTOTUNE)\
.make_initializable_iterator()
if self.params['mode'] != 'infer':
x, x_length, y, y_length = self._iterator.get_next()
# need to explicitly set batch size dimension
# (it is employed in the model)
y.set_shape([self.params['batch_size'], None])
y_length = tf.reshape(y_length, [self.params['batch_size']])
else:
x, x_length, x_id = self._iterator.get_next()
x_id = tf.reshape(x_id, [self.params['batch_size']])
x.set_shape([self.params['batch_size'], None,
self.params['num_audio_features']])
x_length = tf.reshape(x_length, [self.params['batch_size']])
pad_to = self.params.get("pad_to", 8)
if pad_to > 0 and self.params.get('backend') == 'librosa':
# we do padding with TF for librosa backend
num_pad = tf.mod(pad_to - tf.mod(tf.reduce_max(x_length), pad_to), pad_to)
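        # e.g. with pad_to=8 and a longest utterance of 45 frames,
        # num_pad = (8 - 45 % 8) % 8 = 3, so the batch is padded to 48 frames;
        # an already-aligned batch gets num_pad = 0.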
x = tf.pad(x, [[0, 0], [0, num_pad], [0, 0]])
self._input_tensors = {}
self._input_tensors["source_tensors"] = [x, x_length]
if self.params['mode'] != 'infer':
self._input_tensors['target_tensors'] = [y, y_length]
else:
self._input_tensors['source_ids'] = [x_id]
def create_interactive_placeholders(self):
self._x = tf.placeholder(
dtype=self.params['dtype'],
shape=[
self.params['batch_size'],
None,
self.params['num_audio_features']
]
)
self._x_length = tf.placeholder(
dtype=tf.int32,
shape=[self.params['batch_size']]
)
self._x_id = tf.placeholder(
dtype=tf.int32,
shape=[self.params['batch_size']]
)
self._input_tensors = {}
self._input_tensors["source_tensors"] = [self._x, self._x_length]
self._input_tensors['source_ids'] = [self._x_id]
def create_feed_dict(self, model_in):
""" Creates the feed dict for interactive infer
Args:
model_in (str or np.array): Either a str that contains the file path of the
wav file, or a numpy array containing 1-d wav file.
Returns:
feed_dict (dict): Dictionary with values for the placeholders.
"""
audio_arr = []
audio_length_arr = []
x_id_arr = []
for line in model_in:
if isinstance(line, string_types):
audio, audio_length, x_id, _ = self._parse_audio_element([0, line])
elif isinstance(line, np.ndarray):
audio, audio_length, x_id, _ = self._get_audio(line)
else:
        raise ValueError(
            "Speech2Text's interactive inference mode only supports string or "
            "numpy array as input. Got {}".format(type(line))
        )
audio_arr.append(audio)
audio_length_arr.append(audio_length)
x_id_arr.append(x_id)
max_len = np.max(audio_length_arr)
pad_to = self.params.get("pad_to", 8)
if pad_to > 0 and self.params.get('backend') == 'librosa':
max_len += (pad_to - max_len % pad_to) % pad_to
for i, audio in enumerate(audio_arr):
audio = np.pad(
audio, ((0, max_len-len(audio)), (0, 0)),
"constant", constant_values=0.
)
audio_arr[i] = audio
audio = np.reshape(
audio_arr,
[self.params['batch_size'],
-1,
self.params['num_audio_features']]
)
audio_length = np.reshape(audio_length_arr, [self.params['batch_size']])
x_id = np.reshape(x_id_arr, [self.params['batch_size']])
feed_dict = {
self._x: audio,
self._x_length: audio_length,
self._x_id: x_id,
}
return feed_dict
def _parse_audio_transcript_element(self, element):
"""Parses tf.data element from TextLineDataset into audio and text.
Args:
element: tf.data element from TextLineDataset.
Returns:
tuple: source audio features as ``np.array``, length of source sequence,
target text as `np.array` of ids, target text length.
"""
audio_filename, transcript = element
if not six.PY2:
transcript = str(transcript, 'utf-8')
audio_filename = str(audio_filename, 'utf-8')
if self.params['bpe']:
target_indices = self.sp.EncodeAsIds(transcript)
else:
target_indices = [self.params['char2idx'][c] for c in transcript]
if self.autoregressive:
target_indices = target_indices + [self.end_index]
target = np.array(target_indices)
if self.params.get("syn_enable", False):
audio_filename = audio_filename.format(np.random.choice(self.params["syn_subdirs"]))
source, audio_duration = get_speech_features_from_file(
audio_filename,
params=self.params
)
return source.astype(self.params['dtype'].as_numpy_dtype()), \
np.int32([len(source)]), \
np.int32(target), \
np.int32([len(target)]), \
np.float32([audio_duration])
def _get_audio(self, wav):
"""Parses audio from wav and returns array of audio features.
Args:
wav: numpy array containing wav
Returns:
tuple: source audio features as ``np.array``, length of source sequence,
sample id.
"""
source, audio_duration = get_speech_features(
wav, 16000., self.params
)
return source.astype(self.params['dtype'].as_numpy_dtype()), \
np.int32([len(source)]), np.int32([0]), \
np.float32([audio_duration])
def _parse_audio_element(self, id_and_audio_filename):
"""Parses audio from file and returns array of audio features.
Args:
id_and_audio_filename: tuple of sample id and corresponding
audio file name.
Returns:
tuple: source audio features as ``np.array``, length of source sequence,
sample id, audio duration.
"""
idx, audio_filename = id_and_audio_filename
source, audio_duration = get_speech_features_from_file(
audio_filename,
params=self.params
)
return source.astype(self.params['dtype'].as_numpy_dtype()), \
np.int32([len(source)]), np.int32([idx]), \
np.float32([audio_duration])
@property
def input_tensors(self):
"""Dictionary with input tensors.
``input_tensors["source_tensors"]`` contains:
* source_sequence
(shape=[batch_size x sequence length x num_audio_features])
* source_length (shape=[batch_size])
``input_tensors["target_tensors"]`` (absent in "infer" mode) contains:
* target_sequence
(shape=[batch_size x sequence length])
* target_length (shape=[batch_size])
"""
return self._input_tensors
def get_size_in_samples(self):
"""Returns the number of audio files."""
return len(self._files)
| OpenSeq2Seq-master | open_seq2seq/data/speech2text/speech2text.py |
# pylint: skip-file
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders import ResNetEncoder
from open_seq2seq.decoders import FullyConnectedDecoder
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.data import ImagenetDataLayer
from open_seq2seq.optimizers.lr_policies import piecewise_constant
import tensorflow as tf
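# ResNet-50 (v2) ImageNet classifier: float32 training on 8 GPUs with SGD + momentum
# and a piecewise-constant learning-rate schedule (boundaries at 30/60/80/90).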
base_model = Image2Label
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_epochs": 100,
"num_gpus": 8,
"batch_size_per_gpu": 32,
"dtype": tf.float32,
"save_summaries_steps": 2000,
"print_loss_steps": 100,
"print_samples_steps": 2000,
"eval_steps": 5000,
"save_checkpoint_steps": 5000,
"logdir": "experiments/resnet50-imagenet",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": piecewise_constant,
"lr_policy_params": {
"learning_rate": 0.1,
"boundaries": [30, 60, 80, 90],
"decay_rates": [0.1, 0.01, 0.001, 1e-4],
},
"initializer": tf.variance_scaling_initializer,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0001,
},
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": ResNetEncoder,
"encoder_params": {
'resnet_size': 50,
"regularize_bn": False,
},
"decoder": FullyConnectedDecoder,
"decoder_params": {
"output_dim": 1000,
},
"loss": CrossEntropyLoss,
"data_layer": ImagenetDataLayer,
"data_layer_params": {
"data_dir": "data/tf-imagenet",
"image_size": 224,
},
}
| OpenSeq2Seq-master | example_configs/image2label/resnet-50-v2.py |
# pylint: skip-file
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders import ResNetEncoder
from open_seq2seq.decoders import FullyConnectedDecoder
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.data import SpeechCommandsDataLayer
from open_seq2seq.optimizers.lr_policies import poly_decay
import tensorflow as tf
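# ResNet-50 classifier for the Speech Commands dataset; ``dataset_version`` below picks
# the number of labels and the dataset path. Mixed-precision training with a static
# loss scale of 512.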
base_model = Image2Label
dataset_version = "v1-12"
dataset_location = "data/speech_commands_v0.01"
if dataset_version == "v1-12":
num_labels = 12
elif dataset_version == "v1-30":
num_labels = 30
else:
num_labels = 35
dataset_location = "data/speech_commands_v0.02"
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_gpus": 1,
"num_epochs": 20,
"batch_size_per_gpu": 32,
"dtype": "mixed",
"loss_scaling": 512.0,
"save_summaries_steps": 10000,
"print_loss_steps": 10,
"print_samples_steps": 1000,
"eval_steps": 100,
"save_checkpoint_steps": 10000,
"logdir": "result/" + dataset_version + "-resnet",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.95,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.2,
"power": 2,
},
"initializer": tf.variance_scaling_initializer,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0001,
},
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": ResNetEncoder,
"encoder_params": {
'resnet_size': 50,
"regularize_bn": False,
},
"decoder": FullyConnectedDecoder,
"decoder_params": {
"output_dim": num_labels,
},
"loss": CrossEntropyLoss,
"data_layer": SpeechCommandsDataLayer,
"data_layer_params": {
"dataset_location": dataset_location,
"num_audio_features": 120,
"audio_length": 120,
"num_labels": num_labels,
"cache_data": True,
"augment_data": True,
"model_format": "resnet"
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
dataset_version + "-train.txt"
],
"shuffle": True,
"repeat": True
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer_params": {
"dataset_files": [
dataset_version + "-val.txt"
],
"shuffle": False,
"repeat": False
},
}
| OpenSeq2Seq-master | example_configs/image2label/resnet_commands.py |
# pylint: skip-file
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders import ResNetEncoder
from open_seq2seq.decoders import FullyConnectedDecoder
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.data import SpeechCommandsDataLayer
from open_seq2seq.optimizers.lr_policies import poly_decay
import tensorflow as tf
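# Same Speech Commands ResNet-50 setup as resnet_commands.py, scaled out to 8 GPUs
# with Horovod.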
base_model = Image2Label
dataset_version = "v1-12"
dataset_location = "/data/speech-commands/v1"
if dataset_version == "v1-12":
num_labels = 12
elif dataset_version == "v1-30":
num_labels = 30
else:
num_labels = 35
dataset_location = "/data/speech-commands/v2"
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_gpus": 8,
"num_epochs": 100,
"batch_size_per_gpu": 32,
"dtype": "mixed",
"loss_scaling": 512.0,
"save_summaries_steps": 10000,
"print_loss_steps": 100,
"print_samples_steps": 1000,
"eval_steps": 1000,
"save_checkpoint_steps": 10000,
"logdir": "result/resnet_commands",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.95,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.2,
"power": 2,
},
"initializer": tf.variance_scaling_initializer,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0001,
},
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": ResNetEncoder,
"encoder_params": {
'resnet_size': 50,
"regularize_bn": False,
},
"decoder": FullyConnectedDecoder,
"decoder_params": {
"output_dim": num_labels,
},
"loss": CrossEntropyLoss,
"data_layer": SpeechCommandsDataLayer,
"data_layer_params": {
"dataset_location": dataset_location,
"num_audio_features": 120,
"audio_length": 120,
"num_labels": num_labels,
"cache_data": True,
"augment_data": True,
"model_format": "resnet"
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
dataset_version + "-train.txt"
],
"shuffle": True,
"repeat": True
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer_params": {
"dataset_files": [
dataset_version + "-val.txt"
],
"shuffle": False,
"repeat": False
},
}
| OpenSeq2Seq-master | example_configs/image2label/resnet_commands_8gpu.py |
# pylint: skip-file
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders import ResNetEncoder
from open_seq2seq.decoders import FullyConnectedDecoder
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.data import ImagenetDataLayer
from open_seq2seq.optimizers.lr_policies import piecewise_constant, poly_decay
import tensorflow as tf
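# ResNet-50 (v2) on ImageNet with mixed-precision training ("mixed" dtype plus
# Backoff loss scaling) and polynomial learning-rate decay; meant to be launched
# with Horovod.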
data_root = ""
base_model = Image2Label
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 100,
"num_gpus": 1,
"batch_size_per_gpu": 128,
"dtype": "mixed",
"loss_scaling": "Backoff",
"save_summaries_steps": 2000,
"print_loss_steps": 100,
"print_samples_steps": 2000,
"eval_steps": 5000,
"save_checkpoint_steps": 5000,
"logdir": "logs/rn50",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.4, # 8 GPU-s ,
"power": 2,
},
"initializer": tf.variance_scaling_initializer,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0001,
},
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": ResNetEncoder,
"encoder_params": {
'resnet_size': 50,
"regularize_bn": False,
},
"decoder": FullyConnectedDecoder,
"decoder_params": {
"output_dim": 1000,
},
"loss": CrossEntropyLoss,
"data_layer": ImagenetDataLayer,
"data_layer_params": {
"data_dir": data_root+"data", # "data",
"image_size": 224,
"num_classes": 1000,
},
}
| OpenSeq2Seq-master | example_configs/image2label/resnet-50-v2-mp.py |
# pylint: skip-file
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders import ResNetEncoder
from open_seq2seq.decoders import FullyConnectedDecoder
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.data import ImagenetDataLayer
import tensorflow as tf
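# ResNet-50 (v2) on ImageNet trained with AdamW (decoupled weight decay) and
# cosine learning-rate decay, in mixed precision with Backoff loss scaling.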
data_root = ""
base_model = Image2Label
base_params = {
"random_seed": 0,
"use_horovod": False, #True,
"num_gpus": 8,
"batch_size_per_gpu": 128,
"num_epochs": 100,
"dtype": "mixed",
"loss_scaling": "Backoff",
"save_summaries_steps": 2000,
"print_loss_steps": 100,
"print_samples_steps": 10000,
"eval_steps": 5000,
"save_checkpoint_steps": 50000,
"logdir": "logs/rn50-adamw",
"optimizer": "AdamW",
"optimizer_params": {
"beta1": 0.9,
"beta2": 0.999,
"epsilon": 1e-08,
"weight_decay": 0.1,
},
"lr_policy": tf.train.cosine_decay,
"lr_policy_params": {
"learning_rate": 0.002, # 8 GPUs
},
"initializer": tf.variance_scaling_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": ResNetEncoder,
"encoder_params": {
'resnet_size': 50,
"regularize_bn": False,
},
"decoder": FullyConnectedDecoder,
"decoder_params": {
"output_dim": 1000,
},
"loss": CrossEntropyLoss,
"data_layer": ImagenetDataLayer,
"data_layer_params": {
"data_dir": data_root+"data", # "data",
"image_size": 224,
"num_classes": 1000,
},
}
| OpenSeq2Seq-master | example_configs/image2label/resnet-50v2-adamw.py |
# pylint: skip-file
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders import ResNetEncoder
from open_seq2seq.decoders import FullyConnectedDecoder
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.data import ImagenetDataLayer
from open_seq2seq.optimizers.lr_policies import piecewise_constant, poly_decay
from open_seq2seq.optimizers.novograd import NovoGrad
import tensorflow as tf
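# ResNet-50 (v2) on ImageNet trained with the NovoGrad optimizer and polynomial
# learning-rate decay, in mixed precision with Backoff loss scaling.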
data_root = ""
base_model = Image2Label
base_params = {
"random_seed": 0,
"use_horovod": True, # False, #
"num_gpus": 1,
"batch_size_per_gpu": 128,
"iter_size": 1,
"num_epochs": 100,
"dtype": "mixed",
"loss_scaling": "Backoff",
"save_summaries_steps": 2000,
"print_loss_steps": 100,
"print_samples_steps": 10000,
"eval_steps": 5000,
"save_checkpoint_steps": 5000,
"logdir": "logs/rn50/nvgd_lr0.02_wd0.001",
"optimizer": NovoGrad,
"optimizer_params": {
"beta1": 0.95,
"beta2": 0.98,
"epsilon": 1e-08,
"weight_decay": 0.004,
"grad_averaging": False
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.03,
"power": 2,
},
"initializer": tf.variance_scaling_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": ResNetEncoder,
"encoder_params": {
'resnet_size': 50,
"regularize_bn": False,
},
"decoder": FullyConnectedDecoder,
"decoder_params": {
"output_dim": 1000,
},
"loss": CrossEntropyLoss,
"data_layer": ImagenetDataLayer,
"data_layer_params": {
"data_dir": data_root+"data", # "data",
"image_size": 224,
"num_classes": 1000,
},
}
| OpenSeq2Seq-master | example_configs/image2label/resnet-50v2-nvgrad.py |
# pylint: skip-file
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders.cnn_encoder import CNNEncoder
from open_seq2seq.decoders import FullyConnectedDecoder
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.data import ImagenetDataLayer
from open_seq2seq.optimizers.lr_policies import poly_decay
import tensorflow as tf
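# AlexNet-style ImageNet classifier ("owt" presumably refers to the one-weird-trick
# variant) expressed as a generic CNNEncoder layer list; 227x227 inputs, SGD + momentum.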
base_model = Image2Label
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_epochs": 120,
"num_gpus": 4,
"batch_size_per_gpu": 256,
"dtype": tf.float32,
"save_summaries_steps": 2000,
"print_loss_steps": 100,
"print_samples_steps": 2000,
"eval_steps": 5000,
"save_checkpoint_steps": 5000,
"logdir": "experiments/alexnet-imagenet",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.04,
"power": 1.0,
},
"initializer": tf.variance_scaling_initializer,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0005,
},
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": CNNEncoder,
"encoder_params": {
'data_format': 'channels_first',
'cnn_layers': [
(tf.layers.conv2d, {
'filters': 64, 'kernel_size': (11, 11),
'strides': (4, 4), 'padding': 'VALID',
'activation': tf.nn.relu,
}),
(tf.layers.max_pooling2d, {
'pool_size': (3, 3), 'strides': (2, 2),
}),
(tf.layers.conv2d, {
'filters': 192, 'kernel_size': (5, 5),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
(tf.layers.max_pooling2d, {
'pool_size': (3, 3), 'strides': (2, 2),
}),
(tf.layers.conv2d, {
'filters': 384, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
(tf.layers.conv2d, {
'filters': 256, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
(tf.layers.conv2d, {
'filters': 256, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
(tf.layers.max_pooling2d, {
'pool_size': (3, 3), 'strides': (2, 2),
}),
],
'fc_layers': [
(tf.layers.dense, {'units': 4096, 'activation': tf.nn.relu}),
(tf.layers.dropout, {'rate': 0.5}),
(tf.layers.dense, {'units': 4096, 'activation': tf.nn.relu}),
(tf.layers.dropout, {'rate': 0.5}),
],
},
"decoder": FullyConnectedDecoder,
"decoder_params": {
"output_dim": 1000,
},
"loss": CrossEntropyLoss,
"data_layer": ImagenetDataLayer,
"data_layer_params": {
"data_dir": "data/tf-imagenet",
"image_size": 227,
"num_classes": 1000,
},
} | OpenSeq2Seq-master | example_configs/image2label/alexnet_owt.py |
# pylint: skip-file
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders.cnn_encoder import CNNEncoder
from open_seq2seq.decoders import FullyConnectedDecoder
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.data.image2label.image2label import CifarDataLayer
from open_seq2seq.optimizers.lr_policies import poly_decay
import tensorflow as tf
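# Small VGG-like convnet for CIFAR-10 defined via CNNEncoder: three conv blocks,
# the first two ending in batch norm and max pooling, followed by a 10-way softmax.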
base_model = Image2Label
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_epochs": 200,
"num_gpus": 1,
"batch_size_per_gpu": 32,
"dtype": tf.float32,
"save_summaries_steps": 2000,
"print_loss_steps": 100,
"print_samples_steps": 2000,
"eval_steps": 5000,
"save_checkpoint_steps": 5000,
"logdir": "experiments/test-cifar",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.001,
"power": 1.0,
},
"initializer": tf.variance_scaling_initializer,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0002,
},
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": CNNEncoder,
"encoder_params": {
'data_format': 'channels_first',
'cnn_layers': [
# block 1
(tf.layers.conv2d, {
'filters': 128, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
(tf.layers.conv2d, {
'filters': 128, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
(tf.layers.conv2d, {
'filters': 128, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': None, 'use_bias': False,
}),
(tf.layers.batch_normalization, {'momentum': 0.9, 'epsilon': 0.0001}),
(tf.nn.relu, {}),
(tf.layers.max_pooling2d, {
'pool_size': 3, 'strides': 2, 'padding': 'SAME',
}),
# block 2
(tf.layers.conv2d, {
'filters': 256, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
(tf.layers.conv2d, {
'filters': 256, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
(tf.layers.conv2d, {
'filters': 256, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': None, 'use_bias': False,
}),
(tf.layers.batch_normalization, {'momentum': 0.9, 'epsilon': 0.0001}),
(tf.nn.relu, {}),
(tf.layers.max_pooling2d, {
'pool_size': 3, 'strides': 2, 'padding': 'SAME',
}),
# block 3
(tf.layers.conv2d, {
'filters': 320, 'kernel_size': (3, 3),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
(tf.layers.conv2d, {
'filters': 320, 'kernel_size': (1, 1),
'strides': (1, 1), 'padding': 'SAME',
'activation': tf.nn.relu,
}),
],
},
"decoder": FullyConnectedDecoder,
"decoder_params": {
"output_dim": 10,
},
"loss": CrossEntropyLoss,
"data_layer": CifarDataLayer,
"data_layer_params": {
"data_dir": "data/cifar-10-batches-bin",
},
}
| OpenSeq2Seq-master | example_configs/image2label/cifar-nv.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import WKTDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import BasicSampledSequenceLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
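# Word-level LSTM language model on WikiText-103: three weight-dropped LSTM layers,
# tied embeddings, and a sampled-softmax loss (num_sampled=8192); mixed-precision
# training on 8 GPUs with Horovod.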
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WikiText-103-raw DATA]"
processed_data_folder = 'wkt103-processed-data'
base_model = LSTMLM
bptt = 96
steps = 40
base_params = {
"restore_best_checkpoint": True,
"use_horovod": True,
"num_gpus": 8,
"batch_size_per_gpu": 224,
"eval_batch_size_per_gpu": 56,
"num_epochs": 1500,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"logdir": "LSTM-WKT103-MIXED",
"processed_data_folder": processed_data_folder,
"eval_steps": steps * 4,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 1e-3
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
# "max_grad_norm": 0.25,
# "dtype": tf.float32,
"dtype": "mixed",
"loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": False,
"cudnn_rnn_type": None,
"core_cell": WeightDropLayerNormBasicLSTMCell,
"core_cell_params": {
"num_units": 1024,
"forget_bias": 1.0,
},
"encoder_layers": 3,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.85,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.85,
"recurrent_keep_prob": 0.7,
'encoder_emb_keep_prob': 0.8,
"encoder_use_skip_connections": False,
"emb_size": 320,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
"num_sampled": 8192,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": BasicSampledSequenceLoss,
"loss_params": {
"offset_target_by_one": False,
"average_across_timestep": True,
"do_mask": False,
}
}
train_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"data_root": data_root,
"pad_vocab_to_eight": False,
"rand_start": True,
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
},
}
eval_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"bptt": bptt,
},
}
infer_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
"seed_tokens": "something The only game",
},
}
| OpenSeq2Seq-master | example_configs/lm/lstm-wkt103-mixed.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import WKTDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
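# Word-level LSTM language model on WikiText-2: three weight-dropped LSTM layers
# with tied embeddings and a full-softmax sequence loss; float32 training.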
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WikiText-2-raw DATA]"
processed_data_folder = 'wkt2-processed-data'
base_model = LSTMLM
bptt = 96
steps = 40
base_params = {
"restore_best_checkpoint": True,
"use_horovod": True,
"num_gpus": 2,
"batch_size_per_gpu": 160,
"num_epochs": 1500,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"logdir": "LSTM-WKT2-FP32",
"processed_data_folder": processed_data_folder,
"eval_steps": steps * 2,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 4e-4
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
# "max_grad_norm": 0.25,
"dtype": tf.float32,
#"dtype": "mixed",
#"automatic_loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": False,
"cudnn_rnn_type": None,
"core_cell": WeightDropLayerNormBasicLSTMCell,
"core_cell_params": {
"num_units": 896,
"forget_bias": 1.0,
},
"encoder_layers": 3,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.6,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.6,
"recurrent_keep_prob": 0.7,
'encoder_emb_keep_prob': 0.37,
"encoder_use_skip_connections": False,
"emb_size": 256,
"num_tokens_gen": 10,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": False,
"average_across_timestep": True,
"do_mask": False,
}
}
train_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"data_root": data_root,
"pad_vocab_to_eight": False,
"rand_start": True,
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
},
}
eval_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"bptt": bptt,
},
}
infer_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
"seed_tokens": "something The only game",
},
}
| OpenSeq2Seq-master | example_configs/lm/lstm-wkt2-fp32.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import WKTDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
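# Small smoke-test LM config: a 2-layer CudnnLSTM on the truncated ("small": True)
# WikiText-2 data, float32.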
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WikiText-2-raw DATA]"
processed_data_folder = 'wkt2-processed-data'
base_model = LSTMLM
bptt = 12
steps = 10
base_params = {
"restore_best_checkpoint": True,
"use_horovod": False,
"num_gpus": 2,
"batch_size_per_gpu": 160,
"num_epochs": 1500,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"processed_data_folder": processed_data_folder,
"logdir": "LSTM-FP32-2GPU-SMALL",
"eval_steps": steps * 2,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 9e-4
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"dtype": tf.float32,
# "dtype": "mixed",
# "loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": True,
"cudnn_rnn_type": tf.contrib.cudnn_rnn.CudnnLSTM,
"core_cell": None,
"core_cell_params": {
"num_units": 128,
"forget_bias": 1.0,
},
"encoder_layers": 2,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.6,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.6,
"recurrent_keep_prob": 0.7,
'encoder_emb_keep_prob': 0.37,
"encoder_use_skip_connections": False,
"emb_size": 64,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": False,
"average_across_timestep": True,
"do_mask": False,
}
}
train_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"data_root": data_root,
"processed_data_folder": processed_data_folder,
"pad_vocab_to_eight": False,
"rand_start": True,
"shuffle": False,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
"small": True,
},
}
eval_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"processed_data_folder": processed_data_folder,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"bptt": bptt,
"small": True,
},
}
infer_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"processed_data_folder": processed_data_folder,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
"seed_tokens": "something The only game",
},
}
| OpenSeq2Seq-master | example_configs/lm/lstm-test-small-cudnn.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import WKTDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WikiText-2-raw DATA]"
processed_data_folder = 'wkt2-processed-data'
base_model = LSTMLM
bptt = 12
steps = 10
base_params = {
"restore_best_checkpoint": True,
"use_horovod": False,
"num_gpus": 2,
"batch_size_per_gpu": 160,
"num_epochs": 1500,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"processed_data_folder": processed_data_folder,
"logdir": "LSTM-FP32-2GPU-SMALL",
"eval_steps": steps * 2,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 9e-4
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"dtype": tf.float32,
# "dtype": "mixed",
# "loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": False,
"cudnn_rnn_type": None,
"core_cell": WeightDropLayerNormBasicLSTMCell,
"core_cell_params": {
"num_units": 128,
"forget_bias": 1.0,
},
"encoder_layers": 2,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.6,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.6,
"recurrent_keep_prob": 0.7,
'encoder_emb_keep_prob': 0.37,
"encoder_use_skip_connections": False,
"emb_size": 64,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": False,
"average_across_timestep": True,
"do_mask": False,
}
}
train_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"data_root": data_root,
"processed_data_folder": processed_data_folder,
"pad_vocab_to_eight": False,
"rand_start": True,
"shuffle": False,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
"small": True,
},
}
eval_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"processed_data_folder": processed_data_folder,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"bptt": bptt,
"small": True,
},
}
infer_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"processed_data_folder": processed_data_folder,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
"seed_tokens": "something The only game",
},
}
| OpenSeq2Seq-master | example_configs/lm/lstm-test-small.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import WKTDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WikiText-2-raw DATA]"
processed_data_folder = 'wkt2-processed-folder'
base_model = LSTMLM
bptt = 12
steps = 40
base_params = {
"restore_best_checkpoint": True,
"processed_data_folder": processed_data_folder,
"use_horovod": False,
"num_gpus": 2,
"batch_size_per_gpu": 160,
"num_epochs": 1500,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"logdir": "LSTM-FP32-2GPU-SMALL-MIXED",
"eval_steps": steps * 2,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 9e-4
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
# "dtype": tf.float32,
"dtype": "mixed",
"loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": False,
"cudnn_rnn_type": None,
"core_cell": WeightDropLayerNormBasicLSTMCell,
"core_cell_params": {
"num_units": 128,
"forget_bias": 1.0,
},
"encoder_layers": 2,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.6,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.6,
"recurrent_keep_prob": 0.7,
'encoder_emb_keep_prob': 0.37,
"encoder_use_skip_connections": False,
"emb_size": 64,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": False,
"average_across_timestep": True,
"do_mask": False,
}
}
train_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"data_root": data_root,
"processed_data_folder": processed_data_folder,
"pad_vocab_to_eight": False,
"rand_start": True,
"shuffle": False,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
"small": True,
},
}
eval_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"processed_data_folder": processed_data_folder,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"bptt": bptt,
"small": True,
},
}
infer_params = {
"data_layer": WKTDataLayer,
"data_layer_params": {
"processed_data_folder": processed_data_folder,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"bptt": bptt,
"seed_tokens": "something The only game",
},
}
| OpenSeq2Seq-master | example_configs/lm/lstm-test-small-mixed.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import SSTDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import BasicSequenceLoss, CrossEntropyLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
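# Transfer learning: an LSTM LM pre-trained on WikiText-2 (restored via
# "load_model": "WKT2-CPT") is fine-tuned as a binary sentiment classifier on SST.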
base_model = LSTMLM
steps = 10
data_root = "[REPLACE THIS TO THE PATH WITH YOUR SST DATA]"
processed_data_folder = 'sst-processed-data-wkt2'
binary = True
max_length = 96
base_params = {
"restore_best_checkpoint": True, # best checkpoint is only saved when using train_eval mode
"use_horovod": False,
"num_gpus": 1,
"batch_size_per_gpu": 20,
"eval_batch_size_per_gpu": 80,
"num_epochs": 120,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"load_model": "WKT2-CPT",
"lm_vocab_file": 'wkt2-processed-data/vocab.txt',
# "lm_vocab_file": '[LINK TO THE VOCAB FILE IN THE PROCESSED DATA USED TO TRAIN THE BASE LM]'
"logdir": "SST-WKT2-EXP10",
"processed_data_folder": processed_data_folder,
"eval_steps": steps,
"optimizer": "Adam",
"optimizer_params": {},
# fixed learning rate
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 1e-4
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
# "max_grad_norm": 0.25,
"dtype": tf.float32,
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": { # will need to update
"initializer": tf.random_uniform_initializer,
"initializer_params": { # need different initializers for embeddings and for weights
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": False,
"cudnn_rnn_type": None,
"core_cell": WeightDropLayerNormBasicLSTMCell,
"core_cell_params": {
"num_units": 896,
"forget_bias": 1.0,
},
"encoder_layers": 3,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.8,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.8,
"recurrent_keep_prob": 1.0,
'encoder_emb_keep_prob': 0.7,
"encoder_use_skip_connections": False,
"emb_size": 256,
"num_tokens_gen": 10,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
"use_cell_state": True,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": CrossEntropyLoss,
}
train_params = {
"data_layer": SSTDataLayer,
"data_layer_params": {
"data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"max_length": max_length,
},
}
eval_params = {
"data_layer": SSTDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"max_length": max_length,
},
}
infer_params = {
"data_layer": SSTDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"max_length": max_length,
},
}
| OpenSeq2Seq-master | example_configs/transfer/sst-wkt2.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import IMDBDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
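# Fine-tunes a pre-trained LSTM LM (loaded from "LSTM-FP32-2GPU-SMALL") as a binary
# sentiment classifier on IMDB, using CudnnLSTM layers and mixed-precision training.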
data_root = "[REPLACE THIS TO THE PATH WITH YOUR IMDB DATA]"
processed_data_folder = 'imdb-processed-data-wkt2'
base_model = LSTMLM
max_length = 256
binary = True
steps = 10
base_params = {
"restore_best_checkpoint": True,
"use_horovod": False,
"num_gpus": 1,
"batch_size_per_gpu": 16,
"eval_batch_size_per_gpu": 64,
"num_epochs": 100,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"load_model": "LSTM-FP32-2GPU-SMALL",
"logdir": "IMDB-WKT103-CUDNN-MIXED",
"lm_vocab_file": 'wkt2-processed-data/vocab.txt',
# "lm_vocab_file": '[LINK TO THE VOCAB FILE IN THE PROCESSED DATA USED TO TRAIN THE BASE LM]'
"processed_data_folder": processed_data_folder,
"eval_steps": steps,
"optimizer": "Adam",
"optimizer_params": {},
# fixed learning rate
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 1e-4
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
# "max_grad_norm": 0.25,
# "dtype": tf.float32,
"dtype": "mixed",
"loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": True,
"cudnn_rnn_type": tf.contrib.cudnn_rnn.CudnnLSTM,
"core_cell": None,
"core_cell_params": {
"num_units": 1024,
"forget_bias": 1.0,
},
"encoder_layers": 3,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.8,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.8,
"recurrent_keep_prob": 1.0,
'encoder_emb_keep_prob': 0.6,
"encoder_use_skip_connections": False,
"emb_size": 256,
"num_tokens_gen": 10,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": CrossEntropyLoss,
}
train_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
"data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"binary": binary,
"max_length": max_length,
"get_stats": True,
# "small": True,
},
}
eval_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
# "data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"binary": binary,
"max_length": max_length,
# "small": True,
},
}
infer_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
# "data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"binary": binary,
"max_length": max_length,
},
}
| OpenSeq2Seq-master | example_configs/transfer/imdb-wkt2-cudnn.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import IMDBDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
data_root = "[REPLACE THIS TO THE PATH WITH YOUR IMDB DATA]"
processed_data_folder = 'imdb-processed-data-wkt103'
base_model = LSTMLM
max_length = 256
binary = True
steps = 10
base_params = {
"restore_best_checkpoint": True,
"use_horovod": False,
"num_gpus": 1,
"batch_size_per_gpu": 16,
"eval_batch_size_per_gpu": 64,
"num_epochs": 100,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"load_model": "WKT103-CPT",
"logdir": "IMDB-WKT103-EXP1",
"lm_vocab_file": 'wkt103-processed-data/vocab.txt',
# "lm_vocab_file": '[LINK TO THE VOCAB FILE IN THE PROCESSED DATA USED TO TRAIN THE BASE LM]'
"processed_data_folder": processed_data_folder,
"eval_steps": steps,
"optimizer": "Adam",
"optimizer_params": {},
# fixed learning rate
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 1e-4
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
# "max_grad_norm": 0.25,
"dtype": tf.float32,
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": False,
"cudnn_rnn_type": None,
"core_cell": WeightDropLayerNormBasicLSTMCell,
"core_cell_params": {
"num_units": 1024,
"forget_bias": 1.0,
},
"encoder_layers": 3,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.8,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.8,
"recurrent_keep_prob": 1.0,
'encoder_emb_keep_prob': 0.6,
"encoder_use_skip_connections": False,
"emb_size": 256,
"num_tokens_gen": 10,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": CrossEntropyLoss,
}
train_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
"data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"binary": binary,
"max_length": max_length,
"get_stats": True,
# "small": True,
},
}
eval_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
# "data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"binary": binary,
"max_length": max_length,
# "small": True,
},
}
infer_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
# "data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"binary": binary,
"max_length": max_length,
},
}
| OpenSeq2Seq-master | example_configs/transfer/imdb-wkt103.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import IMDBDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
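# Baseline for the transfer configs: the same IMDB sentiment classifier trained
# from scratch, i.e. without loading a pre-trained LM checkpoint.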
data_root = "[REPLACE THIS TO THE PATH WITH YOUR IMDB DATA]"
processed_data_folder = 'imdb-processed-data-wkt2'
base_model = LSTMLM
max_length = 256
binary = True
steps = 40
base_params = {
"restore_best_checkpoint": True,
"use_horovod": False,
"num_gpus": 2,
"batch_size_per_gpu": 160,
"num_epochs": 1500,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"logdir": "IMDB-START",
"lm_vocab_file": 'wkt2-processed-data/vocab.txt',
# "lm_vocab_file": '[LINK TO THE VOCAB FILE IN THE PROCESSED DATA USED TO TRAIN THE BASE LM]'
"processed_data_folder": processed_data_folder,
"eval_steps": steps * 2,
"optimizer": "Adam",
"optimizer_params": {},
# fixed learning rate
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 9e-4
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
# "max_grad_norm": 0.25,
"dtype": tf.float32,
#"dtype": "mixed",
#"automatic_loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": False,
"cudnn_rnn_type": None,
"core_cell": WeightDropLayerNormBasicLSTMCell,
"core_cell_params": {
"num_units": 896,
"forget_bias": 1.0,
},
"encoder_layers": 3,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.6,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.6,
"recurrent_keep_prob": 0.7,
'encoder_emb_keep_prob': 0.37,
"encoder_use_skip_connections": False,
"emb_size": 256,
"num_tokens_gen": 10,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": CrossEntropyLoss,
}
train_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
"data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"binary": binary,
"max_length": max_length,
},
}
eval_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
# "data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"binary": binary,
"max_length": max_length,
},
}
infer_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
# "data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"binary": binary,
"max_length": max_length,
},
}
| OpenSeq2Seq-master | example_configs/transfer/imdb-from-scratch.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import SSTDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import BasicSequenceLoss, CrossEntropyLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
base_model = LSTMLM
steps = 10
data_root = "[REPLACE THIS TO THE PATH WITH YOUR SST DATA]"
processed_data_folder = 'sst-processed-data-wkt2'
binary = True
max_length = 96
base_params = {
"restore_best_checkpoint": True, # best checkpoint is only saved when using train_eval mode
"use_horovod": False,
"num_gpus": 1,
"batch_size_per_gpu": 20,
"eval_batch_size_per_gpu": 80,
"num_epochs": 120,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"load_model": "LSTM-FP32-2GPU-SMALL",
"lm_vocab_file": 'wkt2-processed-data/vocab.txt',
# "lm_vocab_file": '[LINK TO THE VOCAB FILE IN THE PROCESSED DATA USED TO TRAIN THE BASE LM]'
"logdir": "SST-WKT2-SMALL",
"processed_data_folder": processed_data_folder,
"eval_steps": steps,
"optimizer": "Adam",
"optimizer_params": {},
# fixed learning rate
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 1e-4
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
# "max_grad_norm": 0.25,
"dtype": tf.float32,
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": { # will need to update
"initializer": tf.random_uniform_initializer,
"initializer_params": { # need different initializers for embeddings and for weights
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": False,
"cudnn_rnn_type": None,
"core_cell": WeightDropLayerNormBasicLSTMCell,
"core_cell_params": {
"num_units": 128,
"forget_bias": 1.0,
},
"encoder_layers": 3,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 0.8,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 0.8,
"recurrent_keep_prob": 1.0,
'encoder_emb_keep_prob': 0.7,
"encoder_use_skip_connections": False,
"emb_size": 64,
"num_tokens_gen": 10,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
"use_cell_state": True,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-6,
},
"loss": CrossEntropyLoss,
}
train_params = {
"data_layer": SSTDataLayer,
"data_layer_params": {
"data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"max_length": max_length,
"get_stats": True,
},
}
eval_params = {
"data_layer": SSTDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"max_length": max_length,
},
}
infer_params = {
"data_layer": SSTDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"max_length": max_length,
},
} | OpenSeq2Seq-master | example_configs/transfer/sst-wkt2-small.py |
import tensorflow as tf
from open_seq2seq.models import LSTMLM
from open_seq2seq.encoders import LMEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.data import IMDBDataLayer
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr
data_root = "[REPLACE THIS TO THE PATH WITH YOUR IMDB DATA]"
processed_data_folder = 'imdb-processed-data-wkt2'
base_model = LSTMLM
max_length = 256
binary = True
steps = 5
base_params = {
"restore_best_checkpoint": True,
"use_horovod": False,
"num_gpus": 2,
"batch_size_per_gpu": 16,
"num_epochs": 25,
"save_summaries_steps": steps,
"print_loss_steps": steps,
"print_samples_steps": steps,
"save_checkpoint_steps": steps,
"load_model": "WKT2-CPT",
"logdir": "IMDB-WKT2",
"lm_vocab_file": 'wkt2-processed-data/vocab.txt',
# "lm_vocab_file": '[LINK TO THE VOCAB FILE IN THE PROCESSED DATA USED TO TRAIN THE BASE LM]'
"processed_data_folder": processed_data_folder,
"eval_steps": steps * 2,
"optimizer": "Adam",
"optimizer_params": {},
# fixed learning rate
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 1e-5
},
"summaries": ['learning_rate', 'variables', 'gradients',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
# "max_grad_norm": 0.25,
"dtype": tf.float32,
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"encoder": LMEncoder,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"use_cudnn_rnn": False,
"cudnn_rnn_type": None,
"core_cell": WeightDropLayerNormBasicLSTMCell,
"core_cell_params": {
"num_units": 896,
"forget_bias": 1.0,
},
"encoder_layers": 3,
"encoder_dp_input_keep_prob": 1.0,
"encoder_dp_output_keep_prob": 1.0,
"encoder_last_input_keep_prob": 1.0,
"encoder_last_output_keep_prob": 1.0,
"recurrent_keep_prob": 1.0,
'encoder_emb_keep_prob': 1.0,
"encoder_use_skip_connections": False,
"emb_size": 256,
"num_tokens_gen": 10,
"sampling_prob": 0.0, # 0 is always use the ground truth
"fc_use_bias": True,
"weight_tied": True,
"awd_initializer": False,
},
"decoder": FakeDecoder,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 2e-4,
},
"loss": CrossEntropyLoss,
}
train_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
"data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"binary": binary,
"max_length": max_length,
# "small": True,
},
}
eval_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
# "data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"binary": binary,
"max_length": max_length,
# "small": True,
},
}
infer_params = {
"data_layer": IMDBDataLayer,
"data_layer_params": {
# "data_root": data_root,
"pad_vocab_to_eight": False,
"shuffle": False,
"repeat": False,
"rand_start": False,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"binary": binary,
"max_length": max_length,
},
}
| OpenSeq2Seq-master | example_configs/transfer/imdb-wkt2.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import TransformerEncoder
from open_seq2seq.decoders import TransformerDecoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import PaddedCrossEntropyLossWithSmoothing
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.optimizers.lr_policies import transformer_policy
import tensorflow as tf
"""
This configuration file describes a variant of the Transformer model from
https://arxiv.org/abs/1706.03762
"""
base_model = Text2Text
d_model = 1024
num_layers = 6
# REPLACE THIS TO THE PATH WITH YOUR WMT DATA
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WMT DATA]"
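# "Big" Transformer (d_model=1024, 6 layers, 16 heads) for English->Spanish WMT
# translation: LazyAdam with the transformer LR schedule, mixed-precision training.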
base_params = {
"use_horovod": True,
"num_gpus": 1, # when using Horovod we set number of workers with params to mpirun
"batch_size_per_gpu": 256, # this size is in sentence pairs, reduce it if you get OOM
"max_steps": 300000,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 100,
"eval_steps": 4001,
"save_checkpoint_steps": 299998,
"logdir": "Transformer-BIG",
#"dtype": tf.float32, # to disable mixed precision, uncomment this line and comment out the two lines below
"dtype": "mixed",
"loss_scaling": "Backoff",
"optimizer": tf.contrib.opt.LazyAdamOptimizer,
"optimizer_params": {
"beta1": 0.9,
"beta2": 0.997,
"epsilon": 1e-09,
},
"lr_policy": transformer_policy,
"lr_policy_params": {
"learning_rate": 2.0,
"warmup_steps": 8000,
"d_model": d_model,
},
"encoder": TransformerEncoder,
"encoder_params": {
"encoder_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"attention_dropout": 0.1,
"filter_size": 4 * d_model,
"relu_dropout": 0.3,
"layer_postprocess_dropout": 0.3,
"pad_embeddings_2_eight": True,
"remove_padding": True,
},
"decoder": TransformerDecoder,
"decoder_params": {
"layer_postprocess_dropout": 0.3,
"num_hidden_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"attention_dropout": 0.1,
"relu_dropout": 0.3,
"filter_size": 4 * d_model,
"beam_size": 4,
"alpha": 0.6,
"extra_decode_length": 50,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
},
"loss": PaddedCrossEntropyLossWithSmoothing,
"loss_params": {
"label_smoothing": 0.1,
}
}
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
"source_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"target_file": data_root + "train.clean.es.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"max_length": 56,
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt13-en-es.src.BPE_common.32K.tok",
"target_file": data_root+"wmt13-en-es.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"max_length": 256,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt14-en-es.src.BPE_common.32K.tok",
"target_file": data_root+"wmt14-en-es.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 256,
},
}
| OpenSeq2Seq-master | example_configs/text2text/en-es/transformer-big.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import TransformerEncoder
from open_seq2seq.decoders import TransformerDecoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import PaddedCrossEntropyLossWithSmoothing
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.optimizers.lr_policies import transformer_policy
import tensorflow as tf
"""
This configuration file describes a variant of the Transformer model from
https://arxiv.org/abs/1706.03762
"""
base_model = Text2Text
d_model = 1024
num_layers = 6
# REPLACE THIS WITH THE PATH TO YOUR WMT DATA
data_root = "[REPLACE THIS WITH THE PATH TO YOUR WMT DATA]"
base_params = {
"use_horovod": True,
"num_gpus": 1, # when using Horovod we set number of workers with params to mpirun
"batch_size_per_gpu": 256, # this size is in sentence pairs, reduce it if you get OOM
"max_steps": 300000,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 100,
"eval_steps": 4001,
"save_checkpoint_steps": 299998,
"logdir": "Transformer-BIG",
#"dtype": tf.float32, # to enable mixed precision, comment this line and uncomment two below lines
"dtype": "mixed",
"loss_scaling": "Backoff",
"optimizer": tf.contrib.opt.LazyAdamOptimizer,
"optimizer_params": {
"beta1": 0.9,
"beta2": 0.997,
"epsilon": 1e-09,
},
"lr_policy": transformer_policy,
"lr_policy_params": {
"learning_rate": 2.0,
"warmup_steps": 8000,
"d_model": d_model,
},
"encoder": TransformerEncoder,
"encoder_params": {
"encoder_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"attention_dropout": 0.1,
"filter_size": 4 * d_model,
"relu_dropout": 0.3,
"layer_postprocess_dropout": 0.3,
"pad_embeddings_2_eight": True,
"remove_padding": True,
},
"decoder": TransformerDecoder,
"decoder_params": {
"layer_postprocess_dropout": 0.3,
"num_hidden_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"attention_dropout": 0.1,
"relu_dropout": 0.3,
"filter_size": 4 * d_model,
"beam_size": 4,
"alpha": 0.6,
"extra_decode_length": 50,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
},
"loss": PaddedCrossEntropyLossWithSmoothing,
"loss_params": {
"label_smoothing": 0.1,
}
}
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
"target_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"source_file": data_root + "train.clean.es.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"max_length": 56,
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"target_file": data_root+"wmt13-en-es.src.BPE_common.32K.tok",
"source_file": data_root+"wmt13-en-es.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"max_length": 256,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"target_file": data_root+"wmt14-en-es.src.BPE_common.32K.tok",
"source_file": data_root+"wmt14-en-es.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 256,
},
}
| OpenSeq2Seq-master | example_configs/text2text/es-en/transformer-big.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import TransformerEncoder
from open_seq2seq.decoders import TransformerDecoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import PaddedCrossEntropyLossWithSmoothing
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.optimizers.lr_policies import transformer_policy
import tensorflow as tf
"""
This configuration file describes a variant of the Transformer model from
https://arxiv.org/abs/1706.03762
"""
base_model = Text2Text
d_model = 1024
num_layers = 6
# REPLACE THIS WITH THE PATH TO YOUR WMT DATA
data_root = "[REPLACE THIS WITH THE PATH TO YOUR WMT DATA]"
base_params = {
"use_horovod": True,
"num_gpus": 1, # when using Horovod we set number of workers with params to mpirun
"batch_size_per_gpu": 256, # this size is in sentence pairs, reduce it if you get OOM
"max_steps": 300000,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 100,
"eval_steps": 4000,
"save_checkpoint_steps": 299998,
"logdir": "Transformer-BIG",
#"dtype": tf.float32, # to enable mixed precision, comment this line and uncomment two below lines
"dtype": "mixed",
"loss_scaling": "Backoff",
"optimizer": tf.contrib.opt.LazyAdamOptimizer,
"optimizer_params": {
"beta1": 0.9,
"beta2": 0.997,
"epsilon": 1e-09,
},
"lr_policy": transformer_policy,
"lr_policy_params": {
"learning_rate": 2.0,
"warmup_steps": 8000,
"d_model": d_model,
},
"encoder": TransformerEncoder,
"encoder_params": {
"encoder_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"attention_dropout": 0.1,
"filter_size": 4 * d_model,
"relu_dropout": 0.3,
"layer_postprocess_dropout": 0.3,
"pad_embeddings_2_eight": True,
"remove_padding": True,
},
"decoder": TransformerDecoder,
"decoder_params": {
"layer_postprocess_dropout": 0.3,
"num_hidden_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"attention_dropout": 0.1,
"relu_dropout": 0.3,
"filter_size": 4 * d_model,
"beam_size": 4,
"alpha": 0.6,
"extra_decode_length": 50,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
},
"loss": PaddedCrossEntropyLossWithSmoothing,
"loss_params": {
"label_smoothing": 0.1,
}
}
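# Launch sketch for the Horovod setting above (an assumption about common
# OpenSeq2Seq usage, not copied from this repository's docs): with
# "use_horovod": True the number of workers comes from the MPI launcher rather
# than from "num_gpus", e.g. something along the lines of
#   mpirun -np 8 python run.py --config_file=example_configs/text2text/en-de/transformer-big.py --mode=train_eval
# Verify the exact flag names against run.py --help before relying on this.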
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
"source_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"target_file": data_root + "train.clean.de.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"max_length": 56,
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt13-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt13-en-de.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"max_length": 256,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 256,
},
}
| OpenSeq2Seq-master | example_configs/text2text/en-de/transformer-big.py |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.models import Text2Text
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.encoders import ConvS2SEncoder
from open_seq2seq.decoders import ConvS2SDecoder
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.optimizers.lr_policies import transformer_policy
from open_seq2seq.parts.convs2s.utils import gated_linear_units
import math
"""
This configuration file describes a variant of the ConvS2S model from
https://arxiv.org/pdf/1705.03122
"""
# REPLACE THIS WITH THE PATH TO YOUR WMT DATA
data_root = "[REPLACE THIS WITH THE PATH TO YOUR WMT DATA]"
base_model = Text2Text
num_layers = 15
d_model = 512
hidden_before_last = 512
conv_act = gated_linear_units
normalization_type = "weight_norm"
scaling_factor = math.sqrt(0.5)
max_length = 64
base_params = {
"use_horovod": True,
"num_gpus": 1, # Use 8 horovod workers to train on 8 GPUs
  # max_steps corresponds to roughly 35 epochs on 8 GPUs with a batch size of 64:
  # the dataset has ~4.5M sentence pairs, so 4.5M / (8 * 64) ~= 8.8k steps per epoch
  # and 35 epochs ~= 308k steps.
"max_steps": 310000,
"batch_size_per_gpu": 64,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 100,
"eval_steps": 4000,
"save_checkpoint_steps": 4000,
"logdir": "ConvSeq2Seq-8GPUs-FP32",
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": transformer_policy,
"lr_policy_params": {
"learning_rate": 9,
"max_lr": 1e-3,
"warmup_steps": 4000,
"d_model": d_model,
},
"max_grad_norm": 0.1,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"dtype": tf.float32, # to enable mixed precision, comment this line and uncomment two below lines
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"encoder": ConvS2SEncoder,
"encoder_params": {
"src_emb_size": d_model,
"pad_embeddings_2_eight": True,
"att_layer_num": num_layers,
# original ConvS2S paper
#"conv_nchannels_kwidth": [(512, 3)]*10 + [(768, 3)]*3 + [(2048, 1)]*2,
# fairseq config
"conv_nchannels_kwidth": [(512, 3)]*9 + [(1024, 3)]*4 + [(2048, 1)]*2,
"embedding_dropout_keep_prob": 0.8,
"hidden_dropout_keep_prob": 0.8,
"max_input_length": max_length,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"conv_activation": conv_act,
'normalization_type': normalization_type,
"scaling_factor": scaling_factor,
},
"decoder": ConvS2SDecoder,
"decoder_params": {
"shared_embed": True,
"tgt_emb_size": d_model,
"pad_embeddings_2_eight": True,
"out_emb_size": hidden_before_last,
"pos_embed": False,
# original ConvS2S paper
#"conv_nchannels_kwidth": [(512, 3)]*10 + [(768, 3)]*3 + [(2048, 1)]*2,
# fairseq config
"conv_nchannels_kwidth": [(512, 3)]*9 + [(1024, 3)]*4 + [(2048, 1)]*2,
"embedding_dropout_keep_prob": 0.8,
"hidden_dropout_keep_prob": 0.8,
"out_dropout_keep_prob": 0.8,
"max_input_length": max_length,
"extra_decode_length": 56,
"beam_size": 5,
"alpha": 0.6,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"conv_activation": conv_act,
'normalization_type': normalization_type,
"scaling_factor": scaling_factor,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": True,
"average_across_timestep": True,
"do_mask": True
}
}
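# How to read the "conv_nchannels_kwidth" lists above: each (channels, kernel_width)
# tuple defines one convolutional block, so the fairseq-style setting
# [(512, 3)]*9 + [(1024, 3)]*4 + [(2048, 1)]*2 gives 9 + 4 + 2 = 15 blocks,
# matching num_layers = 15. The gated_linear_units activation is the GLU from
# the ConvS2S paper: the convolution produces twice the channels, which are
# split into halves A and B and combined as A * sigmoid(B). This is a
# paraphrase of the paper, not of open_seq2seq/parts/convs2s/utils.py, so
# check that file if the implementation details matter.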
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
"source_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"target_file": data_root + "train.clean.de.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 2,
"max_length": max_length,
},
}
eval_params = {
"batch_size_per_gpu": 64,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt13-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt13-en-de.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"max_length": max_length,
"prefetch_buffer_size": 1,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": max_length*2,
"prefetch_buffer_size": 1,
},
} | OpenSeq2Seq-master | example_configs/text2text/en-de/en-de-convs2s-8-gpu.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import GNMTLikeEncoderWithEmbedding
from open_seq2seq.decoders import RNNDecoderWithAttention, \
BeamSearchRNNDecoderWithAttention
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.optimizers.lr_policies import exp_decay
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WMT DATA]"
base_model = Text2Text
base_params = {
"use_horovod": False,
"num_gpus": 2,
"max_steps": 340000,
"batch_size_per_gpu": 32,
"save_summaries_steps": 10,
"print_loss_steps": 10,
"print_samples_steps": 10,
"eval_steps": 40,
"save_checkpoint_steps": 40,
"logdir": "GNMT-2GPUs-WT",
"optimizer": "Adam",
"optimizer_params": {},
# luong10 decay scheme
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 0.0008,
"begin_decay_at": 170000,
"decay_steps": 17000,
"decay_rate": 0.5,
"use_staircase_decay": True,
"min_lr": 0.0000005,
},
#"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
# 'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"max_grad_norm": 32768.0,
"dtype": tf.float32,
#"dtype": "mixed",
#"automatic_loss_scaling": "Backoff",
"encoder": GNMTLikeEncoderWithEmbedding,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 200,
"forget_bias": 1.0,
},
"encoder_layers": 7,
"encoder_dp_input_keep_prob": 0.8,
"encoder_dp_output_keep_prob": 1.0,
"encoder_use_skip_connections": True,
"src_emb_size": 80,
},
"decoder": RNNDecoderWithAttention,
"decoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 200,
"forget_bias": 1.0,
},
"decoder_layers": 8,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": True,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"tgt_emb_size": 80,
"attention_type": "gnmt_v2",
"attention_layer_size": 1024,
"weight_tied": True,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": True,
"average_across_timestep": True,
"do_mask": True
}
}
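# Sketch of the "luong10" schedule configured above, assuming exp_decay
# implements staircase exponential decay (illustrative only; see
# open_seq2seq/optimizers/lr_policies.py for the real behaviour):
#   lr(step) = 0.0008                                                   for step < 170000
#   lr(step) = max(0.0008 * 0.5 ** floor((step - 170000) / 17000), 5e-7) otherwise
# i.e. the learning rate is halved every 17k steps after step 170k, which over
# the remaining 170k steps of a 340k-step run gives ten halvings -- hence the
# "luong10" name.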
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root+"vocab.bpe.32000",
"tgt_vocab_file": data_root+"vocab.bpe.32000",
"source_file": data_root+"train.tok.clean.bpe.32000.en",
"target_file": data_root+"train.tok.clean.bpe.32000.de",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"max_length": 50,
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root+"vocab.bpe.32000",
"tgt_vocab_file": data_root+"vocab.bpe.32000",
"source_file": data_root+"newstest2013.tok.bpe.32000.en",
"target_file": data_root+"newstest2013.tok.bpe.32000.de",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"max_length": 32,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"decoder": BeamSearchRNNDecoderWithAttention,
"decoder_params": {
"beam_width": 10,
"length_penalty": 1.0,
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 200,
"forget_bias": 1.0,
},
"decoder_layers": 8,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": True,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"tgt_emb_size": 80,
"attention_type": "gnmt_v2",
"attention_layer_size": 1024,
"weight_tied": True, # make sure that weight tied for Training/eval is true too
},
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root+"vocab.bpe.32000",
"tgt_vocab_file": data_root+"vocab.bpe.32000",
"source_file": data_root+"newstest2014.tok.bpe.32000.en",
    # this is intentional: inference does not use the target file as ground truth
"target_file": data_root+"newstest2014.tok.bpe.32000.en",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 64,
},
}
| OpenSeq2Seq-master | example_configs/text2text/en-de/en-de-gnmt-like-weight-tied-2GPUs.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import TransformerEncoder
from open_seq2seq.decoders import TransformerDecoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import PaddedCrossEntropyLossWithSmoothing
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.optimizers.lr_policies import transformer_policy
import tensorflow as tf
"""
This configuration file describes a variant of the Transformer model from
https://arxiv.org/abs/1706.03762
"""
base_model = Text2Text
d_model = 512
num_layers = 6
# REPLACE THIS WITH THE PATH TO YOUR WMT DATA
data_root = "[REPLACE THIS WITH THE PATH TO YOUR WMT DATA]"
#data_root = "/raid/wmt16/"
base_params = {
"use_horovod": True,
"num_gpus": 1, # when using Horovod we set number of workers with params to mpirun
"batch_size_per_gpu": 256, # this size is in sentence pairs, reduce it if you get OOM
"max_steps": 300000,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 100,
"eval_steps": 4001,
"save_checkpoint_steps": 299998,
"logdir": "logs/transformer/",
"dtype": tf.float32, # to enable mixed precision, comment this line and uncomment two below lines
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"optimizer": tf.contrib.opt.LazyAdamOptimizer,
"optimizer_params": {
"beta1": 0.9,
"beta2": 0.997,
"epsilon": 1e-09,
},
"lr_policy": transformer_policy,
"lr_policy_params": {
"learning_rate": 2.0,
"warmup_steps": 8000,
"d_model": d_model,
},
"encoder": TransformerEncoder,
"encoder_params": {
"encoder_layers": num_layers,
"hidden_size": d_model,
"num_heads": 8,
"attention_dropout": 0.1,
"filter_size": 4 * d_model,
"relu_dropout": 0.1,
"layer_postprocess_dropout": 0.1,
"pad_embeddings_2_eight": True,
"remove_padding": True,
},
"decoder": TransformerDecoder,
"decoder_params": {
"layer_postprocess_dropout": 0.1,
"num_hidden_layers": num_layers,
"hidden_size": d_model,
"num_heads": 8,
"attention_dropout": 0.1,
"relu_dropout": 0.1,
"filter_size": 4 * d_model,
"beam_size": 4,
"alpha": 0.6,
"extra_decode_length": 50,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
},
"loss": PaddedCrossEntropyLossWithSmoothing,
"loss_params": {
"label_smoothing": 0.1,
}
}
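# Note on the beam-search settings above: "beam_size": 4 keeps four partial
# hypotheses per decoding step, and "alpha": 0.6 is a length-normalization
# exponent. Assuming the GNMT-style penalty that is commonly paired with
# Transformer beam search, candidate scores are divided by roughly
# ((5 + length) / 6) ** alpha, so larger alpha favours longer outputs; the
# exact formula used here lives in the decoder implementation, not in this
# config.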
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
"source_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"target_file": data_root + "train.clean.de.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"max_length": 56,
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt13-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt13-en-de.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 256,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 256,
},
}
| OpenSeq2Seq-master | example_configs/text2text/en-de/transformer-base.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import TransformerEncoder
from open_seq2seq.decoders import TransformerDecoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import PaddedCrossEntropyLossWithSmoothing
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.optimizers.lr_policies import transformer_policy, poly_decay
from open_seq2seq.optimizers.novograd import NovoGrad
import tensorflow as tf
"""
This configuration file describes a variant of the Transformer model from
https://arxiv.org/abs/1706.03762
"""
base_model = Text2Text
d_model = 1024
num_layers = 6
norm_params = {
  "type": "layernorm_L2",
  "momentum": 0.95,
  "epsilon": 0.00001,
}
attention_dropout = 0.1
dropout = 0.3
# REPLACE THIS WITH THE PATH TO YOUR WMT DATA
#data_root = "[REPLACE THIS WITH THE PATH TO YOUR WMT DATA]"
data_root = "/data/wmt16-ende-sp/"
base_params = {
"use_horovod": True,
"num_gpus": 1, #8, # when using Horovod we set number of workers with params to mpirun
"batch_size_per_gpu": 128, # this size is in sentence pairs, reduce it if you get OOM
"max_steps": 600000,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 10000,
"eval_steps": 10000,
"save_checkpoint_steps": 99999,
"logdir": "tr-nvgrad2_0.90.99-b128-lr0.1-fp16",
# "dtype": tf.float32, # to enable mixed precision, comment this line and uncomment two below lines
"dtype": "mixed",
"loss_scaling": "Backoff",
"optimizer": NovoGrad,
"optimizer_params": {
"beta1": 0.95,
"beta2": 0.99,
"epsilon": 1e-08,
"weight_decay": 0.00001,
"grad_averaging": False,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.04,
"power": 2,
},
"encoder": TransformerEncoder,
"encoder_params": {
"encoder_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"filter_size": 4 * d_model,
"attention_dropout": attention_dropout, # 0.1,
"relu_dropout": dropout, # 0.3,
"layer_postprocess_dropout": dropout, # 0.3,
"pad_embeddings_2_eight": True,
"remove_padding": True,
"norm_params": norm_params,
},
"decoder": TransformerDecoder,
"decoder_params": {
"num_hidden_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"filter_size": 4 * d_model,
"attention_dropout": attention_dropout, # 0.1,
"relu_dropout": dropout, # 0.3,
"layer_postprocess_dropout": dropout, # 0.3,
"beam_size": 4,
"alpha": 0.6,
"extra_decode_length": 50,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"norm_params": norm_params,
},
"loss": PaddedCrossEntropyLossWithSmoothing,
"loss_params": {
"label_smoothing": 0.1,
}
}
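# Illustrative sketch (unused) of the poly_decay schedule configured above,
# assuming a simple polynomial decay to zero over max_steps; the real
# signature lives in open_seq2seq/optimizers/lr_policies.py. NovoGrad itself
# is assumed to keep one second-moment scalar per layer, roughly
#   v_t = beta2 * v_{t-1} + (1 - beta2) * ||g_t||**2
#   m_t = beta1 * m_{t-1} + g_t / sqrt(v_t) + weight_decay * w_t
# (decoupled weight decay), with "grad_averaging": False skipping the
# (1 - beta1) factor on the gradient term.
def _illustrative_poly_decay_lr(step, lr=0.04, power=2, total_steps=600000):
  return lr * (1.0 - float(step) / total_steps) ** power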
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
"source_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"target_file": data_root + "train.clean.de.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"max_length": 56,
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt13-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt13-en-de.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"max_length": 256,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 256,
},
}
| OpenSeq2Seq-master | example_configs/text2text/en-de/transformer-nvgrad.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import BidirectionalRNNEncoderWithEmbedding
from open_seq2seq.decoders import RNNDecoderWithAttention, \
BeamSearchRNNDecoderWithAttention
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.optimizers.lr_policies import fixed_lr
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WMT DATA]"
# This model should run fine on single GPU such as 1080ti or better
base_model = Text2Text
base_params = {
"use_horovod": False,
"num_gpus": 1,
"max_steps": 160082,
"batch_size_per_gpu": 128,
"save_summaries_steps": 50,
"print_loss_steps": 48,
"print_samples_steps": 48,
"eval_steps": 1000,
"save_checkpoint_steps": 2001,
"logdir": "nmt-small-en-de",
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": fixed_lr,
"lr_policy_params": {
"learning_rate": 0.001,
},
"larc_params": {
"larc_eta": 0.001,
},
"dtype": tf.float32,
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"encoder": BidirectionalRNNEncoderWithEmbedding,
"encoder_params": {
"initializer": tf.glorot_uniform_initializer,
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 512,
"forget_bias": 1.0,
},
"encoder_layers": 2,
"encoder_dp_input_keep_prob": 0.8,
"encoder_dp_output_keep_prob": 1.0,
"encoder_use_skip_connections": False,
"src_emb_size": 512,
"use_swap_memory": True,
},
"decoder": RNNDecoderWithAttention,
"decoder_params": {
"initializer": tf.glorot_uniform_initializer,
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 512,
"forget_bias": 1.0,
},
"decoder_layers": 2,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": False,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"tgt_emb_size": 512,
"attention_type": "gnmt_v2",
"attention_layer_size": 512,
"use_swap_memory": True,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": True,
"average_across_timestep": False,
"do_mask": True
}
}
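# "larc_params" above enables LARC (layer-wise adaptive rate control). As a
# rough sketch -- assumed from the LARC/LARS literature rather than read from
# this repository's optimizer code -- each layer's update is rescaled so that
# its effective learning rate does not exceed
#   larc_eta * ||layer weights|| / ||layer gradients||,
# keeping every layer's step small relative to its weight norm.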
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
"source_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"target_file": data_root + "train.clean.de.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 2,
"max_length": 50,
},
}
eval_params = {
"batch_size_per_gpu": 32,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt13-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt13-en-de.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"max_length": 16,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"decoder": BeamSearchRNNDecoderWithAttention,
"decoder_params": {
"beam_width": 10,
"length_penalty": 1.0,
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 512,
"forget_bias": 1.0,
},
"decoder_layers": 2,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": False,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"tgt_emb_size": 512,
"attention_type": "gnmt_v2",
"attention_layer_size": 512,
},
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
    # this is intentional: inference does not use the target file as ground truth
"target_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 256,
"prefetch_buffer_size": 1,
},
}
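# Example invocation (illustrative; the flag names below reflect common
# OpenSeq2Seq usage and should be checked against run.py --help):
#   python run.py --config_file=example_configs/text2text/en-de/en-de-nmt-small.py --mode=train_eval
#   python run.py --config_file=example_configs/text2text/en-de/en-de-nmt-small.py --mode=infer \
#                 --infer_output_file=raw_output.txt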
| OpenSeq2Seq-master | example_configs/text2text/en-de/en-de-nmt-small.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import GNMTLikeEncoderWithEmbedding
from open_seq2seq.decoders import RNNDecoderWithAttention, \
BeamSearchRNNDecoderWithAttention
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.optimizers.lr_policies import exp_decay
data_root = "[REPLACE THIS TO THE PATH WITH YOUR WMT DATA]"
base_model = Text2Text
base_params = {
"use_horovod": False,
"num_gpus": 4,
"max_steps": 340000,
"batch_size_per_gpu": 32,
"save_summaries_steps": 50,
"print_loss_steps": 48,
"print_samples_steps": 48,
"eval_steps": 4001,
"save_checkpoint_steps": 4000,
"logdir": "GNMT-4GPUs-FP32",
"optimizer": "Adam",
"optimizer_params": {},
# luong10 decay scheme
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 0.0008,
"begin_decay_at": 170000,
"decay_steps": 17000,
"decay_rate": 0.5,
"use_staircase_decay": True,
"min_lr": 0.0000005,
},
#"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
# 'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"max_grad_norm": 32768.0,
"dtype": tf.float32, # to enable mixed precision, comment this line and uncomment two below lines
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"encoder": GNMTLikeEncoderWithEmbedding,
"encoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 1024,
"forget_bias": 1.0,
},
"encoder_layers": 7,
"encoder_dp_input_keep_prob": 0.8,
"encoder_dp_output_keep_prob": 1.0,
"encoder_use_skip_connections": True,
"src_emb_size": 1024,
},
"decoder": RNNDecoderWithAttention,
"decoder_params": {
"initializer": tf.random_uniform_initializer,
"initializer_params": {
"minval": -0.1,
"maxval": 0.1,
},
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 1024,
"forget_bias": 1.0,
},
"decoder_layers": 8,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": True,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"tgt_emb_size": 1024,
"attention_type": "gnmt_v2",
"attention_layer_size": 1024,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": True,
"average_across_timestep": True,
"do_mask": True
}
}
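# Architecture note (an assumption based on the GNMT paper rather than on
# GNMTLikeEncoderWithEmbedding itself): a GNMT-style encoder makes the first
# layer bidirectional and the remaining layers unidirectional, and
# "encoder_use_skip_connections": True adds residual connections between the
# stacked LSTM layers, which is what makes the deep 7/8-layer stacks above
# trainable.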
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
"source_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"target_file": data_root + "train.clean.de.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 25000,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 8,
"max_length": 50,
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt13-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt13-en-de.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"map_parallel_calls": 16,
"prefetch_buffer_size": 1,
"max_length": 32,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"decoder": BeamSearchRNNDecoderWithAttention,
"decoder_params": {
"beam_width": 10,
"length_penalty": 1.0,
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 1024,
"forget_bias": 1.0,
},
"decoder_layers": 8,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": True,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"tgt_emb_size": 1024,
"attention_type": "gnmt_v2",
"attention_layer_size": 1024,
},
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 512,
},
}
| OpenSeq2Seq-master | example_configs/text2text/en-de/en-de-gnmt-like-4GPUs.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import TransformerEncoder
from open_seq2seq.decoders import TransformerDecoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import PaddedCrossEntropyLossWithSmoothing
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.optimizers.lr_policies import transformer_policy, poly_decay
import tensorflow as tf
"""
This configuration file describes a variant of the Transformer model from
https://arxiv.org/abs/1706.03762
"""
base_model = Text2Text
d_model = 1024
num_layers = 6
regularizer = tf.contrib.layers.l2_regularizer  # set to None to disable weight regularization
regularizer_params = {'scale': 0.001}
norm_params = {
  "type": "batch_norm",  # alternatives: "layernorm_L1", "layernorm_L2"
  "momentum": 0.95,
  "epsilon": 0.00001,
  "center_scale": False,  # alternative: True
  "regularizer": regularizer,
  "regularizer_params": regularizer_params
}
attention_dropout = 0.02
dropout = 0.3
# REPLACE THIS WITH THE PATH TO YOUR WMT DATA
data_root = "[REPLACE THIS WITH THE PATH TO YOUR WMT DATA]"
base_params = {
"use_horovod": False, #True,
"num_gpus": 2, #8, # when using Horovod we set number of workers with params to mpirun
"batch_size_per_gpu": 128, # this size is in sentence pairs, reduce it if you get OOM
"max_steps": 1000000,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 10000,
"eval_steps": 10000,
"save_checkpoint_steps": 99999,
"logdir": "logs/tr-bn2-reg",
#"dtype": tf.float32, # to enable mixed precision, comment this line and uncomment two below lines
"dtype": "mixed",
"loss_scaling": "Backoff",
# "summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
# 'variable_norm', 'gradient_norm', 'global_gradient_norm'],
#"iter_size": 1,
"optimizer": tf.contrib.opt.LazyAdamOptimizer,
"optimizer_params": {
"beta1": 0.9,
"beta2": 0.997,
"epsilon": 1e-09,
},
"lr_policy": transformer_policy,
"lr_policy_params": {
"learning_rate": 2.0,
"warmup_steps": 8000,
"d_model": d_model,
},
# "optimizer": "Momentum",
# "optimizer_params": {
# "momentum": 0.95,
# },
# "lr_policy": poly_decay, # fixed_lr,
# "lr_policy_params": {
# "learning_rate": 0.1, # 0,2 for 4 GPU
# "power": 2,
# },
"larc_params": {
"larc_eta": 0.001,
},
"encoder": TransformerEncoder,
"encoder_params": {
"encoder_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"filter_size": 4 * d_model,
"attention_dropout": attention_dropout, # 0.1,
"relu_dropout": dropout, # 0.3,
"layer_postprocess_dropout": dropout, # 0.3,
"pad_embeddings_2_eight": True,
"remove_padding": True,
"norm_params": norm_params,
"regularizer": regularizer,
"regularizer_params": regularizer_params,
},
"decoder": TransformerDecoder,
"decoder_params": {
"num_hidden_layers": num_layers,
"hidden_size": d_model,
"num_heads": 16,
"filter_size": 4 * d_model,
"attention_dropout": attention_dropout, # 0.1,
"relu_dropout": dropout, # 0.3,
"layer_postprocess_dropout": dropout, # 0.3,
"beam_size": 4,
"alpha": 0.6,
"extra_decode_length": 50,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"norm_params": norm_params,
"regularizer": regularizer,
"regularizer_params": regularizer_params,
},
"loss": PaddedCrossEntropyLossWithSmoothing,
"loss_params": {
"label_smoothing": 0.1,
}
}
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"pad_vocab_to_eight": True,
"src_vocab_file": data_root + "m_common.vocab",
"tgt_vocab_file": data_root + "m_common.vocab",
# "source_file": data_root + "wmt13-en-de.src.BPE_common.32K.tok",
# "target_file": data_root + "wmt13-en-de.ref.BPE_common.32K.tok",
"source_file": data_root + "train.clean.en.shuffled.BPE_common.32K.tok",
"target_file": data_root + "train.clean.de.shuffled.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": True,
"shuffle_buffer_size": 4500000,
"repeat": True,
"map_parallel_calls": 16,
"max_length": 64,
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt13-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt13-en-de.ref.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": True,
"max_length": 256,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": data_root+"m_common.vocab",
"tgt_vocab_file": data_root+"m_common.vocab",
"source_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"target_file": data_root+"wmt14-en-de.src.BPE_common.32K.tok",
"delimiter": " ",
"shuffle": False,
"repeat": False,
"max_length": 256,
},
}
| OpenSeq2Seq-master | example_configs/text2text/en-de/transformer-bn.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import BidirectionalRNNEncoderWithEmbedding
from open_seq2seq.decoders import RNNDecoderWithAttention, \
BeamSearchRNNDecoderWithAttention
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.optimizers.lr_policies import fixed_lr
"""
This configuration file describes a classic RNN-based encoder-decoder model
with attention on the toy task of reversing sequences
"""
base_model = Text2Text
base_params = {
"use_horovod": False,
#"iter_size": 10,
# set this to number of available GPUs
"num_gpus": 1,
"batch_size_per_gpu": 64,
"max_steps": 800,
"save_summaries_steps": 10,
"print_loss_steps": 10,
"print_samples_steps": 20,
"eval_steps": 50,
"save_checkpoint_steps": 300,
"logdir": "ReversalTask-RNN-RNN",
"optimizer": "Adam",
"optimizer_params": {"epsilon": 1e-4},
"lr_policy": fixed_lr,
"lr_policy_params": {
'learning_rate': 0.001
},
"max_grad_norm": 3.0,
"dtype": tf.float32,
# "dtype": "mixed",
# "loss_scaling": "Backoff",
"encoder": BidirectionalRNNEncoderWithEmbedding,
"encoder_params": {
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 128,
"forget_bias": 1.0,
},
"encoder_layers": 1,
"encoder_dp_input_keep_prob": 0.8,
"encoder_dp_output_keep_prob": 1.0,
"encoder_use_skip_connections": False,
"src_emb_size": 128,
},
"decoder": RNNDecoderWithAttention,
"decoder_params": {
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 128,
# "forget_bias": 1.0,
},
"decoder_layers": 1,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": False,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"tgt_emb_size": 128,
"attention_type": "luong",
"luong_scale": False,
"attention_layer_size": 128,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": True,
"average_across_timestep": False,
"do_mask": True
}
}
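# Background note on the attention settings above (a paraphrase of Luong et
# al., 2015, not of this repository's code): "attention_type": "luong" uses
# the multiplicative score h_t^T W h_s between the decoder state h_t and each
# encoder output h_s, and "luong_scale": False omits the extra learned scalar
# multiplier. The "gnmt_v2" attention used in the larger configs instead
# computes attention from the first decoder layer and feeds the context vector
# to all subsequent layers, as in GNMT.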
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/train/source.txt",
"target_file": "toy_text_data/train/target.txt",
"shuffle": True,
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
eval_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/dev/source.txt",
"target_file": "toy_text_data/dev/target.txt",
"shuffle": False,
# because we evaluate many times
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"decoder": BeamSearchRNNDecoderWithAttention,
"decoder_params": {
#"decoder_cell_type": "lstm",
#"decoder_cell_units": 128,
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": 128,
"forget_bias": 1.0,
},
"decoder_layers": 1,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": False,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"tgt_emb_size": 128,
"attention_type": "luong",
"luong_scale": False,
"attention_layer_size": 128,
"beam_width": 10,
"length_penalty": 1.0,
},
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/source.txt",
"source_file": "toy_text_data/test/source.txt",
"target_file": "toy_text_data/test/target.txt",
"shuffle": False,
"repeat": False,
"max_length": 256,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
} | OpenSeq2Seq-master | example_configs/text2text/toy-reversal/nmt-reversal-RR.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.models import Text2Text
from open_seq2seq.decoders import RNNDecoderWithAttention, BeamSearchRNNDecoderWithAttention
from open_seq2seq.encoders import ConvS2SEncoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.optimizers.lr_policies import fixed_lr
"""
This configuration file describes a convolutional encoder and an RNN decoder with attention
on the toy task of reversing sequences
"""
base_model = Text2Text
d_model = 128
num_layers = 2
base_params = {
"use_horovod": False,
"num_gpus": 1,
"batch_size_per_gpu": 64,
"max_steps": 1000,
"save_summaries_steps": 10,
"print_loss_steps": 10,
"print_samples_steps": 20,
"eval_steps": 50,
"save_checkpoint_steps": 200,
"logdir": "ReversalTask-Conv-RNN",
"optimizer": "Adam",
"optimizer_params": {"epsilon": 1e-9},
"lr_policy": fixed_lr,
"lr_policy_params": {
'learning_rate': 1e-3
},
"max_grad_norm": 3.0,
"dtype": tf.float32,
# "dtype": "mixed",
# "loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": ConvS2SEncoder,
"encoder_params": {
"src_emb_size": d_model,
"att_layer_num": num_layers,
"embedding_dropout_keep_prob": 0.9,
"pad_embeddings_2_eight": True,
"hidden_dropout_keep_prob": 0.9,
"conv_nchannels_kwidth": [(d_model, 3)] * num_layers,
"max_input_length": 100,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
},
"decoder": RNNDecoderWithAttention,
"decoder_params": {
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": d_model,
},
"decoder_layers": num_layers,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": False,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"tgt_emb_size": d_model,
"attention_type": "luong",
"luong_scale": False,
"attention_layer_size": 128,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": True,
"average_across_timestep": True,
"do_mask": True
}
}
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/train/source.txt",
"target_file": "toy_text_data/train/target.txt",
"shuffle": True,
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
eval_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/dev/source.txt",
"target_file": "toy_text_data/dev/target.txt",
"shuffle": False,
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"decoder": BeamSearchRNNDecoderWithAttention,
"decoder_params": {
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": d_model,
},
"decoder_layers": num_layers,
"decoder_dp_input_keep_prob": 0.8,
"decoder_dp_output_keep_prob": 1.0,
"decoder_use_skip_connections": False,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
"tgt_emb_size": d_model,
"attention_type": "luong",
"luong_scale": False,
"attention_layer_size": d_model,
"beam_width": 5,
"length_penalty": 1.0,
},
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/source.txt",
"source_file": "toy_text_data/test/source.txt",
"target_file": "toy_text_data/test/source.txt",
"shuffle": False,
"repeat": False,
"max_length": 256,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
| OpenSeq2Seq-master | example_configs/text2text/toy-reversal/nmt-reversal-CR.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.models import Text2Text
from open_seq2seq.decoders import ConvS2SDecoder
from open_seq2seq.encoders import ConvS2SEncoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.optimizers.lr_policies import fixed_lr
"""
This configuration file describes a fully convolutional model (ConvS2S)
on the toy task of reversing sequences
"""
base_model = Text2Text
d_model = 128
num_layers = 2
base_params = {
"use_horovod": False,
"num_gpus": 1,
"batch_size_per_gpu": 64,
"max_steps": 1000,
"save_summaries_steps": 10,
"print_loss_steps": 10,
"print_samples_steps": 20,
"eval_steps": 50,
"save_checkpoint_steps": 200,
"logdir": "ReversalTask-Conv-Conv",
"optimizer": "Adam",
"optimizer_params": {"epsilon": 1e-9},
"lr_policy": fixed_lr,
"lr_policy_params": {
'learning_rate': 1e-3
},
"max_grad_norm": 3.0,
"dtype": tf.float32,
# "dtype": "mixed",
# "loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": ConvS2SEncoder,
"encoder_params": {
"src_emb_size": d_model,
"embedding_dropout_keep_prob": 0.9,
"pad_embeddings_2_eight": False,
"att_layer_num": num_layers,
"conv_nchannels_kwidth": [(d_model, 3)] * num_layers,
"hidden_dropout_keep_prob": 0.9,
"max_input_length": 100,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
},
"decoder": ConvS2SDecoder,
"decoder_params": {
"shared_embed": True,
"tgt_emb_size": d_model,
"embedding_dropout_keep_prob": 0.9,
"pad_embeddings_2_eight": False,
"pos_embed": True,
"conv_nchannels_kwidth": [(d_model, 3)] * num_layers,
"hidden_dropout_keep_prob": 0.9,
"out_dropout_keep_prob": 0.9,
"max_input_length": 120,
"extra_decode_length": 10,
"beam_size": 5,
"alpha": 0.6,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": True,
"average_across_timestep": True,
"do_mask": True
}
}
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/train/source.txt",
"target_file": "toy_text_data/train/target.txt",
"shuffle": True,
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
eval_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/dev/source.txt",
"target_file": "toy_text_data/dev/target.txt",
"shuffle": False,
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/source.txt",
"source_file": "toy_text_data/test/source.txt",
    # this is intentional, to be sure the model is not using ground truth
"target_file": "toy_text_data/test/source.txt",
"shuffle": False,
"repeat": False,
"max_length": 256,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
| OpenSeq2Seq-master | example_configs/text2text/toy-reversal/nmt-reversal-CC.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import BidirectionalRNNEncoderWithEmbedding
from open_seq2seq.decoders import ConvS2SDecoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import BasicSequenceLoss
from open_seq2seq.data.text2text.tokenizer import EOS_ID
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.optimizers.lr_policies import fixed_lr
"""
This configuration file describes a bidirectional RNN-based encoder and a convolutional decoder
on the toy task of reversing sequences
"""
base_model = Text2Text
d_model = 128
num_layers = 2
base_params = {
"use_horovod": False,
"num_gpus": 1,
"batch_size_per_gpu": 64,
"max_steps": 1000,
"save_summaries_steps": 10,
"print_loss_steps": 10,
"print_samples_steps": 20,
"eval_steps": 50,
"save_checkpoint_steps": 200,
"logdir": "ReversalTask-RNN-Conv",
"optimizer": "Adam",
"optimizer_params": {"epsilon": 1e-9},
"lr_policy": fixed_lr,
"lr_policy_params": {
'learning_rate': 1e-3
},
"max_grad_norm": 3.0,
"dtype": tf.float32,
# "dtype": "mixed",
# "loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": BidirectionalRNNEncoderWithEmbedding,
"encoder_params": {
"core_cell": tf.nn.rnn_cell.LSTMCell,
"core_cell_params": {
"num_units": int(d_model/2),
},
"encoder_layers": num_layers,
"encoder_dp_input_keep_prob": 0.8,
"encoder_dp_output_keep_prob": 1.0,
"encoder_use_skip_connections": False,
"src_emb_size": d_model,
},
"decoder": ConvS2SDecoder,
"decoder_params": {
"shared_embed": True,
"tgt_emb_size": d_model,
"conv_nchannels_kwidth": [(d_model, 3)] * num_layers,
"embedding_dropout_keep_prob": 0.9,
"hidden_dropout_keep_prob": 0.9,
"out_dropout_keep_prob": 0.9,
"max_input_length": 100,
"extra_decode_length": 10,
"beam_size": 5,
"alpha": 0.6,
"EOS_ID": EOS_ID,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
},
"loss": BasicSequenceLoss,
"loss_params": {
"offset_target_by_one": True,
"average_across_timestep": True,
"do_mask": True
}
}
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/train/source.txt",
"target_file": "toy_text_data/train/target.txt",
"shuffle": True,
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
eval_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/dev/source.txt",
"target_file": "toy_text_data/dev/target.txt",
"shuffle": False,
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/source.txt",
"source_file": "toy_text_data/test/source.txt",
    # this is intentional, to be sure the model is not using ground truth
"target_file": "toy_text_data/test/source.txt",
"shuffle": False,
"repeat": False,
"max_length": 256,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
},
}
| OpenSeq2Seq-master | example_configs/text2text/toy-reversal/nmt-reversal-RC.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from open_seq2seq.models import Text2Text
from open_seq2seq.encoders import TransformerEncoder
from open_seq2seq.decoders import TransformerDecoder
from open_seq2seq.data.text2text.text2text import ParallelTextDataLayer
from open_seq2seq.losses import PaddedCrossEntropyLossWithSmoothing
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.optimizers.lr_policies import transformer_policy
import tensorflow as tf
"""
This configuration file describes a tiny variant of the Transformer model from
https://arxiv.org/abs/1706.03762 on the toy task of reversing sequences
"""
base_model = Text2Text
d_model = 128
num_layers = 2
base_params = {
"use_horovod": False,
"num_gpus": 1,
"batch_size_per_gpu": 64,
"max_steps": 800,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 50,
"eval_steps": 50,
"save_checkpoint_steps": 300,
"logdir": "ReversalTask-Transformer-Transformer",
"dtype": tf.float32,
# "dtype": "mixed",
# "loss_scaling": "Backoff",
"optimizer": tf.contrib.opt.LazyAdamOptimizer,
"optimizer_params": {
"beta1": 0.9,
"beta2": 0.997,
"epsilon": 0.000000001,
},
"lr_policy": transformer_policy,
"lr_policy_params": {
"learning_rate": 1.0,
"warmup_steps": 200,
"d_model": d_model,
},
"encoder": TransformerEncoder,
"encoder_params": {
"encoder_layers": num_layers,
"hidden_size": d_model,
"num_heads": 8,
"attention_dropout": 0.1,
"filter_size": 4*d_model,
"relu_dropout": 0.1,
"layer_postprocess_dropout": 0.1,
"remove_padding": True,
},
"decoder": TransformerDecoder,
"decoder_params": {
"layer_postprocess_dropout": 0.1,
"num_hidden_layers": num_layers,
"hidden_size": d_model,
"num_heads": 8,
"attention_dropout": 0.1,
"relu_dropout": 0.1,
"filter_size": 4*d_model,
"beam_size": 5,
"alpha": 1.0,
"extra_decode_length": 2,
"EOS_ID": SpecialTextTokens.EOS_ID.value,
"GO_SYMBOL": SpecialTextTokens.S_ID.value,
"END_SYMBOL": SpecialTextTokens.EOS_ID.value,
"PAD_SYMBOL": SpecialTextTokens.PAD_ID.value,
},
"loss": PaddedCrossEntropyLossWithSmoothing,
"loss_params": {}
}
train_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/train/source.txt",
"target_file": "toy_text_data/train/target.txt",
"shuffle": True,
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
"use_start_token": False,
},
}
eval_params = {
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/target.txt",
"source_file": "toy_text_data/dev/source.txt",
"target_file": "toy_text_data/dev/target.txt",
"shuffle": False,
# because we evaluate many times
"repeat": True,
"max_length": 56,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
"use_start_token": False,
},
}
infer_params = {
"batch_size_per_gpu": 1,
"data_layer": ParallelTextDataLayer,
"data_layer_params": {
"src_vocab_file": "toy_text_data/vocab/source.txt",
"tgt_vocab_file": "toy_text_data/vocab/source.txt",
"source_file": "toy_text_data/test/source.txt",
"target_file": "toy_text_data/test/target.txt",
"shuffle": False,
"repeat": False,
"max_length": 256,
"delimiter": " ",
"special_tokens_already_in_vocab": False,
"use_start_token": False,
},
}
| OpenSeq2Seq-master | example_configs/text2text/toy-reversal/nmt-reversal-TT.py |
# pylint: skip-file
import os
import tensorflow as tf
from open_seq2seq.data import Text2SpeechDataLayer
from open_seq2seq.decoders import CentaurDecoder
from open_seq2seq.encoders import CentaurEncoder
from open_seq2seq.losses import Text2SpeechLoss
from open_seq2seq.models import Text2SpeechCentaur
from open_seq2seq.optimizers.lr_policies import poly_decay
from open_seq2seq.optimizers.novograd import NovoGrad
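# NovoGrad (used as the optimizer below) is an SGD-like method with layer-wise
# normalized gradient moments and decoupled weight decay.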
base_model = Text2SpeechCentaur
dataset = "LJ"
dataset_location = "/data/LJSpeech"
output_type = "both"
trim = False
exp_mag = True
mag_num_feats = 513
train = "train.csv"
valid = "test.csv"
batch_size = 32
num_audio_features = {
"mel": 80,
"magnitude": mag_num_feats
}
data_min = {
"mel": 1e-2,
"magnitude": 1e-5,
}
debug = False
num_gpus = 8 if not debug else 1
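# Number of spectrogram frames the decoder predicts per step (Tacotron-style frame reduction).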
reduction_factor = 2
attention_layers = 4
encoder_hidden_size = 256
decoder_hidden_size = 512
base_params = {
"random_seed": 0,
"use_horovod": True if not debug else False,
"max_steps": 1000000,
"bench_start": 0,
"num_gpus": num_gpus,
"batch_size_per_gpu": batch_size,
"save_summaries_steps": 1000 if not debug else 10,
"print_loss_steps": 1000 if not debug else 10,
"print_samples_steps": 1000 if not debug else 10,
"eval_steps": 5000 if not debug else 50,
"save_checkpoint_steps": 5000,
"save_to_tensorboard": True,
"logdir": "result/centaur-float",
"max_grad_norm": 1.,
"optimizer": NovoGrad,
"optimizer_params": {
"beta1": 0.95,
"beta2": 0.98,
"epsilon": 1e-08,
"weight_decay": 0.001,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.02,
"power": 2.0,
},
"dtype": tf.float32,
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ["learning_rate", "variables", "gradients", "larc_summaries",
"variable_norm", "gradient_norm", "global_gradient_norm"],
"encoder": CentaurEncoder,
"encoder_params": {
"src_vocab_size": 94,
"embedding_size": encoder_hidden_size,
"output_size": encoder_hidden_size,
"pad_embeddings_2_eight": True,
"cnn_dropout_prob": 0.1,
"conv_layers": [
{
"kernel_size": [3], "stride": [1],
"num_channels": encoder_hidden_size, "padding": "SAME",
"activation_fn": tf.nn.relu
},
{
"kernel_size": [3], "stride": [1],
"num_channels": encoder_hidden_size, "padding": "SAME",
"activation_fn": tf.nn.relu
},
{
"kernel_size": [3], "stride": [1],
"num_channels": encoder_hidden_size, "padding": "SAME",
"activation_fn": tf.nn.relu
},
{
"kernel_size": [3], "stride": [1],
"num_channels": encoder_hidden_size, "padding": "SAME",
"activation_fn": tf.nn.relu
}
]
},
"decoder": CentaurDecoder,
"decoder_params": {
"attention_layers": attention_layers,
"self_attention_conv_params": {
"kernel_size": [5],
"stride": [1],
"num_channels": decoder_hidden_size,
"padding": "VALID",
"is_causal": True,
"activation_fn": tf.nn.relu
},
"window_size": 4,
"back_step_size": 0,
"force_layers": [1, 3],
"hidden_size": decoder_hidden_size,
"reduction_factor": reduction_factor,
"prenet_layers": 2,
"prenet_hidden_size": decoder_hidden_size,
"prenet_use_inference_dropout": False,
"cnn_dropout_prob": 0.1,
"prenet_dropout": 0.5,
"conv_layers":
[
{
"kernel_size": [5],
"stride": [1],
"num_channels": decoder_hidden_size,
"padding": "VALID",
"is_causal": True,
"activation_fn": tf.nn.relu
}
] * 4,
"mag_conv_layers":
[
{
"kernel_size": [5],
"stride": [1],
"num_channels": decoder_hidden_size,
"padding": "VALID",
"is_causal": True,
"activation_fn": tf.nn.relu
}
] * 4,
"attention_dropout": 0.1,
"layer_postprocess_dropout": 0.1
},
"loss": Text2SpeechLoss,
"loss_params": {
"use_mask": True,
"l1_norm": True
},
"data_layer": Text2SpeechDataLayer,
"data_layer_params": {
"dataset": dataset,
"use_cache": True,
"num_audio_features": num_audio_features,
"output_type": output_type,
"vocab_file": "open_seq2seq/test_utils/vocab_tts.txt",
"dataset_location": dataset_location,
"mag_power": 1,
"pad_EOS": True,
"feature_normalize": False,
"feature_normalize_mean": 0.,
"feature_normalize_std": 1.,
"data_min": data_min,
"mel_type": "htk",
"trim": trim,
"duration_max": 1024,
"duration_min": 24,
"exp_mag": exp_mag
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, train),
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, valid),
],
"duration_max": 1000,
"duration_min": 0,
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, "infer.csv"),
],
"duration_max": 1000,
"duration_min": 0,
"shuffle": False,
},
}
interactive_infer_params = {
"data_layer_params": {
"dataset_files": [],
"duration_max": 1000,
"duration_min": 0,
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/text2speech/centaur_float.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Text2SpeechWavenet
from open_seq2seq.encoders import WavenetEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.losses import WavenetLoss
from open_seq2seq.data import WavenetDataLayer
from open_seq2seq.optimizers.lr_policies import exp_decay
from open_seq2seq.parts.convs2s.utils import gated_linear_units
base_model = Text2SpeechWavenet
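# WaveNet vocoder trained on LJSpeech; the encoder's 256 quantization channels
# correspond to 8-bit mu-law encoding of the waveform.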
base_params = {
"random_seed": 0,
"use_horovod": False,
"max_steps": 1000000,
"num_gpus": 1,
"batch_size_per_gpu": 4,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 500,
"eval_steps": 500,
"save_checkpoint_steps": 2500,
"logdir": "result/wavenet-LJ-mixed",
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"decay_steps": 20000,
"decay_rate": 0.1,
"use_staircase_decay": False,
"begin_decay_at": 45000,
"min_lr": 1e-5,
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
"scale": 1e-6
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": [],
"encoder": WavenetEncoder,
"encoder_params": {
"layer_type": "conv1d",
"kernel_size": 3,
"strides": 1,
"padding": "VALID",
"blocks": 3,
"layers_per_block": 10,
"filters": 64,
"quantization_channels": 256
},
"decoder": FakeDecoder,
"loss": WavenetLoss,
"data_layer": WavenetDataLayer,
"data_layer_params": {
"num_audio_features": 80,
"dataset_location": "data/speech/LJSpeech/wavs/"
}
}
train_params = {
"data_layer_params": {
"dataset_files": [
"data/speech/LJSpeech/train.csv",
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
"data/speech/LJSpeech/val.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
"data/speech/LJSpeech/test.csv",
],
"shuffle": False,
},
}
interactive_infer_params = {
"data_layer_params": {
"dataset_files": [],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/text2speech/wavenet_mixed.py |
# pylint: skip-file
import os
import tensorflow as tf
from open_seq2seq.models import Text2SpeechTacotron
from open_seq2seq.encoders import Tacotron2Encoder
from open_seq2seq.decoders import Tacotron2Decoder
from open_seq2seq.data import Text2SpeechDataLayer
from open_seq2seq.losses import Text2SpeechLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr, transformer_policy, exp_decay
base_model = Text2SpeechTacotron
dataset = "LJ"
dataset_location = "/data/speech/LJSpeech"
output_type = "both"
if dataset == "MAILABS":
trim = True
mag_num_feats = 401
train = "train.csv"
val = "val.csv"
batch_size = 32
elif dataset == "LJ":
trim = False
mag_num_feats = 513
train = "train_32.csv"
val = "val_32.csv"
batch_size = 48
else:
raise ValueError("Unknown dataset")
exp_mag = False
if output_type == "magnitude":
num_audio_features = mag_num_feats
data_min = 1e-5
elif output_type == "mel":
num_audio_features = 80
data_min = 1e-2
elif output_type == "both":
num_audio_features = {
"mel": 80,
"magnitude": mag_num_feats
}
data_min = {
"mel": 1e-2,
"magnitude": 1e-5,
}
exp_mag = True
else:
raise ValueError("Unknown param for output_type")
base_params = {
"random_seed": 0,
"use_horovod": True,
"max_steps": 40000,
"batch_size_per_gpu": batch_size,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 500,
"eval_steps": 500,
"save_checkpoint_steps": 2500,
"save_to_tensorboard": True,
"logdir": "result/tacotron-LJ-float-8gpu",
"max_grad_norm":1.,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"decay_steps": 10000,
"decay_rate": 0.1,
"use_staircase_decay": False,
"begin_decay_at": 20000,
"min_lr": 1e-5,
},
"dtype": tf.float32,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 1e-6
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": Tacotron2Encoder,
"encoder_params": {
"cnn_dropout_prob": 0.5,
"rnn_dropout_prob": 0.,
'src_emb_size': 512,
"conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
}
],
"activation_fn": tf.nn.relu,
"num_rnn_layers": 1,
"rnn_cell_dim": 256,
"rnn_unidirectional": False,
"use_cudnn_rnn": True,
"rnn_type": tf.contrib.cudnn_rnn.CudnnLSTM,
"zoneout_prob": 0.,
"data_format": "channels_last",
},
"decoder": Tacotron2Decoder,
"decoder_params": {
"zoneout_prob": 0.,
"dropout_prob": 0.1,
'attention_type': 'location',
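# 'location' selects location-sensitive attention, as in Tacotron 2.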
'attention_layer_size': 128,
'attention_bias': True,
'decoder_cell_units': 1024,
'decoder_cell_type': tf.nn.rnn_cell.LSTMCell,
'decoder_layers': 2,
'enable_prenet': True,
'prenet_layers': 2,
'prenet_units': 256,
'enable_postnet': True,
"postnet_keep_dropout_prob": 0.5,
"postnet_data_format": "channels_last",
"postnet_conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": -1, "padding": "SAME",
"activation_fn": None
}
],
"mask_decoder_sequence": True,
"parallel_iterations": 32,
},
"loss": Text2SpeechLoss,
"loss_params": {
"use_mask": True
},
"data_layer": Text2SpeechDataLayer,
"data_layer_params": {
"dataset": dataset,
"num_audio_features": num_audio_features,
"output_type": output_type,
"vocab_file": "open_seq2seq/test_utils/vocab_tts.txt",
'dataset_location':dataset_location,
"mag_power": 1,
"pad_EOS": True,
"feature_normalize": False,
"feature_normalize_mean": 0.,
"feature_normalize_std": 1.,
"data_min":data_min,
"mel_type":'htk',
"trim": trim,
"duration_max":1024,
"duration_min":24,
"exp_mag": exp_mag
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, train),
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, val),
],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, "test.csv"),
],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
} | OpenSeq2Seq-master | example_configs/text2speech/tacotron_float_8gpu.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Text2SpeechWavenet
from open_seq2seq.encoders import WavenetEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.losses import WavenetLoss
from open_seq2seq.data import WavenetDataLayer
from open_seq2seq.optimizers.lr_policies import exp_decay
from open_seq2seq.parts.convs2s.utils import gated_linear_units
base_model = Text2SpeechWavenet
base_params = {
"random_seed": 0,
"use_horovod": True,
"max_steps": 1000000,
"num_gpus": 8,
"batch_size_per_gpu": 1,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 500,
"eval_steps": 500,
"save_checkpoint_steps": 2500,
"logdir": "result/wavenet-LJ-float",
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"decay_steps": 20000,
"decay_rate": 0.1,
"use_staircase_decay": False,
"begin_decay_at": 45000,
"min_lr": 1e-5,
},
"dtype": tf.float32,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
"scale": 1e-6
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": [],
"encoder": WavenetEncoder,
"encoder_params": {
"layer_type": "conv1d",
"kernel_size": 3,
"strides": 1,
"padding": "VALID",
"blocks": 3,
"layers_per_block": 10,
"filters": 64,
"quantization_channels": 256
},
"decoder": FakeDecoder,
"loss": WavenetLoss,
"data_layer": WavenetDataLayer,
"data_layer_params": {
"num_audio_features": 80,
"dataset_location": "/data/LJSpeech-1.1-partitioned/wavs/"
}
}
train_params = {
"data_layer_params": {
"dataset_files": [
"/data/LJSpeech-1.1-partitioned/train.csv",
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
"/data/LJSpeech-1.1-partitioned/val.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
"/data/LJSpeech-1.1-partitioned/test.csv",
],
"shuffle": False,
},
}
interactive_infer_params = {
"data_layer_params": {
"dataset_files": [],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/text2speech/wavenet_float_8gpu.py |
# pylint: skip-file
import os
import tensorflow as tf
from open_seq2seq.models import Text2SpeechTacotron
from open_seq2seq.encoders import Tacotron2Encoder
from open_seq2seq.decoders import Tacotron2Decoder
from open_seq2seq.data import Text2SpeechDataLayer
from open_seq2seq.losses import Text2SpeechLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr, transformer_policy, exp_decay
base_model = Text2SpeechTacotron
dataset = "LJ"
dataset_location = "/data/speech/LJSpeech"
output_type = "both"
if dataset == "MAILABS":
trim = True
mag_num_feats = 401
train = "train.csv"
val = "val.csv"
batch_size = 80
elif dataset == "LJ":
trim = False
mag_num_feats = 513
train = "train_32.csv"
val = "val_32.csv"
batch_size = 96
else:
raise ValueError("Unknown dataset")
exp_mag = False
if output_type == "magnitude":
num_audio_features = mag_num_feats
data_min = 1e-5
elif output_type == "mel":
num_audio_features = 80
data_min = 1e-2
elif output_type == "both":
num_audio_features = {
"mel": 80,
"magnitude": mag_num_feats
}
data_min = {
"mel": 1e-2,
"magnitude": 1e-5,
}
exp_mag = True
else:
raise ValueError("Unknown param for output_type")
base_params = {
"random_seed": 0,
"use_horovod": False,
"max_steps": 100000,
"num_gpus": 1,
"batch_size_per_gpu": batch_size,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 500,
"eval_steps": 500,
"save_checkpoint_steps": 2500,
"save_to_tensorboard": True,
"logdir": "result/tacotron-LJ-mixed",
"max_grad_norm":1.,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"decay_steps": 20000,
"decay_rate": 0.1,
"use_staircase_decay": False,
"begin_decay_at": 45000,
"min_lr": 1e-5,
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 1e-6
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": Tacotron2Encoder,
"encoder_params": {
"cnn_dropout_prob": 0.5,
"rnn_dropout_prob": 0.,
'src_emb_size': 512,
"conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
}
],
"activation_fn": tf.nn.relu,
"num_rnn_layers": 1,
"rnn_cell_dim": 256,
"rnn_unidirectional": False,
"use_cudnn_rnn": True,
"rnn_type": tf.contrib.cudnn_rnn.CudnnLSTM,
"zoneout_prob": 0.,
"data_format": "channels_last",
},
"decoder": Tacotron2Decoder,
"decoder_params": {
"zoneout_prob": 0.,
"dropout_prob": 0.1,
'attention_type': 'location',
'attention_layer_size': 128,
'attention_bias': True,
'decoder_cell_units': 1024,
'decoder_cell_type': tf.nn.rnn_cell.LSTMCell,
'decoder_layers': 2,
'enable_prenet': True,
'prenet_layers': 2,
'prenet_units': 256,
'enable_postnet': True,
"postnet_keep_dropout_prob": 0.5,
"postnet_data_format": "channels_last",
"postnet_conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": -1, "padding": "SAME",
"activation_fn": None
}
],
"mask_decoder_sequence": True,
"parallel_iterations": 32,
},
"loss": Text2SpeechLoss,
"loss_params": {
"use_mask": True
},
"data_layer": Text2SpeechDataLayer,
"data_layer_params": {
"dataset": dataset,
"num_audio_features": num_audio_features,
"output_type": output_type,
"vocab_file": "open_seq2seq/test_utils/vocab_tts.txt",
'dataset_location':dataset_location,
"mag_power": 1,
"pad_EOS": True,
"feature_normalize": False,
"feature_normalize_mean": 0.,
"feature_normalize_std": 1.,
"data_min":data_min,
"mel_type":'htk',
"trim": trim,
"duration_max":1024,
"duration_min":24,
"exp_mag": exp_mag
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, train),
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, val),
],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, "test.csv"),
],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
}
interactive_infer_params = {
"data_layer_params": {
"dataset_files": [],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/text2speech/tacotron_mixed.py |
# pylint: skip-file
import os
import tensorflow as tf
from open_seq2seq.models import Text2SpeechTacotron
from open_seq2seq.encoders import Tacotron2Encoder
from open_seq2seq.decoders import Tacotron2Decoder
from open_seq2seq.data import Text2SpeechDataLayer
from open_seq2seq.losses import Text2SpeechLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr, transformer_policy, exp_decay
base_model = Text2SpeechTacotron
dataset = "LJ"
dataset_location = "/data/speech/LJSpeech"
output_type = "both"
if dataset == "MAILABS":
trim = True
mag_num_feats = 401
train = "train.csv"
val = "val.csv"
batch_size = 80
elif dataset == "LJ":
trim = False
mag_num_feats = 513
train = "train_32.csv"
val = "val_32.csv"
batch_size = 96
else:
raise ValueError("Unknown dataset")
exp_mag = False
if output_type == "magnitude":
num_audio_features = mag_num_feats
data_min = 1e-5
elif output_type == "mel":
num_audio_features = 80
data_min = 1e-2
elif output_type == "both":
num_audio_features = {
"mel": 80,
"magnitude": mag_num_feats
}
data_min = {
"mel": 1e-2,
"magnitude": 1e-5,
}
exp_mag = True
else:
raise ValueError("Unknown param for output_type")
base_params = {
"random_seed": 0,
"use_horovod": True,
"max_steps": 40000,
"batch_size_per_gpu": batch_size,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 500,
"eval_steps": 500,
"save_checkpoint_steps": 2500,
"save_to_tensorboard": True,
"logdir": "result/tacotron-LJ-mixed-8gpu",
"max_grad_norm":1.,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"decay_steps": 10000,
"decay_rate": 0.1,
"use_staircase_decay": False,
"begin_decay_at": 20000,
"min_lr": 1e-5,
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 1e-6
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": Tacotron2Encoder,
"encoder_params": {
"cnn_dropout_prob": 0.5,
"rnn_dropout_prob": 0.,
'src_emb_size': 512,
"conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
}
],
"activation_fn": tf.nn.relu,
"num_rnn_layers": 1,
"rnn_cell_dim": 256,
"rnn_unidirectional": False,
"use_cudnn_rnn": True,
"rnn_type": tf.contrib.cudnn_rnn.CudnnLSTM,
"zoneout_prob": 0.,
"data_format": "channels_last",
},
"decoder": Tacotron2Decoder,
"decoder_params": {
"zoneout_prob": 0.,
"dropout_prob": 0.1,
'attention_type': 'location',
'attention_layer_size': 128,
'attention_bias': True,
'decoder_cell_units': 1024,
'decoder_cell_type': tf.nn.rnn_cell.LSTMCell,
'decoder_layers': 2,
'enable_prenet': True,
'prenet_layers': 2,
'prenet_units': 256,
'enable_postnet': True,
"postnet_keep_dropout_prob": 0.5,
"postnet_data_format": "channels_last",
"postnet_conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": -1, "padding": "SAME",
"activation_fn": None
}
],
"mask_decoder_sequence": True,
"parallel_iterations": 32,
},
"loss": Text2SpeechLoss,
"loss_params": {
"use_mask": True
},
"data_layer": Text2SpeechDataLayer,
"data_layer_params": {
"dataset": dataset,
"num_audio_features": num_audio_features,
"output_type": output_type,
"vocab_file": "open_seq2seq/test_utils/vocab_tts.txt",
'dataset_location':dataset_location,
"mag_power": 1,
"pad_EOS": True,
"feature_normalize": False,
"feature_normalize_mean": 0.,
"feature_normalize_std": 1.,
"data_min":data_min,
"mel_type":'htk',
"trim": trim,
"duration_max":1024,
"duration_min":24,
"exp_mag": exp_mag
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, train),
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, val),
],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, "test.csv"),
],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/text2speech/tacotron_mixed_8gpu.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Text2SpeechWavenet
from open_seq2seq.encoders import WavenetEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.losses import WavenetLoss
from open_seq2seq.data import WavenetDataLayer
from open_seq2seq.optimizers.lr_policies import exp_decay
from open_seq2seq.parts.convs2s.utils import gated_linear_units
base_model = Text2SpeechWavenet
base_params = {
"random_seed": 0,
"use_horovod": True,
"max_steps": 1000000,
"num_gpus": 8,
"batch_size_per_gpu": 2,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 500,
"eval_steps": 500,
"save_checkpoint_steps": 2500,
"logdir": "result/wavenet-LJ-mixed",
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"decay_steps": 20000,
"decay_rate": 0.1,
"use_staircase_decay": False,
"begin_decay_at": 45000,
"min_lr": 1e-5,
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
"scale": 1e-6
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": [],
"encoder": WavenetEncoder,
"encoder_params": {
"layer_type": "conv1d",
"kernel_size": 3,
"strides": 1,
"padding": "VALID",
"blocks": 3,
"layers_per_block": 10,
"filters": 64,
"quantization_channels": 256
},
"decoder": FakeDecoder,
"loss": WavenetLoss,
"data_layer": WavenetDataLayer,
"data_layer_params": {
"num_audio_features": 80,
"dataset_location": "/data/LJSpeech-1.1-partitioned/wavs/"
}
}
train_params = {
"data_layer_params": {
"dataset_files": [
"/data/LJSpeech-1.1-partitioned/train.csv",
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
"/data/LJSpeech-1.1-partitioned/val.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
"/data/LJSpeech-1.1-partitioned/test.csv",
],
"shuffle": False,
},
}
interactive_infer_params = {
"data_layer_params": {
"dataset_files": [],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/text2speech/wavenet_mixed_8gpu.py |
# pylint: skip-file
import os
import tensorflow as tf
from open_seq2seq.models import Text2SpeechTacotron
from open_seq2seq.encoders import Tacotron2Encoder
from open_seq2seq.decoders import Tacotron2Decoder
from open_seq2seq.data import Text2SpeechDataLayer
from open_seq2seq.losses import Text2SpeechLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr, transformer_policy, exp_decay
base_model = Text2SpeechTacotron
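# Tacotron 2 with Global Style Tokens (GST): the encoder adds a reference encoder
# (conv stack + GRU) whose output attends over a bank of learned style token embeddings.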
dataset = "MAILABS"
dataset_location = "DL_REPLACE"  # placeholder: replace with the path to the MAILABS dataset
output_type = "both"
if dataset == "MAILABS":
trim = True
mag_num_feats = 401
train = "train.csv"
val = "val.csv"
batch_size = 32
elif dataset == "LJ":
trim = False
mag_num_feats = 513
train = "train_32.csv"
val = "val_32.csv"
batch_size = 48
else:
raise ValueError("Unknown dataset")
exp_mag = False
if output_type == "magnitude":
num_audio_features = mag_num_feats
data_min = 1e-5
elif output_type == "mel":
num_audio_features = 80
data_min = 1e-2
elif output_type == "both":
num_audio_features = {
"mel": 80,
"magnitude": mag_num_feats
}
data_min = {
"mel": 1e-2,
"magnitude": 1e-5,
}
exp_mag = True
else:
raise ValueError("Unknown param for output_type")
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_gpus": 2,
"num_epochs": 25,
"batch_size_per_gpu": batch_size,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 500,
"eval_steps": 500,
"save_checkpoint_steps": 2500,
"save_to_tensorboard": True,
"logdir": "result/tacotron-gst-8gpu",
"max_grad_norm":1.,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"decay_steps": 10000,
"decay_rate": 0.1,
"use_staircase_decay": False,
"begin_decay_at": 20000,
"min_lr": 1e-5,
},
"dtype": tf.float32,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 1e-6
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": Tacotron2Encoder,
"encoder_params": {
"cnn_dropout_prob": 0.5,
"rnn_dropout_prob": 0.,
'src_emb_size': 512,
"conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
}
],
"activation_fn": tf.nn.relu,
"num_rnn_layers": 1,
"rnn_cell_dim": 256,
"rnn_unidirectional": False,
"use_cudnn_rnn": True,
"rnn_type": tf.contrib.cudnn_rnn.CudnnLSTM,
"zoneout_prob": 0.,
"data_format": "channels_last",
"style_embedding_enable": True,
"style_embedding_params": {
"conv_layers": [
{
"kernel_size": [3,3], "stride": [2,2],
"num_channels": 32, "padding": "SAME"
},
{
"kernel_size": [3,3], "stride": [2,2],
"num_channels": 32, "padding": "SAME"
},
{
"kernel_size": [3,3], "stride": [2,2],
"num_channels": 64, "padding": "SAME"
},
{
"kernel_size": [3,3], "stride": [2,2],
"num_channels": 64, "padding": "SAME"
},
{
"kernel_size": [3,3], "stride": [2,2],
"num_channels": 128, "padding": "SAME"
},
{
"kernel_size": [3,3], "stride": [2,2],
"num_channels": 128, "padding": "SAME"
}
],
"num_rnn_layers": 1,
"rnn_cell_dim": 128,
"rnn_unidirectional": True,
"rnn_type": tf.nn.rnn_cell.GRUCell,
"emb_size": 512,
'attention_layer_size': 512,
"num_tokens": 32,
"num_heads": 8
}
},
"decoder": Tacotron2Decoder,
"decoder_params": {
"zoneout_prob": 0.,
"dropout_prob": 0.1,
'attention_type': 'location',
'attention_layer_size': 128,
'attention_bias': True,
'decoder_cell_units': 1024,
'decoder_cell_type': tf.nn.rnn_cell.LSTMCell,
'decoder_layers': 2,
'enable_prenet': True,
'prenet_layers': 2,
'prenet_units': 256,
'enable_postnet': True,
"postnet_keep_dropout_prob": 0.5,
"postnet_data_format": "channels_last",
"postnet_conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": -1, "padding": "SAME",
"activation_fn": None
}
],
"mask_decoder_sequence": True,
"parallel_iterations": 32,
},
"loss": Text2SpeechLoss,
"loss_params": {
"use_mask": True
},
"data_layer": Text2SpeechDataLayer,
"data_layer_params": {
"dataset": dataset,
"num_audio_features": num_audio_features,
"output_type": output_type,
"vocab_file": "open_seq2seq/test_utils/vocab_tts.txt",
'dataset_location':dataset_location,
"mag_power": 1,
"pad_EOS": True,
"feature_normalize": False,
"feature_normalize_mean": 0.,
"feature_normalize_std": 1.,
"data_min":data_min,
"mel_type":'htk',
"trim": trim,
"duration_max":1024,
"duration_min":24,
"exp_mag": exp_mag
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, train),
],
"shuffle": True,
"style_input": "wav"
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, val),
],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
"style_input": "wav"
},
}
infer_params = {
"data_layer_params": {
"dataset_files": ["generate.csv"],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
"style_input": "wav"
},
} | OpenSeq2Seq-master | example_configs/text2speech/tacotron_gst.py |
# pylint: skip-file
import os
import tensorflow as tf
from open_seq2seq.models import Text2SpeechTacotron
from open_seq2seq.encoders import Tacotron2Encoder
from open_seq2seq.decoders import Tacotron2Decoder
from open_seq2seq.data import Text2SpeechDataLayer
from open_seq2seq.losses import Text2SpeechLoss
from open_seq2seq.optimizers.lr_policies import fixed_lr, transformer_policy, exp_decay
base_model = Text2SpeechTacotron
dataset = "LJ"
dataset_location = "/data/speech/LJSpeech"
output_type = "both"
if dataset == "MAILABS":
trim = True
mag_num_feats = 401
train = "train.csv"
val = "val.csv"
batch_size = 32
elif dataset == "LJ":
trim = False
mag_num_feats = 513
train = "train_32.csv"
val = "val_32.csv"
batch_size = 48
else:
raise ValueError("Unknown dataset")
exp_mag = False
if output_type == "magnitude":
num_audio_features = mag_num_feats
data_min = 1e-5
elif output_type == "mel":
num_audio_features = 80
data_min = 1e-2
elif output_type == "both":
num_audio_features = {
"mel": 80,
"magnitude": mag_num_feats
}
data_min = {
"mel": 1e-2,
"magnitude": 1e-5,
}
exp_mag = True
else:
raise ValueError("Unknown param for output_type")
base_params = {
"random_seed": 0,
"use_horovod": False,
"max_steps": 100000,
"num_gpus": 1,
"batch_size_per_gpu": batch_size,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 500,
"eval_steps": 500,
"save_checkpoint_steps": 2500,
"save_to_tensorboard": True,
"logdir": "result/tacotron-LJ-float",
"max_grad_norm":1.,
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"decay_steps": 20000,
"decay_rate": 0.1,
"use_staircase_decay": False,
"begin_decay_at": 45000,
"min_lr": 1e-5,
},
"dtype": tf.float32,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 1e-6
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": Tacotron2Encoder,
"encoder_params": {
"cnn_dropout_prob": 0.5,
"rnn_dropout_prob": 0.,
'src_emb_size': 512,
"conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
}
],
"activation_fn": tf.nn.relu,
"num_rnn_layers": 1,
"rnn_cell_dim": 256,
"rnn_unidirectional": False,
"use_cudnn_rnn": True,
"rnn_type": tf.contrib.cudnn_rnn.CudnnLSTM,
"zoneout_prob": 0.,
"data_format": "channels_last",
},
"decoder": Tacotron2Decoder,
"decoder_params": {
"zoneout_prob": 0.,
"dropout_prob": 0.1,
'attention_type': 'location',
'attention_layer_size': 128,
'attention_bias': True,
'decoder_cell_units': 1024,
'decoder_cell_type': tf.nn.rnn_cell.LSTMCell,
'decoder_layers': 2,
'enable_prenet': True,
'prenet_layers': 2,
'prenet_units': 256,
'enable_postnet': True,
"postnet_keep_dropout_prob": 0.5,
"postnet_data_format": "channels_last",
"postnet_conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": -1, "padding": "SAME",
"activation_fn": None
}
],
"mask_decoder_sequence": True,
"parallel_iterations": 32,
},
"loss": Text2SpeechLoss,
"loss_params": {
"use_mask": True
},
"data_layer": Text2SpeechDataLayer,
"data_layer_params": {
"dataset": dataset,
"num_audio_features": num_audio_features,
"output_type": output_type,
"vocab_file": "open_seq2seq/test_utils/vocab_tts.txt",
'dataset_location':dataset_location,
"mag_power": 1,
"pad_EOS": True,
"feature_normalize": False,
"feature_normalize_mean": 0.,
"feature_normalize_std": 1.,
"data_min":data_min,
"mel_type":'htk',
"trim": trim,
"duration_max":1024,
"duration_min":24,
"exp_mag": exp_mag
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, train),
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, val),
],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, "test.csv"),
],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
}
interactive_infer_params = {
"data_layer_params": {
"dataset_files": [],
"duration_max":10000,
"duration_min":0,
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/text2speech/tacotron_float.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Text2SpeechWavenet
from open_seq2seq.encoders import WavenetEncoder
from open_seq2seq.decoders import FakeDecoder
from open_seq2seq.losses import WavenetLoss
from open_seq2seq.data import WavenetDataLayer
from open_seq2seq.optimizers.lr_policies import exp_decay
from open_seq2seq.parts.convs2s.utils import gated_linear_units
base_model = Text2SpeechWavenet
base_params = {
"random_seed": 0,
"use_horovod": False,
"max_steps": 1000000,
"num_gpus": 1,
"batch_size_per_gpu": 2,
"save_summaries_steps": 50,
"print_loss_steps": 50,
"print_samples_steps": 500,
"eval_steps": 500,
"save_checkpoint_steps": 2500,
"logdir": "result/wavenet-LJ-float",
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"decay_steps": 20000,
"decay_rate": 0.1,
"use_staircase_decay": False,
"begin_decay_at": 45000,
"min_lr": 1e-5,
},
"dtype": tf.float32,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
"scale": 1e-6
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": [],
"encoder": WavenetEncoder,
"encoder_params": {
"layer_type": "conv1d",
"kernel_size": 3,
"strides": 1,
"padding": "VALID",
"blocks": 3,
"layers_per_block": 10,
"filters": 64,
"quantization_channels": 256
},
"decoder": FakeDecoder,
"loss": WavenetLoss,
"data_layer": WavenetDataLayer,
"data_layer_params": {
"num_audio_features": 80,
"dataset_location": "data/speech/LJSpeech/wavs/"
}
}
train_params = {
"data_layer_params": {
"dataset_files": [
"data/speech/LJSpeech/train.csv",
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
"data/speech/LJSpeech/val.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
"data/speech/LJSpeech/test.csv",
],
"shuffle": False,
},
}
interactive_infer_params = {
"data_layer_params": {
"dataset_files": [],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/text2speech/wavenet_float.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import ListenAttendSpellEncoder
from open_seq2seq.decoders import JointCTCAttentionDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import MultiTaskCTCEntropyLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
from open_seq2seq.decoders import ListenAttendSpellDecoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 50,
"num_gpus": 8,
"batch_size_per_gpu": 64,
"iter_size": 1,
"save_summaries_steps": 1100,
"print_loss_steps": 10,
"print_samples_steps": 200,
"eval_steps": 1100,
"save_checkpoint_steps": 1100,
"logdir": "jca_log_folder",
"optimizer": "Adam",
"optimizer_params": {
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 1e-3,
"power": 2.0,
"min_lr": 1e-5
},
"max_grad_norm": 1.0,
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0001
},
#"dtype": "mixed",
#"loss_scaling": "Backoff",
"dtype": tf.float32,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": ListenAttendSpellEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "conv1d", "repeat": 1,
"kernel_size": [11], "stride": [2],
"num_channels": 256, "padding": "SAME",
"dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 7,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [11], "stride": [2],
"num_channels": 384, "padding": "SAME",
"dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [11], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 4,
"kernel_size": [11], "stride": [1],
"num_channels": 768, "padding": "SAME",
"dropout_keep_prob": 0.7,
},
],
"recurrent_layers": [],
"dropout_keep_prob": 0.8,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": lambda x: tf.minimum(tf.nn.relu(x), 20.0),
"data_format": "channels_last",
},
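# Joint CTC-attention decoding: a LAS-style attentional decoder and a CTC head
# share the encoder and are trained together.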
"decoder": JointCTCAttentionDecoder,
"decoder_params": {
"attn_decoder": ListenAttendSpellDecoder,
"attn_decoder_params": {
"tgt_emb_size": 256,
"pos_embedding": True,
"attention_params": {
"attention_dim": 256,
"attention_type": "chorowski",
"use_coverage": True,
"num_heads": 1,
"plot_attention": True,
},
"rnn_type": "lstm",
"hidden_dim": 512,
"num_layers": 1,
"dropout_keep_prob": 0.8,
},
"ctc_decoder": FullyConnectedCTCDecoder,
"ctc_decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
"use_language_model": False,
},
"beam_search_params": {
"beam_width": 4,
},
"language_model_params": {
# params for decoding the sequence with language model
"use_language_model": False,
},
},
"loss": MultiTaskCTCEntropyLoss,
"loss_params": {
"seq_loss_params": {
"offset_target_by_one": False,
"average_across_timestep": True,
"do_mask": True
},
"ctc_loss_params": {
},
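# lambda_value balances the CTC term against the attention cross-entropy term
# in the combined loss.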
"lambda_value": 0.25,
}
}
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-train-clean-100.csv",
"data/librispeech/librivox-train-clean-360.csv",
"data/librispeech/librivox-train-other-500.csv",
],
"max_duration": 16.7,
"shuffle": True,
"autoregressive": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
"autoregressive": True,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-test-clean.csv",
],
"shuffle": False,
"autoregressive": True,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/jca_large_8gpus.py |
# pylint: skip-file
import os
import tensorflow as tf
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.encoders import DeepSpeech2Encoder
from open_seq2seq.losses import CTCLoss
from open_seq2seq.models import Speech2Text
from open_seq2seq.optimizers.lr_policies import exp_decay
base_model = Speech2Text
dataset_location = os.path.expanduser("~/datasets/speech/librispeech/")
### INPUT FEATURES CONFIG ####
# input_type = "spectrogram"
# num_audio_features = 96
input_type = "mfcc"
num_audio_features = 13 # primary MFCC coefficients
### PREPROCESSING CACHING CONFIG ###
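# Preprocessed features can be cached to disk (here in HDF5) and reused across runs
# instead of being recomputed every epoch.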
train_cache_features = False
eval_cache_features = True
cache_format = 'hdf5'
cache_regenerate = False
### RNN CONFIG ####
num_rnn_layers = 2
rnn_cell_dim = 512
rnn_type = "cudnn_lstm"
rnn_unidirectional = True
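# Two unidirectional cuDNN LSTM layers of 512 units feed the fully connected CTC decoder.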
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_epochs": 50,
"num_gpus": 1,
"batch_size_per_gpu": 64,
"save_summaries_steps": 100,
"print_loss_steps": 50,
"print_samples_steps": 250,
"eval_steps": 250,
"save_checkpoint_steps": 250,
"logdir": "logs/librispeech-" +
rnn_type + str(num_rnn_layers) + "x" + str(rnn_cell_dim) + "-" +
input_type + str(num_audio_features),
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 0.001,
"begin_decay_at": 0,
"decay_steps": 500,
"decay_rate": 0.9,
"use_staircase_decay": True,
"min_lr": 1e-8,
},
# "dtype": tf.float32,
"dtype": "mixed",
"max_grad_norm": 0.25,
"loss_scaling": "Backoff",
# weight decay
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0005
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": DeepSpeech2Encoder,
"encoder_params": {
# CONV layers
"conv_layers": [  # no conv layers are needed with MFCC input
],
# RNN layers
"num_rnn_layers": num_rnn_layers,
"rnn_cell_dim": rnn_cell_dim,
"use_cudnn_rnn": True if 'cudnn' in rnn_type else False,
"rnn_type": rnn_type,
"rnn_unidirectional": rnn_unidirectional,
"row_conv": False,
# FC layers
"n_hidden": 512,
"dropout_keep_prob": 0.5,
"activation_fn": tf.nn.relu,
"data_format": "channels_first",
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"use_language_model": False,
# params for decoding the sequence with language model
"beam_width": 512,
"alpha": 2.0,
"beta": 1.0,
"decoder_library_path":
"ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
"lm_path": "language_model/4-gram.binary",
"trie_path": "language_model/trie.binary",
"alphabet_config_path":
"open_seq2seq/test_utils/toy_speech_data/vocab.txt",
},
"loss": CTCLoss,
"loss_params": {},
}
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"cache_features": train_cache_features,
"cache_format": cache_format,
"cache_regenerate": cache_regenerate,
"num_audio_features": num_audio_features,
"input_type": input_type,
"augmentation": {
'time_stretch_ratio': 0.05,
'noise_level_min': -90,
'noise_level_max': -60
},
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
os.path.join(dataset_location, "librivox-train-clean-100.csv"),
os.path.join(dataset_location, "librivox-train-clean-360.csv"),
],
"max_duration": 16.7,
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"cache_features": eval_cache_features,
"cache_format": cache_format,
"cache_regenerate": cache_regenerate,
"num_audio_features": num_audio_features,
"input_type": input_type,
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
os.path.join(dataset_location, "librivox-dev-clean.csv"),
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/lstm_small_1gpu.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import DeepSpeech2Encoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": False,
#"num_epochs": 200,
"max_steps": 1000,
"num_gpus": 2,
"batch_size_per_gpu": 8,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 100,
"eval_steps": 500,
"save_checkpoint_steps": 500,
"logdir": "ds2_log/toy",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.001,
"power": 2,
},
"larc_params": {
"larc_eta": 0.001,
},
"dtype": tf.float32,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": DeepSpeech2Encoder,
"encoder_params": {
"conv_layers": [
{
"kernel_size": [11, 41], "stride": [2, 2],
"num_channels": 32, "padding": "SAME"
},
{
"kernel_size": [11, 21], "stride": [1, 2],
"num_channels": 64, "padding": "SAME"
},
{
"kernel_size": [11, 21], "stride": [1, 2],
"num_channels": 96, "padding": "SAME"
},
],
"data_format": "BFTC",  # one of: "channels_last", "channels_first", "BCTF", "BTFC", "BCFT", "BFTC"
"n_hidden": 256,
"rnn_cell_dim": 256,
"rnn_type": "gru",
"num_rnn_layers": 1,
"rnn_unidirectional": False,
"row_conv": False,
"row_conv_width": 8,
"use_cudnn_rnn": True,
"dropout_keep_prob": 1.0,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
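# Clipped ReLU (ReLU capped at 20), as used in Deep Speech 2.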
"activation_fn": lambda x: tf.minimum(tf.nn.relu(x), 20.0),
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
"use_language_model": False,
# params for decoding the sequence with language model
"beam_width": 64,
"alpha": 1.0,
"beta": 1.5,
"decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
"lm_path": "language_model/4-gram.binary",
"trie_path": "language_model/trie.binary",
"alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
},
"loss": CTCLoss,
"loss_params": {},
}
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 160,
"input_type": "spectrogram",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"open_seq2seq/test_utils/toy_speech_data/toy_data.csv",
],
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 160,
"input_type": "spectrogram",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"open_seq2seq/test_utils/toy_speech_data/toy_data.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 160,
"input_type": "spectrogram",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"open_seq2seq/test_utils/toy_speech_data/toy_data.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/ds2_toy_config.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import DeepSpeech2Encoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_gpus": 8,
"batch_size_per_gpu": 16,
"num_epochs": 50,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 5000,
"eval_steps": 5000,
"save_checkpoint_steps": 1000,
"logdir": "experiments/librispeech",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.001,
"power": 0.5,
},
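# LARC (layer-wise adaptive rate control): larc_eta is the per-layer trust coefficient.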
"larc_params": {
"larc_eta": 0.001,
},
"dtype": tf.float32,
# weight decay
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0005
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": DeepSpeech2Encoder,
"encoder_params": {
"conv_layers": [
{
"kernel_size": [11, 41], "stride": [2, 2],
"num_channels": 32, "padding": "SAME"
},
{
"kernel_size": [11, 21], "stride": [1, 2],
"num_channels": 32, "padding": "SAME"
},
],
"num_rnn_layers": 5,
"rnn_cell_dim": 800,
"use_cudnn_rnn": True,
"rnn_type": "cudnn_gru",
"rnn_unidirectional": False,
"row_conv": False,
"n_hidden": 1600,
"dropout_keep_prob": 0.5,
"activation_fn": tf.nn.relu,
"data_format": "channels_first",
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"use_language_model": False,
# params for decoding the sequence with language model
"beam_width": 512,
"alpha": 2.0,
"beta": 1.0,
"decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
"lm_path": "language_model/4-gram.binary",
"trie_path": "language_model/trie.binary",
"alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
},
"loss": CTCLoss,
"loss_params": {},
}
train_params = {
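# Training uses on-the-fly augmentation (random time stretching and additive noise,
# with noise levels given in dB); see the "augmentation" block below.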
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 160,
"input_type": "spectrogram",
"augmentation": {'time_stretch_ratio': 0.05,
'noise_level_min': -90,
'noise_level_max': -60},
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-train-clean-100.csv",
"data/librispeech/librivox-train-clean-360.csv",
"data/librispeech/librivox-train-other-500.csv",
],
"max_duration": 16.7,
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 160,
"input_type": "spectrogram",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/ds2_large_8gpus.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import DeepSpeech2Encoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text
# data_root = "[REPLACE THIS WITH THE PATH TO YOUR LIBRISPEECH DATA]"
data_root = "/raid/data"
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_gpus": 8,
"batch_size_per_gpu": 16,
# "max_steps": 1000,
"num_epochs": 50,
"save_summaries_steps": 100,
"print_loss_steps": 100,
"print_samples_steps": 5000,
"eval_steps": 5000,
"save_checkpoint_steps": 10000,
"logdir": "experiments/librispeech",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.001,
"power": 0.5,
},
"larc_params": {
"larc_eta": 0.001,
},
"dtype": "mixed",
# weight decay
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0005
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": DeepSpeech2Encoder,
"encoder_params": {
"conv_layers": [
{
"kernel_size": [11, 41], "stride": [2, 2],
"num_channels": 32, "padding": "SAME"
},
{
"kernel_size": [11, 21], "stride": [1, 2],
"num_channels": 32, "padding": "SAME"
},
],
"num_rnn_layers": 5,
"rnn_cell_dim": 800,
"use_cudnn_rnn": True,
"rnn_type": "cudnn_gru",
"rnn_unidirectional": False,
"row_conv": False,
"n_hidden": 1600,
"dropout_keep_prob": 0.5,
"activation_fn": tf.nn.relu,
# "data_format": "BCFT",  # one of: "channels_first", "BCTF", "BTFC", "BCFT", "BFTC"
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"use_language_model": False,
# params for decoding the sequence with language model
"beam_width": 512,
"alpha": 2.0,
"beta": 1.0,
"decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
"lm_path": "language_model/4-gram.binary",
"trie_path": "language_model/trie.binary",
"alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
},
"loss": CTCLoss,
"loss_params": {},
}
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 160,
"input_type": "spectrogram",
"augmentation": {'time_stretch_ratio': 0.05,
'noise_level_min': -90,
'noise_level_max': -60},
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
data_root + "/librispeech/librivox-train-clean-100.csv",
data_root + "/librispeech/librivox-train-clean-360.csv",
data_root + "/librispeech/librivox-train-other-500.csv",
],
"max_duration": 16.7,
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 160,
"input_type": "spectrogram",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
data_root + "/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/ds2_large_8gpus_mp.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text
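# Wave2Letter+-style model (per the file name): a fully convolutional TDNN encoder
# whose kernel widths and channel counts grow with depth, trained with CTC loss.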
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 200,
"num_gpus": 8,
"batch_size_per_gpu": 32,
"iter_size": 1,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 2200,
"eval_steps": 2200,
"save_checkpoint_steps": 1100,
"num_checkpoints": 5,
"logdir": "w2l_log_folder",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.05,
"power": 2.0,
},
"larc_params": {
"larc_eta": 0.001,
},
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.001
},
"dtype": tf.float32,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "conv1d", "repeat": 1,
"kernel_size": [11], "stride": [2],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [13], "stride": [1],
"num_channels": 384, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [21], "stride": [1],
"num_channels": 640, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [25], "stride": [1],
"num_channels": 768, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [29], "stride": [1],
"num_channels": 896, "padding": "SAME",
"dilation":[2], "dropout_keep_prob": 0.6,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [1], "stride": [1],
"num_channels": 1024, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.6,
}
],
"dropout_keep_prob": 0.7,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": lambda x: tf.minimum(tf.nn.relu(x), 20.0),
"data_format": "channels_last",
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
"use_language_model": False,
# params for decoding the sequence with language model
"beam_width": 512,
"alpha": 2.0,
"beta": 1.5,
"decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
"lm_path": "language_model/4-gram.binary",
"trie_path": "language_model/trie.binary",
"alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
},
"loss": CTCLoss,
"loss_params": {},
}
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-train-clean-100.csv",
"data/librispeech/librivox-train-clean-360.csv",
"data/librispeech/librivox-train-other-500.csv",
],
"max_duration": 16.7,
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-test-clean.csv",
],
"shuffle": False,
},
}
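# With "use_horovod": True, this config expects one process per GPU launched
# under MPI, e.g. something like
#   mpiexec -np 8 python run.py --config_file=... --mode=train_eval
# (hypothetical command line; the exact invocation depends on the local MPI /
# Horovod setup described in the repository documentation).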
| OpenSeq2Seq-master | example_configs/speech2text/w2lplus_large_8gpus.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 200,
"num_gpus": 8,
"batch_size_per_gpu": 32,
"iter_size": 1,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 2200,
"eval_steps": 2200,
"save_checkpoint_steps": 1100,
"logdir": "w2l_log_folder",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.05,
"power": 2.0,
},
"larc_params": {
"larc_eta": 0.001,
},
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.001
},
"dtype": tf.float32,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "conv1d", "repeat": 3,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [13], "stride": [1],
"num_channels": 384, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [21], "stride": [1],
"num_channels": 640, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [25], "stride": [1],
"num_channels": 768, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [29], "stride": [1],
"num_channels": 896, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.6,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [1], "stride": [1],
"num_channels": 1024, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.6,
}
],
"dropout_keep_prob": 0.7,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": lambda x: tf.minimum(tf.nn.relu(x), 20.0),
"data_format": "channels_last",
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
"use_language_model": False,
# params for decoding the sequence with language model
"beam_width": 512,
"alpha": 2.0,
"beta": 1.5,
"decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
"lm_path": "language_model/4-gram.binary",
"trie_path": "language_model/trie.binary",
"alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
},
"loss": CTCLoss,
"loss_params": {},
}
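# Notes on the encoder above: each "convnet_layers" entry with "repeat": N is
# expanded by TDNNEncoder into N stacked convolution blocks with the listed
# kernel/stride/channel settings, and the activation lambda clips the ReLU output
# at 20.0, a common trick for keeping activations bounded in speech models.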
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-train-clean-100.csv",
"data/librispeech/librivox-train-clean-360.csv",
"data/librispeech/librivox-train-other-500.csv",
],
"max_duration": 16.7,
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-test-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/w2l_large_8gpus.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data.speech2text.speech2text import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
from open_seq2seq.optimizers.novograd import NovoGrad
residual_dense = False # Enable or disable Dense Residual
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 50,
"num_gpus": 1,
"batch_size_per_gpu": 32,
"iter_size": 1,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 2200,
"eval_steps": 2200,
"save_checkpoint_steps": 1100,
"logdir": "jasper_log_folder",
"num_checkpoints": 2,
"optimizer": NovoGrad,
"optimizer_params": {
"beta1": 0.95,
"beta2": 0.98,
"epsilon": 1e-08,
"weight_decay": 0.001,
"grad_averaging": False,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.02,
"min_lr": 1e-5,
"power": 2.0,
},
"larc_params": {
"larc_eta": 0.001,
},
"dtype": tf.float32,
# "loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "sep_conv1d", "repeat": 1,
"kernel_size": [11], "stride": [2],
"num_channels": 256, "padding": "SAME",
"dilation": [1]
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [13], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [13], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [21], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [21], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [25], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 3,
"kernel_size": [25], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation": [1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 1,
"kernel_size": [29], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation": [2]
},
{
"type": "sep_conv1d", "repeat": 1,
"kernel_size": [1], "stride": [1],
"num_channels": 1024, "padding": "SAME",
"dilation": [1]
}
],
"dropout_keep_prob": 1.0,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": tf.nn.relu,
"data_format": "channels_last",
"use_conv_mask": True,
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
"use_language_model": False,
"infer_logits_to_pickle": False,
},
"loss": CTCLoss,
"loss_params": {},
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"norm_per_feature": True,
"window": "hanning",
"precompute_mel_basis": True,
"sample_freq": 16000,
"pad_to": 16,
"dither": 1e-5,
"backend": "librosa",
},
}
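# NovoGrad (the optimizer selected above) keeps layer-wise second moments rather
# than per-parameter ones and applies decoupled weight decay via "weight_decay",
# which is presumably why this config carries no separate "regularizer" entry.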
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"augmentation": {
'n_freq_mask': 2,
'n_time_mask': 2,
'width_freq_mask': 6,
'width_time_mask': 6,
},
"dataset_files": [
"/data/librispeech/librivox-train-clean-100.csv",
"/data/librispeech/librivox-train-clean-360.csv",
"/data/librispeech/librivox-train-other-500.csv"
],
"max_duration": 16.7,
"shuffle": True,
},
}
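# The "augmentation" block above applies SpecAugment-style masking: up to
# n_freq_mask frequency bands and n_time_mask time spans, each at most
# width_freq_mask / width_time_mask bins wide, are zeroed out per utterance.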
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"dataset_files": [
"/data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"dataset_files": [
"/data/librispeech/librivox-test-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/jasper-Mini-for-Jetson.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedSCDecoder
from open_seq2seq.data import SpeechCommandsDataLayer
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Image2Label
dataset_version = "v1-12"
dataset_location = "/data/speech-commands/v1"
if dataset_version == "v1-12":
num_labels = 12
elif dataset_version == "v1-30":
num_labels = 30
else:
num_labels = 35
dataset_location = "/data/speech-commands/v2"
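# "v1-12" and "v1-30" select the 12- and 30-label splits of Google Speech
# Commands v1; any other value falls through to the 35-keyword v2 dataset and
# switches dataset_location accordingly.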
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 200,
"num_gpus": 8,
"batch_size_per_gpu": 64,
"iter_size": 1,
"save_summaries_steps": 10000,
"print_loss_steps": 100,
"print_samples_steps": 1000,
"eval_steps": 1000,
"save_checkpoint_steps": 10000,
"logdir": "result/jasper_commands",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.95,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.05,
"min_lr": 1e-5,
"power": 2.0,
},
"larc_params": {
"larc_eta": 0.001,
},
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.001
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "conv2d", "repeat": 1,
"kernel_size": [11,1], "stride": [2,1],
"num_channels": 256, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [11,1], "stride": [1,1],
"num_channels": 256, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [11,1], "stride": [1,1],
"num_channels": 256, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 1,
"kernel_size": [13,1], "stride": [2,1],
"num_channels": 384, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [13,1], "stride": [1,1],
"num_channels": 384, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [13,1], "stride": [1,1],
"num_channels": 384, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 1,
"kernel_size": [17,1], "stride": [2,1],
"num_channels": 512, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [17,1], "stride": [1,1],
"num_channels": 512, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [17,1], "stride": [1,1],
"num_channels": 512, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [21,1], "stride": [1,1],
"num_channels": 640, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.7,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [21,1], "stride": [1,1],
"num_channels": 640, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.7,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [25,1], "stride": [1,1],
"num_channels": 768, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.7,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [25,1], "stride": [1,1],
"num_channels": 768, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.7,
"residual": True
},
{
"type": "conv2d", "repeat": 1,
"kernel_size": [29,1], "stride": [1,1],
"num_channels": 896, "padding": "SAME",
"dilation":[2,1], "dropout_keep_prob": 0.6,
},
{
"type": "conv2d", "repeat": 1,
"kernel_size": [1,1], "stride": [1,1],
"num_channels": 1024, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.6,
}
],
"dropout_keep_prob": 0.7,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": tf.nn.relu,
"data_format": "channels_last",
},
"decoder": FullyConnectedSCDecoder,
"decoder_params": {
"output_dim": num_labels,
},
"loss": CrossEntropyLoss,
"data_layer": SpeechCommandsDataLayer,
"data_layer_params": {
"dataset_location": dataset_location,
"num_audio_features": 128,
"audio_length": 128,
"num_labels": num_labels,
"cache_data": True,
"augment_data": True,
"model_format": "jasper"
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
dataset_version + "-train.txt"
],
"shuffle": True,
"repeat": True
},
}
eval_params = {
"batch_size_per_gpu": 4,
"data_layer_params": {
"dataset_files": [
dataset_version + "-val.txt"
],
"shuffle": False,
"repeat": False
},
}
| OpenSeq2Seq-master | example_configs/speech2text/jasper_commands_8gpu.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data.speech2text.speech2text import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
from open_seq2seq.optimizers.novograd import NovoGrad
residual_dense = True # Enable or disable Dense Residual
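# When residual_dense is True, each residual block receives skip connections from
# the outputs of all preceding blocks (the "Dense Residual" topology described in
# the Jasper paper) rather than only from the immediately preceding block.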
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 400,
"num_gpus": 8,
"batch_size_per_gpu": 32,
"iter_size": 1,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 2200,
"eval_steps": 2200,
"save_checkpoint_steps": 1100,
"logdir": "jasper_log_folder",
"num_checkpoints": 2,
"optimizer": NovoGrad,
"optimizer_params": {
"beta1": 0.95,
"beta2": 0.98,
"epsilon": 1e-08,
"weight_decay": 0.001,
"grad_averaging": False,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.02,
"min_lr": 1e-5,
"power": 2.0,
},
"larc_params": {
"larc_eta": 0.001,
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "conv1d", "repeat": 1,
"kernel_size": [11], "stride": [2],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [13], "stride": [1],
"num_channels": 384, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [13], "stride": [1],
"num_channels": 384, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [21], "stride": [1],
"num_channels": 640, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [21], "stride": [1],
"num_channels": 640, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [25], "stride": [1],
"num_channels": 768, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [25], "stride": [1],
"num_channels": 768, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [29], "stride": [1],
"num_channels": 896, "padding": "SAME",
"dilation":[2], "dropout_keep_prob": 0.6,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [1], "stride": [1],
"num_channels": 1024, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.6,
}
],
"dropout_keep_prob": 0.7,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": tf.nn.relu,
"data_format": "channels_last",
"use_conv_mask": True,
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
"use_language_model": False,
# params for decoding the sequence with language model
# "beam_width": 2048,
# "alpha": 2.0,
# "beta": 1.5,
# "decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
# "lm_path": "language_model/4-gram.binary",
# "trie_path": "language_model/trie.binary",
# "alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"infer_logits_to_pickle": False,
},
"loss": CTCLoss,
"loss_params": {},
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"norm_per_feature": True,
"window": "hanning",
"precompute_mel_basis": True,
"sample_freq": 16000,
"pad_to": 16,
"dither": 1e-5,
"backend": "librosa"
},
}
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"augmentation": {
'speed_perturbation_ratio': [0.9, 1., 1.1],
},
"dataset_files": [
"/data/librispeech/librivox-train-clean-100.csv",
"/data/librispeech/librivox-train-clean-360.csv",
"/data/librispeech/librivox-train-other-500.csv"
],
"max_duration": 16.7,
"shuffle": True,
},
}
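# 'speed_perturbation_ratio' above resamples each training utterance at a factor
# drawn from {0.9, 1.0, 1.1}, a standard audio augmentation that slightly changes
# both tempo and pitch.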
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"dataset_files": [
"/data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"dataset_files": [
"/data/librispeech/librivox-test-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/jasper10x5_LibriSpeech_nvgrad.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 200,
"num_gpus": 8,
"batch_size_per_gpu": 64,
"iter_size": 1,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 2200,
"eval_steps": 2200,
"save_checkpoint_steps": 1100,
"num_checkpoints": 5,
"logdir": "w2l_log_folder",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.05,
"power": 2.0,
},
"larc_params": {
"larc_eta": 0.001,
},
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.001
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "conv1d", "repeat": 1,
"kernel_size": [11], "stride": [2],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [13], "stride": [1],
"num_channels": 384, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [21], "stride": [1],
"num_channels": 640, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [25], "stride": [1],
"num_channels": 768, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [29], "stride": [1],
"num_channels": 896, "padding": "SAME",
"dilation":[2], "dropout_keep_prob": 0.6,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [1], "stride": [1],
"num_channels": 1024, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.6,
}
],
"dropout_keep_prob": 0.7,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": lambda x: tf.minimum(tf.nn.relu(x), 20.0),
"data_format": "channels_last",
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
# "use_language_model": False,
# params for decoding the sequence with language model
# "beam_width": 512,
# "alpha": 2.5,
# "beta": 0.,
# "decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
# "lm_path": "language_model/4-gram.binary",
# "trie_path": "language_model/trie.binary",
# "alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"infer_logits_to_pickle": False,
},
"loss": CTCLoss,
"loss_params": {},
}
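# "dtype": "mixed" runs most ops in float16 while keeping float32 master weights,
# and "loss_scaling": "Backoff" uses dynamic loss scaling that shrinks the scale
# on overflow and gradually grows it back, as described in the OpenSeq2Seq
# mixed-precision documentation.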
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-train-clean-100.csv",
"data/librispeech/librivox-train-clean-360.csv",
"data/librispeech/librivox-train-other-500.csv",
],
"max_duration": 16.7,
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-test-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/w2lplus_large_8gpus_mp.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import DeepSpeech2Encoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_gpus": 4,
"batch_size_per_gpu": 32,
"num_epochs": 50,
"save_summaries_steps": 1000,
"print_loss_steps": 10,
"print_samples_steps": 10000,
"eval_steps": 10000,
"save_checkpoint_steps": 1000,
"logdir": "experiments/ds2/base_000",
"optimizer": "Adam",
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.0002,
"power": 0.5
},
# weight decay
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0005
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": DeepSpeech2Encoder,
"encoder_params": {
"conv_layers": [
{
"kernel_size": [11, 41], "stride": [2, 2],
"num_channels": 32, "padding": "SAME"
},
{
"kernel_size": [11, 21], "stride": [1, 2],
"num_channels": 64, "padding": "SAME"
},
{
"kernel_size": [11, 21], "stride": [1, 2],
"num_channels": 96, "padding": "SAME"
},
],
"num_rnn_layers": 3,
"rnn_cell_dim": 1024,
"use_cudnn_rnn": True,
"rnn_type": "cudnn_gru",
"rnn_unidirectional": True,
"row_conv": True,
"row_conv_width": 8,
"n_hidden": 2048,
"dropout_keep_prob": 0.5,
"activation_fn": tf.nn.relu,
"data_format": "channels_first",
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"use_language_model": False,
# params for decoding the sequence with language model
"beam_width": 512,
"alpha": 2.0,
"beta": 1.0,
"decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
"lm_path": "language_model/4-gram.binary",
"trie_path": "language_model/trie.binary",
"alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
},
"loss": CTCLoss,
"loss_params": {},
}
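# "row_conv" enables the lookahead (row) convolution from the Deep Speech 2 paper:
# with "rnn_unidirectional": True it lets each frame see "row_conv_width" future
# frames without requiring a full bidirectional pass.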
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 160,
"input_type": "spectrogram",
"augmentation": {'time_stretch_ratio': 0.05,
'noise_level_min': -90,
'noise_level_max': -60},
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-train-clean-100.csv",
"data/librispeech/librivox-train-clean-360.csv",
"data/librispeech/librivox-train-other-500.csv"
],
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 160,
"input_type": "spectrogram",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-dev-clean.csv"
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/ds2_medium_4gpus.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.encoders import DeepSpeech2Encoder
from open_seq2seq.losses import CTCLoss
from open_seq2seq.models import Speech2Text
from open_seq2seq.optimizers.lr_policies import exp_decay
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_epochs": 12,
"num_gpus": 1,
"batch_size_per_gpu": 32,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 5000,
"eval_steps": 5000,
"save_checkpoint_steps": 1000,
"logdir": "experiments/librispeech-quick",
"optimizer": "Adam",
"optimizer_params": {},
"lr_policy": exp_decay,
"lr_policy_params": {
"learning_rate": 0.0001,
"begin_decay_at": 0,
"decay_steps": 5000,
"decay_rate": 0.9,
"use_staircase_decay": True,
"min_lr": 0.0,
},
"dtype": tf.float32,
# weight decay
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.0005
},
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": DeepSpeech2Encoder,
"encoder_params": {
"conv_layers": [
{
"kernel_size": [11, 41], "stride": [2, 2],
"num_channels": 32, "padding": "SAME"
},
{
"kernel_size": [11, 21], "stride": [1, 2],
"num_channels": 32, "padding": "SAME"
}
],
"num_rnn_layers": 2,
"rnn_cell_dim": 512,
"use_cudnn_rnn": True,
"rnn_type": "cudnn_gru",
"rnn_unidirectional": False,
"row_conv": False,
"n_hidden": 1024,
"dropout_keep_prob": 0.5,
"activation_fn": tf.nn.relu,
"data_format": "channels_first",
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"use_language_model": False,
# params for decoding the sequence with language model
"beam_width": 512,
"alpha": 2.0,
"beta": 1.0,
"decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
"lm_path": "language_model/4-gram.binary",
"trie_path": "language_model/trie.binary",
"alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
},
"loss": CTCLoss,
"loss_params": {},
}
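# exp_decay with "use_staircase_decay": True multiplies the learning rate by
# decay_rate once every decay_steps steps (i.e. lr = learning_rate *
# decay_rate ** floor(step / decay_steps)), never dropping below min_lr.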
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 96,
"input_type": "spectrogram",
"augmentation": {
'time_stretch_ratio': 0.05,
'noise_level_min': -90,
'noise_level_max': -60
},
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-train-clean-100.csv",
"data/librispeech/librivox-train-clean-360.csv",
],
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 96,
"input_type": "spectrogram",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/ds2_small_1gpu.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data.speech2text.speech2text import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
from open_seq2seq.optimizers.novograd import NovoGrad
residual_dense = True # Enable or disable Dense Residual
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 400,
"num_gpus": 8,
"batch_size_per_gpu": 32,
"iter_size": 1,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 2200,
"eval_steps": 2200,
"save_checkpoint_steps": 1100,
"logdir": "jasper_log_folder",
"num_checkpoints": 2,
"optimizer": NovoGrad,
"optimizer_params": {
"beta1": 0.95,
"beta2": 0.98,
"epsilon": 1e-08,
"weight_decay": 0.001,
"grad_averaging": False,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.02,
"min_lr": 1e-5,
"power": 2.0,
},
"larc_params": {
"larc_eta": 0.001,
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "conv1d", "repeat": 1,
"kernel_size": [11], "stride": [2],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [13], "stride": [1],
"num_channels": 384, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [13], "stride": [1],
"num_channels": 384, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [21], "stride": [1],
"num_channels": 640, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [21], "stride": [1],
"num_channels": 640, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [25], "stride": [1],
"num_channels": 768, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 5,
"kernel_size": [25], "stride": [1],
"num_channels": 768, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [29], "stride": [1],
"num_channels": 896, "padding": "SAME",
"dilation":[2], "dropout_keep_prob": 0.6,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [1], "stride": [1],
"num_channels": 1024, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.6,
}
],
"dropout_keep_prob": 0.7,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": tf.nn.relu,
"data_format": "channels_last",
"use_conv_mask": True,
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
"use_language_model": False,
"infer_logits_to_pickle": False,
},
"loss": CTCLoss,
"loss_params": {},
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"norm_per_feature": True,
"window": "hanning",
"precompute_mel_basis": True,
"sample_freq": 16000,
"pad_to": 16,
"dither": 1e-5,
"backend": "librosa"
},
}
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"augmentation": {
'n_freq_mask': 2,
'n_time_mask': 2,
'width_freq_mask': 6,
'width_time_mask': 6,
},
"dataset_files": [
"/data/librispeech/librivox-train-clean-100.csv",
"/data/librispeech/librivox-train-clean-360.csv",
"/data/librispeech/librivox-train-other-500.csv"
],
"max_duration": 16.7,
"shuffle": True,
},
}
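# This variant differs from jasper10x5_LibriSpeech_nvgrad.py mainly in its
# augmentation: time/frequency masking here versus speed perturbation there.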
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"dataset_files": [
"/data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"dataset_files": [
"/data/librispeech/librivox-test-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/jasper10x5_LibriSpeech_nvgrad_masks.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 200,
"num_gpus": 8,
"batch_size_per_gpu": 64,
"iter_size": 1,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 2200,
"eval_steps": 2200,
"save_checkpoint_steps": 1100,
"logdir": "w2l_log_folder",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.90,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.05,
"power": 2.0,
},
"larc_params": {
"larc_eta": 0.001,
},
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.001
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "conv1d", "repeat": 3,
"kernel_size": [11], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [13], "stride": [1],
"num_channels": 384, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [17], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.8,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [21], "stride": [1],
"num_channels": 640, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
},
{
"type": "conv1d", "repeat": 3,
"kernel_size": [25], "stride": [1],
"num_channels": 768, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.7,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [29], "stride": [1],
"num_channels": 896, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.6,
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [1], "stride": [1],
"num_channels": 1024, "padding": "SAME",
"dilation":[1], "dropout_keep_prob": 0.6,
}
],
"dropout_keep_prob": 0.7,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": lambda x: tf.minimum(tf.nn.relu(x), 20.0),
"data_format": "channels_last",
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
"use_language_model": False,
# params for decoding the sequence with language model
"beam_width": 512,
"alpha": 2.0,
"beta": 1.5,
"decoder_library_path": "ctc_decoder_with_lm/libctc_decoder_with_kenlm.so",
"lm_path": "language_model/4-gram.binary",
"trie_path": "language_model/trie.binary",
"alphabet_config_path": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
},
"loss": CTCLoss,
"loss_params": {},
}
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-train-clean-100.csv",
"data/librispeech/librivox-train-clean-360.csv",
"data/librispeech/librivox-train-other-500.csv",
],
"max_duration": 16.7,
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"dataset_files": [
"data/librispeech/librivox-test-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/w2l_large_8gpus_mp.py |
# pylint: skip-file
# QuartzNet paper: https://arxiv.org/abs/1910.10261
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data.speech2text.speech2text import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import cosine_decay
from open_seq2seq.optimizers.novograd import NovoGrad
residual_dense = False # Enable or disable Dense Residual
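# QuartzNet replaces Jasper's plain 1D convolutions with time-channel separable
# ones: each "sep_conv1d" block is a depthwise convolution over time followed by
# a pointwise (1x1) convolution, which is what keeps the parameter count small.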
base_model = Speech2Text
base_params = {
"random_seed": 0,
"use_horovod": True,
"num_epochs": 400,
"num_gpus": 8,
"batch_size_per_gpu": 32,
"iter_size": 1,
"save_summaries_steps": 100,
"print_loss_steps": 10,
"print_samples_steps": 2200,
"eval_steps": 2200,
"save_checkpoint_steps": 1100,
"logdir": "jasper_log_folder",
"num_checkpoints": 2,
"optimizer": NovoGrad,
"optimizer_params": {
"beta1": 0.95,
"beta2": 0.5,
"epsilon": 1e-08,
"weight_decay": 0.001,
"grad_averaging": False,
},
"lr_policy": cosine_decay,
"lr_policy_params": {
"learning_rate": 0.01,
"min_lr": 0.0,
"warmup_steps": 1000
},
"dtype": tf.float32,
# "loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "sep_conv1d", "repeat": 1,
"kernel_size": [33], "stride": [2],
"num_channels": 256, "padding": "SAME",
"dilation":[1]
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [33], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [33], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [33], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [39], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [39], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [39], "stride": [1],
"num_channels": 256, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [51], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [51], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [51], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [63], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [63], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [63], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [75], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [75], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 5,
"kernel_size": [75], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[1],
"residual": True, "residual_dense": residual_dense
},
{
"type": "sep_conv1d", "repeat": 1,
"kernel_size": [87], "stride": [1],
"num_channels": 512, "padding": "SAME",
"dilation":[2],
"residual": True, "residual_dense": residual_dense
},
{
"type": "conv1d", "repeat": 1,
"kernel_size": [1], "stride": [1],
"num_channels": 1024, "padding": "SAME",
"dilation":[1]
}
],
"dropout_keep_prob": 1.0,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": tf.nn.relu,
"data_format": "channels_last",
"use_conv_mask": True,
},
"decoder": FullyConnectedCTCDecoder,
"decoder_params": {
"initializer": tf.contrib.layers.xavier_initializer,
"use_language_model": False,
"infer_logits_to_pickle": False,
},
"loss": CTCLoss,
"loss_params": {},
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"num_audio_features": 64,
"input_type": "logfbank",
"vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
"norm_per_feature": True,
"window": "hanning",
"precompute_mel_basis": True,
"sample_freq": 16000,
"pad_to": 16,
"dither": 1e-5,
"backend": "librosa",
},
}
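# The schedule above does a linear warmup over "warmup_steps" and then cosine
# annealing from "learning_rate" down to "min_lr". A hypothetical standalone
# sketch (not used by this config; the actual OpenSeq2Seq cosine_decay may differ
# in detail):
import math
def _cosine_decay_sketch(initial_lr, step, total_steps, warmup_steps=1000, min_lr=0.0):
    if step < warmup_steps:
        return initial_lr * float(step) / float(warmup_steps)  # linear warmup
    progress = float(step - warmup_steps) / float(max(1, total_steps - warmup_steps))
    return min_lr + 0.5 * (initial_lr - min_lr) * (1.0 + math.cos(math.pi * progress))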
train_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"augmentation": {
'n_freq_mask': 2,
'n_time_mask': 2,
'width_freq_mask': 6,
'width_time_mask': 6,
},
"dataset_files": [
"/data/librispeech/librivox-train-clean-100.csv",
"/data/librispeech/librivox-train-clean-360.csv",
"/data/librispeech/librivox-train-other-500.csv"
],
"max_duration": 16.7,
"shuffle": True,
},
}
eval_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"dataset_files": [
"/data/librispeech/librivox-dev-clean.csv",
],
"shuffle": False,
},
}
infer_params = {
"data_layer": Speech2TextDataLayer,
"data_layer_params": {
"dataset_files": [
"/data/librispeech/librivox-test-clean.csv",
],
"shuffle": False,
},
}
| OpenSeq2Seq-master | example_configs/speech2text/quartznet15x5_LibriSpeech.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Image2Label
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedSCDecoder
from open_seq2seq.data import SpeechCommandsDataLayer
from open_seq2seq.losses import CrossEntropyLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Image2Label
dataset_version = "v1-12"
dataset_location = "data/speech_commands_v0.01"
if dataset_version == "v1-12":
num_labels = 12
elif dataset_version == "v1-30":
num_labels = 30
else:
num_labels = 35
dataset_location = "data/speech_commands_v0.02"
base_params = {
"random_seed": 0,
"use_horovod": False,
"num_epochs": 20,
"num_gpus": 1,
"batch_size_per_gpu": 64,
"iter_size": 1,
"save_summaries_steps": 10000,
"print_loss_steps": 10,
"print_samples_steps": 1000,
"eval_steps": 100,
"save_checkpoint_steps": 10000,
"logdir": "result/" + dataset_version + "-jasper",
"optimizer": "Momentum",
"optimizer_params": {
"momentum": 0.95,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.05,
"min_lr": 1e-5,
"power": 2.0,
},
"larc_params": {
"larc_eta": 0.001,
},
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 0.001
},
"dtype": "mixed",
"loss_scaling": "Backoff",
"summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
'variable_norm', 'gradient_norm', 'global_gradient_norm'],
"encoder": TDNNEncoder,
"encoder_params": {
"convnet_layers": [
{
"type": "conv2d", "repeat": 1,
"kernel_size": [11,1], "stride": [2,1],
"num_channels": 256, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [11,1], "stride": [1,1],
"num_channels": 256, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [11,1], "stride": [1,1],
"num_channels": 256, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 1,
"kernel_size": [13,1], "stride": [2,1],
"num_channels": 384, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [13,1], "stride": [1,1],
"num_channels": 384, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [13,1], "stride": [1,1],
"num_channels": 384, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 1,
"kernel_size": [17,1], "stride": [2,1],
"num_channels": 512, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [17,1], "stride": [1,1],
"num_channels": 512, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [17,1], "stride": [1,1],
"num_channels": 512, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.8,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [21,1], "stride": [1,1],
"num_channels": 640, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.7,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [21,1], "stride": [1,1],
"num_channels": 640, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.7,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [25,1], "stride": [1,1],
"num_channels": 768, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.7,
"residual": True
},
{
"type": "conv2d", "repeat": 3,
"kernel_size": [25,1], "stride": [1,1],
"num_channels": 768, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.7,
"residual": True
},
{
"type": "conv2d", "repeat": 1,
"kernel_size": [29,1], "stride": [1,1],
"num_channels": 896, "padding": "SAME",
"dilation":[2,1], "dropout_keep_prob": 0.6,
},
{
"type": "conv2d", "repeat": 1,
"kernel_size": [1,1], "stride": [1,1],
"num_channels": 1024, "padding": "SAME",
"dilation":[1,1], "dropout_keep_prob": 0.6,
}
],
"dropout_keep_prob": 0.7,
"initializer": tf.contrib.layers.xavier_initializer,
"initializer_params": {
'uniform': False,
},
"normalization": "batch_norm",
"activation_fn": tf.nn.relu,
"data_format": "channels_last",
},
"decoder": FullyConnectedSCDecoder,
"decoder_params": {
"output_dim": num_labels,
},
"loss": CrossEntropyLoss,
"data_layer": SpeechCommandsDataLayer,
"data_layer_params": {
"dataset_location": dataset_location,
"num_audio_features": 128,
"audio_length": 128,
"num_labels": num_labels,
"cache_data": True,
"augment_data": True,
"model_format": "jasper"
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
dataset_version + "-train.txt"
],
"shuffle": True,
"repeat": True
},
}
eval_params = {
"batch_size_per_gpu": 16,
"data_layer_params": {
"dataset_files": [
dataset_version + "-val.txt"
],
"shuffle": False,
"repeat": False
},
}
| OpenSeq2Seq-master | example_configs/speech2text/jasper_commands.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ctypes
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
class DeviceBuffer(object):
def __init__(self, shape, dtype=trt.int32):
self.buf = cuda.mem_alloc(trt.volume(shape) * dtype.itemsize)
def binding(self):
return int(self.buf)
def free(self):
self.buf.free()
def main():
parser = argparse.ArgumentParser(description='BERT Inference Benchmark')
parser.add_argument("-e", "--engine", help='Path to BERT TensorRT engine')
parser.add_argument('-b', '--batch-size', default=[], action="append", help='Batch size(s) to benchmark. Can be specified multiple times for more than one batch size. This script assumes that the engine has been built with one optimization profile for each batch size, and that these profiles are in order of increasing batch size.', type=int)
parser.add_argument('-s', '--sequence-length', default=128, help='Sequence length of the BERT model', type=int)
parser.add_argument('-i', '--iterations', default=200, help='Number of iterations to run when benchmarking each batch size.', type=int)
parser.add_argument('-w', '--warm-up-runs', default=10, help='Number of iterations to run prior to benchmarking.', type=int)
parser.add_argument('-r', '--random-seed', required=False, default=12345, help='Random seed.', type=int)
args, _ = parser.parse_known_args()
args.batch_size = args.batch_size or [1]
# Import necessary plugins for BERT TensorRT
ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)
with open(args.engine, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(f.read()) as engine, engine.create_execution_context() as context:
# Allocate buffers large enough to store the largest batch size
max_input_shape = (max(args.batch_size), args.sequence_length)
max_output_shape = (max(args.batch_size), args.sequence_length, 2, 1, 1)
buffers = [
DeviceBuffer(max_input_shape),
DeviceBuffer(max_input_shape),
DeviceBuffer(max_input_shape),
DeviceBuffer(max_output_shape)
]
# Prepare random input
pseudo_vocab_size = 30522
pseudo_type_vocab_size = 2
np.random.seed(args.random_seed)
test_word_ids = np.random.randint(0, pseudo_vocab_size, (max(args.batch_size), args.sequence_length), dtype=np.int32)
test_segment_ids = np.random.randint(0, pseudo_type_vocab_size, (max(args.batch_size), args.sequence_length), dtype=np.int32)
test_input_mask = np.ones((max(args.batch_size), args.sequence_length), dtype=np.int32)
# Copy input h2d
cuda.memcpy_htod(buffers[0].buf, test_word_ids.ravel())
cuda.memcpy_htod(buffers[1].buf, test_segment_ids.ravel())
cuda.memcpy_htod(buffers[2].buf, test_input_mask.ravel())
num_binding_per_profile = engine.num_bindings // engine.num_optimization_profiles
bench_times = {}
stream = cuda.Stream()
for idx, batch_size in enumerate(sorted(args.batch_size)):
context.set_optimization_profile_async(idx, stream.handle)
# Each profile has unique bindings
binding_idx_offset = idx * num_binding_per_profile
bindings = [0] * binding_idx_offset + [buf.binding() for buf in buffers]
shapes = {
"input_ids": (batch_size, args.sequence_length),
"segment_ids": (batch_size, args.sequence_length),
"input_mask": (batch_size, args.sequence_length),
}
for binding, shape in shapes.items():
context.set_binding_shape(engine[binding] + binding_idx_offset, shape)
assert context.all_binding_shapes_specified
# Inference
total_time = 0
start = cuda.Event()
end = cuda.Event()
# Warmup
for _ in range(args.warm_up_runs):
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
stream.synchronize()
# Timing loop
times = []
for _ in range(args.iterations):
start.record(stream)
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
end.record(stream)
stream.synchronize()
times.append(end.time_since(start))
# Compute average time, 95th percentile time and 99th percentile time.
bench_times[batch_size] = times
[b.free() for b in buffers]
for batch_size, times in bench_times.items():
total_time = sum(times)
avg_time = total_time / float(len(times))
times.sort()
percentile95 = times[int(len(times) * 0.95)]
percentile99 = times[int(len(times) * 0.99)]
print("Running {:} iterations with Batch Size: {:}\n\tTotal Time: {:} ms \tAverage Time: {:} ms\t95th Percentile Time: {:} ms\t99th Percentile Time: {:}".format(args.iterations, batch_size, total_time, avg_time, percentile95, percentile99))
if __name__ == '__main__':
main()
| TensorRT-master | demo/BERT/perf.py |
import re
import sys
import pickle
import numpy as np
import onnx
import torch
import tensorrt as trt
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError as err:
sys.stderr.write("""Error: Failed to import tensorflow module ({})\n""".format(err))
sys.exit()
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
"""
Attentions Keys
"""
WQ = "self_query_kernel"
BQ = "self_query_bias"
WK = "self_key_kernel"
BK = "self_key_bias"
WV = "self_value_kernel"
BV = "self_value_bias"
WQKV = "self_qkv_kernel"
BQKV = "self_qkv_bias"
"""
Transformer Keys
"""
W_AOUT = "attention_output_dense_kernel"
B_AOUT = "attention_output_dense_bias"
AOUT_LN_BETA = "attention_output_layernorm_beta"
AOUT_LN_GAMMA = "attention_output_layernorm_gamma"
W_MID = "intermediate_dense_kernel"
B_MID = "intermediate_dense_bias"
W_LOUT = "output_dense_kernel"
B_LOUT = "output_dense_bias"
LOUT_LN_BETA = "output_layernorm_beta"
LOUT_LN_GAMMA = "output_layernorm_gamma"
"""
Squad Output Keys
"""
SQD_W = "squad_output_weights"
SQD_B = "squad_output_bias"
def load_tf_weights(inputbase, config):
"""
Load the weights from the tensorflow checkpoint
"""
weights_dict = dict()
try:
reader = tf.train.NewCheckpointReader(inputbase)
tensor_dict = reader.get_variable_to_shape_map()
# There might be training-related variables in the checkpoint that can be discarded
param_names = [key for key in sorted(tensor_dict) if "adam" not in key and "global_step" not in key and "pooler" not in key]
count = len(param_names)
TRT_LOGGER.log(TRT_LOGGER.INFO, "Found {:} entries in weight map".format(count))
for pn in param_names:
toks = pn.lower().split("/")
if "encoder" in pn:
assert ("layer" in pn)
                l = (re.findall(r"\d+", pn))[0]
outname = "l{}_".format(l) + "_".join(toks[3:])
else:
outname = "_".join(toks)
tensor = reader.get_tensor(pn)
shape = tensor.shape
if pn.find("kernel") != -1:
weights_dict[outname + "_notrans"] = trt.Weights(np.ascontiguousarray(tensor).flatten())
                TRT_LOGGER.log(TRT_LOGGER.VERBOSE, "Transposing {}\n".format(pn))
tensor = np.transpose(tensor)
shape = tensor.shape
flat_tensor = tensor.flatten()
shape_str = "{} ".format(len(shape)) + " ".join([str(d) for d in shape])
weights_dict[outname] = trt.Weights(flat_tensor)
TRT_LOGGER.log(TRT_LOGGER.VERBOSE, "Original name: {:}, TensorRT name: {:}, shape: {:}".format(pn, outname, shape_str))
N = config.num_attention_heads
H = config.head_size
additional_dict = dict()
for key, value in weights_dict.items():
pos = key.find(BQ)
if pos != -1:
hidden_size = value.size
prefix = key[:pos]
Bq_ = value
Bk_ = weights_dict[prefix + BK]
Bv_ = weights_dict[prefix + BV]
Wq_ = weights_dict[prefix + WQ]
Wk_ = weights_dict[prefix + WK]
Wv_ = weights_dict[prefix + WV]
mat_size = hidden_size * hidden_size
wcount = 3 * mat_size
Wall = np.zeros(wcount, np.float32)
bcount = 3 * hidden_size
Ball = np.zeros(bcount, np.float32)
Wall[0:mat_size] = Wq_.numpy()[0:mat_size]
Wall[mat_size:2*mat_size] = Wk_.numpy()[0:mat_size]
Wall[2*mat_size:3*mat_size] = Wv_.numpy()[0:mat_size]
Ball[0:hidden_size] = Bq_.numpy()[0:hidden_size]
Ball[hidden_size:2*hidden_size] = Bk_.numpy()[0:hidden_size]
Ball[2*hidden_size:3*hidden_size] = Bv_.numpy()[0:hidden_size]
if config.use_int8 and getattr(config, 'interleaved', False):
Wall = np.ascontiguousarray(Wall.reshape((3, N, H, N, H)), dtype=np.float32)
Ball = np.ascontiguousarray(Ball.reshape((3, N, H)), dtype=np.float32)
else:
Wall = np.ascontiguousarray(Wall.reshape((3, N, H, N, H)).transpose((1, 0, 2, 3, 4)), dtype=np.float32)
Ball = np.ascontiguousarray(Ball.reshape((3, N, H)).transpose((1, 0, 2)), dtype=np.float32)
additional_dict[prefix + WQKV] = trt.Weights(Wall)
additional_dict[prefix + BQKV] = trt.Weights(Ball)
additional_dict[prefix + WQKV + "_notrans"] = trt.Weights(Wall.T)
except Exception as error:
TRT_LOGGER.log(TRT_LOGGER.ERROR, str(error))
weights_dict.update(additional_dict)
return weights_dict
def onnx_to_trt_name(onnx_name):
"""
    Convert variable names in the ONNX checkpoint to the TF-style naming convention expected by the builder
"""
qkv_strings = {'key', 'value', 'query', 'query_key_value'}
onnx_name = onnx_name.lower()
toks = [t.strip('_') for t in onnx_name.split('.')]
if toks[0] == 'bert': #embeddings or encoder
if toks[1] == 'encoder': #transformer
# Token conversions for sparse checkpoints
if toks[-2] == 'dense_act':
toks[-2] = 'dense'
elif toks[-3] == 'dense_act':
if toks[-2] == 'input_quantizer':
toks[-2] = 'input'
elif toks[-2] == 'weight_quantizer':
toks[-2] = 'kernel'
toks[-3] = 'dense'
elif toks[-2].startswith('matmul'):
toks[-2] = {
'matmul_q_quantizer': 'qv_a_input_quantizer',
'matmul_k_quantizer': 'qv_b_input_quantizer',
'matmul_v_quantizer': 'av_b_input_quantizer',
'matmul_a_quantizer': 'av_a_input_quantizer',
}[toks[-2].replace('input_', '')]
# Token conversions for all checkpoints
if toks[-2] == 'layernorm': #bias->beta, weight->gamma
toks[-1] = 'beta' if toks[-1] == 'bias' else 'gamma'
elif (toks[-2] == 'dense' or toks[-2] in qkv_strings) and toks[-1] == 'weight':
toks[-1] = 'kernel'
elif (toks[-3] == 'dense' or toks[-3] in qkv_strings) and toks[-1] == 'amax':
if toks[-2] == 'weight_quantizer':
toks[-2] = 'kernel'
elif toks[-2] == 'input_quantizer':
toks[-2] = 'input'
if 'final_input_quantizer' not in toks[2]:
ind = toks.index('layers')+1 if 'layers' in toks else 3
toks = toks[ind:]
toks[0] = 'l{}'.format(int(toks[0]))
else:
if toks[-2] == 'layernorm': #bias->beta, weight->gamma
toks[-1] = 'beta' if toks[-1] == 'bias' else 'gamma'
else: #embeddings: drop "_weight" suffix
if toks[-1] == 'amax':
toks[-2] = 'amax'
toks = toks[:-1]
elif 'qa' in onnx_name:
name = 'cls_squad_output_bias' if toks[-1] == 'bias' else 'cls_squad_output_weights'
return name
else:
print("Encountered unknown case:", onnx_name)
assert(False)
parsed = '_'.join(toks)
return parsed
def get_onnx_weight_dict(tensor_dict, config):
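    # Build the TRT weight map from a name -> array dict: per-layer Q/K/V kernels and
    # biases are fused into single QKV entries (reshaped for the plugins), "_amax"
    # calibration scalars are passed through, and kernels get "_notrans" copies.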
N = config.num_attention_heads
H = config.head_size
hidden_size = config.hidden_size
weights_dict = dict()
for outname, tensor in tensor_dict.items():
if outname.find("_amax") != -1:
weights_dict[outname] = tensor
elif outname.find(BQ) != -1:
prefix = outname[:outname.find(BQ)]
Wqkv = np.zeros((3, hidden_size, hidden_size), np.float32)
Bqkv = np.zeros((3, hidden_size), np.float32)
Wqkv[0,:,:] = tensor_dict[prefix + WQ]
Wqkv[1,:,:] = tensor_dict[prefix + WK]
Wqkv[2,:,:] = tensor_dict[prefix + WV]
Bqkv[0,:] = tensor
Bqkv[1,:] = tensor_dict[prefix + BK]
Bqkv[2,:] = tensor_dict[prefix + BV]
if config.use_int8 and getattr(config, 'interleaved', False):
Wqkv = np.ascontiguousarray(Wqkv.reshape((3, N, H, N, H)))
Bqkv = np.ascontiguousarray(Bqkv.reshape((3, N, H)))
else:
Wqkv = np.ascontiguousarray(Wqkv.reshape((3, N, H, N, H)).transpose((1,0,2,3,4)))
Bqkv = np.ascontiguousarray(Bqkv.reshape((3, N, H)).transpose((1,0,2)))
weights_dict[prefix + WQKV] = trt.Weights(Wqkv)
weights_dict[prefix + BQKV] = trt.Weights(Bqkv)
weights_dict[prefix + WQKV + "_notrans"] = trt.Weights(Wqkv.T)
elif outname.find(BK) != -1 or outname.find(BV) != -1 or outname.find(WQ) != -1 or outname.find(WK) != -1 or outname.find(WV) != -1:
pass
else:
flat_tensor = np.ascontiguousarray(tensor).flatten()
weights_dict[outname] = trt.Weights(flat_tensor)
if outname.find("kernel") != -1:
tensor = np.transpose(tensor)
weights_dict[outname + "_notrans"] = trt.Weights(np.ascontiguousarray(tensor).flatten())
TRT_LOGGER.log(TRT_LOGGER.INFO, "Found {:} entries in weight map".format(len(weights_dict)))
return weights_dict
def load_onnx_weights_and_quant(path, config):
"""
Load the weights from the onnx checkpoint
"""
model = onnx.load(path)
weights = model.graph.initializer
tensor_dict = dict((onnx_to_trt_name(w.name), np.frombuffer(w.raw_data, np.int8).reshape(w.dims))
if w.name.split('_')[-1] == 'mask' else
(onnx_to_trt_name(w.name), np.frombuffer(w.raw_data, np.float32).reshape(w.dims))
for w in weights)
return get_onnx_weight_dict(tensor_dict, config)
def load_pytorch_weights_and_quant(path, config):
"""
Load the weights from the pytorch checkpoint
"""
state_dict = torch.load(path, map_location='cpu')["model"]
tensor_dict = {onnx_to_trt_name(name):val.numpy() for name, val in state_dict.items()}
return get_onnx_weight_dict(tensor_dict, config)
def load_megatron_pickle_weights(path, config):
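    # Load a Megatron-style BERT weight dictionary from a pickle file, renaming and
    # reshaping entries (QKV fusion, amax passthrough) into the form the TRT builder expects.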
N = config.num_attention_heads
H = config.head_size
with open(path, 'rb') as f:
tensor_dict = pickle.load(f)
weight_dict = {}
for name, tensor in tensor_dict.items():
if 'scale' in name:
continue
name = (onnx_to_trt_name(name)
.replace('embedding_', 'embeddings_')
.replace('tokentype_', 'token_type_')
.replace('_av', '_self_av')
.replace('_qv', '_self_qv')
.replace('query_key_value', 'self_qkv'))
if name.endswith('self_qkv_kernel'):
tensor = np.ascontiguousarray(tensor.reshape((3, N, H, N, H))).astype(np.float32)
weight_dict[name] = trt.Weights(tensor)
elif name.endswith('self_qkv_bias'):
tensor = np.ascontiguousarray(tensor.reshape((3, N, H))).astype(np.float32)
weight_dict[name] = trt.Weights(tensor)
elif name == 'l{}_output_layernorm_output_quantizer_amax'.format(config.num_hidden_layers-1):
weight_dict['bert_encoder_final_input_quantizer_amax'] = tensor
elif name.endswith('_amax'):
weight_dict[name] = tensor
if name.endswith('_qkv_input_amax'):
weight_dict[name.replace('_qkv_input_amax', '_query_input_amax')] = tensor
weight_dict[name.replace('_qkv_input_amax', '_key_input_amax')] = tensor
weight_dict[name.replace('_qkv_input_amax', '_value_input_amax')] = tensor
else:
flat_tensor = np.ascontiguousarray(tensor).flatten().astype(np.float32)
weight_dict[name] = trt.Weights(flat_tensor)
TRT_LOGGER.log(TRT_LOGGER.INFO, "Found {:} entries in weight map".format(len(weight_dict)))
return weight_dict
| TensorRT-master | demo/BERT/builder_utils.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script uses a prebuilt TensorRT BERT QA Engine to answer a question
based on the provided passage. It additionally includes an interactive mode
where multiple questions can be asked.
"""
import time
import json
import ctypes
import argparse
import collections
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import helpers.tokenization as tokenization
import helpers.data_processing as dp
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
def parse_args():
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-e', '--engine',
help='Path to BERT TensorRT engine')
parser.add_argument('-p', '--passage', nargs='*',
help='Text for paragraph/passage for BERT QA',
default='')
parser.add_argument('-pf', '--passage-file',
help='File containing input passage',
default='')
parser.add_argument('-q', '--question', nargs='*',
help='Text for query/question for BERT QA',
default='')
parser.add_argument('-qf', '--question-file',
help='File containing input question',
default='')
parser.add_argument('-sq', '--squad-json',
help='SQuAD json file',
default='')
parser.add_argument('-o', '--output-prediction-file',
help='Output prediction file for SQuAD evaluation',
default='./predictions.json')
parser.add_argument('-v', '--vocab-file',
help='Path to file containing entire understandable vocab')
parser.add_argument('-s', '--sequence-length',
help='The sequence length to use. Defaults to 128',
default=128, type=int)
parser.add_argument('--max-query-length',
help='The maximum length of a query in number of tokens. Queries longer than this will be truncated',
default=64, type=int)
parser.add_argument('--max-answer-length',
help='The maximum length of an answer that can be generated',
default=30, type=int)
parser.add_argument('--n-best-size',
help='Total number of n-best predictions to generate in the nbest_predictions.json output file',
default=20, type=int)
parser.add_argument('--doc-stride',
help='When splitting up a long document into chunks, what stride to take between chunks',
default=128, type=int)
args, _ = parser.parse_known_args()
return args
if __name__ == '__main__':
args = parse_args()
paragraph_text = None
squad_examples = None
output_prediction_file = None
if not args.passage == '':
paragraph_text = ' '.join(args.passage)
elif not args.passage_file == '':
f = open(args.passage_file, 'r')
paragraph_text = f.read()
elif not args.squad_json == '':
squad_examples = dp.read_squad_json(args.squad_json)
output_prediction_file = args.output_prediction_file
else:
paragraph_text = input("Paragraph: ")
question_text = None
if not args.question == '':
question_text = ' '.join(args.question)
elif not args.question_file == '':
f = open(args.question_file, 'r')
question_text = f.read()
tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
# When splitting up a long document into chunks, how much stride to take between chunks.
doc_stride = args.doc_stride
# The maximum total input sequence length after WordPiece tokenization.
    # Sequences longer than this will be truncated, and sequences shorter than this will be padded.
max_seq_length = args.sequence_length
def question_features(tokens, question):
# Extract features from the paragraph and question
return dp.convert_example_to_features(tokens, question, tokenizer, max_seq_length, doc_stride, args.max_query_length)
# Import necessary plugins for BERT TensorRT
handle = ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)
if not handle:
raise RuntimeError("Could not load plugin library. Is `libnvinfer_plugin.so` on your LD_LIBRARY_PATH?")
# The first context created will use the 0th profile. A new context must be created
# for each additional profile needed. Here, we only use batch size 1, thus we only need the first profile.
with open(args.engine, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, \
runtime.deserialize_cuda_engine(f.read()) as engine, engine.create_execution_context() as context:
# select engine profile
context.active_optimization_profile = 0
input_nbytes = max_seq_length * trt.int32.itemsize
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
# Allocate device memory for inputs.
d_inputs = [cuda.mem_alloc(input_nbytes) for binding in range(4)]
# Allocate output buffer by querying the size from the context. This may be different for different input shapes.
h_output = cuda.pagelocked_empty((2 * max_seq_length), dtype=np.float32)
d_output = cuda.mem_alloc(h_output.nbytes)
def inference(features, tokens):
global h_output
_NetworkOutput = collections.namedtuple( # pylint: disable=invalid-name
"NetworkOutput",
["start_logits", "end_logits", "feature_index"])
networkOutputs = []
eval_time_elapsed = 0
for feature_index, feature in enumerate(features):
# Copy inputs
B = 1
S = np.sum(feature.input_mask)
input_ids = feature.input_ids[0:S]
segment_ids = feature.segment_ids[0:S]
cu_seq_lens = np.array([0, S], dtype=np.int32);
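                # Binding order follows the varseqlen builder's network inputs: 0=input_ids,
                # 1=segment_ids, 2=cu_seqlens (prefix sums of sequence lengths, here [0, S]
                # for a single sequence), 3=max_seqlen; the last binding is the output.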
if context.get_binding_shape(0)[0] != S:
context.set_binding_shape(0, (S,))
if context.get_binding_shape(1)[0] != S:
context.set_binding_shape(1, (S,))
if context.get_binding_shape(2)[0] != 2:
context.set_binding_shape(2, (2,))
if context.get_binding_shape(3)[0] != S:
context.set_binding_shape(3, (S,))
h_input_ids = cuda.register_host_memory(np.ascontiguousarray(input_ids.ravel()))
h_segment_ids = cuda.register_host_memory(np.ascontiguousarray(segment_ids.ravel()))
h_cu_seq_lens = cuda.register_host_memory(np.ascontiguousarray(cu_seq_lens.ravel()))
eval_start_time = time.time()
cuda.memcpy_htod_async(d_inputs[0], h_input_ids, stream)
cuda.memcpy_htod_async(d_inputs[1], h_segment_ids, stream)
cuda.memcpy_htod_async(d_inputs[2], h_cu_seq_lens, stream)
# Run inference
context.execute_async_v2(bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output)], stream_handle=stream.handle)
# Synchronize the stream
stream.synchronize()
eval_time_elapsed += (time.time() - eval_start_time)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(h_output, d_output, stream)
stream.synchronize()
# Only retrieve and post-process the first batch
networkOutputs.append(_NetworkOutput(
start_logits = np.array(h_output[0:S]),
end_logits = np.array(h_output[S:S*2]),
feature_index = feature_index
))
eval_time_elapsed /= len(features)
# Total number of n-best predictions to generate in the nbest_predictions.json output file
n_best_size = 20
# The maximum length of an answer that can be generated. This is needed
# because the start and end predictions are not conditioned on one another
max_answer_length = 30
prediction, nbest_json, scores_diff_json = dp.get_predictions(tokens, features,
networkOutputs, args.n_best_size, args.max_answer_length)
return eval_time_elapsed, prediction, nbest_json
def print_single_query(eval_time_elapsed, prediction, nbest_json):
print("------------------------")
print("Running inference in {:.3f} Sentences/Sec".format(1/eval_time_elapsed))
print("------------------------")
print("Answer: '{}'".format(prediction))
print("With probability: {:.3f}".format(nbest_json[0]['probability'] * 100.0))
if squad_examples:
all_predictions = collections.OrderedDict()
for example in squad_examples:
features = question_features(example.doc_tokens, example.question_text)
eval_time_elapsed, prediction, nbest_json = inference(features, example.doc_tokens)
all_predictions[example.id] = prediction
with open(output_prediction_file, "w") as f:
f.write(json.dumps(all_predictions, indent=4))
print("\nOutput dump to {}".format(output_prediction_file))
else:
            # Extract tokens from the paragraph
doc_tokens = dp.convert_doc_tokens(paragraph_text)
if question_text:
print("\nPassage: {}".format(paragraph_text))
print("\nQuestion: {}".format(question_text))
features = question_features(doc_tokens, question_text)
eval_time_elapsed, prediction, nbest_json = inference(features, doc_tokens)
print_single_query(eval_time_elapsed, prediction, nbest_json)
else:
# If no question text is provided, loop until the question is 'exit'
EXIT_CMDS = ["exit", "quit"]
question_text = input("Question (to exit, type one of {:}): ".format(EXIT_CMDS))
while question_text.strip() not in EXIT_CMDS:
features = question_features(doc_tokens, question_text)
eval_time_elapsed, prediction, nbest_json = inference(features, doc_tokens)
print_single_query(eval_time_elapsed, prediction, nbest_json)
question_text = input("Question (to exit, type one of {:}): ".format(EXIT_CMDS))
| TensorRT-master | demo/BERT/inference_varseqlen.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ctypes
import json
import numpy as np
import os
import os.path
import re
import sys
import time
import onnx
import pycuda.autoinit
# TensorRT
import tensorrt as trt
from builder_utils import load_tf_weights, load_pytorch_weights_and_quant, load_onnx_weights_and_quant, load_megatron_pickle_weights
from builder_utils import WQKV, BQKV # Attention Keys
from builder_utils import W_AOUT, B_AOUT, W_MID, B_MID, W_LOUT, B_LOUT # Transformer Keys
from builder_utils import SQD_W, SQD_B # SQuAD Output Keys
"""
TensorRT Initialization
"""
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt_version = [int(n) for n in trt.__version__.split('.')]
handle = ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)
if not handle:
raise RuntimeError("Could not load plugin library. Is `libnvinfer_plugin.so` on your LD_LIBRARY_PATH?")
trt.init_libnvinfer_plugins(TRT_LOGGER, "")
plg_registry = trt.get_plugin_registry()
emln_plg_creator2 = plg_registry.get_plugin_creator("CustomEmbLayerNormPluginDynamic", "2", "")
mha_plg_creator2 = plg_registry.get_plugin_creator("CustomQKVToContextPluginDynamic", "2", "")
skln_plg_creator2 = plg_registry.get_plugin_creator("CustomSkipLayerNormPluginDynamic", "2", "")
mha_plg_creator3 = plg_registry.get_plugin_creator("CustomQKVToContextPluginDynamic", "3", "")
skln_plg_creator3 = plg_registry.get_plugin_creator("CustomSkipLayerNormPluginDynamic", "3", "")
# Megatron Plugins
emln_plg_creator3 = plg_registry.get_plugin_creator("CustomEmbLayerNormPluginDynamic", "3", "")
skln_plg_creator4 = plg_registry.get_plugin_creator("CustomSkipLayerNormPluginDynamic", "4", "")
class BertConfig:
def __init__(self, bert_config_path, use_fp16, use_int8, use_qat, interleaved, timing_cache, use_sparsity, use_megatron):
with open(bert_config_path, "r") as f:
data = json.load(f)
self.num_attention_heads = data["num_attention_heads"]
self.hidden_size = data["hidden_size"]
self.intermediate_size = data["intermediate_size"]
self.num_hidden_layers = data["num_hidden_layers"]
self.head_size = self.hidden_size // self.num_attention_heads
self.use_fp16 = use_fp16
self.use_int8 = use_int8
self.use_qat = use_qat
self.interleaved = interleaved
self.timing_cache = timing_cache
self.use_sparsity = use_sparsity
self.use_megatron = use_megatron
def get_trt_dtype(self):
dtype = trt.float32
if self.use_fp16:
dtype = trt.float16
if self.use_int8:
dtype = trt.int8
return dtype
def set_tensor_name(tensor, prefix, name):
tensor.name = prefix + name
def set_output_name(layer, prefix, name, out_idx = 0):
set_tensor_name(layer.get_output(out_idx), prefix, name)
def set_output_range(layer, maxval, out_idx = 0):
layer.get_output(out_idx).set_dynamic_range(-maxval, maxval)
def attention_layer_opt(prefix, config, init_dict, network, input_tensor, mask_idx, cu_seqlens, max_seqlen):
"""
Add the attention layer
"""
hidden_size = config.hidden_size
num_heads = config.num_attention_heads
head_size = int(hidden_size / num_heads)
Wall = init_dict[prefix + WQKV]
Ball = init_dict[prefix + BQKV]
# FC_attention
if config.use_int8:
mult_all = network.add_convolution_nd(input_tensor, 3 * hidden_size, (1, 1), Wall, Ball)
else:
mult_all = network.add_fully_connected(input_tensor, 3 * hidden_size, Wall, Ball)
if config.use_qat:
dr_qkv = max(
init_dict[prefix + 'self_qv_a_input_quantizer_amax'],
init_dict[prefix + 'self_qv_b_input_quantizer_amax'],
init_dict[prefix + 'self_av_b_input_quantizer_amax'],
)
set_output_range(mult_all, dr_qkv)
set_output_name(mult_all, prefix, "qkv_mult")
# QKV2CTX
dtype = config.get_trt_dtype()
pf_type = trt.PluginField("type_id", np.array([int(dtype)], np.int32), trt.PluginFieldType.INT32)
pf_hidden_size = trt.PluginField("hidden_size", np.array([hidden_size], np.int32), trt.PluginFieldType.INT32)
pf_num_heads = trt.PluginField("num_heads", np.array([num_heads], np.int32), trt.PluginFieldType.INT32)
pf_has_mask = trt.PluginField("has_mask", np.array([1], np.int32), trt.PluginFieldType.INT32)
pf_var_seqlen = trt.PluginField("var_seqlen", np.array([int(1)], np.int32), trt.PluginFieldType.FLOAT32)
if config.use_qat:
dr_probs = init_dict[prefix + 'self_av_a_input_quantizer_amax']
dq_probs = dr_probs / 127.0
pf_dq_probs = trt.PluginField("dq_probs", np.array([dq_probs], np.float32), trt.PluginFieldType.FLOAT32)
fields = [pf_hidden_size, pf_num_heads, pf_dq_probs]
else:
fields = [pf_hidden_size, pf_num_heads]
if config.use_int8 and config.interleaved:
pfc = trt.PluginFieldCollection(fields)
qkv2ctx_plug = mha_plg_creator3.create_plugin("qkv2ctx", pfc)
qkv_in = [mult_all.get_output(0), cu_seqlens, max_seqlen]
else:
fields.append(pf_has_mask)
fields.append(pf_type)
fields.append(pf_var_seqlen)
pfc = trt.PluginFieldCollection(fields)
qkv2ctx_plug = mha_plg_creator2.create_plugin("qkv2ctx", pfc)
qkv_in = [mult_all.get_output(0), mask_idx, cu_seqlens, max_seqlen]
qkv2ctx = network.add_plugin_v2(qkv_in, qkv2ctx_plug)
qkv2ctx.name = prefix + 'qkv_to_ctx'
if config.use_qat:
dr_ctx = init_dict[prefix + 'output_dense_input_amax']
set_output_range(qkv2ctx, dr_ctx)
set_output_name(qkv2ctx, prefix, "context_layer")
return qkv2ctx
def skipln(prefix, config, init_dict, network, input_tensor, skip, is_last_skipln=False):
"""
    Add the skip connection + layer normalization (SkipLayerNorm plugin) layer
"""
hidden_size = config.hidden_size
dtype = config.get_trt_dtype()
pf_ld = trt.PluginField("ld", np.array([hidden_size], np.int32), trt.PluginFieldType.INT32)
wbeta = init_dict[prefix + "beta"]
pf_beta = trt.PluginField("beta", wbeta.numpy(), trt.PluginFieldType.FLOAT32)
wgamma = init_dict[prefix + "gamma"]
pf_gamma = trt.PluginField("gamma", wgamma.numpy(), trt.PluginFieldType.FLOAT32)
pf_type = trt.PluginField("type_id", np.array([int(dtype)], np.int32), trt.PluginFieldType.INT32)
if config.use_int8 and config.interleaved:
pfc = trt.PluginFieldCollection([pf_beta, pf_gamma])
creator = skln_plg_creator3 if not config.use_megatron or is_last_skipln else skln_plg_creator4
skipln_plug = creator.create_plugin("skipln", pfc)
else:
pfc = trt.PluginFieldCollection([pf_ld, pf_beta, pf_gamma, pf_type])
skipln_plug = skln_plg_creator2.create_plugin("skipln", pfc)
skipln_inputs = [input_tensor, skip]
layer = network.add_plugin_v2(skipln_inputs, skipln_plug)
return layer
def transformer_layer_opt(prefix, config, init_dict, network, input_tensor, residual, mask_idx, cu_seqlens, max_seqlen):
"""
Add the transformer layer
"""
hidden_size = config.hidden_size
if config.use_qat:
dr_input = init_dict[prefix + 'attention_self_query_input_amax']
assert(dr_input ==init_dict[prefix + 'attention_self_key_input_amax'] )
assert(dr_input ==init_dict[prefix + 'attention_self_value_input_amax'] )
input_tensor.set_dynamic_range(-dr_input, dr_input)
context_transposed = attention_layer_opt(prefix + "attention_", config, init_dict, network, input_tensor, mask_idx, cu_seqlens, max_seqlen)
attention_heads = context_transposed.get_output(0)
# FC0
B_aout = init_dict[prefix + B_AOUT]
W_aout = init_dict[prefix + W_AOUT]
if config.use_int8:
attention_out_fc = network.add_convolution_nd(attention_heads, hidden_size, (1, 1), W_aout, B_aout)
else:
attention_out_fc = network.add_fully_connected(attention_heads, hidden_size, W_aout, B_aout)
if config.use_int8 and config.use_qat:
dr_fc_aout = init_dict[prefix + 'attention_output_add_local_input_quantizer_amax']
set_output_range(attention_out_fc, dr_fc_aout)
if config.use_megatron:
dr_skln1_res_in = init_dict[prefix + "attention_output_add_residual_input_quantizer_amax"]
residual.set_dynamic_range(-dr_skln1_res_in, dr_skln1_res_in)
skip = residual
else:
skip = input_tensor
skiplayer = skipln(prefix + "attention_output_layernorm_", config, init_dict, network, attention_out_fc.get_output(0), skip)
attention_ln = skiplayer.get_output(0)
if config.use_qat:
dr_skln1 = init_dict[prefix + 'intermediate_dense_input_amax']
set_output_range(skiplayer, dr_skln1)
# FC1 + GELU
B_mid = init_dict[prefix + B_MID]
W_mid = init_dict[prefix + W_MID]
if config.use_int8:
mid_dense = network.add_convolution_nd(attention_ln, config.intermediate_size, (1, 1), W_mid, B_mid)
else:
mid_dense = network.add_fully_connected(attention_ln, config.intermediate_size, W_mid, B_mid)
gelu_layer = add_gelu(network, mid_dense.get_output(0))
intermediate_act = gelu_layer.get_output(0)
set_tensor_name(intermediate_act, prefix, "gelu")
if config.use_int8:
if config.use_qat:
dr_gelu = init_dict[prefix + 'output_dense_input_amax']
set_output_range(gelu_layer, dr_gelu)
else:
# use gelu10 according to whitepaper http://arxiv.org/abs/2004.09602
set_output_range(gelu_layer, 10)
# FC2
# Dense to hidden size
B_lout = init_dict[prefix + B_LOUT]
W_lout = init_dict[prefix + W_LOUT]
if config.use_int8:
out_dense = network.add_convolution_nd(intermediate_act, hidden_size, (1, 1), W_lout, B_lout)
else:
out_dense = network.add_fully_connected(intermediate_act, hidden_size, W_lout, B_lout)
if config.use_int8 and config.use_qat:
dr_fc_out = init_dict[prefix + 'output_add_local_input_quantizer_amax']
set_output_range(out_dense, dr_fc_out)
set_output_name(out_dense, prefix + "output_", "dense")
if config.use_megatron:
dr_skln2_res_in = init_dict[prefix + 'output_add_residual_input_quantizer_amax']
set_output_range(skiplayer, dr_skln2_res_in, out_idx=1)
skip = skiplayer.get_output(1)
else:
skip = attention_ln
is_last_skipln = prefix.startswith('l{}'.format(config.num_hidden_layers-1))
out_layer = skipln(prefix + "output_layernorm_", config, init_dict, network, out_dense.get_output(0), skip, is_last_skipln)
set_output_name(out_layer, prefix + "output_", "reshape")
return out_layer
def add_gelu(network, input_tensor):
"""
Adds elementwise GELU, and will trigger FC+GELU fusion in TRT
"""
shape = (1, ) * len(input_tensor.shape)
POW = network.add_constant(shape, trt.Weights(np.ascontiguousarray([3.0], dtype=np.float32)))
MULTIPLY = network.add_constant(shape, trt.Weights(np.ascontiguousarray([0.044715], dtype=np.float32)))
SQRT = network.add_constant(shape, trt.Weights((np.ascontiguousarray([0.79788456080286535587989211986876], dtype=np.float32))))
ONE = network.add_constant(shape, trt.Weights((np.ascontiguousarray([1.0], dtype=np.float32))))
HALF = network.add_constant(shape, trt.Weights((np.ascontiguousarray([0.5], dtype=np.float32))))
X_pow = network.add_elementwise(input_tensor, POW.get_output(0), trt.ElementWiseOperation.POW)
X_pow_t = X_pow.get_output(0)
X_mul = network.add_elementwise(X_pow_t, MULTIPLY.get_output(0), trt.ElementWiseOperation.PROD)
X_add = network.add_elementwise(input_tensor, X_mul.get_output(0), trt.ElementWiseOperation.SUM)
X_sqrt = network.add_elementwise(X_add.get_output(0), SQRT.get_output(0), trt.ElementWiseOperation.PROD)
X_sqrt_tensor = X_sqrt.get_output(0)
X_tanh = network.add_activation(X_sqrt_tensor, trt.ActivationType.TANH)
X_tanh_tensor = X_tanh.get_output(0)
X_one = network.add_elementwise(X_tanh_tensor, ONE.get_output(0), trt.ElementWiseOperation.SUM)
CDF = network.add_elementwise(X_one.get_output(0), HALF.get_output(0), trt.ElementWiseOperation.PROD)
gelu_layer = network.add_elementwise(CDF.get_output(0), input_tensor, trt.ElementWiseOperation.PROD)
# enable elementwise fusing for int8 && fp16
POW.precision = trt.DataType.FLOAT
MULTIPLY.precision = trt.DataType.FLOAT
SQRT.precision = trt.DataType.FLOAT
ONE.precision = trt.DataType.FLOAT
HALF.precision = trt.DataType.FLOAT
X_pow.precision = trt.DataType.FLOAT
X_mul.precision = trt.DataType.FLOAT
X_add.precision = trt.DataType.FLOAT
X_sqrt.precision = trt.DataType.FLOAT
X_tanh.precision = trt.DataType.FLOAT
X_one.precision = trt.DataType.FLOAT
CDF.precision = trt.DataType.FLOAT
gelu_layer.precision = trt.DataType.FLOAT
return gelu_layer
def bert_model(config, init_dict, network, input_tensor, residual, mask_idx, cu_seqlens, max_seqlen):
"""
Create the bert model
"""
prev_input = input_tensor
for layer in range(0, config.num_hidden_layers):
ss = "l{}_".format(layer)
out_layer = transformer_layer_opt(ss, config, init_dict, network, prev_input, residual, mask_idx, cu_seqlens, max_seqlen)
prev_input = out_layer.get_output(0)
# Skip reading residual from final layer
if config.use_megatron and (layer != config.num_hidden_layers - 1):
residual = out_layer.get_output(1)
if config.use_qat:
dr_out = init_dict["bert_encoder_final_input_quantizer_amax"]
set_output_range(out_layer, dr_out)
squad_logits = squad_output("cls_", config, init_dict, network, prev_input)
squad_logits_out = squad_logits.get_output(0)
network.mark_output(squad_logits_out)
def squad_output(prefix, config, init_dict, network, input_tensor):
"""
Create the squad output
"""
hidden_size = config.hidden_size
W_out = init_dict[prefix + SQD_W]
B_out = init_dict[prefix + SQD_B]
if config.use_int8:
dense = network.add_convolution_nd(input_tensor, 2, (1, 1), W_out, B_out)
else:
dense = network.add_fully_connected(input_tensor, 2, W_out, B_out)
OUT = network.add_shuffle(dense.get_output(0))
if config.use_int8 and config.interleaved:
OUT.second_transpose = (1, 2, 0, 3)
else:
OUT.second_transpose = (1, 0, 2, 3)
set_output_name(OUT, prefix, "squad_logits")
return OUT
def emb_layernorm(builder, network, config, weights_dict, builder_config, max_sequence_length, max_batch_size):
input_ids = network.add_input(name="input_ids", dtype=trt.int32, shape=(-1,))
segment_ids = network.add_input(name="segment_ids", dtype=trt.int32, shape=(-1,))
cu_seqlens = network.add_input(name="cu_seqlens", dtype=trt.int32, shape=(-1,))
max_seqlen = network.add_input(name="max_seqlen", dtype=trt.int32, shape=(-1,))
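    # Variable-seqlen layout: all sequences in the batch are packed into a single 1-D
    # token tensor; cu_seqlens holds the (batch_size + 1) prefix sums of the sequence
    # lengths, and the length of max_seqlen is used to convey the longest sequence
    # (see the optimization profile below).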
# Specify profiles
profile = builder.create_optimization_profile()
min_shape = (1,)
shape = (max_sequence_length*max_batch_size,)
profile.set_shape("input_ids", min=min_shape, opt=shape, max=shape)
profile.set_shape("segment_ids", min=min_shape, opt=shape, max=shape)
profile.set_shape("cu_seqlens", min=min_shape, opt=(max_batch_size+1,), max=(max_batch_size+1,))
profile.set_shape("max_seqlen", min=min_shape, opt=(max_sequence_length,), max=(max_sequence_length,))
builder_config.add_optimization_profile(profile)
wbeta = trt.PluginField("bert_embeddings_layernorm_beta", weights_dict["bert_embeddings_layernorm_beta"].numpy(), trt.PluginFieldType.FLOAT32)
wgamma = trt.PluginField("bert_embeddings_layernorm_gamma", weights_dict["bert_embeddings_layernorm_gamma"].numpy(), trt.PluginFieldType.FLOAT32)
wwordemb = trt.PluginField("bert_embeddings_word_embeddings", weights_dict["bert_embeddings_word_embeddings"].numpy(), trt.PluginFieldType.FLOAT32)
wtokemb = trt.PluginField("bert_embeddings_token_type_embeddings", weights_dict["bert_embeddings_token_type_embeddings"].numpy(), trt.PluginFieldType.FLOAT32)
wposemb = trt.PluginField("bert_embeddings_position_embeddings", weights_dict["bert_embeddings_position_embeddings"].numpy(), trt.PluginFieldType.FLOAT32)
output_fp16 = trt.PluginField("output_fp16", np.array([1 if config.use_fp16 or config.use_int8 else 0]).astype(np.int32), trt.PluginFieldType.INT32)
pfc = trt.PluginFieldCollection([wbeta, wgamma, wwordemb, wtokemb, wposemb, output_fp16])
fn = (emln_plg_creator3 if config.use_megatron else emln_plg_creator2).create_plugin("embeddings", pfc)
inputs = [input_ids, segment_ids, cu_seqlens, max_seqlen]
emb_layer = network.add_plugin_v2(inputs, fn)
if config.use_int8 and config.use_qat:
dr_input = weights_dict['l0_attention_self_query_input_amax']
set_output_range(emb_layer, dr_input, out_idx=0)
if config.use_megatron:
dr_skln1_res_in = weights_dict['l0_attention_output_add_residual_input_quantizer_amax']
set_output_range(emb_layer, dr_skln1_res_in, out_idx=1)
set_output_name(emb_layer, "embeddings_", "output")
return emb_layer, cu_seqlens, max_seqlen
def build_engine(batch_size, workspace_size, sequence_length, config, weights_dict, squad_json, vocab_file, calibrationCacheFile, calib_num):
explicit_batch_flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(explicit_batch_flag) as network, builder.create_builder_config() as builder_config:
builder_config.max_workspace_size = workspace_size * (1024 * 1024)
if config.use_fp16:
builder_config.set_flag(trt.BuilderFlag.FP16)
if config.use_int8:
builder_config.set_flag(trt.BuilderFlag.INT8)
if not config.use_qat:
raise RuntimeError("Post training calibration is not supported in variable-length BERT.")
# speed up the engine build for trt major version >= 8
# 1. disable cudnn tactic
# 2. load global timing cache
if trt_version[0] >= 8:
tactic_source = 1 << int(trt.TacticSource.CUBLAS) | 1 << int(trt.TacticSource.CUBLAS_LT)
builder_config.set_tactic_sources(tactic_source)
if config.timing_cache != None:
if os.path.exists(config.timing_cache):
with open(config.timing_cache, "rb") as f:
cache = builder_config.create_timing_cache(f.read())
builder_config.set_timing_cache(cache, ignore_mismatch = False)
else:
cache = builder_config.create_timing_cache(b"")
builder_config.set_timing_cache(cache, ignore_mismatch = False)
if config.use_sparsity:
TRT_LOGGER.log(TRT_LOGGER.INFO, "Setting sparsity flag on builder_config.")
builder_config.set_flag(trt.BuilderFlag.SPARSE_WEIGHTS)
# Create the network
emb_layer, cu_seqlens, max_seqlen = emb_layernorm(builder, network, config, weights_dict, builder_config, sequence_length, batch_size)
embeddings = emb_layer.get_output(0)
if config.use_int8 and config.interleaved:
shuffle = network.add_shuffle(embeddings)
shuffle.second_transpose = (2, 1, 0, 3)
embeddings = shuffle.get_output(0)
mask_idx = None
else:
mask_idx = emb_layer.get_output(1)
if config.use_megatron: # megatron currently only supports int8 and interleaved
shuffler = network.add_shuffle(emb_layer.get_output(1))
shuffler.second_transpose = (2, 1, 0, 3)
residual = shuffler.get_output(0)
dr_emb = weights_dict['l0_attention_self_query_input_amax']
embeddings.set_dynamic_range(-dr_emb, dr_emb)
dr_skln1_res_in = weights_dict['l0_attention_output_add_residual_input_quantizer_amax']
residual.set_dynamic_range(-dr_skln1_res_in, dr_skln1_res_in)
else:
residual = None
bert_model(config, weights_dict, network, embeddings, residual, mask_idx, cu_seqlens, max_seqlen)
build_start_time = time.time()
engine = builder.build_engine(network, builder_config)
build_time_elapsed = (time.time() - build_start_time)
TRT_LOGGER.log(TRT_LOGGER.INFO, "build engine in {:.3f} Sec".format(build_time_elapsed))
# save global timing cache
if trt_version[0] >= 8 and config.timing_cache != None:
cache = builder_config.get_timing_cache()
with cache.serialize() as buffer:
with open(config.timing_cache, "wb") as f:
f.write(buffer)
f.flush()
os.fsync(f)
return engine
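# Example build command (hypothetical paths; INT8 + interleaved from a QAT ONNX checkpoint):
#   python3 builder_varseqlen.py -x bert_qat.onnx -c config_dir -o bert_varseq_int8.engine -b 1 -s 384 -i -il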
def main():
parser = argparse.ArgumentParser(description="TensorRT BERT Sample", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-m", "--ckpt", required=False,
help="The checkpoint file basename, e.g.: basename(model.ckpt-766908.data-00000-of-00001) is model.ckpt-766908")
parser.add_argument("-x", "--onnx", required=False, help="The ONNX model file path.")
parser.add_argument("-pt", "--pytorch", required=False, help="The PyTorch checkpoint file path.")
parser.add_argument("-pkl", "--pickle", required=False, help="The Pickle weights dictionary file path for the Megatron variant of BERT.")
parser.add_argument("-o", "--output", required=True, default="bert_base_384.engine", help="The bert engine file, ex bert.engine")
parser.add_argument("-b", "--max-batch-size", default=1, help="Max batch size. The engine will be usable with any input with (batch-size * sequence-length) below (max-batch-size * max-sequence-length).", type=int)
parser.add_argument("-s", "--max-sequence-length", default=128, help="Max sequence length of the BERT model. The engine will be usable with any input with (batch-size * sequence-length) below (max-batch-size * max-sequence-length).", type=int)
parser.add_argument("-c", "--config-dir", required=True,
help="The folder containing the bert_config.json, which can be downloaded e.g. from https://github.com/google-research/bert#pre-trained-models or by running download_models.py in dle/TensorFlow/LanguageModeling/BERT/data/pretrained_models_google")
parser.add_argument("-f", "--fp16", action="store_true", help="Indicates that inference should be run in FP16 precision", required=False)
parser.add_argument("-i", "--int8", action="store_true", help="Indicates that inference should be run in INT8 precision", required=False)
parser.add_argument("-w", "--workspace-size", default=1000, help="Workspace size in MiB for building the BERT engine", type=int)
parser.add_argument("-j", "--squad-json", default="squad/dev-v1.1.json", help="squad json dataset used for int8 calibration", required=False)
parser.add_argument("-v", "--vocab-file", default="./pre-trained_model/uncased_L-24_H-1024_A-16/vocab.txt", help="Path to file containing entire understandable vocab", required=False)
parser.add_argument("-n", "--calib-num", default=100, help="calibration batch numbers", type=int)
parser.add_argument("-p", "--calib-path", help="calibration cache path", required=False)
parser.add_argument("-il", "--interleaved", action="store_true", help="use interleaved format, only valid in INT8 precision", required=False)
parser.add_argument("-tcf", "--timing-cache-file", help="Path to tensorrt build timeing cache file, only available for tensorrt 8.0 and later", required=False)
parser.add_argument("-sp", "--sparse", action="store_true", help="Indicates that model is sparse", required=False)
parser.add_argument("--megatron", action="store_true", help="Indicates that model is the Megatron-style architecture", required=False)
args, _ = parser.parse_known_args()
cc = pycuda.autoinit.device.compute_capability()
if cc[0] * 10 + cc[1] < 72:
raise RuntimeError("This variable-length BERT demo only support Xavier+ GPU.")
if args.megatron:
if not (args.interleaved and args.int8):
raise RuntimeError("Megatron BERT currently only supports int8 and interleaved.")
if not args.pickle:
raise RuntimeError("Megatron BERT currently only supports loading a pickle weights dictionary.")
bert_config_path = os.path.join(args.config_dir, "bert_config.json")
TRT_LOGGER.log(TRT_LOGGER.INFO, "Using configuration file: {:}".format(bert_config_path))
config = BertConfig(bert_config_path, args.fp16, args.int8, args.int8 and (args.onnx or args.pytorch or args.pickle), args.interleaved, args.timing_cache_file, args.sparse, args.megatron)
if args.calib_path != None:
calib_cache = args.calib_path
else:
calib_cache = "BertSquadL{}H{}A{}S{}CalibCache".format(config.num_hidden_layers, config.head_size, config.num_attention_heads, args.max_sequence_length)
if args.onnx != None:
weights_dict = load_onnx_weights_and_quant(args.onnx, config)
elif args.pytorch != None:
weights_dict = load_pytorch_weights_and_quant(args.pytorch, config)
elif args.ckpt != None:
weights_dict = load_tf_weights(args.ckpt, config)
elif args.pickle != None:
weights_dict = load_megatron_pickle_weights(args.pickle, config)
else:
raise RuntimeError("You need either specify TF checkpoint using option --ckpt, ONNX using option --onnx, "
"PyTorch using option --pytorch, or Pickle weight dictionary using option --pickle "
"to build TRT BERT model.")
with build_engine(args.max_batch_size, args.workspace_size, args.max_sequence_length, config, weights_dict, args.squad_json, args.vocab_file, calib_cache, args.calib_num) as engine:
TRT_LOGGER.log(TRT_LOGGER.VERBOSE, "Serializing Engine...")
serialized_engine = engine.serialize()
TRT_LOGGER.log(TRT_LOGGER.INFO, "Saving Engine to {:}".format(args.output))
with open(args.output, "wb") as fout:
fout.write(serialized_engine)
TRT_LOGGER.log(TRT_LOGGER.INFO, "Done.")
if __name__ == "__main__":
main()
| TensorRT-master | demo/BERT/builder_varseqlen.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ctypes
import json
import numpy as np
import os
import os.path
import re
import sys
import time
import onnx
import pycuda.autoinit
# TensorRT
import tensorrt as trt
from helpers.calibrator import BertCalibrator as BertCalibrator
from builder_utils import load_tf_weights, load_pytorch_weights_and_quant, load_onnx_weights_and_quant
from builder_utils import WQKV, BQKV # Attention Keys
from builder_utils import W_AOUT, B_AOUT, W_MID, B_MID, W_LOUT, B_LOUT # Transformer Keys
from builder_utils import SQD_W, SQD_B # SQuAD Output Keys
"""
TensorRT Initialization
"""
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt_version = [int(n) for n in trt.__version__.split('.')]
handle = ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)
if not handle:
raise RuntimeError("Could not load plugin library. Is `libnvinfer_plugin.so` on your LD_LIBRARY_PATH?")
trt.init_libnvinfer_plugins(TRT_LOGGER, "")
plg_registry = trt.get_plugin_registry()
emln_plg_creator = plg_registry.get_plugin_creator("CustomEmbLayerNormPluginDynamic", "1", "")
qkv2_plg_creator = plg_registry.get_plugin_creator("CustomQKVToContextPluginDynamic", "1", "")
skln_plg_creator = plg_registry.get_plugin_creator("CustomSkipLayerNormPluginDynamic", "1", "")
fc_plg_creator = plg_registry.get_plugin_creator("CustomFCPluginDynamic", "1", "")
class BertConfig:
def __init__(self, bert_config_path, use_fp16, use_int8, use_strict, use_fc2_gemm, use_int8_skipln, use_int8_multihead, use_qat, use_sparsity, timing_cache):
with open(bert_config_path, "r") as f:
data = json.load(f)
self.num_attention_heads = data["num_attention_heads"]
self.hidden_size = data["hidden_size"]
self.intermediate_size = data["intermediate_size"]
self.num_hidden_layers = data["num_hidden_layers"]
self.head_size = self.hidden_size // self.num_attention_heads
self.use_fp16 = use_fp16
self.use_int8 = use_int8
self.use_fc2_gemm = use_fc2_gemm
self.use_strict = use_strict
self.use_int8_skipln = use_int8_skipln
self.use_int8_multihead = use_int8_multihead
self.is_calib_mode = False
self.use_qat = use_qat
self.use_sparsity = use_sparsity
self.timing_cache = timing_cache
def set_tensor_name(tensor, prefix, name):
tensor.name = prefix + name
def set_output_name(layer, prefix, name, out_idx = 0):
set_tensor_name(layer.get_output(out_idx), prefix, name)
def set_output_range(layer, maxval, out_idx = 0):
layer.get_output(out_idx).set_dynamic_range(-maxval, maxval)
def get_mha_dtype(config):
dtype = trt.float32
if config.use_fp16:
dtype = trt.float16
# Multi-head attention doesn't use INT8 inputs and output by default unless it is specified.
if config.use_int8 and config.use_int8_multihead and not config.is_calib_mode:
dtype = trt.int8
return int(dtype)
def attention_layer_opt(prefix, config, init_dict, network, input_tensor, imask):
"""
Add the attention layer
"""
assert(len(input_tensor.shape) == 5)
B, S, hidden_size, _, _ = input_tensor.shape
num_heads = config.num_attention_heads
head_size = int(hidden_size / num_heads)
Wall = init_dict[prefix + WQKV]
Ball = init_dict[prefix + BQKV]
# FC_attention
if config.use_int8:
mult_all = network.add_convolution_nd(input_tensor, 3 * hidden_size, (1, 1), Wall, Ball)
else:
mult_all = network.add_fully_connected(input_tensor, 3 * hidden_size, Wall, Ball)
if config.use_qat:
dr_qkv = max(
init_dict[prefix + 'self_qv_a_input_quantizer_amax'],
init_dict[prefix + 'self_qv_b_input_quantizer_amax'],
init_dict[prefix + 'self_av_b_input_quantizer_amax'],
)
set_output_range(mult_all, dr_qkv)
set_output_name(mult_all, prefix, "qkv_mult")
has_mask = imask is not None
# QKV2CTX
pf_type = trt.PluginField("type_id", np.array([get_mha_dtype(config)], np.int32), trt.PluginFieldType.INT32)
pf_hidden_size = trt.PluginField("hidden_size", np.array([hidden_size], np.int32), trt.PluginFieldType.INT32)
pf_num_heads = trt.PluginField("num_heads", np.array([num_heads], np.int32), trt.PluginFieldType.INT32)
pf_has_mask = trt.PluginField("has_mask", np.array([has_mask], np.int32), trt.PluginFieldType.INT32)
if config.use_qat:
dr_probs = init_dict[prefix + 'self_av_a_input_quantizer_amax']
dq_probs = dr_probs / 127.0
pf_dq_probs = trt.PluginField("dq_probs", np.array([dq_probs], np.float32), trt.PluginFieldType.FLOAT32)
pfc = trt.PluginFieldCollection([pf_hidden_size, pf_num_heads, pf_has_mask, pf_type, pf_dq_probs])
else:
pfc = trt.PluginFieldCollection([pf_hidden_size, pf_num_heads, pf_has_mask, pf_type])
qkv2ctx_plug = qkv2_plg_creator.create_plugin("qkv2ctx", pfc)
qkv_in = [mult_all.get_output(0)]
if has_mask:
qkv_in.append(imask)
qkv2ctx = network.add_plugin_v2(qkv_in, qkv2ctx_plug)
if config.use_qat:
dr_ctx = init_dict[prefix + 'output_dense_input_amax']
set_output_range(qkv2ctx, dr_ctx)
set_output_name(qkv2ctx, prefix, "context_layer")
return qkv2ctx
def skipln(prefix, config, init_dict, network, input_tensor, skip, bias=None):
"""
    Add the skip connection + layer normalization (SkipLayerNorm plugin) layer
"""
idims = input_tensor.shape
assert len(idims) == 5
hidden_size = idims[2]
dtype = trt.float32
if config.use_fp16:
dtype = trt.float16
# Skip layernorm doesn't use INT8 inputs and output by default unless it is specified.
if config.use_int8 and config.use_int8_skipln and not config.is_calib_mode:
dtype = trt.int8
pf_ld = trt.PluginField("ld", np.array([hidden_size], np.int32), trt.PluginFieldType.INT32)
wbeta = init_dict[prefix + "beta"]
pf_beta = trt.PluginField("beta", wbeta.numpy(), trt.PluginFieldType.FLOAT32)
wgamma = init_dict[prefix + "gamma"]
pf_gamma = trt.PluginField("gamma", wgamma.numpy(), trt.PluginFieldType.FLOAT32)
pf_type = trt.PluginField("type_id", np.array([int(dtype)], np.int32), trt.PluginFieldType.INT32)
fields = [pf_ld, pf_beta, pf_gamma, pf_type ]
if bias:
pf_bias = trt.PluginField("bias", bias.numpy(), trt.PluginFieldType.FLOAT32)
fields.append(pf_bias)
pfc = trt.PluginFieldCollection(fields)
skipln_plug = skln_plg_creator.create_plugin("skipln", pfc)
skipln_inputs = [input_tensor, skip]
layer = network.add_plugin_v2(skipln_inputs, skipln_plug)
return layer
# Custom FC plugin is faster than native FC only on older architectures.
def use_custom_fc():
cc = pycuda.autoinit.device.compute_capability()
return cc[0] * 10 + cc[1] <= 70
def custom_fc(config, network, input_tensor, out_dims, W):
pf_out_dims = trt.PluginField("out_dims", np.array([out_dims], dtype=np.int32), trt.PluginFieldType.INT32)
pf_W = trt.PluginField("W", W.numpy(), trt.PluginFieldType.FLOAT32)
pf_type = trt.PluginField("type_id", np.array([1 if config.use_fp16 else 0], np.int32), trt.PluginFieldType.INT32)
pfc = trt.PluginFieldCollection([pf_out_dims, pf_W, pf_type])
fc_plugin = fc_plg_creator.create_plugin("fcplugin", pfc)
plug_inputs = [input_tensor]
out_dense = network.add_plugin_v2(plug_inputs, fc_plugin)
return out_dense
def transformer_layer_opt(prefix, config, init_dict, network, input_tensor, imask):
"""
Add the transformer layer
"""
idims = input_tensor.shape
assert len(idims) == 5
hidden_size = idims[2]
if config.use_qat:
dr_input = init_dict[prefix + 'attention_self_query_input_amax']
assert(dr_input ==init_dict[prefix + 'attention_self_key_input_amax'] )
assert(dr_input ==init_dict[prefix + 'attention_self_value_input_amax'] )
input_tensor.set_dynamic_range(-dr_input, dr_input)
context_transposed = attention_layer_opt(prefix + "attention_", config, init_dict, network, input_tensor, imask)
attention_heads = context_transposed.get_output(0)
# FC0
B_aout = init_dict[prefix + B_AOUT]
if config.use_int8:
W_aout = init_dict[prefix + W_AOUT]
attention_out_fc = network.add_convolution_nd(attention_heads, hidden_size, (1, 1), W_aout, B_aout)
B_aout = None
if not config.use_int8_skipln:
attention_out_fc.set_output_type(0, trt.DataType.HALF if config.use_fp16 else trt.DataType.FLOAT)
if config.use_qat:
dr_fc_aout = init_dict[prefix + 'attention_output_add_local_input_quantizer_amax']
set_output_range(attention_out_fc, dr_fc_aout)
elif use_custom_fc():
W_aoutT = init_dict[prefix + W_AOUT + "_notrans"]
attention_out_fc = custom_fc(config, network, attention_heads, hidden_size, W_aoutT)
else:
W_aout = init_dict[prefix + W_AOUT]
attention_out_fc = network.add_fully_connected(attention_heads, hidden_size, W_aout, B_aout)
B_aout = None
skiplayer = skipln(prefix + "attention_output_layernorm_",config, init_dict, network, attention_out_fc.get_output(0), input_tensor, B_aout)
attention_ln = skiplayer.get_output(0)
if config.use_qat:
dr_skln1 = init_dict[prefix + 'intermediate_dense_input_amax']
set_output_range(skiplayer, dr_skln1)
# FC1 + GELU
B_mid = init_dict[prefix + B_MID]
W_mid = init_dict[prefix + W_MID]
if config.use_int8:
mid_dense = network.add_convolution_nd(attention_ln, config.intermediate_size, (1, 1), W_mid, B_mid)
else:
mid_dense = network.add_fully_connected(attention_ln, config.intermediate_size, W_mid, B_mid)
mid_dense_out = mid_dense.get_output(0)
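    # The block below builds the tanh approximation of GELU elementwise (same formula as
    # add_gelu in builder_varseqlen.py): 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).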
POW = network.add_constant((1, 1, 1, 1, 1), trt.Weights(np.ascontiguousarray([3.0], dtype=np.float32)))
MULTIPLY = network.add_constant((1, 1, 1, 1, 1), trt.Weights(np.ascontiguousarray([0.044715], dtype=np.float32)))
SQRT = network.add_constant((1, 1, 1, 1, 1), trt.Weights((np.ascontiguousarray([0.79788456080286535587989211986876], dtype=np.float32))))
ONE = network.add_constant((1, 1, 1, 1, 1), trt.Weights((np.ascontiguousarray([1.0], dtype=np.float32))))
HALF = network.add_constant((1, 1, 1, 1, 1), trt.Weights((np.ascontiguousarray([0.5], dtype=np.float32))))
X_pow = network.add_elementwise(mid_dense_out, POW.get_output(0), trt.ElementWiseOperation.POW)
X_pow_t = X_pow.get_output(0)
X_mul = network.add_elementwise(X_pow_t, MULTIPLY.get_output(0), trt.ElementWiseOperation.PROD)
X_add = network.add_elementwise(mid_dense_out, X_mul.get_output(0), trt.ElementWiseOperation.SUM)
X_sqrt = network.add_elementwise(X_add.get_output(0), SQRT.get_output(0), trt.ElementWiseOperation.PROD)
X_sqrt_tensor = X_sqrt.get_output(0)
X_tanh = network.add_activation(X_sqrt_tensor, trt.ActivationType.TANH)
X_tanh_tensor = X_tanh.get_output(0)
X_one = network.add_elementwise(X_tanh_tensor, ONE.get_output(0), trt.ElementWiseOperation.SUM)
CDF = network.add_elementwise(X_one.get_output(0), HALF.get_output(0), trt.ElementWiseOperation.PROD)
gelu_layer = network.add_elementwise(CDF.get_output(0), mid_dense_out, trt.ElementWiseOperation.PROD)
intermediate_act = gelu_layer.get_output(0)
set_tensor_name(intermediate_act, prefix, "gelu")
if config.use_int8:
if config.use_qat:
dr_gelu = init_dict[prefix + 'output_dense_input_amax']
set_output_range(gelu_layer, dr_gelu)
else:
# use gelu10 according to whitepaper http://arxiv.org/abs/2004.09602
set_output_range(gelu_layer, 10)
# FC2
# Dense to hidden size
B_lout = init_dict[prefix + B_LOUT]
if config.use_int8 and not config.use_fc2_gemm:
W_lout = init_dict[prefix + W_LOUT]
out_dense = network.add_convolution_nd(intermediate_act, hidden_size, (1, 1), W_lout, B_lout)
B_lout = None
if not config.use_int8_skipln:
out_dense.set_output_type(0, trt.DataType.HALF if config.use_fp16 else trt.DataType.FLOAT)
elif use_custom_fc():
W_loutT = init_dict[prefix + W_LOUT + "_notrans"]
out_dense = custom_fc(config, network, intermediate_act, hidden_size, W_loutT)
else:
W_lout = init_dict[prefix + W_LOUT]
out_dense = network.add_fully_connected(intermediate_act, hidden_size, W_lout, B_lout)
B_lout = None
if config.use_qat:
dr_fc_out = init_dict[prefix + 'output_add_local_input_quantizer_amax']
set_output_range(out_dense, dr_fc_out)
set_output_name(out_dense, prefix + "output_", "dense")
out_layer = skipln(prefix + "output_layernorm_", config, init_dict, network, out_dense.get_output(0), attention_ln, B_lout)
set_output_name(out_layer, prefix + "output_", "reshape")
return out_layer
def bert_model(config, init_dict, network, input_tensor, input_mask):
"""
Create the bert model
"""
prev_input = input_tensor
for layer in range(0, config.num_hidden_layers):
ss = "l{}_".format(layer)
out_layer = transformer_layer_opt(ss, config, init_dict, network, prev_input, input_mask)
prev_input = out_layer.get_output(0)
if config.use_qat:
dr_out = init_dict["bert_encoder_final_input_quantizer_amax"]
set_output_range(out_layer, dr_out)
return prev_input
def squad_output(prefix, config, init_dict, network, input_tensor):
"""
Create the squad output
"""
idims = input_tensor.shape
assert len(idims) == 5
B, S, hidden_size, _, _ = idims
W_out = init_dict[prefix + SQD_W]
B_out = init_dict[prefix + SQD_B]
W = network.add_constant((1, hidden_size, 2), W_out)
dense = network.add_fully_connected(input_tensor, 2, W_out, B_out)
OUT = network.add_shuffle(dense.get_output(0))
OUT.second_transpose = (1, 0, 2, 3, 4)
set_output_name(OUT, prefix, "squad_logits")
return OUT
def emb_layernorm(builder, network, config, weights_dict, builder_config, sequence_lengths, batch_sizes):
# INT8 only supports certain sequence lengths, so making the sequence-length dimension dynamic is not allowed.
input_ids = network.add_input(name="input_ids", dtype=trt.int32, shape=(-1 if len(batch_sizes) > 1 else batch_sizes[0], -1 if len(sequence_lengths) > 1 else sequence_lengths[0]))
segment_ids = network.add_input(name="segment_ids", dtype=trt.int32, shape=(-1 if len(batch_sizes) > 1 else batch_sizes[0], -1 if len(sequence_lengths) > 1 else sequence_lengths[0]))
input_mask = network.add_input(name="input_mask", dtype=trt.int32, shape=(-1 if len(batch_sizes) > 1 else batch_sizes[0], -1 if len(sequence_lengths) > 1 else sequence_lengths[0]))
# Specify profiles for the batch sizes we're interested in.
# Make sure the profile also works for all sizes not covered by the previous profile.
if len(sequence_lengths) > 1 or len(batch_sizes) > 1:
for batch_size in sorted(batch_sizes):
if len(sequence_lengths) == 1:
profile = builder.create_optimization_profile()
min_shape = (1, sequence_lengths[0])
shape = (batch_size, sequence_lengths[0])
profile.set_shape("input_ids", min=min_shape, opt=shape, max=shape)
profile.set_shape("segment_ids", min=min_shape, opt=shape, max=shape)
profile.set_shape("input_mask", min=min_shape, opt=shape, max=shape)
builder_config.add_optimization_profile(profile)
else:
for sequence_length in sorted(sequence_lengths):
profile = builder.create_optimization_profile()
min_shape = (1, sequence_length)
shape = (batch_size, sequence_length)
profile.set_shape("input_ids", min=min_shape, opt=shape, max=shape)
profile.set_shape("segment_ids", min=min_shape, opt=shape, max=shape)
profile.set_shape("input_mask", min=min_shape, opt=shape, max=shape)
builder_config.add_optimization_profile(profile)
wbeta = trt.PluginField("bert_embeddings_layernorm_beta", weights_dict["bert_embeddings_layernorm_beta"].numpy(), trt.PluginFieldType.FLOAT32)
wgamma = trt.PluginField("bert_embeddings_layernorm_gamma", weights_dict["bert_embeddings_layernorm_gamma"].numpy(), trt.PluginFieldType.FLOAT32)
wwordemb = trt.PluginField("bert_embeddings_word_embeddings", weights_dict["bert_embeddings_word_embeddings"].numpy(), trt.PluginFieldType.FLOAT32)
wtokemb = trt.PluginField("bert_embeddings_token_type_embeddings", weights_dict["bert_embeddings_token_type_embeddings"].numpy(), trt.PluginFieldType.FLOAT32)
wposemb = trt.PluginField("bert_embeddings_position_embeddings", weights_dict["bert_embeddings_position_embeddings"].numpy(), trt.PluginFieldType.FLOAT32)
output_fp16 = trt.PluginField("output_fp16", np.array([1 if config.use_fp16 else 0]).astype(np.int32), trt.PluginFieldType.INT32)
mha_type = trt.PluginField("mha_type_id", np.array([get_mha_dtype(config)], np.int32), trt.PluginFieldType.INT32)
pfc = trt.PluginFieldCollection([wbeta, wgamma, wwordemb, wtokemb, wposemb, output_fp16, mha_type])
fn = emln_plg_creator.create_plugin("embeddings", pfc)
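# Transpose the (B, S) network inputs to sequence-major (S, B) before feeding them to the embedding/layernorm plugin.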
input_ids = network.add_shuffle(input_ids)
input_ids.second_transpose = (1, 0)
segment_ids = network.add_shuffle(segment_ids)
segment_ids.second_transpose = (1, 0)
input_mask = network.add_shuffle(input_mask)
input_mask.second_transpose = (1, 0)
inputs = [input_ids.get_output(0),
segment_ids.get_output(0),
input_mask.get_output(0)]
emb_layer = network.add_plugin_v2(inputs, fn)
if config.use_qat:
set_output_range(emb_layer, 1, 1)
set_output_name(emb_layer, "embeddings_", "output")
return emb_layer
def build_engine(batch_sizes, workspace_size, sequence_lengths, config, weights_dict, squad_json, vocab_file, calibrationCacheFile, calib_num):
explicit_batch_flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(explicit_batch_flag) as network, builder.create_builder_config() as builder_config:
builder_config.max_workspace_size = workspace_size * (1024 * 1024)
if config.use_fp16:
builder_config.set_flag(trt.BuilderFlag.FP16)
if config.use_int8:
builder_config.set_flag(trt.BuilderFlag.INT8)
if not config.use_qat:
calibrator = BertCalibrator(squad_json, vocab_file, calibrationCacheFile, 1, sequence_lengths[-1], calib_num)
builder_config.set_quantization_flag(trt.QuantizationFlag.CALIBRATE_BEFORE_FUSION)
builder_config.int8_calibrator = calibrator
if config.use_strict:
builder_config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if config.use_sparsity:
TRT_LOGGER.log(TRT_LOGGER.INFO, "Setting sparsity flag on builder_config.")
builder_config.set_flag(trt.BuilderFlag.SPARSE_WEIGHTS)
# speed up the engine build for trt major version >= 8
# 1. disable cudnn tactic
# 2. load global timing cache
if trt_version[0] >= 8:
tactic_source = 1 << int(trt.TacticSource.CUBLAS) | 1 << int(trt.TacticSource.CUBLAS_LT)
builder_config.set_tactic_sources(tactic_source)
if config.timing_cache != None:
if os.path.exists(config.timing_cache):
with open(config.timing_cache, "rb") as f:
cache = builder_config.create_timing_cache(f.read())
builder_config.set_timing_cache(cache, ignore_mismatch = False)
else:
cache = builder_config.create_timing_cache(b"")
builder_config.set_timing_cache(cache, ignore_mismatch = False)
# only use the largest sequence when in calibration mode
if config.is_calib_mode:
sequence_lengths = sequence_lengths[-1:]
# Create the network
emb_layer = emb_layernorm(builder, network, config, weights_dict, builder_config, sequence_lengths, batch_sizes)
embeddings = emb_layer.get_output(0)
mask_idx = emb_layer.get_output(1)
bert_out = bert_model(config, weights_dict, network, embeddings, mask_idx)
squad_logits = squad_output("cls_", config, weights_dict, network, bert_out)
squad_logits_out = squad_logits.get_output(0)
network.mark_output(squad_logits_out)
build_start_time = time.time()
engine = builder.build_engine(network, builder_config)
build_time_elapsed = (time.time() - build_start_time)
TRT_LOGGER.log(TRT_LOGGER.INFO, "build engine in {:.3f} Sec".format(build_time_elapsed))
# save global timing cache
if trt_version[0] >= 8 and config.timing_cache != None:
cache = builder_config.get_timing_cache()
with cache.serialize() as buffer:
with open(config.timing_cache, "wb") as f:
f.write(buffer)
f.flush()
os.fsync(f)
if config.use_int8 and not config.use_qat:
calibrator.free()
return engine
def generate_calibration_cache(sequence_lengths, workspace_size, config, weights_dict, squad_json, vocab_file, calibrationCacheFile, calib_num):
"""
BERT demo needs a separate engine building path to generate calibration cache.
This is because we need to configure SLN and MHA plugins in FP32 mode when
generating calibration cache, and INT8 mode when building the actual engine.
This cache could be generated by examining certain training data and can be
reused across different configurations.
"""
# Dynamic shapes do not work with calibration, so we first generate a calibration cache using a full-dims network.
if not config.use_int8 or os.path.exists(calibrationCacheFile):
return calibrationCacheFile
# generate calibration cache
saved_use_fp16 = config.use_fp16
config.use_fp16 = False
config.is_calib_mode = True
with build_engine([1], workspace_size, sequence_lengths, config, weights_dict, squad_json, vocab_file, calibrationCacheFile, calib_num) as engine:
TRT_LOGGER.log(TRT_LOGGER.INFO, "calibration cache generated in {:}".format(calibrationCacheFile))
config.use_fp16 = saved_use_fp16
config.is_calib_mode = False
def main():
parser = argparse.ArgumentParser(description="TensorRT BERT Sample", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-m", "--ckpt", required=False,
help="The checkpoint file basename, e.g.: basename(model.ckpt-766908.data-00000-of-00001) is model.ckpt-766908")
parser.add_argument("-x", "--onnx", required=False, help="The ONNX model file path.")
parser.add_argument("-pt", "--pytorch", required=False, help="The PyTorch checkpoint file path.")
parser.add_argument("-o", "--output", required=True, default="bert_base_384.engine", help="The bert engine file, ex bert.engine")
parser.add_argument("-b", "--batch-size", default=[], action="append", help="Batch size(s) to optimize for. The engine will be usable with any batch size below this, but may not be optimal for smaller sizes. Can be specified multiple times to optimize for more than one batch size.", type=int)
parser.add_argument("-s", "--sequence-length", default=[], action="append", help="Sequence length of the BERT model", type=int)
parser.add_argument("-c", "--config-dir", required=True,
help="The folder containing the bert_config.json, which can be downloaded e.g. from https://github.com/google-research/bert#pre-trained-models or by running download_models.py in dle/TensorFlow/LanguageModeling/BERT/data/pretrained_models_google")
parser.add_argument("-f", "--fp16", action="store_true", help="Indicates that inference should be run in FP16 precision", required=False)
parser.add_argument("-i", "--int8", action="store_true", help="Indicates that inference should be run in INT8 precision", required=False)
parser.add_argument("-t", "--strict", action="store_true", help="Indicates that inference should be run in strict precision mode", required=False)
parser.add_argument("-w", "--workspace-size", default=1000, help="Workspace size in MiB for building the BERT engine", type=int)
parser.add_argument("-j", "--squad-json", default="squad/dev-v1.1.json", help="squad json dataset used for int8 calibration", required=False)
parser.add_argument("-v", "--vocab-file", default="./pre-trained_model/uncased_L-24_H-1024_A-16/vocab.txt", help="Path to file containing entire understandable vocab", required=False)
parser.add_argument("-n", "--calib-num", default=100, help="calibration batch numbers", type=int)
parser.add_argument("-p", "--calib-path", help="calibration cache path", required=False)
parser.add_argument("-g", "--force-fc2-gemm", action="store_true", help="Force use gemm to implement FC2 layer", required=False)
parser.add_argument("-iln", "--force-int8-skipln", action="store_true", help="Run skip layernorm with INT8 (FP32 or FP16 by default) inputs and output", required=False)
parser.add_argument("-imh", "--force-int8-multihead", action="store_true", help="Run multi-head attention with INT8 (FP32 or FP16 by default) input and output", required=False)
parser.add_argument("-sp", "--sparse", action="store_true", help="Indicates that model is sparse", required=False)
parser.add_argument("-tcf", "--timing-cache-file", help="Path to tensorrt build timeing cache file, only available for tensorrt 8.0 and later", required=False)
args, _ = parser.parse_known_args()
args.batch_size = args.batch_size or [1]
args.sequence_length = args.sequence_length or [128]
cc = pycuda.autoinit.device.compute_capability()
if cc[0] * 10 + cc[1] < 75 and args.force_int8_multihead:
raise RuntimeError("--force-int8-multihead option is only supported on Turing+ GPU.")
if cc[0] * 10 + cc[1] < 72 and args.force_int8_skipln:
raise RuntimeError("--force-int8-skipln option is only supported on Xavier+ GPU.")
bert_config_path = os.path.join(args.config_dir, "bert_config.json")
TRT_LOGGER.log(TRT_LOGGER.INFO, "Using configuration file: {:}".format(bert_config_path))
config = BertConfig(bert_config_path, args.fp16, args.int8, args.strict, args.force_fc2_gemm, args.force_int8_skipln, args.force_int8_multihead, args.int8 and args.onnx != None, args.sparse, args.timing_cache_file)
if args.calib_path != None:
calib_cache = args.calib_path
else:
calib_cache = "BertSquadL{}H{}A{}S{}CalibCache".format(config.num_hidden_layers, config.head_size, config.num_attention_heads, "-".join(str(len) for len in args.sequence_length))
if args.onnx != None:
weights_dict = load_onnx_weights_and_quant(args.onnx, config)
elif args.pytorch != None:
weights_dict = load_pytorch_weights_and_quant(args.pytorch, config)
elif args.ckpt != None:
weights_dict = load_tf_weights(args.ckpt, config)
generate_calibration_cache(args.sequence_length, args.workspace_size, config, weights_dict, args.squad_json, args.vocab_file, calib_cache, args.calib_num)
else:
raise RuntimeError("You need either specify TF checkpoint using option --ckpt or ONNX using option --onnx to build TRT BERT model.")
with build_engine(args.batch_size, args.workspace_size, args.sequence_length, config, weights_dict, args.squad_json, args.vocab_file, calib_cache, args.calib_num) as engine:
TRT_LOGGER.log(TRT_LOGGER.VERBOSE, "Serializing Engine...")
serialized_engine = engine.serialize()
TRT_LOGGER.log(TRT_LOGGER.INFO, "Saving Engine to {:}".format(args.output))
with open(args.output, "wb") as fout:
fout.write(serialized_engine)
TRT_LOGGER.log(TRT_LOGGER.INFO, "Done.")
if __name__ == "__main__":
main()
| TensorRT-master | demo/BERT/builder.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ctypes
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import helpers.tokenization as tokenization
import helpers.data_processing as dp
import collections
TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
class DeviceBuffer(object):
def __init__(self, shape, dtype=trt.int32):
self.buf = cuda.mem_alloc(trt.volume(shape) * dtype.itemsize)
def binding(self):
return int(self.buf)
def free(self):
self.buf.free()
def main():
parser = argparse.ArgumentParser(description='BERT Inference Benchmark')
parser.add_argument("-e", "--engine", help='Path to BERT TensorRT engine')
parser.add_argument('-b', '--batch-size', default=[], action="append", help='Batch size(s) to benchmark. Can be specified multiple times for more than one batch size. This script assumes that the engine has been built with one optimization profile for each batch size, and that these profiles are in order of increasing batch size.', type=int)
parser.add_argument('-s', '--sequence-length', default=128, help='Sequence length of the BERT model', type=int)
parser.add_argument('-i', '--iterations', default=1, help='Number of iterations to run when benchmarking each batch size.', type=int)
parser.add_argument('-w', '--warm-up-runs', default=0, help='Number of iterations to run prior to benchmarking.', type=int)
parser.add_argument('-r', '--random-seed', required=False, default=12345, help='Random seed.', type=int)
parser.add_argument('-p', '--passage', nargs='*', help='Text for paragraph/passage for BERT QA', default='')
parser.add_argument('-q', '--question', nargs='*', help='Text for query/question for BERT QA', default='')
parser.add_argument('-v', '--vocab-file', help='Path to file containing entire understandable vocab')
args, _ = parser.parse_known_args()
args.batch_size = args.batch_size or [1]
# Import necessary plugins for BERT TensorRT
ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)
with open(args.engine, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(f.read()) as engine, engine.create_execution_context() as context:
# Allocate buffers large enough to store the largest batch size
max_input_shape = (args.sequence_length, max(args.batch_size))
max_output_shape = (args.sequence_length, max(args.batch_size), 2, 1, 1)
buffers = [
DeviceBuffer(max_input_shape),
DeviceBuffer(max_input_shape),
DeviceBuffer(max_input_shape),
DeviceBuffer(max_output_shape)
]
def question_features(tokens, question):
# Extract features from the paragraph and question
tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
return dp.convert_example_to_features(tokens, question, tokenizer, args.sequence_length, 128, 64)
# Prepare random input
pseudo_vocab_size = 30522
pseudo_type_vocab_size = 2
np.random.seed(args.random_seed)
paragraph_text = ' '.join(args.passage)
question_text = ' '.join(args.question)
print("\nPassage: {}".format(paragraph_text))
print("\nQuestion: {}".format(question_text))
doc_tokens = dp.convert_doc_tokens(paragraph_text)
features = question_features(doc_tokens, question_text)
test_word_ids = features[0].input_ids
test_segment_ids = features[0].segment_ids
test_input_mask = features[0].input_mask
# Copy input h2d
cuda.memcpy_htod(buffers[0].buf, test_word_ids.ravel())
cuda.memcpy_htod(buffers[1].buf, test_segment_ids.ravel())
cuda.memcpy_htod(buffers[2].buf, test_input_mask.ravel())
num_binding_per_profile = engine.num_bindings // engine.num_optimization_profiles
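# Bindings are grouped per optimization profile: profile i owns bindings [i * num_binding_per_profile, (i + 1) * num_binding_per_profile).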
bench_times = {}
for idx, batch_size in enumerate(sorted(args.batch_size)):
context.active_optimization_profile = idx
# Each profile has unique bindings
binding_idx_offset = idx * num_binding_per_profile
bindings = [0] * binding_idx_offset + [buf.binding() for buf in buffers]
shapes = {
"input_ids": (args.sequence_length, batch_size),
"segment_ids": (args.sequence_length, batch_size),
"input_mask": (args.sequence_length, batch_size),
}
for binding, shape in shapes.items():
context.set_binding_shape(engine[binding] + binding_idx_offset, shape)
assert context.all_binding_shapes_specified
# Inference
total_time = 0
start = cuda.Event()
end = cuda.Event()
stream = cuda.Stream()
# Warmup
for _ in range(args.warm_up_runs):
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
stream.synchronize()
# Timing loop
times = []
for _ in range(args.iterations):
start.record(stream)
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
end.record(stream)
stream.synchronize()
times.append(end.time_since(start))
# Transfer predictions back from GPU; the QA logits live in the last device buffer
h_output = np.zeros((args.sequence_length, batch_size, 2, 1, 1), dtype=np.float32)
cuda.memcpy_dtoh_async(h_output, buffers[3].buf, stream)
stream.synchronize()
_NetworkOutput = collections.namedtuple("NetworkOutput", ["start_logits", "end_logits", "feature_index"])
networkOutputs = []
for index, batch in enumerate(h_output):
# Data Post-processing: split the last dimension into start and end logits
networkOutputs.append(_NetworkOutput(
start_logits = np.array(batch.squeeze()[:, 0]),
end_logits = np.array(batch.squeeze()[:, 1]),
feature_index = 0
))
# Compute average time, 95th percentile time and 99th percentile time.
bench_times[batch_size] = times
[b.free() for b in buffers]
for batch_size, times in bench_times.items():
total_time = sum(times)
avg_time = total_time / float(len(times))
times.sort()
percentile95 = times[int(len(times) * 0.95)]
percentile99 = times[int(len(times) * 0.99)]
print("Running {:} iterations with Batch Size: {:}\n\tTotal Time: {:} ms \tAverage Time: {:} ms\t95th Percentile Time: {:} ms\t99th Percentile Time: {:}".format(args.iterations, batch_size, total_time, avg_time, percentile95, percentile99))
if __name__ == '__main__':
main()
| TensorRT-master | demo/BERT/infer.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script uses a prebuilt TensorRT BERT QA Engine to answer a question
based on the provided passage. It additionally includes an interactive mode
where multiple questions can be asked.
"""
import time
import json
import ctypes
import argparse
import collections
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import helpers.tokenization as tokenization
import helpers.data_processing as dp
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
def parse_args():
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-e', '--engine',
help='Path to BERT TensorRT engine')
parser.add_argument("-b", "--batch-size", default=1, help="Batch size for inference.", type=int)
parser.add_argument('-p', '--passage', nargs='*',
help='Text for paragraph/passage for BERT QA',
default='')
parser.add_argument('-pf', '--passage-file',
help='File containing input passage',
default='')
parser.add_argument('-q', '--question', nargs='*',
help='Text for query/question for BERT QA',
default='')
parser.add_argument('-qf', '--question-file',
help='File containing input question',
default='')
parser.add_argument('-sq', '--squad-json',
help='SQuAD json file',
default='')
parser.add_argument('-o', '--output-prediction-file',
help='Output prediction file for SQuAD evaluation',
default='./predictions.json')
parser.add_argument('-v', '--vocab-file',
help='Path to file containing entire understandable vocab')
parser.add_argument('-s', '--sequence-length',
help='The sequence length to use. Defaults to 128',
default=128, type=int)
parser.add_argument('--max-query-length',
help='The maximum length of a query in number of tokens. Queries longer than this will be truncated',
default=64, type=int)
parser.add_argument('--max-answer-length',
help='The maximum length of an answer that can be generated',
default=30, type=int)
parser.add_argument('--n-best-size',
help='Total number of n-best predictions to generate in the nbest_predictions.json output file',
default=20, type=int)
parser.add_argument('--doc-stride',
help='When splitting up a long document into chunks, what stride to take between chunks',
default=128, type=int)
args, _ = parser.parse_known_args()
return args
if __name__ == '__main__':
args = parse_args()
paragraph_text = None
squad_examples = None
output_prediction_file = None
if not args.passage == '':
paragraph_text = ' '.join(args.passage)
elif not args.passage_file == '':
f = open(args.passage_file, 'r')
paragraph_text = f.read()
elif not args.squad_json == '':
squad_examples = dp.read_squad_json(args.squad_json)
output_prediction_file = args.output_prediction_file
else:
paragraph_text = input("Paragraph: ")
question_text = None
if not args.question == '':
question_text = ' '.join(args.question)
elif not args.question_file == '':
f = open(args.question_file, 'r')
question_text = f.read()
tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
# When splitting up a long document into chunks, how much stride to take between chunks.
doc_stride = args.doc_stride
# The maximum total input sequence length after WordPiece tokenization.
# Sequences longer than this will be truncated, and sequences shorter than this will be padded.
max_seq_length = args.sequence_length
def question_features(tokens, question):
# Extract features from the paragraph and question
return dp.convert_example_to_features(tokens, question, tokenizer, max_seq_length, doc_stride, args.max_query_length)
# Import necessary plugins for BERT TensorRT
handle = ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)
if not handle:
raise RuntimeError("Could not load plugin library. Is `libnvinfer_plugin.so` on your LD_LIBRARY_PATH?")
# The first context created will use the 0th profile. A new context must be created
# for each additional profile needed. Here, we only use batch size 1, thus we only need the first profile.
with open(args.engine, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, \
runtime.deserialize_cuda_engine(f.read()) as engine, engine.create_execution_context() as context:
# select engine profile
selected_profile = -1
num_binding_per_profile = engine.num_bindings // engine.num_optimization_profiles
for idx in range(engine.num_optimization_profiles):
profile_shape = engine.get_profile_shape(profile_index = idx, binding = idx * num_binding_per_profile)
if profile_shape[0][0] <= args.batch_size and profile_shape[2][0] >= args.batch_size and profile_shape[0][1] <= max_seq_length and profile_shape[2][1] >= max_seq_length:
selected_profile = idx
break
if selected_profile == -1:
raise RuntimeError("Could not find any profile that can run batch size {}.".format(args.batch_size))
context.active_optimization_profile = selected_profile
binding_idx_offset = selected_profile * num_binding_per_profile
# Specify input shapes. These must be within the min/max bounds of the active profile
# Note that input shapes can be specified on a per-inference basis, but in this case, we only have a single shape.
input_shape = (args.batch_size, max_seq_length)
input_nbytes = trt.volume(input_shape) * trt.int32.itemsize
for binding in range(3):
context.set_binding_shape(binding_idx_offset + binding, input_shape)
assert context.all_binding_shapes_specified
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
# Allocate device memory for inputs.
d_inputs = [cuda.mem_alloc(input_nbytes) for binding in range(3)]
# Allocate output buffer by querying the size from the context. This may be different for different input shapes.
h_output = cuda.pagelocked_empty(tuple(context.get_binding_shape(binding_idx_offset + 3)), dtype=np.float32)
d_output = cuda.mem_alloc(h_output.nbytes)
def inference(features, tokens):
global h_output
_NetworkOutput = collections.namedtuple( # pylint: disable=invalid-name
"NetworkOutput",
["start_logits", "end_logits", "feature_index"])
networkOutputs = []
eval_time_elapsed = 0
for feature_index, feature in enumerate(features):
# Copy inputs
input_ids_batch = np.repeat(np.expand_dims(feature.input_ids, 0), args.batch_size, axis=0)
segment_ids_batch = np.repeat(np.expand_dims(feature.segment_ids, 0), args.batch_size, axis=0)
input_mask_batch = np.repeat(np.expand_dims(feature.input_mask, 0), args.batch_size, axis=0)
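# Replicate the single feature across the batch dimension so one example fills a batch of size batch_size.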
input_ids = cuda.register_host_memory(np.ascontiguousarray(input_ids_batch.ravel()))
segment_ids = cuda.register_host_memory(np.ascontiguousarray(segment_ids_batch.ravel()))
input_mask = cuda.register_host_memory(np.ascontiguousarray(input_mask_batch.ravel()))
eval_start_time = time.time()
cuda.memcpy_htod_async(d_inputs[0], input_ids, stream)
cuda.memcpy_htod_async(d_inputs[1], segment_ids, stream)
cuda.memcpy_htod_async(d_inputs[2], input_mask, stream)
# Run inference
context.execute_async_v2(bindings=[0 for i in range(binding_idx_offset)] + [int(d_inp) for d_inp in d_inputs] + [int(d_output)], stream_handle=stream.handle)
# Synchronize the stream
stream.synchronize()
eval_time_elapsed += (time.time() - eval_start_time)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(h_output, d_output, stream)
stream.synchronize()
# Only retrieve and post-process the first batch
batch = h_output[0]
networkOutputs.append(_NetworkOutput(
start_logits = np.array(batch.squeeze()[:, 0]),
end_logits = np.array(batch.squeeze()[:, 1]),
feature_index = feature_index
))
eval_time_elapsed /= len(features)
# Total number of n-best predictions to generate in the nbest_predictions.json output file
n_best_size = 20
# The maximum length of an answer that can be generated. This is needed
# because the start and end predictions are not conditioned on one another
max_answer_length = 30
prediction, nbest_json, scores_diff_json = dp.get_predictions(tokens, features,
networkOutputs, args.n_best_size, args.max_answer_length)
return eval_time_elapsed, prediction, nbest_json
def print_single_query(eval_time_elapsed, prediction, nbest_json):
print("------------------------")
print("Running inference in {:.3f} Sentences/Sec".format(args.batch_size/eval_time_elapsed))
print("------------------------")
print("Answer: '{}'".format(prediction))
print("With probability: {:.3f}".format(nbest_json[0]['probability'] * 100.0))
if squad_examples:
all_predictions = collections.OrderedDict()
for example in squad_examples:
features = question_features(example.doc_tokens, example.question_text)
eval_time_elapsed, prediction, nbest_json = inference(features, example.doc_tokens)
all_predictions[example.id] = prediction
with open(output_prediction_file, "w") as f:
f.write(json.dumps(all_predictions, indent=4))
print("\nOutput dump to {}".format(output_prediction_file))
else:
# Extract tokens from the paragraph
doc_tokens = dp.convert_doc_tokens(paragraph_text)
if question_text:
print("\nPassage: {}".format(paragraph_text))
print("\nQuestion: {}".format(question_text))
features = question_features(doc_tokens, question_text)
eval_time_elapsed, prediction, nbest_json = inference(features, doc_tokens)
print_single_query(eval_time_elapsed, prediction, nbest_json)
else:
# If no question text is provided, loop until the question is 'exit'
EXIT_CMDS = ["exit", "quit"]
question_text = input("Question (to exit, type one of {:}): ".format(EXIT_CMDS))
while question_text.strip() not in EXIT_CMDS:
features = question_features(doc_tokens, question_text)
eval_time_elapsed, prediction, nbest_json = inference(features, doc_tokens)
print_single_query(eval_time_elapsed, prediction, nbest_json)
question_text = input("Question (to exit, type one of {:}): ".format(EXIT_CMDS))
| TensorRT-master | demo/BERT/inference.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ctypes
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
class DeviceBuffer(object):
def __init__(self, shape, dtype=trt.int32):
self.buf = cuda.mem_alloc(trt.volume(shape) * dtype.itemsize)
def binding(self):
return int(self.buf)
def free(self):
self.buf.free()
def main():
parser = argparse.ArgumentParser(description='BERT Inference Benchmark')
parser.add_argument("-e", "--engine", help='Path to BERT TensorRT engine')
parser.add_argument('-b', '--batch-size', default=[], action="append", help='Batch size(s) to benchmark. Can be specified multiple times for more than one batch size. This script assumes that the engine has been built with one optimization profile for each batch size, and that these profiles are in order of increasing batch size.', type=int)
parser.add_argument('-s', '--sequence-length', default=128, help='Sequence length of the BERT model', type=int)
parser.add_argument('-i', '--iterations', default=200, help='Number of iterations to run when benchmarking each batch size.', type=int)
parser.add_argument('-w', '--warm-up-runs', default=10, help='Number of iterations to run prior to benchmarking.', type=int)
parser.add_argument('-r', '--random-seed', required=False, default=12345, help='Random seed.', type=int)
args, _ = parser.parse_known_args()
args.batch_size = args.batch_size or [1]
# Import necessary plugins for BERT TensorRT
ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)
with open(args.engine, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(f.read()) as engine, engine.create_execution_context() as context:
# Allocate buffers large enough to store the largest batch size
max_input_shape = (args.sequence_length * max(args.batch_size), )
max_output_shape = (args.sequence_length * max(args.batch_size), 2, 1, 1)
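# Device buffers: input_ids, segment_ids, cu_seqlens, the max_seqlen marker tensor, and the output logits.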
buffers = [
DeviceBuffer(max_input_shape),
DeviceBuffer(max_input_shape),
DeviceBuffer((max(args.batch_size) + 1, )),
DeviceBuffer((args.sequence_length, )),
DeviceBuffer(max_output_shape)
]
# Prepare random input
pseudo_vocab_size = 30522
pseudo_type_vocab_size = 2
np.random.seed(args.random_seed)
test_word_ids = np.random.randint(0, pseudo_vocab_size, (args.sequence_length * max(args.batch_size)), dtype=np.int32)
test_segment_ids = np.random.randint(0, pseudo_type_vocab_size, (args.sequence_length * max(args.batch_size)), dtype=np.int32)
test_cu_seq_lens = np.arange(0, args.sequence_length * max(args.batch_size) + 1, args.sequence_length, dtype=np.int32)
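# cu_seqlens holds the cumulative sequence offsets [0, S, 2*S, ...] that the variable-seqlen plugins use to locate each packed sequence.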
# Copy input h2d
cuda.memcpy_htod(buffers[0].buf, test_word_ids.ravel())
cuda.memcpy_htod(buffers[1].buf, test_segment_ids.ravel())
cuda.memcpy_htod(buffers[2].buf, test_cu_seq_lens.ravel())
bench_times = {}
for idx, batch_size in enumerate(sorted(args.batch_size)):
context.active_optimization_profile = 0
# Each profile has unique bindings
bindings = [buf.binding() for buf in buffers]
shapes = {
"input_ids": (args.sequence_length * batch_size, ),
"segment_ids": (args.sequence_length * batch_size, ),
"cu_seqlens": (batch_size + 1, ),
"max_seqlen": (args.sequence_length, ),
}
for binding, shape in shapes.items():
context.set_binding_shape(engine[binding], shape)
assert context.all_binding_shapes_specified
# Inference
total_time = 0
start = cuda.Event()
end = cuda.Event()
stream = cuda.Stream()
# Warmup
for _ in range(args.warm_up_runs):
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
stream.synchronize()
# Timing loop
times = []
for _ in range(args.iterations):
start.record(stream)
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
end.record(stream)
stream.synchronize()
times.append(end.time_since(start))
# Compute average time, 95th percentile time and 99th percentile time.
bench_times[batch_size] = times
[b.free() for b in buffers]
for batch_size, times in bench_times.items():
total_time = sum(times)
avg_time = total_time / float(len(times))
times.sort()
percentile95 = times[int(len(times) * 0.95)]
percentile99 = times[int(len(times) * 0.99)]
print("Running {:} iterations with Batch Size: {:}\n\tTotal Time: {:} ms \tAverage Time: {:} ms\t95th Percentile Time: {:} ms\t99th Percentile Time: {:}".format(args.iterations, batch_size, total_time, avg_time, percentile95, percentile99))
if __name__ == '__main__':
main()
| TensorRT-master | demo/BERT/perf_varseqlen.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script uses a prebuilt TensorRT BERT QA Engine to answer a question
based on the provided passage. It additionally includes an interactive mode
where multiple questions can be asked.
"""
import os
import sys
import time
import json
import argparse
import collections
import numpy as np
import helpers.tokenization as tokenization
import helpers.data_processing as dp
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'build'))
import infer_c
def parse_args():
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-e', '--engine',
help='Path to BERT TensorRT engine')
parser.add_argument('-p', '--passage', nargs='*',
help='Text for paragraph/passage for BERT QA',
default='')
parser.add_argument('-pf', '--passage-file',
help='File containing input passage',
default='')
parser.add_argument('-q', '--question', nargs='*',
help='Text for query/question for BERT QA',
default='')
parser.add_argument('-qf', '--question-file',
help='File containing input question',
default='')
parser.add_argument('-sq', '--squad-json',
help='SQuAD json file',
default='')
parser.add_argument('-o', '--output-prediction-file',
help='Output prediction file for SQuAD evaluation',
default='./predictions.json')
parser.add_argument('-v', '--vocab-file',
help='Path to file containing entire understandable vocab')
parser.add_argument('-s', '--sequence-length',
help='The sequence length to use. Defaults to 128',
default=128, type=int)
parser.add_argument('--max-query-length',
help='The maximum length of a query in number of tokens. Queries longer than this will be truncated',
default=64, type=int)
parser.add_argument('--max-answer-length',
help='The maximum length of an answer that can be generated',
default=30, type=int)
parser.add_argument('--n-best-size',
help='Total number of n-best predictions to generate in the nbest_predictions.json output file',
default=20, type=int)
parser.add_argument('--enable-graph',
help='Enable CUDA Graph support',
action='store_true',
default=False)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
paragraph_text = None
squad_examples = None
output_prediction_file = None
if not args.passage == '':
paragraph_text = ' '.join(args.passage)
elif not args.passage_file == '':
f = open(args.passage_file, 'r')
paragraph_text = f.read()
elif not args.squad_json == '':
squad_examples = dp.read_squad_json(args.squad_json)
output_prediction_file = args.output_prediction_file
else:
paragraph_text = input("Paragraph: ")
question_text = None
if not args.question == '':
question_text = ' '.join(args.question)
elif not args.question_file == '':
f = open(args.question_file, 'r')
question_text = f.read()
tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
# When splitting up a long document into chunks, how much stride to take between chunks.
doc_stride = 128
# The maximum total input sequence length after WordPiece tokenization.
# Sequences longer than this will be truncated, and sequences shorter than this will be padded.
max_seq_length = args.sequence_length
def question_features(tokens, question):
# Extract features from the paragraph and question
return dp.convert_example_to_features(tokens, question, tokenizer, max_seq_length, doc_stride, args.max_query_length)
# The first context created will use the 0th profile. A new context must be created
# for each additional profile needed. Here, we only use batch size 1, thus we only need the first profile.
# We always use batch size 1.
# Specify input shapes as (max_seq_length, 1).
# These must be within the min/max bounds of the active profile (0th profile in this case)
# Note that input shapes can be specified on a per-inference basis, but in this case, we only have a single shape.
bert = infer_c.bert_inf(args.engine, 1, max_seq_length, args.enable_graph)
bert.prepare(1)
def inference(features, tokens):
_NetworkOutput = collections.namedtuple( # pylint: disable=invalid-name
"NetworkOutput",
["start_logits", "end_logits", "feature_index"])
networkOutputs = []
eval_time_elapsed = 0
for feature_index, feature in enumerate(features):
# Copy inputs
input_ids = np.ascontiguousarray(feature.input_ids.ravel())
segment_ids = np.ascontiguousarray(feature.segment_ids.ravel())
input_mask = np.ascontiguousarray(feature.input_mask.ravel())
eval_start_time = time.time()
# Run inference
h_output = bert.run(input_ids, segment_ids, input_mask)
eval_time_elapsed += (time.time() - eval_start_time)
# Data Post-processing
if len(h_output.shape) == 1:
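# A 1-D output packs all start logits first, then all end logits.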
S = int(h_output.shape[0] / 2)
networkOutputs.append(_NetworkOutput(
start_logits = np.array(h_output[0:S]),
end_logits = np.array(h_output[S:S*2]),
feature_index = feature_index
))
else:
for index, batch in enumerate(h_output):
networkOutputs.append(_NetworkOutput(
start_logits = np.array(batch.squeeze()[:, 0]),
end_logits = np.array(batch.squeeze()[:, 1]),
feature_index = feature_index
))
eval_time_elapsed /= len(features)
# Total number of n-best predictions to generate in the nbest_predictions.json output file
n_best_size = 20
# The maximum length of an answer that can be generated. This is needed
# because the start and end predictions are not conditioned on one another
max_answer_length = 30
prediction, nbest_json, scores_diff_json = dp.get_predictions(tokens, features,
networkOutputs, args.n_best_size, args.max_answer_length)
return eval_time_elapsed, prediction, nbest_json
def print_single_query(eval_time_elapsed, prediction, nbest_json):
print("------------------------")
print("Running inference in {:.3f} Sentences/Sec".format(1.0/eval_time_elapsed))
print("------------------------")
print("Answer: '{}'".format(prediction))
print("With probability: {:.3f}".format(nbest_json[0]['probability'] * 100.0))
if squad_examples:
all_predictions = collections.OrderedDict()
for example_index, example in enumerate(squad_examples):
print("Processing example {} of {}".format(example_index+1, len(squad_examples)), end="\r")
features = question_features(example.doc_tokens, example.question_text)
eval_time_elapsed, prediction, nbest_json = inference(features, example.doc_tokens)
all_predictions[example.id] = prediction
with open(output_prediction_file, "w") as f:
f.write(json.dumps(all_predictions, indent=4))
print("\nOutput dump to {}".format(output_prediction_file))
else:
# Extract tokens from the paragraph
doc_tokens = dp.convert_doc_tokens(paragraph_text)
if question_text:
print("\nPassage: {}".format(paragraph_text))
print("\nQuestion: {}".format(question_text))
features = question_features(doc_tokens, question_text)
eval_time_elapsed, prediction, nbest_json = inference(features, doc_tokens)
print_single_query(eval_time_elapsed, prediction, nbest_json)
else:
# If no question text is provided, loop until the question is 'exit'
EXIT_CMDS = ["exit", "quit"]
question_text = input("Question (to exit, type one of {:}): ".format(EXIT_CMDS))
while question_text.strip() not in EXIT_CMDS:
features = question_features(doc_tokens, question_text)
eval_time_elapsed, prediction, nbest_json = inference(features, doc_tokens)
print_single_query(eval_time_elapsed, prediction, nbest_json)
question_text = input("Question (to exit, type one of {:}): ".format(EXIT_CMDS))
| TensorRT-master | demo/BERT/inference_c.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Obtained from https://rajpurkar.github.io/SQuAD-explorer/
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
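# The Counter intersection keeps the minimum count of each token shared by prediction and ground truth (multiset overlap).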
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions, f1_acc):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
if (f1 < f1_acc - 0.5):
print("&&&& FAILED TensorRT BERT Squad Accuracy matches reference.")
else:
print("&&&& PASSED TensorRT BERT Squad Accuracy matches reference.")
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
parser.add_argument('f1_acc', help='Reference Accuracy')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
f1_acc = float(args.f1_acc)
print(json.dumps(evaluate(dataset, predictions, f1_acc)))
| TensorRT-master | demo/BERT/squad/evaluate-v1.1.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Obtained from https://rajpurkar.github.io/SQuAD-explorer/
"""Official evaluation script for SQuAD version 2.0.
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
OPTS = None
def parse_args():
parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
parser.add_argument('--out-file', '-o', metavar='eval.json',
help='Write accuracy metrics to file (default is stdout).')
parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',
help='Model estimates of probability of no answer.')
parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,
help='Predict "" if no-answer probability exceeds this (default = 1.0).')
parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,
help='Save precision-recall curves to directory.')
parser.add_argument('--verbose', '-v', action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s: return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer(a['text'])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
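# When the model predicts no-answer, the prediction counts as correct exactly when the question truly has no answer.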
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('total', total),
])
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(title)
plt.savefig(out_image)
plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=None, title=None):
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
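# Walk questions in order of increasing no-answer probability, accumulating precision/recall points and integrating them into average precision.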
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for i, qid in enumerate(qid_list):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i+1)
cur_r = true_pos / float(num_true_pos)
if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p)
recalls.append(cur_r)
if out_image:
plot_pr_curve(precisions, recalls, out_image, title)
return {'ap': 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, out_image_dir):
if out_image_dir and not os.path.exists(out_image_dir):
os.makedirs(out_image_dir)
num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
pr_exact = make_precision_recall_eval(
exact_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_exact.png'),
title='Precision-Recall curve for Exact Match score')
pr_f1 = make_precision_recall_eval(
f1_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_f1.png'),
title='Precision-Recall curve for F1 score')
oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
pr_oracle = make_precision_recall_eval(
oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')
merge_eval(main_eval, pr_exact, 'pr_exact')
merge_eval(main_eval, pr_f1, 'pr_f1')
merge_eval(main_eval, pr_oracle, 'pr_oracle')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
if not qid_list:
return
x = [na_probs[k] for k in qid_list]
weights = np.ones_like(x) / float(len(x))
plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
plt.xlabel('Model probability of no-answer')
plt.ylabel('Proportion of dataset')
plt.title('Histogram of no-answer probability: %s' % name)
plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))
plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
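# Sweep the threshold in order of increasing no-answer probability; each step switches one more question from a predicted no-answer to its text prediction.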
for i, qid in enumerate(qid_list):
if qid not in scores: continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval['best_exact'] = best_exact
main_eval['best_exact_thresh'] = exact_thresh
main_eval['best_f1'] = best_f1
main_eval['best_f1_thresh'] = f1_thresh
def main():
with open(OPTS.data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
with open(OPTS.pred_file) as f:
preds = json.load(f)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
na_probs = json.load(f)
else:
na_probs = {k: 0.0 for k in preds}
qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(dataset, preds)
exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
OPTS.na_prob_thresh)
f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
OPTS.na_prob_thresh)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
if OPTS.na_prob_file:
find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, OPTS.out_image_dir)
histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
if OPTS.out_file:
with open(OPTS.out_file, 'w') as f:
json.dump(out_eval, f)
else:
print(json.dumps(out_eval, indent=2))
if __name__ == '__main__':
OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| TensorRT-master | demo/BERT/squad/evaluate-v2.0.py |
| TensorRT-master | demo/BERT/helpers/__init__.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
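# Illustrative sketch (not part of the original module): the regular expression above extracts the
# parent directory name from a checkpoint path, which is then compared against the known cased and
# uncased model names. The checkpoint path below is hypothetical.
def _example_extract_model_name():
    init_checkpoint = "/tmp/uncased_L-12_H-768_A-12/bert_model.ckpt"  # hypothetical path
    m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
    return m.group(1)  # "uncased_L-12_H-768_A-12"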
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding='utf-8') as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
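# Illustrative sketch (not part of the original module): the greedy longest-match-first algorithm
# above applied with a tiny made-up vocabulary. "unaffable" is split into the longest prefix found
# in the vocabulary, and the remainder is matched using the "##" continuation prefix.
def _example_wordpiece_tokenize():
    toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
    tokenizer = WordpieceTokenizer(vocab=toy_vocab)
    return tokenizer.tokenize("unaffable")  # ["un", "##aff", "##able"]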
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| TensorRT-master | demo/BERT/helpers/tokenization.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
import os
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import helpers.tokenization as tokenization
import helpers.data_processing as dp
class BertCalibrator(trt.IInt8LegacyCalibrator):
def __init__(self, squad_json, vocab_file, cache_file, batch_size, max_seq_length, num_inputs):
# Whenever you specify a custom constructor for a TensorRT class,
# you MUST call the constructor of the parent explicitly.
trt.IInt8LegacyCalibrator.__init__(self)
self.cache_file = cache_file
# Every time get_batch is called, the next batch of size batch_size will be copied to the device and returned.
self.data = dp.read_squad_json(squad_json)
self.max_seq_length = max_seq_length
self.batch_size = batch_size
self.current_index = 0
self.num_inputs = num_inputs
self.tokenizer = tokenization.BertTokenizer(vocab_file=vocab_file, do_lower_case=True)
self.doc_stride = 128
self.max_query_length = 64
# Allocate enough memory for a whole batch.
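# The three bindings correspond to input_ids, segment_ids, and input_mask, each holding
# batch_size * max_seq_length int32 values (see get_batch below).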
self.device_inputs = [cuda.mem_alloc(self.max_seq_length * trt.int32.itemsize * self.batch_size) for binding in range(3)]
def free(self):
for dinput in self.device_inputs:
dinput.free()
def get_batch_size(self):
return self.batch_size
# TensorRT passes along the names of the engine bindings to the get_batch function.
# You don't necessarily have to use them, but they can be useful to understand the order of
# the inputs. The bindings list is expected to have the same ordering as 'names'.
def get_batch(self, names):
if self.current_index + self.batch_size > self.num_inputs:
print("Calibrating index {:} batch size {:} exceed max input limit {:} sentences".format(self.current_index, self.batch_size, self.num_inputs))
return None
current_batch = int(self.current_index / self.batch_size)
if current_batch % 10 == 0:
print("Calibrating batch {:}, containing {:} sentences".format(current_batch, self.batch_size))
input_ids = []
segment_ids = []
input_mask = []
for i in range(self.batch_size):
example = self.data[self.current_index + i]
features = dp.convert_example_to_features(example.doc_tokens, example.question_text, self.tokenizer, self.max_seq_length, self.doc_stride, self.max_query_length)
if len(input_ids) and len(segment_ids) and len(input_mask):
input_ids = np.concatenate((input_ids, features[0].input_ids))
segment_ids = np.concatenate((segment_ids, features[0].segment_ids))
input_mask = np.concatenate((input_mask, features[0].input_mask))
else:
input_ids = features[0].input_ids
segment_ids = features[0].segment_ids
input_mask = features[0].input_mask
cuda.memcpy_htod(self.device_inputs[0], input_ids.ravel())
cuda.memcpy_htod(self.device_inputs[1], segment_ids.ravel())
cuda.memcpy_htod(self.device_inputs[2], input_mask.ravel())
self.current_index += self.batch_size
return self.device_inputs
def read_calibration_cache(self):
# If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
return f.read()
def write_calibration_cache(self, cache):
with open(self.cache_file, "wb") as f:
f.write(cache)
f.flush()
os.fsync(f)
def get_quantile(self):
return 0.9999
def get_regression_cutoff(self):
return 1.0
def read_histogram_cache(self, length):
return None
def write_histogram_cache(self, ptr, length):
return None
| TensorRT-master | demo/BERT/helpers/calibrator.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import helpers.tokenization as tokenization
import collections
import numpy as np
import six
import math
import json
def convert_doc_tokens(paragraph_text):
""" Return the list of tokens from the doc text """
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
doc_tokens = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
return doc_tokens
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
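# Illustrative sketch (not part of the original module): the worked example from the comment above.
# For the token "bought" (position 7 in the toy document), span B (start=3, length=5) scores
# min(4, 0) + 0.01 * 5 = 0.05 while span C (start=6, length=5) scores min(1, 3) + 0.01 * 5 = 1.05,
# so span C is the maximum-context span.
def _example_max_context_score():
    DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
    doc_spans = [DocSpan(start=3, length=5), DocSpan(start=6, length=5)]  # spans B and C
    return [_check_is_max_context(doc_spans, idx, 7) for idx in range(2)]  # [False, True]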
def convert_example_to_features(doc_tokens, question_text, tokenizer, max_seq_length,
doc_stride, max_query_length):
"""Loads a data file into a list of `InputBatch`s."""
query_tokens = tokenizer.tokenize(question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
_Feature = collections.namedtuple( # pylint: disable=invalid-name
"Feature",
["input_ids", "input_mask", "segment_ids", "tokens", "token_to_orig_map", "token_is_max_context"])
features = []
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
def create_int_feature(values):
feature = np.asarray(values, dtype=np.int32, order=None)
return feature
features.append(_Feature(
input_ids = create_int_feature(input_ids),
input_mask = create_int_feature(input_mask),
segment_ids = create_int_feature(segment_ids),
tokens = tokens,
token_to_orig_map = token_to_orig_map,
token_is_max_context = token_is_max_context
))
return features
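# Illustrative sketch (not part of the original module): the sliding-window chunking used above,
# isolated from the rest of convert_example_to_features. With 10 sub-tokens, a window of 6, and a
# stride of 3, the document is covered by the overlapping spans (0, 6), (3, 6), and (6, 4).
def _example_doc_spans():
    DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
    num_tokens, max_tokens_for_doc, doc_stride = 10, 6, 3
    doc_spans, start_offset = [], 0
    while start_offset < num_tokens:
        length = min(num_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == num_tokens:
            break
        start_offset += min(length, doc_stride)
    return doc_spans  # [DocSpan(0, 6), DocSpan(3, 6), DocSpan(6, 4)]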
def read_squad_json(input_file):
"""read from squad json into a list of examples"""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
_Example = collections.namedtuple( # pylint: disable=invalid-name
"Example",
["id", "question_text", "doc_tokens"])
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = convert_doc_tokens(paragraph_text)
for qa in paragraph["qas"]:
examples.append(_Example(
id = qa["id"],
question_text = qa["question"],
doc_tokens = doc_tokens
))
return examples
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
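# Illustrative sketch (not part of the original module): the max-subtraction in _compute_softmax is
# the standard trick for numerical stability; the result matches a direct NumPy softmax.
def _example_softmax_check():
    logits = [2.0, 1.0, 0.1]
    expected = np.exp(np.array(logits) - max(logits))
    expected = expected / expected.sum()
    return np.allclose(_compute_softmax(logits), expected)  # True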
def get_predictions(doc_tokens, features, results, n_best_size, max_answer_length):
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
prediction = ""
scores_diff_json = 0.0
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
version_2_with_negative = False
for result in results:
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
feature = features[result.feature_index]
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = 0
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=result.feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=result.feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
if pred.start_index > 0: # this is a non-null prediction
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, True)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
if len(final_text):
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
null_score_diff_threshold = 0.0
if not version_2_with_negative:
prediction = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json = score_diff
if score_diff > null_score_diff_threshold:
prediction = ""
else:
prediction = best_non_null_entry.text
return prediction, nbest_json, scores_diff_json
| TensorRT-master | demo/BERT/helpers/data_processing.py |
"""
Demonstrates TensorRT capabilities with networks located in the HuggingFace repository.
Requires Python 3.5+.
"""
# std
import os
import sys
import argparse
import importlib
from abc import abstractmethod
from typing import List
# tabulate
from tabulate import tabulate
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT_DIR)
# Wrapper actions supported
WRAPPER_RUN_ACTION = "run"
WRAPPER_LIST_ACTION = "list"
WRAPPER_COMPARE_ACTION = "compare"
WRAPPER_ACTIONS = [WRAPPER_RUN_ACTION, WRAPPER_LIST_ACTION, WRAPPER_COMPARE_ACTION]
# NNDF
from NNDF.general_utils import process_results, register_network_folders, RANDOM_SEED
from NNDF.logger import G_LOGGER
# huggingface
from transformers import set_seed
# Force seed to 42 for reproducibility.
set_seed(RANDOM_SEED)
class Action:
def __init__(self, networks: List[str], parser: argparse.ArgumentParser):
self.networks = networks
self.parser = parser
self.add_args(self.parser)
@abstractmethod
def execute(self, args: argparse.Namespace):
pass
@abstractmethod
def add_args(self, parser: argparse.ArgumentParser):
pass
class NetworkScriptAction(Action):
# Reserved files names for each network folder
FRAMEWORKS_SCRIPT_NAME = "frameworks"
TRT_SCRIPT_NAME = "trt"
ONNX_SCRIPT_NAME = "onnxrt"
PER_NETWORK_SCRIPTS = [FRAMEWORKS_SCRIPT_NAME, TRT_SCRIPT_NAME, ONNX_SCRIPT_NAME]
def add_args(self, parser):
network_group = parser.add_argument_group("specify network")
network_group.add_argument(
"network", help="Network to run.", choices=self.networks
)
def load_script(self, script_name: str, args: argparse.Namespace):
"""Helper for loading a specific script for given network."""
assert (
script_name in self.PER_NETWORK_SCRIPTS
), "Script must be a reserved name."
# Load the specific commandline script
return importlib.import_module("{}.{}".format(args.network, script_name))
class RunAction(NetworkScriptAction):
def execute(self, args: argparse.Namespace):
module = self.load_script(args.script, args)
module.RUN_CMD._parser = self.parser
os.chdir(args.network)
print(module.RUN_CMD())
return 0
def add_args(self, parser: argparse.ArgumentParser):
super().add_args(parser)
run_group = parser.add_argument_group("run args")
run_group.add_argument("script", choices=self.PER_NETWORK_SCRIPTS)
class CompareAction(NetworkScriptAction):
GENERAL_HEADERS = ["script", "accuracy"]
def execute(self, args: argparse.Namespace):
compare_group = []
if args.compare is None:
compare_group = self.PER_NETWORK_SCRIPTS
else:
compare_group = args.compare
if len(compare_group) <= 1:
G_LOGGER.error(
"Comparison command must have atleast two groups to compare to."
)
exit()
results = []
# Get the parser for inference script which is a superset
module = None
try:
module = self.load_script(self.TRT_SCRIPT_NAME, args)
except ModuleNotFoundError as e:
print("Unable to do comparison. TRT script not yet supported.")
exit(1)
nconfig = module.RUN_CMD.config
nconfig.MetadataClass.add_inference_args(self.parser)
self.parser.parse_known_args()
results = []
# It is possible certain scripts are not implemented
# Allow the results to generate even if script does not exist.
modified_compare_group = []
for g in compare_group:
cwd = os.getcwd()
try:
print()
print("Collecting Data for {}".format(g))
os.chdir(args.network)
module = self.load_script(g, args)
module.RUN_CMD._parser = self.parser
results.append(module.RUN_CMD())
modified_compare_group.append(g)
except ModuleNotFoundError as e:
print("{} is not valid, the demo does not support this script yet. Ignoring.".format(g))
finally:
os.chdir(cwd)
headers, rows = process_results(modified_compare_group, results, nconfig)
print()
print(tabulate(rows, headers=headers))
return 0
def add_args(self, parser: argparse.ArgumentParser):
super().add_args(parser)
compare_group = parser.add_argument_group("compare args")
compare_group.add_argument(
"--compare",
"-c",
nargs="+",
default=None,
choices=self.PER_NETWORK_SCRIPTS,
help="Specific frameworks to compare. If none is specified, all are compared.",
)
class ListAction(Action):
def __init__(self, networks: List[str], parser: argparse.ArgumentParser):
super().__init__(networks, parser)
self.networks = networks
def execute(self, args: argparse.Namespace):
print("Networks that are supported by HuggingFace Demo:")
[print(n) for n in self.networks]
return 0
def get_action(
action_name: str, networks: List[str], parser: argparse.ArgumentParser
) -> Action:
return {
WRAPPER_COMPARE_ACTION: CompareAction,
WRAPPER_LIST_ACTION: ListAction,
WRAPPER_RUN_ACTION: RunAction,
}[action_name](networks, parser)
def get_default_parser(
networks: List[str], description: str = "", add_default_help=False
) -> argparse.ArgumentParser:
"""
Returns the argparser used by main(). Allows toggling the default help message with a custom help flag
so that argparse does not raise SystemExit when --help is passed in. Useful for custom --help functionality.
Returns:
(argparse.ArgumentParser): argparser used by main()
"""
# This variable is set so that usage errors don't show up in wrapper
parser = argparse.ArgumentParser(
conflict_handler="resolve",
description=description,
add_help=add_default_help,
prog="run.py",
)
required_group = parser.add_argument_group("required wrapper arguments")
required_group.add_argument("action", choices=WRAPPER_ACTIONS)
if not add_default_help:
parser.add_argument(
"--help",
"-h",
help="Shows help message. If --network is supplied, returns help for specific script.",
action="store_true",
)
return parser
def main() -> None:
"""
Parses network folders and responsible for passing --help flags to subcommands if --network is provided.
Returns:
None
"""
# Get all available network scripts
networks = register_network_folders(os.getcwd())
# Add network folder for entry point
description = "Runs TensorRT networks that are based-off of HuggingFace variants."
parser = get_default_parser(networks, description, add_default_help=False)
# Get the general network wrapper help
known_args, _ = parser.parse_known_args()
# Delegate parser to action specifics
action = get_action(known_args.action, networks, parser)
known_args, _ = parser.parse_known_args()
return action.execute(known_args)
if __name__ == "__main__":
main()
| TensorRT-master | demo/HuggingFace/run.py |
"""
Contains logic that captures T5 HuggingFace models into ONNX models.
Inspired by https://github.com/onnx/models/blob/master/text/machine_comprehension/t5/dependencies/T5-export.py
"""
# std
from itertools import islice
# tensorrt
import tensorrt as trt
# polygraphy
from polygraphy.backend.trt import Profile
# torch
import torch
from torch.nn import Module
# huggingface
from transformers.generation_utils import GenerationMixin
from transformers.modeling_outputs import Seq2SeqLMOutput
# TRT-HuggingFace
from T5.T5ModelConfig import T5ModelTRTConfig
from NNDF.tensorrt_utils import clamp_weights_onnx_to_fp16_bounds
from NNDF.networks import NetworkMetadata
from NNDF.logger import G_LOGGER
from NNDF.models import (
TRTEngineFile,
TorchModelFile,
ONNXModelFile,
ModelFileConverter,
)
def add_extra_fp32(network_definition):
def window(seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
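# For example (illustrative, assuming the nested helper above): list(window([0, 1, 2, 3], n=2))
# yields [(0, 1), (1, 2), (2, 3)], so the loop below sees every run of six consecutive layer indices.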
indices = list(range(0, network_definition[1].num_layers))
for i, i_1, i_2, i_3, i_4, i_5 in window(indices, 6):
l = network_definition[1].get_layer(i)
l_1 = network_definition[1].get_layer(i_1)
l_2 = network_definition[1].get_layer(i_2)
l_3 = network_definition[1].get_layer(i_3)
l_4 = network_definition[1].get_layer(i_4)
l_5 = network_definition[1].get_layer(i_5)
if not all([l.get_output(k).is_execution_tensor for k in range(l.num_outputs)]):
continue
if l.get_output_type(0) != trt.float32:
continue
if l.type == trt.LayerType.ELEMENTWISE and \
l_1.type == trt.LayerType.REDUCE and \
l_2.type == trt.LayerType.CONSTANT and \
l_4.type == trt.LayerType.ELEMENTWISE and \
l_5.type == trt.LayerType.UNARY:
l.__class__ = getattr(trt, "IElementWiseLayer")
if l.op == trt.ElementWiseOperation.POW:
l.precision = trt.float32
l.set_output_type(0, trt.float32)
l_1.precision = trt.float32
l_1.set_output_type(0, trt.float32)
l_4.__class__ = getattr(trt, "IElementWiseLayer")
if l_4.op == trt.ElementWiseOperation.SUM:
l_4.precision = trt.float32
l_4.set_output_type(0, trt.float32)
l_5.__class__ = getattr(trt, "IUnaryLayer")
if l_5.op == trt.UnaryOperation.SQRT:
l_5.precision = trt.float32
l_5.set_output_type(0, trt.float32)
return network_definition
# Torch File Encoding #
class T5DecoderTorchFile(TorchModelFile):
class TorchModule(Module, GenerationMixin):
"""
A simplified definition of the T5 decoder without support for loss.
Decoder with lm-head attached.
"""
def __init__(self, decoder, lm_head, config):
super().__init__()
self.decoder = decoder
self.lm_head = lm_head
self.config = config
def prepare_inputs_for_generation(self, input_ids, **kwargs):
return {
"input_ids": input_ids,
"encoder_hidden_states": kwargs["encoder_hidden_states"],
}
def forward(self, input_ids, encoder_hidden_states, **kwargs):
decoder_outputs = self.decoder(
input_ids=input_ids,
encoder_hidden_states=encoder_hidden_states,
**kwargs
)
# self.config.d_model ** -0.5 for rescaling output on vocab.
# as seen in https://huggingface.co/transformers/_modules/transformers/models/t5/modeling_t5.html#T5ForConditionalGeneration
sequence_output = decoder_outputs[0] * self.config.d_model ** -0.5
logits = self.lm_head(sequence_output)
if not kwargs.get("return_dict", False):
return (logits,) + decoder_outputs[1:]
return Seq2SeqLMOutput(logits=logits)
def __init__(self, model, network_metadata):
super().__init__(model, T5DecoderConverter, network_metadata)
class T5EncoderTorchFile(TorchModelFile):
"""Creation of a class to output only the last hidden state from the encoder."""
class TorchModule(Module, GenerationMixin):
def __init__(self, encoder):
super().__init__()
self.encoder = encoder
def forward(self, *input, **kwargs):
return self.encoder(*input, **kwargs)[0]
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def __init__(self, model, network_metadata):
super().__init__(model, T5EncoderConverter, network_metadata)
# ONNX File Encoding #
class T5EncoderONNXFile(ONNXModelFile):
def __init__(self, model, network_metadata):
super().__init__(model, T5EncoderConverter, network_metadata)
class T5DecoderONNXFile(ONNXModelFile):
def __init__(self, model, network_metadata):
super().__init__(model, T5DecoderConverter, network_metadata)
# TRT Engine File Encoding #
class T5DecoderTRTEngine(TRTEngineFile):
DEFAULT_TRT_WORKSPACE_MB = 3072
def __init__(self, model, network_metadata):
super().__init__(model, T5DecoderConverter, network_metadata)
def get_network_definition(self, network_definition):
return add_extra_fp32(network_definition)
def get_dynamic_shape_profiles(self):
max_sequence_length = T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[
self.network_metadata.variant
]
profile = Profile()
profile.add(
"input_ids",
min=(1, 1),
opt=(1, max_sequence_length // 2),
max=(1, max_sequence_length),
)
profile.add(
"encoder_hidden_states",
min=(1, 1, max_sequence_length),
opt=(1, max_sequence_length // 2, max_sequence_length),
max=(1, max_sequence_length, max_sequence_length),
)
return [profile]
def use_strict_types(self):
return self.network_metadata.precision.fp16
class T5EncoderTRTEngine(TRTEngineFile):
DEFAULT_TRT_WORKSPACE_MB = 2048
def __init__(self, model, network_metadata):
super().__init__(model, T5EncoderConverter, network_metadata)
def get_network_definition(self, network_definition):
return add_extra_fp32(network_definition)
def get_dynamic_shape_profiles(self):
max_sequence_length = T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[
self.network_metadata.variant
]
return [
Profile().add(
"input_ids",
min=(1, 1),
opt=(1, max_sequence_length // 2),
max=(1, max_sequence_length),
)
]
def use_strict_types(self):
return self.network_metadata.precision.fp16
# Converters #
class T5DecoderConverter(ModelFileConverter):
def __init__(self):
super().__init__(T5DecoderTorchFile, T5DecoderONNXFile, T5DecoderTRTEngine)
def torch_to_onnx(
self, output_fpath: str, model: Module, network_metadata: NetworkMetadata
):
"""
Exports a given HuggingFace T5 model to its decoder architecture only.
Inspired by https://github.com/onnx/models/blob/master/text/machine_comprehension/t5/dependencies/T5-export.py
Args:
output_fpath (str): Path to the ONNX file
model (torch.nn.Module): Loaded torch model
Returns:
T5DecoderONNXFile: ONNX decoder object.
"""
input_ids = torch.tensor([[42] * 10])
# Exporting the decoder requires a basic instance of the encoder
# Create one temporarily
simplified_encoder = T5EncoderTorchFile.TorchModule(model.encoder)
# Exports to ONNX
decoder_with_lm_head = T5DecoderTorchFile.TorchModule(
model.decoder, model.lm_head, model.config
)
# Wrap forward so the ONNX exporter sees a plain logits tensor instead of a HuggingFace output object
old_forward = decoder_with_lm_head.forward
def _export_forward(*args, **kwargs):
result = old_forward(*args, **kwargs)
return result[0]
decoder_with_lm_head.forward = _export_forward
inputs = T5ModelTRTConfig.get_input_dims(network_metadata)["decoder"]
outputs = T5ModelTRTConfig.get_output_dims(network_metadata)["decoder"]
torch.onnx.export(
decoder_with_lm_head,
(input_ids, simplified_encoder(input_ids)),
output_fpath,
export_params=True,
opset_version=12,
input_names=inputs.get_names(),
output_names=outputs.get_names(),
dynamic_axes={
**inputs.get_torch_dynamic_axis_encoding(),
**outputs.get_torch_dynamic_axis_encoding(),
},
training=False,
use_external_data_format=True
)
if network_metadata.precision.fp16:
G_LOGGER.debug("Clamping FP16 weights for T5")
clamp_weights_onnx_to_fp16_bounds(output_fpath, output_fpath)
return T5DecoderONNXFile(output_fpath, network_metadata)
class T5EncoderConverter(ModelFileConverter):
def __init__(self):
super().__init__(T5EncoderTorchFile, T5EncoderONNXFile, T5EncoderTRTEngine)
def torch_to_onnx(
self, output_fpath: str, model: Module, network_metadata: NetworkMetadata
):
"""
Exports a given HuggingFace T5 model to its encoder architecture only.
Inspired by https://github.com/onnx/models/blob/master/text/machine_comprehension/t5/dependencies/T5-export.py
Args:
output_fpath (str): Path to the ONNX file
model (torch.nn.Module): Loaded torch model
Returns:
Tuple[str]: Names of generated models
"""
input_ids = torch.tensor([[42] * 10])
simplified_encoder = T5EncoderTorchFile.TorchModule(model.encoder)
inputs = T5ModelTRTConfig.get_input_dims(network_metadata)["encoder"]
outputs = T5ModelTRTConfig.get_output_dims(network_metadata)["encoder"]
# Exports to ONNX
torch.onnx._export(
simplified_encoder,
input_ids,
output_fpath,
export_params=True,
opset_version=12,
input_names=inputs.get_names(),
output_names=outputs.get_names(),
dynamic_axes={
**inputs.get_torch_dynamic_axis_encoding(),
**outputs.get_torch_dynamic_axis_encoding(),
},
training=False,
use_external_data_format=True
)
if network_metadata.precision.fp16:
G_LOGGER.debug("Clamping FP16 weights for T5")
clamp_weights_onnx_to_fp16_bounds(output_fpath, output_fpath)
return T5EncoderONNXFile(output_fpath, network_metadata)
| TensorRT-master | demo/HuggingFace/T5/export.py |
# std
import argparse
from collections import namedtuple, OrderedDict
from itertools import product
from typing import Dict
# TRT-HuggingFace
from NNDF.networks import Precision, NetworkMetadata, NNConfig, Dims
from NNDF.interface import MetadataArgparseInteropMixin
# Limitation of namedtuples: they must be declared at module scope, not inside classes;
# otherwise pickling fails.
# See: https://stackoverflow.com/questions/4677012/python-cant-pickle-type-x-attribute-lookup-failed
_T5Metadata = namedtuple("T5Metadata", ["kv_cache"])
class T5Metadata(_T5Metadata, MetadataArgparseInteropMixin):
@staticmethod
def add_args(parser: argparse.ArgumentParser) -> None:
"""Add commandline interface parser."""
network_group = parser.add_argument_group("T5 network")
network_group.add_argument(
"--variant",
help="T5 variant to generate",
choices=T5ModelTRTConfig.TARGET_MODELS,
required=True,
)
network_group.add_argument(
"--enable-kv-cache",
help="T5 enable KV cache",
action="store_true",
default=False,
)
@staticmethod
def from_args(args: argparse.Namespace):
return NetworkMetadata(
variant=args.variant,
precision=Precision(fp16=False),
other=T5Metadata(kv_cache=args.enable_kv_cache),
)
@staticmethod
def add_inference_args(parser: argparse.ArgumentParser) -> None:
T5Metadata.add_args(parser)
inference_group = parser.add_argument_group("inference group")
inference_group.add_argument(
"--fp16", action="store_true", help="Enables fp16 TensorRT tactics."
)
@staticmethod
def from_inference_args(args: argparse.Namespace):
base_metadata = T5Metadata.from_args(args)
return base_metadata._replace(precision=Precision(fp16=args.fp16))
class T5ModelTRTConfig(NNConfig):
TARGET_MODELS = ["t5-small", "t5-base", "t5-large"]
NUMBER_OF_LAYERS = {TARGET_MODELS[0]: 6, TARGET_MODELS[1]: 12, TARGET_MODELS[2]: 24}
MAX_SEQUENCE_LENGTH = {
TARGET_MODELS[0]: 512,
TARGET_MODELS[1]: 768,
TARGET_MODELS[2]: 1024,
}
NETWORK_FULL_NAME = "full"
NETWORK_DECODER_SEGMENT_NAME = "decoder"
NETWORK_ENCODER_SEGMENT_NAME = "encoder"
NETWORK_SEGMENTS = [NETWORK_DECODER_SEGMENT_NAME, NETWORK_ENCODER_SEGMENT_NAME]
def __init__(self):
precision_fp16 = [False, True]
kv_caches = [False, True]
variants = []
for variant, fp16, kv_cache in product(
T5ModelTRTConfig.TARGET_MODELS, precision_fp16, kv_caches
):
variants.append(
NetworkMetadata(
variant=variant,
precision=Precision(fp16=fp16),
other=T5Metadata(kv_cache=kv_cache),
)
)
super().__init__("T5", variants=variants)
def get_python_requirements(self):
base_requirements = super().get_python_requirements()
base_requirements.append("transformers==4.6.1")
return base_requirements
def get_network_segments(self):
"""
Returns exportable segments for the given network.
Used in the case where a single network needs to
be exported into multiple parts.
"""
return T5ModelTRTConfig.NETWORK_SEGMENTS
def get_metadata_string(self, metadata: NetworkMetadata) -> str:
# Remove redundant t5 name
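# Note: str.lstrip removes a set of characters, not a prefix; this works here only because none of
# the current variant suffixes ("small", "base", "large") begin with "t", "5", or "-".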
metadata = metadata._replace(variant=metadata.variant.lstrip("t5-"))
return super().get_metadata_string(metadata)
@staticmethod
def get_input_dims(metadata) -> Dict:
"""
Returns dictionary encoding of input dimensions.
Keys will be equal to get_model_segments()
Returns:
(Dict[str, Dims]): {"decoder": Dims, "encoder": Dims}
"""
decoder_inputs = Dims(
OrderedDict(
{
"input_ids": (Dims.BATCH, Dims.SEQUENCE),
"encoder_hidden_states": (
Dims.BATCH,
Dims.create_new_sequence_dim("encoder_hidden_length"),
T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[metadata.variant],
),
}
)
)
encoder_inputs = Dims(OrderedDict({"input_ids": (Dims.BATCH, Dims.SEQUENCE)}))
return {
T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME: decoder_inputs,
T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME: encoder_inputs,
}
@staticmethod
def get_output_dims(metadata) -> Dict:
"""
Returns dictionary encoding of output dimensions.
Keys will be equal to get_model_segments()
Returns:
(Dict[str, Dims]): {"decoder": Dims, "encoder": Dims}
"""
decoder_outputs = Dims(
OrderedDict({"hidden_states": (Dims.BATCH, Dims.SEQUENCE)})
)
encoder_outputs = Dims(
OrderedDict(
{
"hidden_states": (
Dims.BATCH,
Dims.SEQUENCE,
T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[metadata.variant],
)
}
)
)
return {
T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME: decoder_outputs,
T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME: encoder_outputs,
}
| TensorRT-master | demo/HuggingFace/T5/T5ModelConfig.py |
"""
Executes ONNX Runtime framework code. See README.md for more information.
"""
# std
import os
import sys
from typing import Dict, List, Tuple
# Add syspath for custom library
if __name__ == "__main__":
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, os.pardir)
sys.path.append(project_root)
# huggingface
from transformers import T5Tokenizer, T5Config, PretrainedConfig
from transformers.generation_utils import GenerationMixin
from transformers.modeling_outputs import Seq2SeqLMOutput
# torch
import torch
# TRT-HuggingFace
from NNDF.interface import OnnxRTCommand
from NNDF.networks import (
NetworkMetadata,
NetworkModels,
NetworkModel,
NetworkResult,
NetworkRuntime,
Precision,
TimingProfile,
)
from NNDF.general_utils import NNFolderWorkspace
from NNDF.tensorrt_utils import PolygraphyOnnxRunner
from T5.frameworks import T5FHuggingFace
from T5.T5ModelConfig import T5ModelTRTConfig
from T5.measurements import decoder_inference, encoder_inference, full_inference_greedy
class OnnxHFRunner(PolygraphyOnnxRunner, GenerationMixin):
"""Runner that adds interop support for HF and HF provided greedy_search functions."""
def __init__(self, engine_fpath: str, network_metadata: NetworkMetadata, tfm_config: PretrainedConfig):
super().__init__(engine_fpath, network_metadata)
# required for greedy search used by generation mixin
self.config = tfm_config
class T5OnnxEncoder(OnnxHFRunner):
"""OnnxRT implemented network interface that is mainly to check correctness."""
def forward(self, input_ids, *args, **kwargs):
# Unoptimized unconditional transfer to numpy for interfacing with polygraphy
input_ids = input_ids.cpu().numpy().astype("int64")
return torch.from_numpy(self.trt_context.infer({"input_ids": input_ids})["hidden_states"])
class T5OnnxDecoder(OnnxHFRunner):
def prepare_inputs_for_generation(self, input_ids, **kwargs):
return {
"input_ids": input_ids,
"encoder_hidden_states": kwargs["encoder_hidden_states"],
}
def forward(self, input_ids, encoder_hidden_states, *args, **kwargs):
# Unoptimized unconditional transfer to numpy for interfacing with polygraphy
input_ids = input_ids.cpu().numpy().astype("int64")
encoder_hidden_states = encoder_hidden_states.cpu().numpy().astype("float32")
logits = self.trt_context.infer(
{"input_ids": input_ids, "encoder_hidden_states": encoder_hidden_states}
)["hidden_states"]
return Seq2SeqLMOutput(logits=torch.from_numpy(logits))
class T5ONNXRT(OnnxRTCommand):
def __init__(self):
super().__init__(
T5ModelTRTConfig,
"Runs polygraphy results for T5 model.",
T5FHuggingFace,
)
self.t5_trt_decoder = None
self.t5_trt_encoder = None
def cleanup(
self,
workspace: NNFolderWorkspace,
keep_onnx_model: bool = False,
keep_torch_model: bool = False,
) -> None:
# Deactivates context
if self.t5_trt_encoder:
self.t5_trt_encoder.release()
if self.t5_trt_decoder:
self.t5_trt_decoder.release()
self.frameworks_cmd.cleanup(workspace, keep_onnx_model, keep_torch_model)
def execute_inference(
self,
metadata: NetworkMetadata,
onnx_fpaths: Dict[str, NetworkModel],
inference_input: str,
timing_profile: TimingProfile,
) -> NetworkResult:
tokenizer = T5Tokenizer.from_pretrained(metadata.variant)
input_ids = tokenizer(inference_input, return_tensors="pt").input_ids
encoder_last_hidden_state, encoder_e2e_median_time = encoder_inference(
self.t5_trt_encoder, input_ids, timing_profile
)
_, decoder_e2e_median_time = decoder_inference(
self.t5_trt_decoder,
input_ids,
encoder_last_hidden_state,
timing_profile,
use_cuda=False,
)
decoder_output_greedy, full_e2e_median_runtime = full_inference_greedy(
self.t5_trt_encoder,
self.t5_trt_decoder,
input_ids,
tokenizer,
timing_profile,
max_length=T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[metadata.variant],
use_cuda=False,
)
# Remove the padding and end tokens.
semantic_outputs = tokenizer.convert_ids_to_tokens(
decoder_output_greedy.tolist()[0]
)[1:-1]
remove_underscore = "".join(
[s.replace("\u2581", " ") for s in semantic_outputs]
)
return NetworkResult(
input=inference_input,
output_tensor=encoder_last_hidden_state,
semantic_output=remove_underscore.strip(),
median_runtime=[
NetworkRuntime(
name=T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
runtime=decoder_e2e_median_time,
),
NetworkRuntime(
name=T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME,
runtime=encoder_e2e_median_time,
),
NetworkRuntime(
name=T5ModelTRTConfig.NETWORK_FULL_NAME,
runtime=full_e2e_median_runtime,
),
],
models=NetworkModels(
torch=None,
onnx=list(onnx_fpaths.values()),
trt=None
),
)
def run_onnxrt(
self,
metadata: NetworkMetadata,
onnx_fpaths: Tuple[NetworkModel],
network_input: List[str],
working_directory: str,
keep_onnx_model: bool,
keep_torch_model: bool,
timing_profile: TimingProfile,
) -> List[NetworkResult]:
workspace = NNFolderWorkspace(
self.frameworks_cmd.config.network_name, metadata, working_directory
)
results = []
try:
# no fpath provided for onnx files, download them
if len(onnx_fpaths) == 0:
onnx_fpaths = self.frameworks_cmd.generate_and_download_framework(
metadata, workspace
).onnx
else:
keep_onnx_model = True
keep_torch_model = True
# Output networks shall not exceed the number of network segments explicitly defined by the configuration file.
assert len(onnx_fpaths) == len(
T5ModelTRTConfig.NETWORK_SEGMENTS
), "There should only be {} exported ONNX segments in T5 model.".format(
len(T5ModelTRTConfig.NETWORK_SEGMENTS)
)
lookup_onnx_table = {v.name: v for v in onnx_fpaths}
tfm_config = T5Config(
use_cache=metadata.other.kv_cache,
num_layers=T5ModelTRTConfig.NUMBER_OF_LAYERS[metadata.variant],
)
self.t5_trt_encoder = T5OnnxEncoder(
lookup_onnx_table["encoder"].fpath, metadata, tfm_config
)
self.t5_trt_decoder = T5OnnxDecoder(
lookup_onnx_table["decoder"].fpath, metadata, tfm_config
)
for ninput in network_input:
results.append(
self.execute_inference(
metadata, lookup_onnx_table, ninput, timing_profile
)
)
finally:
self.cleanup(workspace, keep_onnx_model, keep_torch_model)
return results
def add_args(self, parser) -> None:
super().add_args(parser)
onnx_group = parser.add_argument_group("onnx models")
onnx_group.add_argument(
"--onnx-decoder-fpath",
default=None,
help="Path to ONNX decoder. If None is supplied, scripts will generate them from HuggingFace.",
)
onnx_group.add_argument(
"--onnx-encoder-fpath",
default=None,
help="Path to ONNX encoder. If None is supplied, scripts will generate them from HuggingFace.",
)
def args_to_network_models(self, args) -> List[NetworkModel]:
# Check if both flags are given otherwise error out
decoder_fpath_check = args.onnx_decoder_fpath is None
encoder_fpath_check = args.onnx_encoder_fpath is None
network_models = None
if decoder_fpath_check and encoder_fpath_check:
network_models = tuple()
elif decoder_fpath_check or encoder_fpath_check:
raise self._parser.error(
"Both --onnx-decoder-fpath and --onnx-encoder-fpath must be given. Otherwise neither should be provided for script to download them."
)
else:
onnx_decoder = NetworkModel(
name=T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=args.onnx_decoder_fpath,
)
onnx_encoder = NetworkModel(
name=T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME,
fpath=args.onnx_encoder_fpath,
)
network_models = (onnx_decoder, onnx_encoder)
return network_models
def args_to_network_metadata(self, args) -> NetworkMetadata:
"""Override args to metadata to use export subroutine."""
frameworks_parsed_metadata = self.frameworks_cmd.args_to_network_metadata(args)
return NetworkMetadata(
variant=frameworks_parsed_metadata.variant,
precision=Precision(fp16=args.fp16),
other=frameworks_parsed_metadata.other,
)
RUN_CMD = T5ONNXRT()
if __name__ == "__main__":
result = RUN_CMD()
print("Results: {}".format(result))
| TensorRT-master | demo/HuggingFace/T5/onnxrt.py |