# Copyright (c) 2018 NVIDIA Corporation
'''
This file implements functions to calculate basic metrics.
'''
import numpy as np
import tensorflow as tf


def true_positives(labels, preds):
  return np.sum(np.logical_and(labels, preds))


def accuracy(labels, preds):
  return np.sum(np.equal(labels, preds)) / len(preds)


def recall(labels, preds):
  return true_positives(labels, preds) / np.sum(labels)


def precision(labels, preds):
  return true_positives(labels, preds) / np.sum(preds)


def f1(labels, preds):
  rec = recall(labels, preds)
  pre = precision(labels, preds)
  if rec == 0 or pre == 0:
    return 0
  return 2 * rec * pre / (rec + pre)
# ---- end of file: open_seq2seq/utils/metrics.py (OpenSeq2Seq-master) ----
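As a quick illustration of how these helpers behave, the snippet below runs them on a pair of made-up binary label/prediction arrays (values chosen purely for demonstration):

import numpy as np

from open_seq2seq.utils.metrics import accuracy, precision, recall, f1

labels = np.array([1, 0, 1, 1, 0, 1])
preds = np.array([1, 0, 0, 1, 1, 1])

print(accuracy(labels, preds))   # 4 of 6 predictions match -> ~0.667
print(recall(labels, preds))     # 3 true positives / 4 actual positives -> 0.75
print(precision(labels, preds))  # 3 true positives / 4 predicted positives -> 0.75
print(f1(labels, preds))         # harmonic mean of precision and recall -> 0.75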
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import math
import os
import time
import tensorflow as tf
from open_seq2seq.utils.utils import deco_print, log_summaries_from_dict, \
get_results_for_epoch
class BroadcastGlobalVariablesHook(tf.train.SessionRunHook):
"""
SessionRunHook that will broadcast all global variables from root rank
to all other processes during initialization.
This is necessary to ensure consistent initialization of all workers when
training is started with random weights or restored from a checkpoint.
"""
def __init__(self, root_rank, device=''):
"""Construct a new BroadcastGlobalVariablesHook that will broadcast all
global variables from root rank to all other processes during initialization.
Args:
root_rank:
Rank that will send data, other ranks will receive data.
device:
Device to be used for broadcasting. Uses GPU by default
if Horovod was built with HOROVOD_GPU_BROADCAST.
"""
super(BroadcastGlobalVariablesHook, self).__init__()
self.root_rank = root_rank
self.bcast_op = None
self.device = device
def begin(self):
def broadcast_global_variables(root_rank):
from horovod.tensorflow.mpi_ops import broadcast
ops = []
for var in tf.global_variables():
if var.dtype.base_dtype == tf.float16:
ops.append(tf.assign(var, tf.cast(broadcast(tf.cast(var, tf.float32),
root_rank), tf.float16)))
else:
ops.append(tf.assign(var, broadcast(var, root_rank)))
return tf.group(*ops)
if not self.bcast_op or self.bcast_op.graph != tf.get_default_graph():
with tf.device(self.device):
self.bcast_op = broadcast_global_variables(self.root_rank)
def after_create_session(self, session, coord):
session.run(self.bcast_op)
class PrintSamplesHook(tf.train.SessionRunHook):
"""Session hook that prints training samples and prediction from time to time
"""
def __init__(self, every_steps, model):
super(PrintSamplesHook, self).__init__()
self._timer = tf.train.SecondOrStepTimer(every_steps=every_steps)
self._iter_count = 0
self._global_step = None
self._model = model
# using only first GPU
output_tensors = model.get_output_tensors(0)
self._fetches = [
model.get_data_layer(0).input_tensors,
output_tensors,
]
def begin(self):
self._iter_count = 0
self._global_step = tf.train.get_global_step()
def before_run(self, run_context):
if self._timer.should_trigger_for_step(self._iter_count):
return tf.train.SessionRunArgs([self._fetches, self._global_step])
return tf.train.SessionRunArgs([[], self._global_step])
def after_run(self, run_context, run_values):
results, step = run_values.results
self._iter_count = step
if not results:
return
self._timer.update_last_triggered_step(self._iter_count - 1)
input_values, output_values = results
dict_to_log = self._model.maybe_print_logs(input_values, output_values, step)
# optionally logging to tensorboard any values
# returned from maybe_print_logs
if self._model.params['save_summaries_steps'] and dict_to_log:
log_summaries_from_dict(
dict_to_log,
self._model.params['logdir'],
step,
)
class PrintLossAndTimeHook(tf.train.SessionRunHook):
"""Session hook that prints training samples and prediction from time to time
"""
def __init__(self, every_steps, model, print_ppl=False):
super(PrintLossAndTimeHook, self).__init__()
self._timer = tf.train.SecondOrStepTimer(every_steps=every_steps)
self._every_steps = every_steps
self._iter_count = 0
self._global_step = None
self._model = model
self._fetches = [model.loss]
self._last_time = time.time()
self._print_ppl = print_ppl
def begin(self):
self._iter_count = 0
self._global_step = tf.train.get_global_step()
def before_run(self, run_context):
if self._timer.should_trigger_for_step(self._iter_count):
return tf.train.SessionRunArgs([self._fetches, self._global_step])
return tf.train.SessionRunArgs([[], self._global_step])
def after_run(self, run_context, run_values):
results, step = run_values.results
self._iter_count = step
if not results:
return
self._timer.update_last_triggered_step(self._iter_count - 1)
if self._model.steps_in_epoch is None:
deco_print("Global step {}:".format(step), end=" ")
else:
deco_print(
"Epoch {}, global step {}:".format(
step // self._model.steps_in_epoch, step),
end=" ",
)
loss = results[0]
if not self._model.on_horovod or self._model.hvd.rank() == 0:
if self._print_ppl:
deco_print("Train loss: {:.4f} | ppl = {:.4f} | bpc = {:.4f}"
.format(loss, math.exp(loss),
loss/math.log(2)),
start="", end=", ")
else:
deco_print(
"Train loss: {:.4f} ".format(loss),
offset=4)
tm = (time.time() - self._last_time) / self._every_steps
m, s = divmod(tm, 60)
h, m = divmod(m, 60)
deco_print(
"time per step = {}:{:02}:{:.3f}".format(int(h), int(m), s),
start="",
)
self._last_time = time.time()
class RunEvaluationHook(tf.train.SessionRunHook):
"""Session hook that runs evaluation on a validation set
"""
def __init__(self, every_steps, model, last_step=-1, print_ppl=False):
super(RunEvaluationHook, self).__init__()
self._timer = tf.train.SecondOrStepTimer(every_steps=every_steps)
self._iter_count = 0
self._global_step = None
self._model = model
self._triggered = False
self._last_step = last_step
self._eval_saver = tf.train.Saver(
save_relative_paths=True,
max_to_keep=self._model.params['num_checkpoints']
)
self._best_eval_loss = 1e9
self._print_ppl = print_ppl
def begin(self):
self._iter_count = 0
self._global_step = tf.train.get_global_step()
def before_run(self, run_context):
self._triggered = self._timer.should_trigger_for_step(self._iter_count)
return tf.train.SessionRunArgs([[], self._global_step])
def after_run(self, run_context, run_values):
results, step = run_values.results
self._iter_count = step
if not self._triggered and step != self._last_step - 1:
return
self._timer.update_last_triggered_step(self._iter_count - 1)
if not self._model.on_horovod or self._model.hvd.rank() == 0:
deco_print("Running evaluation on a validation set:")
results_per_batch, total_loss = get_results_for_epoch(
self._model, run_context.session, mode="eval", compute_loss=True,
)
if not self._model.on_horovod or self._model.hvd.rank() == 0:
if self._print_ppl:
deco_print("Validation loss: {:.4f} | ppl = {:.4f} | bpc = {:.4f}"
.format(total_loss, math.exp(total_loss),
total_loss/math.log(2)), offset=4)
else:
deco_print(
"Validation loss: {:.4f} ".format(total_loss),
offset=4)
dict_to_log = self._model.finalize_evaluation(results_per_batch, step)
dict_to_log['eval_loss'] = total_loss
if self._print_ppl:
# Add bpc and ppl metrics to tensorboard
dict_to_log['ppl'] = math.exp(total_loss)
dict_to_log['bpc'] = total_loss / math.log(2)  # bits per character; matches the printed value above
# saving the best validation model
if self._model.params['save_checkpoint_steps'] and \
total_loss < self._best_eval_loss:
self._best_eval_loss = total_loss
self._eval_saver.save(
run_context.session,
os.path.join(self._model.params['logdir'], 'best_models',
'val_loss={:.4f}-step'.format(total_loss)),
global_step=step + 1,
)
# optionally logging to tensorboard any values
# returned from maybe_print_logs
if self._model.params['save_summaries_steps']:
log_summaries_from_dict(
dict_to_log,
self._model.params['logdir'],
step,
)
# ---- end of file: open_seq2seq/utils/hooks.py (OpenSeq2Seq-master) ----
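All of the hooks above follow the same `tf.train.SessionRunHook` lifecycle: `begin()` runs once while the graph can still be modified, `before_run()` requests extra fetches for the next `session.run()` call, and `after_run()` receives their values. A minimal sketch of that pattern, purely for illustration (this hook is not part of the repository):

import tensorflow as tf

class StepPrinterHook(tf.train.SessionRunHook):
  """Illustrative hook that fetches and prints the global step on every run."""

  def begin(self):
    # begin() is called before the graph is finalized, so it may still add ops
    self._global_step = tf.train.get_or_create_global_step()

  def before_run(self, run_context):
    # ask the monitored session to also evaluate the global step tensor
    return tf.train.SessionRunArgs(self._global_step)

  def after_run(self, run_context, run_values):
    # run_values.results contains whatever before_run() requested
    print("global step:", run_values.results)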
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals

import copy
import tempfile

import numpy as np
import numpy.testing as npt
import tensorflow as tf
from six.moves import range

from open_seq2seq.test_utils.test_speech_configs.ds2_test_config import \
    base_params, train_params, eval_params, base_model
from open_seq2seq.utils.utils import get_results_for_epoch, get_available_gpus


class UtilsTests(tf.test.TestCase):
  def setUp(self):
    base_params['logdir'] = tempfile.mktemp()
    self.train_config = copy.deepcopy(base_params)
    self.eval_config = copy.deepcopy(base_params)
    self.train_config.update(copy.deepcopy(train_params))
    self.eval_config.update(copy.deepcopy(eval_params))

  def tearDown(self):
    pass

  def test_get_results_for_epoch(self):
    # this will take all gpu memory, but that's probably fine for tests
    gpus = get_available_gpus()
    length_list = []

    for num_gpus in [1, 2, 3]:
      if num_gpus > len(gpus):
        continue
      for bs in [1, 2, 3, 5, 7]:
        if bs * num_gpus > 10:
          continue
        with tf.Graph().as_default() as g:
          self.eval_config['batch_size_per_gpu'] = bs
          self.eval_config['num_gpus'] = num_gpus
          model = base_model(params=self.eval_config, mode="infer", hvd=None)
          model.compile()
          model.infer = lambda inputs, outputs: inputs
          model.finalize_inference = lambda results: results
          with self.test_session(g, use_gpu=True) as sess:
            sess.run(tf.global_variables_initializer())
            inputs_per_batch = get_results_for_epoch(
                model, sess, False, "infer")
            length = np.hstack([inp['source_tensors'][1]
                                for inp in inputs_per_batch])
            ids = np.hstack([inp['source_ids'] for inp in inputs_per_batch])
            length_list.append(length[np.argsort(ids)])

    for i in range(len(length_list) - 1):
      npt.assert_allclose(length_list[i], length_list[i + 1])


if __name__ == '__main__':
  tf.test.main()
# ---- end of file: open_seq2seq/utils/utils_test.py (OpenSeq2Seq-master) ----
# Copyright (c) 2017 NVIDIA Corporation
from .funcs import train, infer, evaluate
# ---- end of file: open_seq2seq/utils/__init__.py (OpenSeq2Seq-master) ----
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import time
import numpy as np
import tensorflow as tf
# pylint: disable=no-name-in-module
from tensorflow.python import debug as tf_debug
from six.moves import range
from open_seq2seq.utils.utils import deco_print, get_results_for_epoch, \
collect_if_horovod
from .hooks import PrintSamplesHook, RunEvaluationHook, PrintLossAndTimeHook, \
BroadcastGlobalVariablesHook
from .helpers import TransferMonitoredTrainingSession, TransferScaffold, \
get_assign_ops_and_restore_dict, run_assign_and_saver
from open_seq2seq.data import WKTDataLayer
def train(train_model, eval_model=None, debug_port=None, custom_hooks=None):
if eval_model is not None and 'eval_steps' not in eval_model.params:
raise ValueError("eval_steps parameter has to be specified "
"if eval_model is provided")
hvd = train_model.hvd
if hvd:
master_worker = hvd.rank() == 0
else:
master_worker = True
# initializing session parameters
sess_config = tf.ConfigProto(allow_soft_placement=True)
# pylint: disable=no-member
sess_config.gpu_options.allow_growth = True
if hvd is not None:
# pylint: disable=no-member
sess_config.gpu_options.visible_device_list = str(hvd.local_rank())
if train_model.params.get('use_xla_jit', False):
sess_config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
# defining necessary hooks
hooks = [tf.train.StopAtStepHook(last_step=train_model.last_step)]
if custom_hooks:
for custom_hook in custom_hooks:
hooks.append(custom_hook(train_model=train_model, eval_model=eval_model))
if hvd is not None:
hooks.append(BroadcastGlobalVariablesHook(0))
if master_worker:
checkpoint_dir = train_model.params['logdir']
load_model_dir = train_model.params['load_model']
else:
checkpoint_dir = None
load_model_dir = None
if eval_model is not None:
# noinspection PyTypeChecker
hooks.append(
RunEvaluationHook(
every_steps=eval_model.params['eval_steps'],
model=eval_model,
last_step=train_model.last_step,
print_ppl=isinstance(eval_model.get_data_layer(), WKTDataLayer),
),
)
if master_worker:
if train_model.params['save_checkpoint_steps'] is not None:
# noinspection PyTypeChecker
saver = tf.train.Saver(
save_relative_paths=True,
max_to_keep=train_model.params['num_checkpoints']
)
hooks.append(tf.train.CheckpointSaverHook(
checkpoint_dir,
saver=saver,
save_steps=train_model.params['save_checkpoint_steps'],
))
if train_model.params['print_loss_steps'] is not None:
# noinspection PyTypeChecker
hooks.append(PrintLossAndTimeHook(
every_steps=train_model.params['print_loss_steps'],
model=train_model,
print_ppl=isinstance(train_model.get_data_layer(), WKTDataLayer),
))
if train_model.params['print_samples_steps'] is not None:
# noinspection PyTypeChecker
hooks.append(PrintSamplesHook(
every_steps=train_model.params['print_samples_steps'],
model=train_model,
))
total_time = 0.0
bench_start = train_model.params.get('bench_start', 10)
if debug_port:
hooks.append(
tf_debug.TensorBoardDebugHook("localhost:{}".format(debug_port))
)
if train_model.on_horovod:
init_data_layer = train_model.get_data_layer().iterator.initializer
else:
init_data_layer = tf.group(
[train_model.get_data_layer(i).iterator.initializer
for i in range(train_model.num_gpus)]
)
# We restore only if the user provides load_model_dir. load_model_dir is the
# directory containing the checkpoint we want to load partial or all weights
# from. Useful for transfer learning or if we do not want to overwrite our
# checkpoint.
restoring = load_model_dir and not tf.train.latest_checkpoint(checkpoint_dir)
if restoring:
vars_in_checkpoint = {}
for var_name, var_shape in tf.train.list_variables(load_model_dir):
vars_in_checkpoint[var_name] = var_shape
print('VARS_IN_CHECKPOINT:')
print(vars_in_checkpoint)
vars_to_load = []
for var in tf.global_variables():
var_name = var.name.split(':')[0]
if var_name in vars_in_checkpoint:
if var.shape == vars_in_checkpoint[var_name] and \
'global_step' not in var_name:
vars_to_load.append(var)
print('VARS_TO_LOAD:')
for var in vars_to_load:
print(var)
load_model_fn = tf.contrib.framework.assign_from_checkpoint_fn(
tf.train.latest_checkpoint(load_model_dir), vars_to_load
)
scaffold = tf.train.Scaffold(
local_init_op=tf.group(tf.local_variables_initializer(), init_data_layer),
init_fn = lambda scaffold_self, sess: load_model_fn(sess)
)
else:
scaffold = tf.train.Scaffold(
local_init_op=tf.group(tf.local_variables_initializer(), init_data_layer)
)
fetches = [train_model.train_op]
try:
total_objects = 0.0
# on horovod num_gpus is 1
for worker_id in range(train_model.num_gpus):
fetches.append(train_model.get_num_objects_per_step(worker_id))
except NotImplementedError:
deco_print("WARNING: Can't compute number of objects per step, since "
"train model does not define get_num_objects_per_step method.")
# starting training
sess = tf.train.MonitoredTrainingSession(
scaffold=scaffold,
checkpoint_dir=checkpoint_dir,
save_summaries_steps=train_model.params['save_summaries_steps'],
config=sess_config,
save_checkpoint_secs=None,
log_step_count_steps=train_model.params['save_summaries_steps'],
stop_grace_period_secs=300,
hooks=hooks)
step = 0
num_bench_updates = 0
while True:
if sess.should_stop():
break
tm = time.time()
try:
feed_dict = {}
iter_size = train_model.params.get('iter_size', 1)
if iter_size > 1:
feed_dict[train_model.skip_update_ph] = step % iter_size != 0
if step % iter_size == 0:
if step >= bench_start:
num_bench_updates += 1
fetches_vals = sess.run(fetches, feed_dict)
else:
# necessary to skip "no-update" steps when iter_size > 1
def run_with_no_hooks(step_context):
return step_context.session.run(fetches, feed_dict)
fetches_vals = sess.run_step_fn(run_with_no_hooks)
except tf.errors.OutOfRangeError:
break
if step >= bench_start:
total_time += time.time() - tm
if len(fetches) > 1:
for i in range(train_model.num_gpus):
total_objects += np.sum(fetches_vals[i + 1])
if train_model.params['print_bench_info_steps'] is not None:
if step % train_model.params['print_bench_info_steps'] == 0:
total_objects_cur = collect_if_horovod(total_objects, hvd,
mode="sum")
if master_worker:
avg_objects = 1.0 * total_objects_cur / total_time
deco_print("Avg objects per second: {:.3f}".format(avg_objects))
step += 1
sess.close()
if len(fetches) > 1:
total_objects = collect_if_horovod(total_objects, hvd, mode="sum")
if master_worker:
deco_print("Finished training")
if step > bench_start:
avg_time = 1.0 * total_time / num_bench_updates
deco_print("Avg time per step: {:.3f}s".format(avg_time))
if len(fetches) > 1:
avg_objects = 1.0 * total_objects / total_time
deco_print("Avg objects per second: {:.3f}".format(avg_objects))
else:
deco_print("Not enough steps for benchmarking")
def restore_and_get_results(model, checkpoint, mode):
if not model.params.get("use_trt", False):
# Checkpoint is restored prior to freezing graph when using TRT
saver = tf.train.Saver()
sess_config = tf.ConfigProto(allow_soft_placement=True)
# pylint: disable=no-member
sess_config.gpu_options.allow_growth = True
if model.hvd:
# pylint: disable=no-member
sess_config.gpu_options.visible_device_list = str(model.hvd.local_rank())
with tf.Session(config=sess_config) as sess:
if not model.params.get("use_trt", False):
assign_ops, restore_dict = get_assign_ops_and_restore_dict(
checkpoint, True)
if assign_ops:
run_assign_and_saver(sess, checkpoint, assign_ops, restore_dict)
else:
saver = tf.train.Saver()
saver.restore(sess, checkpoint)
results_per_batch = get_results_for_epoch(
model, sess, mode=mode, compute_loss=False, verbose=True,
)
return results_per_batch
def infer(model, checkpoint, output_file):
results_per_batch = restore_and_get_results(model, checkpoint, mode="infer")
if not model.on_horovod or model.hvd.rank() == 0:
model.finalize_inference(results_per_batch, output_file)
deco_print("Finished inference")
def evaluate(model, checkpoint):
results_per_batch = restore_and_get_results(model, checkpoint, mode="eval")
if not model.on_horovod or model.hvd.rank() == 0:
eval_dict = model.finalize_evaluation(results_per_batch)
deco_print("Finished evaluation")
return eval_dict
return None
# ---- end of file: open_seq2seq/utils/funcs.py (OpenSeq2Seq-master) ----
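The `train`, `infer` and `evaluate` entry points above are normally driven by a small script that parses the config and builds the model with the helpers defined below in `open_seq2seq/utils/utils.py`. A rough sketch of that wiring, with a placeholder config path, no Horovod, and single-model modes only (a simplification of what the repository's run script presumably does):

from open_seq2seq.utils.funcs import train, evaluate, infer
from open_seq2seq.utils.utils import get_base_config, check_logdir, create_model

# hypothetical command line; the config path is a placeholder
argv = ['--config_file=example_configs/my_config.py', '--mode=train']

args, base_config, base_model, config_module = get_base_config(argv)
checkpoint = check_logdir(args, base_config)
model = create_model(args, base_config, config_module, base_model,
                     hvd=None, checkpoint=checkpoint)

if args.mode == 'train':
  train(model, eval_model=None)
elif args.mode == 'eval':
  evaluate(model, checkpoint)
elif args.mode == 'infer':
  infer(model, checkpoint, args.infer_output_file)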
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import argparse
import ast
import copy
import datetime
import os
import pprint
import runpy
import shutil
import subprocess
import sys
import time
import numpy as np
import six
from six import string_types
from six.moves import range
import tensorflow as tf
# pylint: disable=no-name-in-module
from tensorflow.python.client import device_lib
def get_available_gpus():
# WARNING: this method will take all the memory on all devices!
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def clip_sparse(value, size):
dense_shape_clipped = value.dense_shape
dense_shape_clipped[0] = size
indices_clipped = []
values_clipped = []
for idx_tuple, val in zip(value.indices, value.values):
if idx_tuple[0] < size:
indices_clipped.append(idx_tuple)
values_clipped.append(val)
return tf.SparseTensorValue(np.array(indices_clipped),
np.array(values_clipped),
dense_shape_clipped)
def collect_if_horovod(value, hvd, mode='sum'):
"""Collects values from all workers if run on Horovod.
Note, that on all workers except first this function will return None.
Args:
value: value to collect.
hvd: horovod.tensorflow module or None
mode: could be "sum", "mean" or "gather", indicating whether values are summed, averaged or gathered across workers.
For "sum" and "mean" value has to be numerical, for "gather", value has
to be iterable.
Returns:
collected results if run on Horovod or value otherwise.
"""
if hvd is None:
return value
import mpi4py.rc
mpi4py.rc.initialize = False
from mpi4py import MPI
values = MPI.COMM_WORLD.gather(value)
# synchronize all workers
MPI.COMM_WORLD.Barrier()
if MPI.COMM_WORLD.Get_rank() != 0:
return None
if mode == 'sum':
return np.sum(values)
elif mode == 'mean':
return np.mean(values)
elif mode == 'gather':
return [item for sl in values for item in sl]
else:
raise ValueError("Incorrect mode: {}".format(mode))
def clip_last_batch(last_batch, true_size):
last_batch_clipped = []
for val in last_batch:
if isinstance(val, tf.SparseTensorValue):
last_batch_clipped.append(clip_sparse(val, true_size))
else:
last_batch_clipped.append(val[:true_size])
return last_batch_clipped
def iterate_data(model, sess, compute_loss, mode, verbose, num_steps=None):
total_time = 0.0
bench_start = model.params.get('bench_start', 10)
results_per_batch = []
size_defined = model.get_data_layer().get_size_in_samples() is not None
if size_defined:
dl_sizes = []
if compute_loss:
total_loss = 0.0
total_samples = []
fetches = []
# on horovod num_gpus is 1
for worker_id in range(model.num_gpus):
cur_fetches = [
model.get_data_layer(worker_id).input_tensors,
model.get_output_tensors(worker_id),
]
if compute_loss:
cur_fetches.append(model.eval_losses[worker_id])
if size_defined:
dl_sizes.append(model.get_data_layer(worker_id).get_size_in_samples())
try:
total_objects = 0.0
cur_fetches.append(model.get_num_objects_per_step(worker_id))
except NotImplementedError:
total_objects = None
deco_print("WARNING: Can't compute number of objects per step, since "
"train model does not define get_num_objects_per_step method.")
fetches.append(cur_fetches)
total_samples.append(0.0)
sess.run([model.get_data_layer(i).iterator.initializer
for i in range(model.num_gpus)])
step = 0
processed_batches = 0
if verbose:
if model.on_horovod:
ending = " on worker {}".format(model.hvd.rank())
else:
ending = ""
while True:
tm = time.time()
fetches_vals = {}
if size_defined:
fetches_to_run = {}
# removing finished data layers
for worker_id in range(model.num_gpus):
if total_samples[worker_id] < dl_sizes[worker_id]:
fetches_to_run[worker_id] = fetches[worker_id]
fetches_vals = sess.run(fetches_to_run)
else:
# if size is not defined we have to process fetches sequentially, so as not
# to lose data when an exception is thrown on one data layer
for worker_id, one_fetch in enumerate(fetches):
try:
fetches_vals[worker_id] = sess.run(one_fetch)
except tf.errors.OutOfRangeError:
continue
if step >= bench_start:
total_time += time.time() - tm
# looping over num_gpus. In Horovod case this loop is "dummy",
# since num_gpus = 1
for worker_id, fetches_val in fetches_vals.items():
if compute_loss:
inputs, outputs, loss = fetches_val[:3]
else:
inputs, outputs = fetches_val[:2]
if total_objects is not None and step >= bench_start:
total_objects += np.sum(fetches_val[-1])
# assuming the first dimension of any element of inputs["source_tensors"] is the batch size
batch_size = inputs["source_tensors"][0].shape[0]
total_samples[worker_id] += batch_size
if size_defined:
# this data_layer is at the last batch with few more elements, cutting
if total_samples[worker_id] > dl_sizes[worker_id]:
last_batch_size = dl_sizes[worker_id] % batch_size
for key, value in inputs.items():
inputs[key] = model.clip_last_batch(value, last_batch_size)
outputs = model.clip_last_batch(outputs, last_batch_size)
processed_batches += 1
if compute_loss:
total_loss += loss * batch_size
if mode == 'eval':
results_per_batch.append(model.evaluate(inputs, outputs))
elif mode == 'infer':
results_per_batch.append(model.infer(inputs, outputs))
else:
raise ValueError("Unknown mode: {}".format(mode))
if verbose:
if size_defined:
data_size = int(np.sum(np.ceil(np.array(dl_sizes) / batch_size)))
if step == 0 or len(fetches_vals) == 0 or \
(data_size > 10 and processed_batches % (data_size // 10) == 0):
deco_print("Processed {}/{} batches{}".format(
processed_batches, data_size, ending
))
else:
deco_print("Processed {} batches{}".format(processed_batches, ending),
end='\r')
if len(fetches_vals) == 0:
break
step += 1
# break early in the case of INT8 calibration
if num_steps is not None and step >= num_steps:
break
if verbose:
if step > bench_start:
deco_print(
"Avg time per step{}: {:.3}s".format(
ending, 1.0 * total_time / (step - bench_start)
),
)
if total_objects is not None:
avg_objects = 1.0 * total_objects / total_time
deco_print("Avg objects per second{}: {:.3f}".format(ending,
avg_objects))
else:
deco_print("Not enough steps for benchmarking{}".format(ending))
if compute_loss:
return results_per_batch, total_loss, np.sum(total_samples)
else:
return results_per_batch
def get_results_for_epoch(model, sess, compute_loss, mode, verbose=False):
if compute_loss:
results_per_batch, total_loss, total_samples = iterate_data(
model, sess, compute_loss, mode, verbose,
)
else:
results_per_batch = iterate_data(
model, sess, compute_loss, mode, verbose,
)
if compute_loss:
total_samples = collect_if_horovod(total_samples, model.hvd, 'sum')
total_loss = collect_if_horovod(total_loss, model.hvd, 'sum')
results_per_batch = collect_if_horovod(results_per_batch, model.hvd, 'gather')
if results_per_batch is None:
# returning dummy tuple of correct shape if not in master worker
if compute_loss:
return None, None
else:
return None
if compute_loss:
return results_per_batch, total_loss / total_samples
else:
return results_per_batch
def log_summaries_from_dict(dict_to_log, output_dir, step):
"""
A function that writes values from dict_to_log to a tensorboard
log file inside output_dir.
Args:
dict_to_log (dict):
A dictionary containing the tags and scalar values to log.
The dictionary values could also contain tf.Summary.Value objects
to support logging of image and audio data. In this mode, the
dictionary key is ignored, as tf.Summary.Value already contains a
tag.
output_dir (str): dir containing the tensorboard file
step (int): current training step
"""
sm_writer = tf.summary.FileWriterCache.get(output_dir)
for tag, value in dict_to_log.items():
if isinstance(value, tf.Summary.Value):
sm_writer.add_summary(
tf.Summary(value=[value]),
global_step=step,
)
else:
sm_writer.add_summary(
tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)]),
global_step=step,
)
sm_writer.flush()
def get_git_hash():
try:
return subprocess.check_output(['git', 'rev-parse', 'HEAD'],
stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as e:
return "{}\n".format(e.output.decode("utf-8"))
def get_git_diff():
try:
return subprocess.check_output(['git', 'diff'],
stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as e:
return "{}\n".format(e.output.decode("utf-8"))
class Logger(object):
def __init__(self, stream, log_file):
self.stream = stream
self.log = log_file
def write(self, msg):
self.stream.write(msg)
self.log.write(msg)
def flush(self):
self.stream.flush()
self.log.flush()
def flatten_dict(dct):
flat_dict = {}
for key, value in dct.items():
if isinstance(value, (int, float, string_types, bool)):
flat_dict.update({key: value})
elif isinstance(value, dict):
flat_dict.update(
{key + '/' + k: v for k, v in flatten_dict(dct[key]).items()}
)
return flat_dict
def nest_dict(flat_dict):
nst_dict = {}
for key, value in flat_dict.items():
nest_keys = key.split('/')
cur_dict = nst_dict
for i in range(len(nest_keys) - 1):
if nest_keys[i] not in cur_dict:
cur_dict[nest_keys[i]] = {}
cur_dict = cur_dict[nest_keys[i]]
cur_dict[nest_keys[-1]] = value
return nst_dict
def nested_update(org_dict, upd_dict):
for key, value in upd_dict.items():
if isinstance(value, dict):
if key in org_dict:
if not isinstance(org_dict[key], dict):
raise ValueError(
"Mismatch between org_dict and upd_dict at node {}".format(key)
)
nested_update(org_dict[key], value)
else:
org_dict[key] = value
else:
org_dict[key] = value
def mask_nans(x):
x_zeros = tf.zeros_like(x)
x_mask = tf.is_finite(x)
y = tf.where(x_mask, x, x_zeros)
return y
def deco_print(line, offset=0, start="*** ", end='\n'):
if six.PY2:
print((start + " " * offset + line).encode('utf-8'), end=end)
else:
print(start + " " * offset + line, end=end)
def array_to_string(row, vocab, delim=' '):
n = len(vocab)
return delim.join(map(lambda x: vocab[x], [r for r in row if 0 <= r < n]))
def text_ids_to_string(row, vocab, S_ID, EOS_ID, PAD_ID,
ignore_special=False, delim=' '):
"""For _-to-text outputs this function takes a row with ids,
target vocabulary and prints it as a human-readable string
"""
n = len(vocab)
if ignore_special:
f_row = []
for char_id in row:
if char_id == EOS_ID:
break
if char_id != PAD_ID and char_id != S_ID:
f_row += [char_id]
return delim.join(map(lambda x: vocab[x], [r for r in f_row if 0 < r < n]))
else:
return delim.join(map(lambda x: vocab[x], [r for r in row if 0 < r < n]))
def check_params(config, required_dict, optional_dict):
if required_dict is None or optional_dict is None:
return
for pm, vals in required_dict.items():
if pm not in config:
raise ValueError("{} parameter has to be specified".format(pm))
else:
if vals == str:
vals = string_types
if vals and isinstance(vals, list) and config[pm] not in vals:
raise ValueError("{} has to be one of {}".format(pm, vals))
if vals and not isinstance(vals, list) and not isinstance(config[pm], vals):
raise ValueError("{} has to be of type {}".format(pm, vals))
for pm, vals in optional_dict.items():
if vals == str:
vals = string_types
if pm in config:
if vals and isinstance(vals, list) and config[pm] not in vals:
raise ValueError("{} has to be one of {}".format(pm, vals))
if vals and not isinstance(vals, list) and not isinstance(config[pm], vals):
raise ValueError("{} has to be of type {}".format(pm, vals))
for pm in config:
if pm not in required_dict and pm not in optional_dict:
raise ValueError("Unknown parameter: {}".format(pm))
def cast_types(input_dict, dtype):
cast_input_dict = {}
for key, value in input_dict.items():
if isinstance(value, tf.Tensor):
if value.dtype == tf.float16 or value.dtype == tf.float32:
if value.dtype.base_dtype != dtype.base_dtype:
cast_input_dict[key] = tf.cast(value, dtype)
continue
if isinstance(value, dict):
cast_input_dict[key] = cast_types(input_dict[key], dtype)
continue
if isinstance(value, list):
cur_list = []
for nest_value in value:
if isinstance(nest_value, tf.Tensor):
if nest_value.dtype == tf.float16 or nest_value.dtype == tf.float32:
if nest_value.dtype.base_dtype != dtype.base_dtype:
cur_list.append(tf.cast(nest_value, dtype))
continue
cur_list.append(nest_value)
cast_input_dict[key] = cur_list
continue
cast_input_dict[key] = input_dict[key]
return cast_input_dict
def get_interactive_infer_results(model, sess, model_in):
fetches = [
model.get_data_layer().input_tensors,
model.get_output_tensors(),
]
feed_dict = model.get_data_layer().create_feed_dict(model_in)
inputs, outputs = sess.run(fetches, feed_dict=feed_dict)
return model.infer(inputs, outputs)
def get_base_config(args):
"""This function parses the command line arguments, reads the config file, and
gets the base_model from the config.
Args:
args (str): The command line arguments
Returns
args (dict): The arguments parsed into a dictionary
base_config (dict): The config read from the file and amended with the
command line arguments
base_model (OpenSeq2Seq model): The model specified in the config file
config_module (dict): The raw config file processed by runpy
"""
parser = argparse.ArgumentParser(description='Experiment parameters')
parser.add_argument("--config_file", required=True,
help="Path to the configuration file")
parser.add_argument("--mode", default='train',
help="Could be \"train\", \"eval\", "
"\"train_eval\" or \"infer\"")
parser.add_argument("--infer_output_file", default='infer-out.txt',
help="Path to the output of inference")
parser.add_argument('--continue_learning', dest='continue_learning',
action='store_true', help="whether to continue learning")
parser.add_argument('--no_dir_check', dest='no_dir_check',
action='store_true',
help="whether to check that everything is correct "
"with log directory")
parser.add_argument('--benchmark', dest='benchmark', action='store_true',
help='automatic config change for benchmarking')
parser.add_argument('--bench_steps', type=int, default='20',
help='max_steps for benchmarking')
parser.add_argument('--bench_start', type=int,
help='first step to start counting time for benchmarking')
parser.add_argument('--debug_port', type=int,
help='run TensorFlow in debug mode on specified port')
parser.add_argument('--enable_logs', dest='enable_logs', action='store_true',
help='whether to log output, git info, cmd args, etc.')
parser.add_argument('--use_xla_jit', dest='use_xla_jit', action='store_true',
help='whether to use XLA_JIT to compile and run the model.')
args, unknown = parser.parse_known_args(args)
if args.mode not in [
'train',
'eval',
'train_eval',
'infer',
'interactive_infer'
]:
raise ValueError("Mode has to be one of "
"['train', 'eval', 'train_eval', 'infer', "
"'interactive_infer']")
config_module = runpy.run_path(args.config_file, init_globals={'tf': tf})
base_config = config_module.get('base_params', None)
if base_config is None:
raise ValueError('base_config dictionary has to be '
'defined in the config file')
base_config['use_xla_jit'] = args.use_xla_jit or base_config.get('use_xla_jit', False)
base_model = config_module.get('base_model', None)
if base_model is None:
raise ValueError('base_model class has to be defined in the config file')
# after we read the config, trying to overwrite some of the properties
# with command line arguments that were passed to the script
parser_unk = argparse.ArgumentParser()
for pm, value in flatten_dict(base_config).items():
if type(value) == int or type(value) == float or \
isinstance(value, string_types):
parser_unk.add_argument('--' + pm, default=value, type=type(value))
elif type(value) == bool:
parser_unk.add_argument('--' + pm, default=value, type=ast.literal_eval)
config_update = parser_unk.parse_args(unknown)
nested_update(base_config, nest_dict(vars(config_update)))
return args, base_config, base_model, config_module
def get_calibration_config(arguments):
"""This function parses the command line arguments, reads the config file, and
gets the base_model from the config.
Args:
args (str): The command line arguments
Returns
args (dict): The arguments parsed into a dictionary
base_config (dict): The config read from the file and amended with the
command line arguments
base_model (OpenSeq2Seq model): The model specified in the config file
config_module (dict): The raw config file processed by runpy
"""
parser = argparse.ArgumentParser(description='Calibration parameters')
parser.add_argument("--config_file", required=True,
help="Path to the configuration file")
parser.add_argument("--infer_output_file", default="calibration/sample.pkl",
help="Path to the output of inference")
parser.add_argument("--calibration_out", default = "calibration.txt",
help="Path to calibration output")
class CustomSpace(object):
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
__hash__ = None
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def __eq__(self, other):
if not isinstance(other, CustomSpace):
return NotImplemented
return vars(self) == vars(other)
def __ne__(self, other):
if not isinstance(other, CustomSpace):
return NotImplemented
return not (self == other)
def __contains__(self, key):
return key in self.__dict__
custom_dict = {"benchmark":False,
"enable_logs":False,
"mode":"infer",
"continue_learning":False,
}
args, unknown = parser.parse_known_args(arguments,namespace=CustomSpace(**custom_dict))
config_module = runpy.run_path(args.config_file, init_globals={'tf': tf})
base_config = config_module.get('base_params', None)
if base_config is None:
raise ValueError('base_config dictionary has to be '
'defined in the config file')
base_model = config_module.get('base_model', None)
if base_model is None:
raise ValueError('base_model class has to be defined in the config file')
# after we read the config, trying to overwrite some of the properties
# with command line arguments that were passed to the script
parser_unk = argparse.ArgumentParser()
for pm, value in flatten_dict(base_config).items():
if type(value) == int or type(value) == float or \
isinstance(value, string_types):
parser_unk.add_argument('--' + pm, default=value, type=type(value))
elif type(value) == bool:
parser_unk.add_argument('--' + pm, default=value, type=ast.literal_eval)
config_update = parser_unk.parse_args(unknown)
nested_update(base_config, nest_dict(vars(config_update)))
return args, base_config, base_model, config_module
def check_logdir(args, base_config, restore_best_checkpoint=False):
"""A helper function that ensures the logdir is setup correctly
Args:
args (dict): Dictionary as returned from get_base_config()
base_config (dict): Dictionary as returned from get_base_config()
restore_best_checkpoint (bool): If True, will look for ckpt_dir + /best_models
Returns:
checkpoint: Either None if continue-learning is not set and training, or
the name of the checkpoint used to restore the model
"""
# checking that everything is correct with log directory
logdir = base_config['logdir']
if args.benchmark:
args.no_dir_check = True
try:
if args.enable_logs:
ckpt_dir = os.path.join(logdir, 'logs')
else:
ckpt_dir = logdir
if args.mode == 'train' or args.mode == 'train_eval':
if os.path.isfile(logdir):
raise IOError("There is a file with the same name as \"logdir\" "
"parameter. You should change the log directory path "
"or delete the file to continue.")
# check if "logdir" directory exists and non-empty
if os.path.isdir(logdir) and os.listdir(logdir) != []:
if not args.continue_learning:
raise IOError("Log directory is not empty. If you want to continue "
"learning, you should provide "
"\"--continue_learning\" flag")
checkpoint = tf.train.latest_checkpoint(ckpt_dir)
if checkpoint is None:
raise IOError(
"There is no valid TensorFlow checkpoint in the "
"{} directory. Can't load model".format(ckpt_dir)
)
else:
if args.continue_learning:
raise IOError("The log directory is empty or does not exist. "
"You should probably not provide "
"\"--continue_learning\" flag?")
checkpoint = None
elif (args.mode == 'infer' or args.mode == 'eval' or
args.mode == 'interactive_infer'):
if os.path.isdir(logdir) and os.listdir(logdir) != []:
# if os.path.isdir(logdir) and 'checkpoint' in os.listdir(logdir):
best_ckpt_dir = os.path.join(ckpt_dir, 'best_models')
if restore_best_checkpoint and os.path.isdir(best_ckpt_dir):
deco_print("Restoring from the best checkpoint")
checkpoint = tf.train.latest_checkpoint(best_ckpt_dir)
ckpt_dir = best_ckpt_dir
else:
deco_print("Restoring from the latest checkpoint")
checkpoint = tf.train.latest_checkpoint(ckpt_dir)
if checkpoint is None:
raise IOError(
"There is no valid TensorFlow checkpoint in the "
"{} directory. Can't load model".format(ckpt_dir)
)
else:
raise IOError(
"{} does not exist or is empty, can't restore model".format(
ckpt_dir
)
)
except IOError as e:
if args.no_dir_check:
print("Warning: {}".format(e))
print("Resuming operation since no_dir_check argument was provided")
else:
raise
return checkpoint
def check_base_model_logdir(base_logdir, args, restore_best_checkpoint=False):
"""A helper function that ensures the logdir is setup correctly
Args:
args (dict): Dictionary as returned from get_base_config()
base_config (dict): Dictionary as returned from get_base_config()
restore_best_checkpoint (bool): If True, will look for ckpt_dir + /best_models
Returns:
checkpoint: Either None if continue-learning is not set and training, or
the name of the checkpoint used to restore the model
"""
# checking that everything is correct with log directory
if not base_logdir:
return ''
if (not os.path.isdir(base_logdir)) or len(os.listdir(base_logdir)) == 0:
raise IOError("The log directory for the base model is empty or does not exist.")
if args.enable_logs:
ckpt_dir = os.path.join(base_logdir, 'logs')
if not os.path.isdir(ckpt_dir):
raise IOError("There's no folder 'logs' in the base model logdir. \
If checkpoints exist, put them in the 'logs' folder.")
else:
ckpt_dir = base_logdir
if restore_best_checkpoint and os.path.isdir(os.path.join(ckpt_dir, 'best_models')):
ckpt_dir = os.path.join(ckpt_dir, 'best_models')
checkpoint = tf.train.latest_checkpoint(ckpt_dir)
if checkpoint is None:
raise IOError(
"There is no valid TensorFlow checkpoint in the \
{} directory. Can't load model".format(ckpt_dir))
return ckpt_dir
def create_logdir(args, base_config):
"""A helper function that ensures the logdir and log files are setup corretly.
Only called in --enable_logs is set.
Args:
args (dict): Dictionary as returned from get_base_config()
base_config (dict): Dictionary as returned from get_base_config()
Returns:
Some objects that need to be cleaned up in run.py
"""
logdir = base_config['logdir']
if not os.path.exists(logdir):
os.makedirs(logdir)
tm_suf = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
shutil.copy(
args.config_file,
os.path.join(logdir, 'config_{}.py'.format(tm_suf)),
)
with open(os.path.join(logdir, 'cmd-args_{}.log'.format(tm_suf)),
'w') as f:
f.write(" ".join(sys.argv))
with open(os.path.join(logdir, 'git-info_{}.log'.format(tm_suf)),
'w') as f:
f.write('commit hash: {}'.format(get_git_hash()))
f.write(get_git_diff())
old_stdout = sys.stdout
old_stderr = sys.stderr
stdout_log = open(
os.path.join(logdir, 'stdout_{}.log'.format(tm_suf)), 'a', 1
)
stderr_log = open(
os.path.join(logdir, 'stderr_{}.log'.format(tm_suf)), 'a', 1
)
sys.stdout = Logger(sys.stdout, stdout_log)
sys.stderr = Logger(sys.stderr, stderr_log)
return old_stdout, old_stderr, stdout_log, stderr_log
def create_model(args, base_config, config_module, base_model, hvd,
checkpoint=None):
"""A helpful function that creates the train, eval, and infer models as
needed.
Args:
args (dict): Dictionary as returned from get_base_config()
base_config (dict): Dictionary as returned from get_base_config()
config_module: config_module as returned from get_base_config()
base_model (OpenSeq2Seq model class): The model class as returned from
get_base_config()
hvd: Either None if Horovod is not enabled, or the Horovod library
checkpoint (str): checkpoint path as returned from
tf.train.latest_checkpoint
Returns:
model: A compiled model. For the 'train_eval' mode, a tuple containing the
(train_model, eval_model) is returned.
"""
train_config = copy.deepcopy(base_config)
eval_config = copy.deepcopy(base_config)
infer_config = copy.deepcopy(base_config)
if args.mode == 'train' or args.mode == 'train_eval':
if 'train_params' in config_module:
nested_update(train_config, copy.deepcopy(config_module['train_params']))
if hvd is None or hvd.rank() == 0:
deco_print("Training config:")
pprint.pprint(train_config)
if args.mode == 'eval' or args.mode == 'train_eval':
if 'eval_params' in config_module:
nested_update(eval_config, copy.deepcopy(config_module['eval_params']))
if hvd is None or hvd.rank() == 0:
deco_print("Evaluation config:")
pprint.pprint(eval_config)
if args.mode == "infer":
if args.infer_output_file is None:
raise ValueError("\"infer_output_file\" command line parameter is "
"required in inference mode")
if "infer_params" in config_module:
nested_update(infer_config, copy.deepcopy(config_module['infer_params']))
if hvd is None or hvd.rank() == 0:
deco_print("Inference config:")
pprint.pprint(infer_config)
if args.mode == "interactive_infer":
if "interactive_infer_params" in config_module:
nested_update(
infer_config,
copy.deepcopy(config_module['interactive_infer_params'])
)
if hvd is None or hvd.rank() == 0:
deco_print("Inference config:")
pprint.pprint(infer_config)
if args.benchmark:
deco_print("Adjusting config for benchmarking")
train_config['print_samples_steps'] = None
train_config['print_loss_steps'] = 1
train_config['save_summaries_steps'] = None
train_config['save_checkpoint_steps'] = None
train_config['logdir'] = str("")
if 'num_epochs' in train_config:
del train_config['num_epochs']
train_config['max_steps'] = args.bench_steps
if args.bench_start:
train_config['bench_start'] = args.bench_start
elif 'bench_start' not in train_config:
train_config['bench_start'] = 10 # default value
if hvd is None or hvd.rank() == 0:
deco_print("New benchmarking config:")
pprint.pprint(train_config)
args.mode = "train"
if args.mode == 'train_eval':
train_model = base_model(params=train_config, mode="train", hvd=hvd)
train_model.compile()
eval_model = base_model(params=eval_config, mode="eval", hvd=hvd)
eval_model.compile(force_var_reuse=True)
model = (train_model, eval_model)
elif args.mode == 'train':
model = base_model(params=train_config, mode="train", hvd=hvd)
model.compile()
elif args.mode == 'eval':
model = base_model(params=eval_config, mode="eval", hvd=hvd)
model.compile(force_var_reuse=False)
else:
model = base_model(params=infer_config, mode=args.mode, hvd=hvd)
model.compile(checkpoint=checkpoint)
return model
# ---- end of file: open_seq2seq/utils/utils.py (OpenSeq2Seq-master) ----
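To make the config-merging helpers defined above concrete, here is a small self-contained example of `flatten_dict`, `nest_dict` and `nested_update`; the parameter names are made up:

from open_seq2seq.utils.utils import flatten_dict, nest_dict, nested_update

params = {'lr': 0.1, 'decoder': {'layers': 4, 'hidden': 512}}

flat = flatten_dict(params)
# {'lr': 0.1, 'decoder/layers': 4, 'decoder/hidden': 512}

restored = nest_dict(flat)
# {'lr': 0.1, 'decoder': {'layers': 4, 'hidden': 512}}

# command-line style overrides are merged back into the nested config in place
nested_update(params, nest_dict({'decoder/layers': 6}))
# params is now {'lr': 0.1, 'decoder': {'layers': 6, 'hidden': 512}}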
'''
This file modifies standard TensorFlow modules necessary for transfer learning,
such as MonitoredTrainingSession, ChiefSessionCreator, Scaffold, SessionManager
'''
import re
import time
import tensorflow as tf
from tensorflow.python.ops import resources
from tensorflow.python.training import saver as training_saver
FP32_TEST = re.compile(r'Loss_Optimization\/FP32-master-copy\/')
# Value that indicates no value was provided.
USE_DEFAULT = object()
def TransferMonitoredTrainingSession(master='', # pylint: disable=invalid-name
is_chief=True,
checkpoint_dir=None,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=USE_DEFAULT,
save_summaries_steps=USE_DEFAULT,
save_summaries_secs=USE_DEFAULT,
config=None,
stop_grace_period_secs=120,
log_step_count_steps=100,
max_wait_secs=7200,
save_checkpoint_steps=USE_DEFAULT,
summary_dir=None,
load_model_dir=None,
load_fc=False):
"""Creates a `MonitoredSession` for training.
For a chief, this utility sets proper session initializer/restorer. It also
creates hooks related to checkpoint and summary saving. For workers, this
utility sets proper session creator which waits for the chief to
initialize/restore. Please check `tf.train.MonitoredSession` for more
information.
Args:
master: `String` the TensorFlow master to use.
is_chief: If `True`, it will take care of initialization and recovery of the
underlying TensorFlow session. If `False`, it will wait on a chief to
initialize or recover the TensorFlow session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified, a default one is created. It's used to finalize the graph.
hooks: Optional list of `SessionRunHook` objects.
chief_only_hooks: list of `SessionRunHook` objects. Activate these hooks if
`is_chief==True`, ignore otherwise.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If both `save_checkpoint_steps` and
`save_checkpoint_secs` are set to `None`, then the default checkpoint
saver isn't used. If both are provided, then only `save_checkpoint_secs`
is used. Default 600.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If both
`save_summaries_steps` and `save_summaries_secs` are set to `None`, then
the default summary saver isn't used. Default 100.
save_summaries_secs: The frequency, in secs, that the summaries are written
to disk using a default summary saver. If both `save_summaries_steps` and
`save_summaries_secs` are set to `None`, then the default summary saver
isn't used. Default not enabled.
config: an instance of `tf.ConfigProto` proto used to configure the session.
It's the `config` argument of constructor of `tf.Session`.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec is logged.
max_wait_secs: Maximum time workers should wait for the session to
become available. This should be kept relatively short to help detect
incorrect code, but sometimes may need to be increased if the chief takes
a while to start up.
save_checkpoint_steps: The frequency, in number of global steps, that a
checkpoint is saved using a default checkpoint saver. If both
`save_checkpoint_steps` and `save_checkpoint_secs` are set to `None`, then
the default checkpoint saver isn't used. If both are provided, then only
`save_checkpoint_secs` is used. Default not enabled.
summary_dir: A string. Optional path to a directory where to
save summaries. If None, checkpoint_dir is used instead.
load_model_dir (str): The location of the checkpoint file used to load the
model weights.
Returns:
A `MonitoredSession` object.
"""
if save_summaries_steps == USE_DEFAULT and save_summaries_secs == USE_DEFAULT:
save_summaries_steps = 100
save_summaries_secs = None
elif save_summaries_secs == USE_DEFAULT:
save_summaries_secs = None
elif save_summaries_steps == USE_DEFAULT:
save_summaries_steps = None
if (save_checkpoint_steps == USE_DEFAULT and
save_checkpoint_secs == USE_DEFAULT):
save_checkpoint_steps = None
save_checkpoint_secs = 600
elif save_checkpoint_secs == USE_DEFAULT:
save_checkpoint_secs = None
elif save_checkpoint_steps == USE_DEFAULT:
save_checkpoint_steps = None
if not is_chief:
session_creator = tf.train.WorkerSessionCreator(
scaffold=scaffold,
master=master,
config=config,
max_wait_secs=max_wait_secs)
return tf.train.MonitoredSession(
session_creator=session_creator, hooks=hooks or [],
stop_grace_period_secs=stop_grace_period_secs)
all_hooks = []
if chief_only_hooks:
all_hooks.extend(chief_only_hooks)
restore_all = False
if not load_model_dir:
load_model_dir = checkpoint_dir
restore_all = True
assign_ops, restore_dict = get_assign_ops_and_restore_dict(
tf.train.latest_checkpoint(load_model_dir), restore_all)
if ((restore_all or tf.train.latest_checkpoint(checkpoint_dir))
and len(assign_ops) == 0):
# Checking to see if we can use the default TensorFlow Session Creator
# We need two conditions to be true:
# 1a) We are not loading partial vars through load_model_dir OR
# 1b) There is a saved checkpoint file from which we can load
# 2) if there is no dtype mismatch between checkpoint vars and vars in graph
session_creator = tf.train.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=checkpoint_dir,
master=master,
config=config)
else: # load variables from the base model's checkpoint
if load_model_dir:
print("Loading the base model from {}.".format(load_model_dir))
session_creator = TransferChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=load_model_dir,
master=master,
config=config,
load_fc=load_fc,
assign_ops=assign_ops,
restore_dict=restore_dict)
summary_dir = summary_dir or checkpoint_dir
if summary_dir:
if log_step_count_steps and log_step_count_steps > 0:
all_hooks.append(
tf.train.StepCounterHook(
output_dir=summary_dir, every_n_steps=log_step_count_steps))
if (save_summaries_steps and save_summaries_steps > 0) or (
save_summaries_secs and save_summaries_secs > 0):
all_hooks.append(tf.train.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
save_secs=save_summaries_secs,
output_dir=summary_dir))
if checkpoint_dir:
if (save_checkpoint_secs and save_checkpoint_secs > 0) or (
save_checkpoint_steps and save_checkpoint_steps > 0):
all_hooks.append(tf.train.CheckpointSaverHook(
checkpoint_dir,
save_steps=save_checkpoint_steps,
save_secs=save_checkpoint_secs,
scaffold=scaffold))
if hooks:
all_hooks.extend(hooks)
return tf.train.MonitoredSession(
session_creator=session_creator, hooks=all_hooks,
stop_grace_period_secs=stop_grace_period_secs)
class TransferChiefSessionCreator(tf.train.SessionCreator):
def __init__(self,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
load_fc=False,
assign_ops=None,
restore_dict=None):
"""Initializes a chief session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
"""
self._checkpoint_dir = checkpoint_dir
self._checkpoint_filename_with_path = checkpoint_filename_with_path
self._scaffold = scaffold or TransferScaffold()
self._session_manager = None
self._master = master
self._config = config
self._load_fc = load_fc
self._assign_ops = assign_ops
self._restore_dict = restore_dict
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = TransferSessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=tf.get_default_graph())
return self._session_manager
def create_session(self):
print('SCAFFOLD TYPE:', type(self._scaffold))
self._scaffold.finalize()
# tf.get_default_graph()._unsafe_unfinalize()
return self._get_session_manager().prepare_session(
self._master,
saver=self._scaffold.saver,
checkpoint_dir=self._checkpoint_dir,
checkpoint_filename_with_path=self._checkpoint_filename_with_path,
config=self._config,
init_op=self._scaffold.init_op,
init_feed_dict=self._scaffold.init_feed_dict,
init_fn=self._scaffold.init_fn,
load_fc=self._load_fc,
assign_ops=self._assign_ops,
restore_dict=self._restore_dict)
class TransferScaffold(tf.train.Scaffold):
def finalize(self):
"""Creates operations if needed and finalizes the graph."""
if self._init_op is None:
def default_init_op():
return tf.group(
tf.global_variables_initializer(),
resources.initialize_resources(resources.shared_resources()))
self._init_op = TransferScaffold.get_or_default(
'init_op',
tf.GraphKeys.INIT_OP,
default_init_op)
if self._ready_op is None:
def default_ready_op():
return tf.concat([
tf.report_uninitialized_variables(),
resources.report_uninitialized_resources()
], 0)
self._ready_op = TransferScaffold.get_or_default(
'ready_op', tf.GraphKeys.READY_OP,
default_ready_op)
if self._ready_for_local_init_op is None:
def default_ready_for_local_init_op():
return tf.report_uninitialized_variables(
tf.global_variables())
self._ready_for_local_init_op = TransferScaffold.get_or_default(
'ready_for_local_init_op', tf.GraphKeys.READY_FOR_LOCAL_INIT_OP,
default_ready_for_local_init_op)
if self._local_init_op is None:
self._local_init_op = TransferScaffold.get_or_default(
'local_init_op', tf.GraphKeys.LOCAL_INIT_OP,
TransferScaffold.default_local_init_op)
if self._summary_op is None:
self._summary_op = TransferScaffold.get_or_default(
'summary_op', tf.GraphKeys.SUMMARY_OP, tf.summary.merge_all)
# pylint: disable=g-long-lambda
if self._saver is None:
self._saver = training_saver._get_saver_or_default() # pylint: disable=protected-access
# pylint: enable=g-long-lambda
self._saver.build()
# ops.get_default_graph().finalize()
# logging.info('Graph was finalized.')
return self
class TransferSessionManager(tf.train.SessionManager):
def _restore_checkpoint(self,
master,
sess,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None,
load_fc=False,
assign_ops=None,
restore_dict=None):
"""Creates a `Session`, and tries to restore a checkpoint.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, is_restored) where 'is_restored' is `True` if
the session could be restored, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
self._target = master
# sess = tf.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
raise ValueError("Can not provide both checkpoint_dir and "
"checkpoint_filename_with_path.")
# If either saver or checkpoint_* is not specified, cannot restore. Just
# return.
print('checkpoint_dir', checkpoint_dir)
print('checkpoint_filename_with_path', checkpoint_filename_with_path)
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
return sess, False
if checkpoint_filename_with_path:
# saver.restore(sess, checkpoint_filename_with_path)
# restore_certain_variables(sess, checkpoint_filename_with_path)
run_assign_and_saver(
sess, checkpoint_filename_with_path, assign_ops, restore_dict)
return sess, True
# Waits up until max_wait_secs for checkpoint to become available.
wait_time = 0
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
tf.logging.info("Waiting for checkpoint to be available.")
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
else:
return sess, False
# Loads the checkpoint.
ckpt_file = ckpt.model_checkpoint_path
# restore_certain_variables(sess, ckpt_file)
run_assign_and_saver(sess, ckpt_file, assign_ops, restore_dict)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
return sess, True
def prepare_session(self,
master,
init_op=None,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None,
init_feed_dict=None,
init_fn=None,
load_fc=False,
assign_ops=None,
restore_dict=None):
"""Creates a `Session`. Makes sure the model is ready to be used.
Creates a `Session` on 'master'. If a `saver` object is passed in, and
`checkpoint_dir` points to a directory containing valid checkpoint
files, then it will try to recover the model from checkpoint. If
no checkpoint files are available, and `wait_for_checkpoint` is
`True`, then the process would check every `recovery_wait_secs`,
up to `max_wait_secs`, for recovery to succeed.
If the model cannot be recovered successfully then it is initialized by
running the `init_op` and calling `init_fn` if they are provided.
The `local_init_op` is also run after init_op and init_fn, regardless of
whether the model was recovered successfully, but only if
`ready_for_local_init_op` passes.
If the model is recovered from a checkpoint it is assumed that all
global variables have been initialized, in particular neither `init_op`
nor `init_fn` will be executed.
It is an error if the model cannot be recovered and no `init_op`
or `init_fn` or `local_init_op` are passed.
Args:
master: `String` representation of the TensorFlow master to use.
init_op: Optional `Operation` used to initialize the model.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in
the dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint
file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call
when running the init op.
init_fn: Optional callable used to initialize the model. Called after
the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Session` object that can be used to drive the model.
Raises:
RuntimeError: If the model cannot be initialized or recovered.
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
sess = tf.Session(master, graph=self._graph, config=config)
if init_op is None and not init_fn and self._local_init_op is None:
raise RuntimeError("Model is not initialized and no init_op or "
"init_fn or local_init_op was given")
if init_op is not None:
sess.run(init_op, feed_dict=init_feed_dict)
if init_fn:
init_fn(sess)
    sess.run(tf.local_variables_initializer())  # local variables have to be initialized explicitly here
print("LOCAL INIT OP", self._local_init_op)
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
sess,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config,
load_fc=load_fc,
assign_ops=assign_ops,
restore_dict=restore_dict)
local_init_success, msg = self._try_run_local_init_op(sess)
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for local_init. "
"Init op: %s, init fn: %s, error: %s" % (_maybe_name(init_op),
init_fn,
msg))
is_ready, msg = self._model_ready(sess)
if not is_ready:
raise RuntimeError(
"Init operations did not make model ready. "
"Init op: %s, init fn: %s, local_init_op: %s, error: %s" %
(_maybe_name(init_op), init_fn, self._local_init_op, msg))
return sess
def _restore_embed(embed_var, var_to_shape_map, reader):
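  # Helper for get_assign_ops_and_restore_dict below: when the checkpoint has
  # no 'EmbeddingMatrix' variable, look for a 'dense/kernel' variable whose
  # transposed shape matches embed_var and build an assign op from it.
  # Returns (op_or_var, needs_assign), or (None, None) when the checkpoint
  # already contains an EmbeddingMatrix or no candidate is found.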
if len([var for var in var_to_shape_map if 'EmbeddingMatrix' in var]) > 0:
return None, None # assume same name
for var in var_to_shape_map:
if (var.endswith('dense/kernel')
and var_to_shape_map[var] == tf.transpose(embed_var).shape):
print('Assigning', var, 'to', embed_var.name)
tensor = reader.get_tensor(var).T
      if tensor.dtype != embed_var.dtype.as_numpy_dtype():
return embed_var.assign(tf.cast(tensor, embed_var.dtype)), True
return embed_var, False
return None, None
def get_assign_ops_and_restore_dict(filename, restore_all=False):
"""Helper function to read variable checkpoints from filename.
Iterates through all vars in restore_all=False else all trainable vars. It
attempts to match variables by name and variable shape. Returns a possibly
empty list of assign_ops, and a possibly empty dictionary for tf.train.Saver()
"""
def check_name_and_shape(name, var, shape_map):
if name in shape_map:
# Cannot check variables with unknown sizes such as cudnn rnns
if str(var.shape) == "<unknown>":
# Just return True and hope the shapes match
return True
if var.shape == shape_map[name]:
return True
return False
assign_ops = []
restore_dict = {}
try:
reader = tf.train.NewCheckpointReader(filename)
var_to_shape_map = reader.get_variable_to_shape_map()
variables = tf.trainable_variables()
if restore_all:
variables = tf.get_collection(tf.GraphKeys.VARIABLES)
for var in variables:
idx = var.name.find(":")
if idx != -1:
true_name = var.name[:idx]
loss_idx = re.search("Loss_Optimization", true_name)
if 'EmbeddingMatrix' in true_name:
embed_restore, assign = _restore_embed(var, var_to_shape_map, reader)
if assign:
assign_ops.append(embed_restore)
else:
restore_dict[true_name] = embed_restore
if check_name_and_shape(true_name, var, var_to_shape_map):
tensor = reader.get_tensor(true_name)
if tensor.dtype != var.dtype.as_numpy_dtype():
assign_ops.append(var.assign(tf.cast(tensor, var.dtype)))
else:
restore_dict[true_name] = var
elif loss_idx:
loss_idx = loss_idx.end()
if FP32_TEST.search(true_name):
true_name = FP32_TEST.sub("", true_name)
else:
true_name = (true_name[:loss_idx]
+ "/Loss_Optimization/FP32-master-copy"
+ true_name[loss_idx:])
if check_name_and_shape(true_name, var, var_to_shape_map):
tensor = reader.get_tensor(true_name)
if tensor.dtype != var.dtype.as_numpy_dtype():
assign_ops.append(var.assign(tf.cast(tensor, var.dtype)))
else:
restore_dict[true_name] = var
else:
print("Not restoring {}".format(var.name))
if true_name not in var_to_shape_map:
print("true name [{}] was not in shape map".format(true_name))
else:
if var.shape != var_to_shape_map[true_name]:
print(("var.shape [{}] does not match var_to_shape_map[true_name]"
"[{}]").format(var.shape, var_to_shape_map[true_name]))
print("WARNING: Run will mostly error out due to this")
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
if ("Data loss" in str(e) and
        (any([ext in filename for ext in [".index", ".meta", ".data"]]))):
proposed_file = ".".join(filename.split(".")[0:-1])
v2_file_error_template = """
It's likely that this is a V2 checkpoint and you need to provide the
filename *prefix*. Try removing the '.' and extension. Try:
inspect checkpoint --file_name = {}"""
print(v2_file_error_template.format(proposed_file))
raise ValueError("Error in loading checkpoint")
return assign_ops, restore_dict
def run_assign_and_saver(sess, filename, assign_ops, restore_dict):
"""Helper function to restore variables. All variables with the same dtype
can be restored using tf.train.Saver(). All variables with different dtype
are restored using assign_ops
"""
if restore_dict:
restorer = tf.train.Saver(restore_dict)
restorer.restore(sess, filename)
if assign_ops:
sess.run(assign_ops)
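# Usage sketch for the two helpers above. Illustrative only: the function
# below is not part of OpenSeq2Seq, and it assumes that a model graph has
# already been built and that `checkpoint_path` points to an existing
# checkpoint.
def _example_partial_restore(checkpoint_path):
  """Restore every variable that matches the checkpoint by name and shape.
  Matching variables with the same dtype go through tf.train.Saver(); the
  remaining matches (e.g. fp16 variables loaded from an fp32 checkpoint)
  are restored with explicit assign ops.
  """
  assign_ops, restore_dict = get_assign_ops_and_restore_dict(
      checkpoint_path, restore_all=True)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    run_assign_and_saver(sess, checkpoint_path, assign_ops, restore_dict)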
def _maybe_name(obj):
"""Returns object name if it has one, or a message otherwise.
  This is useful for names that appear in error messages.
Args:
obj: Object to get the name of.
Returns:
name, "None", or a "no name" message.
"""
if obj is None:
return "None"
elif hasattr(obj, "name"):
return obj.name
else:
return "<no name for %s>" % type(obj)
| OpenSeq2Seq-master | open_seq2seq/utils/helpers.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import codecs
import re
import nltk
import tensorflow as tf
from six.moves import range
from open_seq2seq.data.text2text.text2text import SpecialTextTokens
from open_seq2seq.utils.utils import deco_print, array_to_string, \
text_ids_to_string
from .encoder_decoder import EncoderDecoderModel
def transform_for_bleu(row, vocab, ignore_special=False,
delim=' ', bpe_used=False):
n = len(vocab)
if ignore_special:
f_row = []
for i in range(0, len(row)):
char_id = row[i]
if char_id == SpecialTextTokens.EOS_ID.value:
break
if char_id != SpecialTextTokens.PAD_ID.value and \
char_id != SpecialTextTokens.S_ID.value:
f_row += [char_id]
sentence = [vocab[r] for r in f_row if 0 < r < n]
else:
sentence = [vocab[r] for r in row if 0 < r < n]
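  # when BPE was used, merge subword units back into words by joining the
  # tokens and stripping the "@@ " continuation markers
  # (e.g. "lon@@ don" -> "london") before splitting into words again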
if bpe_used:
sentence = delim.join(sentence)
sentence = re.sub("@@ ", "", sentence)
sentence = sentence.split(delim)
return sentence
def calculate_bleu(preds, targets):
"""Function to calculate BLEU score.
Args:
preds: list of lists
targets: list of lists
Returns:
float32: BLEU score
"""
bleu_score = nltk.translate.bleu_score.corpus_bleu(
targets, preds, emulate_multibleu=True,
)
return bleu_score
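# Minimal usage sketch for calculate_bleu (illustrative only, with made-up
# token lists). Each prediction is a list of tokens and each target entry is
# a list of one or more reference token lists -- the same format that
# transform_for_bleu above produces.
def _example_bleu():
  preds = [['a', 'cat', 'sat', 'on', 'the', 'mat']]
  targets = [[['a', 'cat', 'sat', 'on', 'the', 'mat']]]
  return calculate_bleu(preds, targets)  # 1.0 for an exact match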
class Text2Text(EncoderDecoderModel):
"""An example class implementing classical text-to-text model."""
def _create_encoder(self):
self.params['encoder_params']['src_vocab_size'] = (
self.get_data_layer().params['src_vocab_size']
)
return super(Text2Text, self)._create_encoder()
def _create_decoder(self):
self.params['decoder_params']['batch_size'] = (
self.params['batch_size_per_gpu']
)
self.params['decoder_params']['tgt_vocab_size'] = (
self.get_data_layer().params['tgt_vocab_size']
)
return super(Text2Text, self)._create_decoder()
def _create_loss(self):
self.params['loss_params']['batch_size'] = self.params['batch_size_per_gpu']
self.params['loss_params']['tgt_vocab_size'] = (
self.get_data_layer().params['tgt_vocab_size']
)
return super(Text2Text, self)._create_loss()
def infer(self, input_values, output_values):
input_strings, output_strings = [], []
input_values = input_values['source_tensors']
for input_sample, output_sample in zip(input_values, output_values):
for i in range(0, input_sample.shape[0]): # iterate over batch dimension
output_strings.append(text_ids_to_string(
output_sample[i],
self.get_data_layer().params['target_idx2seq'],
S_ID=self.decoder.params.get('GO_SYMBOL',
SpecialTextTokens.S_ID.value),
EOS_ID=self.decoder.params.get('END_SYMBOL',
                                           SpecialTextTokens.EOS_ID.value),
PAD_ID=self.decoder.params.get('PAD_SYMBOL',
                                           SpecialTextTokens.PAD_ID.value),
ignore_special=True, delim=' ',
))
input_strings.append(text_ids_to_string(
input_sample[i],
self.get_data_layer().params['source_idx2seq'],
S_ID=self.decoder.params.get('GO_SYMBOL',
SpecialTextTokens.S_ID.value),
EOS_ID=self.decoder.params.get('END_SYMBOL',
SpecialTextTokens.EOS_ID.value),
PAD_ID=self.decoder.params.get('PAD_SYMBOL',
                                           SpecialTextTokens.PAD_ID.value),
ignore_special=True, delim=' ',
))
return input_strings, output_strings
def finalize_inference(self, results_per_batch, output_file):
with codecs.open(output_file, 'w', 'utf-8') as fout:
step = 0
for input_strings, output_strings in results_per_batch:
for input_string, output_string in zip(input_strings, output_strings):
fout.write(output_string + "\n")
if step % 200 == 0:
deco_print("Input sequence: {}".format(input_string))
deco_print("Output sequence: {}".format(output_string))
deco_print("")
step += 1
def maybe_print_logs(self, input_values, output_values, training_step):
x, len_x = input_values['source_tensors']
y, len_y = input_values['target_tensors']
samples = output_values[0]
x_sample = x[0]
len_x_sample = len_x[0]
y_sample = y[0]
len_y_sample = len_y[0]
deco_print(
"Train Source[0]: " + array_to_string(
x_sample[:len_x_sample],
vocab=self.get_data_layer().params['source_idx2seq'],
delim=self.get_data_layer().params["delimiter"],
),
offset=4,
)
deco_print(
"Train Target[0]: " + array_to_string(
y_sample[:len_y_sample],
vocab=self.get_data_layer().params['target_idx2seq'],
delim=self.get_data_layer().params["delimiter"],
),
offset=4,
)
deco_print(
"Train Prediction[0]: " + array_to_string(
samples[0, :],
vocab=self.get_data_layer().params['target_idx2seq'],
delim=self.get_data_layer().params["delimiter"],
),
offset=4,
)
return {}
def evaluate(self, input_values, output_values):
ex, elen_x = input_values['source_tensors']
ey, elen_y = input_values['target_tensors']
x_sample = ex[0]
len_x_sample = elen_x[0]
y_sample = ey[0]
len_y_sample = elen_y[0]
deco_print(
"*****EVAL Source[0]: " + array_to_string(
x_sample[:len_x_sample],
vocab=self.get_data_layer().params['source_idx2seq'],
delim=self.get_data_layer().params["delimiter"],
),
offset=4,
)
deco_print(
"*****EVAL Target[0]: " + array_to_string(
y_sample[:len_y_sample],
vocab=self.get_data_layer().params['target_idx2seq'],
delim=self.get_data_layer().params["delimiter"],
),
offset=4,
)
samples = output_values[0]
deco_print(
"*****EVAL Prediction[0]: " + array_to_string(
samples[0, :],
vocab=self.get_data_layer().params['target_idx2seq'],
delim=self.get_data_layer().params["delimiter"],
),
offset=4,
)
preds, targets = [], []
if self.params.get('eval_using_bleu', True):
preds.extend([transform_for_bleu(
sample,
vocab=self.get_data_layer().params['target_idx2seq'],
ignore_special=True,
delim=self.get_data_layer().params["delimiter"],
bpe_used=self.params.get('bpe_used', False),
) for sample in samples])
targets.extend([[transform_for_bleu(
yi,
vocab=self.get_data_layer().params['target_idx2seq'],
ignore_special=True,
delim=self.get_data_layer().params["delimiter"],
bpe_used=self.params.get('bpe_used', False),
)] for yi in ey])
return preds, targets
def finalize_evaluation(self, results_per_batch, training_step=None):
preds, targets = [], []
for preds_cur, targets_cur in results_per_batch:
if self.params.get('eval_using_bleu', True):
preds.extend(preds_cur)
targets.extend(targets_cur)
if self.params.get('eval_using_bleu', True):
eval_bleu = calculate_bleu(preds, targets)
      deco_print("Eval BLEU score: {}".format(eval_bleu), offset=4)
return {'Eval_BLEU_Score': eval_bleu}
return {}
def _get_num_objects_per_step(self, worker_id=0):
"""Returns number of source tokens + number of target tokens in batch."""
data_layer = self.get_data_layer(worker_id)
# sum of source length in batch
num_tokens = tf.reduce_sum(data_layer.input_tensors['source_tensors'][1])
if self.mode != "infer":
# sum of target length in batch
num_tokens += tf.reduce_sum(data_layer.input_tensors['target_tensors'][1])
else:
# this will count padding for batch size > 1. Need to be changed
# if that's not expected behaviour
num_tokens += tf.reduce_sum(
tf.shape(self.get_output_tensors(worker_id)[0])
)
return num_tokens
| OpenSeq2Seq-master | open_seq2seq/models/text2text.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.models.model import Model
from open_seq2seq.utils.utils import deco_print
class EncoderDecoderModel(Model):
"""
Standard encoder-decoder class with one encoder and one decoder.
"encoder-decoder-loss" models should inherit from this class.
"""
@staticmethod
def get_required_params():
return dict(Model.get_required_params(), **{
'encoder': None, # could be any user defined class
'decoder': None, # could be any user defined class
})
@staticmethod
def get_optional_params():
return dict(Model.get_optional_params(), **{
'encoder_params': dict,
'decoder_params': dict,
'loss': None, # could be any user defined class
'loss_params': dict,
})
def __init__(self, params, mode="train", hvd=None):
"""Encoder-decoder model constructor.
Note that TensorFlow graph should not be created here. All graph creation
logic is happening inside
:meth:`self._build_forward_pass_graph() <_build_forward_pass_graph>` method.
Args:
params (dict): parameters describing the model.
All supported parameters are listed in :meth:`get_required_params`,
:meth:`get_optional_params` functions.
mode (string, optional): "train", "eval" or "infer".
If mode is "train" all parts of the graph will be built
(model, loss, optimizer).
If mode is "eval", only model and loss will be built.
If mode is "infer", only model will be built.
hvd (optional): if Horovod is used, this should be
``horovod.tensorflow`` module.
If Horovod is not used, it should be None.
Config parameters:
* **encoder** (any class derived from
:class:`Encoder <encoders.encoder.Encoder>`) --- encoder class to use.
* **encoder_params** (dict) --- dictionary with encoder configuration. For
complete list of possible parameters see the corresponding class docs.
* **decoder** (any class derived from
:class:`Decoder <decoders.decoder.Decoder>`) --- decoder class to use.
* **decoder_params** (dict) --- dictionary with decoder configuration. For
complete list of possible parameters see the corresponding class docs.
* **loss** (any class derived from
:class:`Loss <losses.loss.Loss>`) --- loss class to use.
* **loss_params** (dict) --- dictionary with loss configuration. For
complete list of possible parameters see the corresponding class docs.
"""
super(EncoderDecoderModel, self).__init__(params=params, mode=mode, hvd=hvd)
if 'encoder_params' not in self.params:
self.params['encoder_params'] = {}
if 'decoder_params' not in self.params:
self.params['decoder_params'] = {}
if 'loss_params' not in self.params:
self.params['loss_params'] = {}
self._encoder = self._create_encoder()
self._decoder = self._create_decoder()
if self.mode == 'train' or self.mode == 'eval':
self._loss_computator = self._create_loss()
else:
self._loss_computator = None
def _create_encoder(self):
"""This function should return encoder class.
Overwrite this function if additional parameters need to be specified for
encoder, besides provided in the config.
Returns:
instance of a class derived from :class:`encoders.encoder.Encoder`.
"""
params = self.params['encoder_params']
return self.params['encoder'](params=params, mode=self.mode, model=self)
def _create_decoder(self):
"""This function should return decoder class.
Overwrite this function if additional parameters need to be specified for
decoder, besides provided in the config.
Returns:
instance of a class derived from :class:`decoders.decoder.Decoder`.
"""
params = self.params['decoder_params']
return self.params['decoder'](params=params, mode=self.mode, model=self)
def _create_loss(self):
"""This function should return loss class.
Overwrite this function if additional parameters need to be specified for
loss, besides provided in the config.
Returns:
instance of a class derived from :class:`losses.loss.Loss`.
"""
return self.params['loss'](params=self.params['loss_params'], model=self)
def _build_forward_pass_graph(self, input_tensors, gpu_id=0):
"""TensorFlow graph for encoder-decoder-loss model is created here.
This function connects encoder, decoder and loss together. As an input for
encoder it will specify source tensors (as returned from
the data layer). As an input for decoder it will specify target tensors
as well as all output returned from encoder. For loss it
will also specify target tensors and all output returned from
decoder. Note that loss will only be built for mode == "train" or "eval".
Args:
input_tensors (dict): ``input_tensors`` dictionary that has to contain
``source_tensors`` key with the list of all source tensors, and
``target_tensors`` with the list of all target tensors. Note that
``target_tensors`` only need to be provided if mode is
"train" or "eval".
gpu_id (int, optional): id of the GPU where the current copy of the model
is constructed. For Horovod this is always zero.
Returns:
tuple: tuple containing loss tensor as returned from
``loss.compute_loss()`` and list of outputs tensors, which is taken from
``decoder.decode()['outputs']``. When ``mode == 'infer'``, loss will
be None.
"""
if not isinstance(input_tensors, dict) or \
'source_tensors' not in input_tensors:
raise ValueError('Input tensors should be a dict containing '
'"source_tensors" key')
if not isinstance(input_tensors['source_tensors'], list):
raise ValueError('source_tensors should be a list')
source_tensors = input_tensors['source_tensors']
if self.mode == "train" or self.mode == "eval":
if 'target_tensors' not in input_tensors:
        raise ValueError('Input tensors should contain "target_tensors" key '
                         'when mode != "infer"')
if not isinstance(input_tensors['target_tensors'], list):
raise ValueError('target_tensors should be a list')
target_tensors = input_tensors['target_tensors']
with tf.variable_scope("ForwardPass"):
encoder_input = {"source_tensors": source_tensors}
encoder_output = self.encoder.encode(input_dict=encoder_input)
decoder_input = {"encoder_output": encoder_output}
if self.mode == "train" or self.mode == "eval":
decoder_input['target_tensors'] = target_tensors
decoder_output = self.decoder.decode(input_dict=decoder_input)
model_outputs = decoder_output.get("outputs", None)
if self.mode == "train" or self.mode == "eval":
with tf.variable_scope("Loss"):
loss_input_dict = {
"decoder_output": decoder_output,
"target_tensors": target_tensors,
}
loss = self.loss_computator.compute_loss(loss_input_dict)
else:
deco_print("Inference Mode. Loss part of graph isn't built.")
loss = None
return loss, model_outputs
@property
def encoder(self):
"""Model encoder."""
return self._encoder
@property
def decoder(self):
"""Model decoder."""
return self._decoder
@property
def loss_computator(self):
"""Model loss computator."""
return self._loss_computator
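# Subclasses normally only override the _create_* hooks in order to forward
# values that depend on the data layer into the encoder/decoder/loss configs
# (see Text2Text for a real example). The class below is an illustrative
# sketch only; the 'src_vocab_size' parameter is assumed to be provided by
# the data layer and is not defined by this base class.
class _ExampleEncoderDecoder(EncoderDecoderModel):
  """Illustrative sketch of a typical EncoderDecoderModel subclass."""
  def _create_encoder(self):
    # push a data-layer-dependent value into the encoder config before
    # letting the base class instantiate the encoder
    self.params['encoder_params']['src_vocab_size'] = (
        self.get_data_layer().params['src_vocab_size']
    )
    return super(_ExampleEncoderDecoder, self)._create_encoder()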
| OpenSeq2Seq-master | open_seq2seq/models/encoder_decoder.py |
import random
import numpy as np
import tensorflow as tf
from .encoder_decoder import EncoderDecoderModel
from open_seq2seq.data import WKTDataLayer
from open_seq2seq.utils.utils import deco_print, array_to_string
from open_seq2seq.utils import metrics
class LSTMLM(EncoderDecoderModel):
"""
An example class implementing an LSTM language model.
"""
def __init__(self, params, mode="train", hvd=None):
super(EncoderDecoderModel, self).__init__(params=params, mode=mode, hvd=hvd)
if 'encoder_params' not in self.params:
self.params['encoder_params'] = {}
if 'decoder_params' not in self.params:
self.params['decoder_params'] = {}
if 'loss_params' not in self.params:
self.params['loss_params'] = {}
self._lm_phase = isinstance(self.get_data_layer(), WKTDataLayer)
self._encoder = self._create_encoder()
self._decoder = self._create_decoder()
if self.mode == 'train' or self.mode == 'eval':
self._loss_computator = self._create_loss()
else:
self._loss_computator = None
self.delimiter = self.get_data_layer().delimiter
def _create_encoder(self):
self._print_f1 = False
self.params['encoder_params']['vocab_size'] = (
self.get_data_layer().vocab_size
)
self.params['encoder_params']['end_token'] = (
self.get_data_layer().end_token
)
self.params['encoder_params']['batch_size'] = (
self.get_data_layer().batch_size
)
if not self._lm_phase:
self.params['encoder_params']['fc_dim'] = (
self.get_data_layer().num_classes
)
if self.params['encoder_params']['fc_dim'] == 2:
self._print_f1 = True
if self._lm_phase:
self.params['encoder_params']['seed_tokens'] = (
self.get_data_layer().params['seed_tokens']
)
return super(LSTMLM, self)._create_encoder()
def _create_loss(self):
if self._lm_phase:
self.params['loss_params']['batch_size'] = (
self.get_data_layer().batch_size
)
self.params['loss_params']['tgt_vocab_size'] = (
self.get_data_layer().vocab_size
)
return super(LSTMLM, self)._create_loss()
def infer(self, input_values, output_values):
if self._lm_phase:
vocab = self.get_data_layer().corp.dictionary.idx2word
seed_tokens = self.params['encoder_params']['seed_tokens']
for i in range(len(seed_tokens)):
print('Seed:', vocab[seed_tokens[i]] + '\n')
deco_print(
"Output: " + array_to_string(
output_values[0][i],
vocab=self.get_data_layer().corp.dictionary.idx2word,
delim=self.delimiter,
),
offset=4,
)
return []
else:
ex, elen_x = input_values['source_tensors']
ey, elen_y = None, None
if 'target_tensors' in input_values:
ey, elen_y = input_values['target_tensors']
n_samples = len(ex)
results = []
for i in range(n_samples):
current_x = array_to_string(
ex[i][:elen_x[i]],
vocab=self.get_data_layer().corp.dictionary.idx2word,
delim=self.delimiter,
),
current_pred = np.argmax(output_values[0][i])
        current_y = None
if ey is not None:
current_y = np.argmax(ey[i])
results.append((current_x[0], current_pred, current_y))
return results
def maybe_print_logs(self, input_values, output_values, training_step):
x, len_x = input_values['source_tensors']
y, len_y = input_values['target_tensors']
x_sample = x[0]
len_x_sample = len_x[0]
y_sample = y[0]
len_y_sample = len_y[0]
deco_print(
"Train Source[0]: " + array_to_string(
x_sample[:len_x_sample],
vocab=self.get_data_layer().corp.dictionary.idx2word,
delim=self.delimiter,
),
offset=4,
)
if self._lm_phase:
deco_print(
"Train Target[0]: " + array_to_string(
y_sample[:len_y_sample],
vocab=self.get_data_layer().corp.dictionary.idx2word,
delim=self.delimiter,
),
offset=4,
)
else:
deco_print(
"TRAIN Target[0]: " + str(np.argmax(y_sample)),
offset=4,
)
samples = output_values[0][0]
deco_print(
"TRAIN Prediction[0]: " + str(samples),
offset=4,
)
labels = np.argmax(y, 1)
preds = np.argmax(output_values[0], axis=-1)
print('Labels', labels)
print('Preds', preds)
deco_print(
"Accuracy: {:.4f}".format(metrics.accuracy(labels, preds)),
offset = 4,
)
if self._print_f1:
deco_print(
"Precision: {:.4f} | Recall: {:.4f} | F1: {:.4f}"
.format(metrics.precision(labels, preds),
metrics.recall(labels, preds),
metrics.f1(labels, preds)),
offset = 4,
)
return {}
def evaluate(self, input_values, output_values):
ex, elen_x = input_values['source_tensors']
ey, elen_y = input_values['target_tensors']
x_sample = ex[0]
len_x_sample = elen_x[0]
y_sample = ey[0]
len_y_sample = elen_y[0]
return_values = {}
if self._lm_phase:
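      # only print evaluation samples for roughly 10% of the batches
      # to keep the logs readable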
flip = random.random()
if flip <= 0.9:
return return_values
deco_print(
"*****EVAL Source[0]: " + array_to_string(
x_sample[:len_x_sample],
vocab=self.get_data_layer().corp.dictionary.idx2word,
delim=self.delimiter,
),
offset=4,
)
samples = np.argmax(output_values[0][0], axis=-1)
deco_print(
"*****EVAL Target[0]: " + array_to_string(
y_sample[:len_y_sample],
vocab=self.get_data_layer().corp.dictionary.idx2word,
delim=self.delimiter,
),
offset=4,
)
deco_print(
"*****EVAL Prediction[0]: " + array_to_string(
samples,
vocab=self.get_data_layer().corp.dictionary.idx2word,
delim=self.delimiter,
),
offset=4,
)
else:
deco_print(
"*****EVAL Source[0]: " + array_to_string(
x_sample[:len_x_sample],
vocab=self.get_data_layer().corp.dictionary.idx2word,
delim=self.delimiter,
),
offset=4,
)
samples = output_values[0][0]
deco_print(
"EVAL Target[0]: " + str(np.argmax(y_sample)),
offset=4,
)
deco_print(
"EVAL Prediction[0]: " + str(samples),
offset=4,
)
labels = np.argmax(ey, 1)
preds = np.argmax(output_values[0], axis=-1)
print('Labels', labels)
print('Preds', preds)
return_values['accuracy'] = metrics.accuracy(labels, preds)
if self._print_f1:
return_values['true_pos'] = metrics.true_positives(labels, preds)
return_values['pred_pos'] = np.sum(preds)
return_values['actual_pos'] = np.sum(labels)
return return_values
def finalize_evaluation(self, results_per_batch, training_step=None):
accuracies = []
true_pos, pred_pos, actual_pos = 0.0, 0.0, 0.0
for results in results_per_batch:
if not 'accuracy' in results:
return {}
accuracies.append(results['accuracy'])
if 'true_pos' in results:
true_pos += results['true_pos']
pred_pos += results['pred_pos']
actual_pos += results['actual_pos']
deco_print(
"EVAL Accuracy: {:.4f}".format(np.mean(accuracies)),
offset = 4,
)
if true_pos > 0:
prec = true_pos / pred_pos
rec = true_pos / actual_pos
f1 = 2.0 * prec * rec / (rec + prec)
deco_print(
"EVAL Precision: {:.4f} | Recall: {:.4f} | F1: {:.4f} | True pos: {}"
.format(prec, rec, f1, true_pos),
offset = 4,
)
return {}
def finalize_inference(self, results_per_batch, output_file):
    preds, labels = [], []
    with open(output_file, 'w') as out:
      out.write('\t'.join(['Source', 'Pred', 'Label']) + '\n')
      for results in results_per_batch:
        for x, pred, y in results:
          out.write('\t'.join([x, str(pred), str(y)]) + '\n')
          preds.append(pred)
          labels.append(y)
if len(labels) > 0 and labels[0] is not None:
preds = np.asarray(preds)
labels = np.asarray(labels)
deco_print(
"TEST Accuracy: {:.4f}".format(metrics.accuracy(labels, preds)),
offset = 4,
)
deco_print(
"TEST Precision: {:.4f} | Recall: {:.4f} | F1: {:.4f}"
.format(metrics.precision(labels, preds),
metrics.recall(labels, preds),
metrics.f1(labels, preds)),
offset = 4,
)
return {}
def _get_num_objects_per_step(self, worker_id=0):
"""Returns number of source tokens + number of target tokens in batch."""
data_layer = self.get_data_layer(worker_id)
# sum of source length in batch
num_tokens = tf.reduce_sum(data_layer.input_tensors['source_tensors'][1])
if self.mode != "infer":
# sum of target length in batch
num_tokens += tf.reduce_sum(data_layer.input_tensors['target_tensors'][1])
else:
# TODO: this is not going to be correct when batch size > 1, since it will
# count padding?
num_tokens += tf.reduce_sum(tf.shape(self.get_output_tensors(worker_id)[0]))
return num_tokens
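# The classification metrics above always follow the same pattern: the data
# layer provides one-hot labels and the model outputs per-class logits, so
# both are reduced to class ids with argmax before being passed to the
# helpers in open_seq2seq.utils.metrics. A small illustrative sketch with
# made-up arrays (not used anywhere in the library):
def _example_classification_metrics():
  one_hot_labels = np.array([[1, 0], [0, 1], [0, 1]])
  logits = np.array([[2.0, -1.0], [0.5, 1.5], [1.0, 0.2]])
  labels = np.argmax(one_hot_labels, 1)
  preds = np.argmax(logits, axis=-1)
  return metrics.accuracy(labels, preds), metrics.f1(labels, preds)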
| OpenSeq2Seq-master | open_seq2seq/models/lstm_lm.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import copy
import os
import tempfile
import numpy as np
import numpy.testing as npt
import pandas as pd
import tensorflow as tf
from six.moves import range
from open_seq2seq.utils import train, evaluate, infer
from open_seq2seq.utils.utils import get_available_gpus
from .speech2text import levenshtein
class Speech2TextModelTests(tf.test.TestCase):
def setUp(self):
# define this values in subclasses
self.base_params = None
self.train_params = None
self.eval_params = None
self.base_model = None
def run_model(self, train_config, eval_config, hvd=None):
with tf.Graph().as_default() as g:
# pylint: disable=not-callable
train_model = self.base_model(params=train_config, mode="train", hvd=hvd)
train_model.compile()
eval_model = self.base_model(params=eval_config, mode="eval", hvd=hvd)
eval_model.compile(force_var_reuse=True)
train(train_model, eval_model)
saver = tf.train.Saver()
checkpoint = tf.train.latest_checkpoint(train_model.params['logdir'])
with self.test_session(g, use_gpu=True) as sess:
saver.restore(sess, checkpoint)
sess.run([train_model.get_data_layer(i).iterator.initializer
for i in range(train_model.num_gpus)])
sess.run([eval_model.get_data_layer(i).iterator.initializer
for i in range(eval_model.num_gpus)])
weights = sess.run(tf.trainable_variables())
loss = sess.run(train_model.loss)
eval_losses = sess.run(eval_model.eval_losses)
eval_loss = np.mean(eval_losses)
weights_new = sess.run(tf.trainable_variables())
# checking that the weights has not changed from
# just computing the loss
for w, w_new in zip(weights, weights_new):
npt.assert_allclose(w, w_new)
eval_dict = evaluate(eval_model, checkpoint)
return loss, eval_loss, eval_dict
def prepare_config(self):
self.base_params['logdir'] = tempfile.mktemp()
train_config = copy.deepcopy(self.base_params)
eval_config = copy.deepcopy(self.base_params)
train_config.update(copy.deepcopy(self.train_params))
eval_config.update(copy.deepcopy(self.eval_params))
return train_config, eval_config
def regularizer_test(self):
for dtype in [tf.float16, tf.float32, 'mixed']:
train_config, eval_config = self.prepare_config()
train_config['num_epochs'] = 60
train_config.update({
"dtype": dtype,
# pylint: disable=no-member
"regularizer": tf.contrib.layers.l2_regularizer,
"regularizer_params": {
'scale': 1e4,
},
})
eval_config.update({
"dtype": dtype,
})
loss, eval_loss, eval_dict = self.run_model(train_config, eval_config)
self.assertGreaterEqual(loss, 400.0)
self.assertGreaterEqual(eval_loss, 400.0)
self.assertGreaterEqual(eval_dict['Eval WER'], 0.9)
def convergence_test(self, train_loss_threshold,
eval_loss_threshold, eval_wer_threshold):
for dtype in [tf.float32, "mixed"]:
train_config, eval_config = self.prepare_config()
train_config.update({
"dtype": dtype,
})
eval_config.update({
"dtype": dtype,
})
loss, eval_loss, eval_dict = self.run_model(train_config, eval_config)
self.assertLess(loss, train_loss_threshold)
self.assertLess(eval_loss, eval_loss_threshold)
self.assertLess(eval_dict['Eval WER'], eval_wer_threshold)
def finetuning_test(self, train_loss_threshold,
eval_loss_threshold, eval_wer_threshold):
for dtype in [tf.float32, "mixed"]:
# pre-training
train_config, eval_config = self.prepare_config()
train_config.update({
"dtype": dtype,
})
eval_config.update({
"dtype": dtype,
})
loss, eval_loss, eval_dict = self.run_model(train_config, eval_config)
self.assertLess(loss, train_loss_threshold)
self.assertLess(eval_loss, eval_loss_threshold)
self.assertLess(eval_dict['Eval WER'], eval_wer_threshold)
# finetuning
restore_dir = train_config['logdir']
train_config['logdir'] = tempfile.mktemp()
eval_config['logdir'] = train_config['logdir']
train_config.update({
"load_model": restore_dir,
"lr_policy_params": {
"learning_rate": 0.0001,
"power": 2,
}
})
loss_ft, eval_loss_ft, eval_dict_ft = self.run_model(train_config, eval_config)
self.assertLess(loss_ft, train_loss_threshold)
self.assertLess(eval_loss_ft, eval_loss_threshold)
self.assertLess(eval_dict_ft['Eval WER'], eval_wer_threshold)
def convergence_with_iter_size_test(self):
try:
import horovod.tensorflow as hvd
hvd.init()
except ImportError:
print("Horovod not installed skipping test_convergence_with_iter_size")
return
for dtype in [tf.float32, "mixed"]:
train_config, eval_config = self.prepare_config()
train_config.update({
"dtype": dtype,
"iter_size": 5,
"batch_size_per_gpu": 2,
"use_horovod": True,
"num_epochs": 200,
})
eval_config.update({
"dtype": dtype,
"iter_size": 5,
"batch_size_per_gpu": 2,
"use_horovod": True,
})
loss, eval_loss, eval_dict = self.run_model(
train_config, eval_config, hvd,
)
self.assertLess(loss, 10.0)
self.assertLess(eval_loss, 30.0)
self.assertLess(eval_dict['Eval WER'], 0.2)
def infer_test(self):
train_config, infer_config = self.prepare_config()
train_config['num_epochs'] = 250
infer_config['batch_size_per_gpu'] = 4
with tf.Graph().as_default() as g:
with self.test_session(g, use_gpu=True) as sess:
gpus = get_available_gpus()
if len(gpus) > 1:
infer_config['num_gpus'] = 2
else:
infer_config['num_gpus'] = 1
with tf.Graph().as_default():
# pylint: disable=not-callable
train_model = self.base_model(
params=train_config, mode="train", hvd=None)
train_model.compile()
train(train_model, None)
with tf.Graph().as_default():
# pylint: disable=not-callable
infer_model = self.base_model(
params=infer_config, mode="infer", hvd=None)
infer_model.compile()
print(train_model.params['logdir'])
output_file = os.path.join(train_model.params['logdir'], 'infer_out.csv')
infer(
infer_model,
tf.train.latest_checkpoint(train_model.params['logdir']),
output_file,
)
pred_csv = pd.read_csv(output_file)
true_csv = pd.read_csv(
'open_seq2seq/test_utils/toy_speech_data/toy_data.csv',
)
for pred_row, true_row in zip(pred_csv.as_matrix(), true_csv.as_matrix()):
# checking file name
self.assertEqual(pred_row[0], true_row[0])
# checking prediction: no more than 5 chars difference
self.assertLess(levenshtein(pred_row[-1], true_row[-1]), 5)
def mp_collection_test(self, num_train_vars, num_master_copies):
train_config, eval_config = self.prepare_config()
train_config['dtype'] = 'mixed'
with tf.Graph().as_default():
# pylint: disable=not-callable
model = self.base_model(params=train_config, mode="train", hvd=None)
model.compile()
self.assertEqual(len(tf.trainable_variables()), num_train_vars)
self.assertEqual(
len(tf.get_collection('FP32_MASTER_COPIES')),
num_master_copies, # exclude batch norm beta, gamma and row_conv vars
)
def levenshtein_test(self):
sample1 = 'this is a great day'
sample2 = 'this is great day'
self.assertEqual(levenshtein(sample1.split(), sample2.split()), 1)
self.assertEqual(levenshtein(sample2.split(), sample1.split()), 1)
sample1 = 'this is a great day'
sample2 = 'this great day'
self.assertEqual(levenshtein(sample1.split(), sample2.split()), 2)
self.assertEqual(levenshtein(sample2.split(), sample1.split()), 2)
sample1 = 'this is a great day'
sample2 = 'this great day'
self.assertEqual(levenshtein(sample1.split(), sample2.split()), 2)
self.assertEqual(levenshtein(sample2.split(), sample1.split()), 2)
sample1 = 'this is a great day'
sample2 = 'this day is a great'
self.assertEqual(levenshtein(sample1.split(), sample2.split()), 2)
self.assertEqual(levenshtein(sample2.split(), sample1.split()), 2)
sample1 = 'this is a great day'
sample2 = 'this day is great'
self.assertEqual(levenshtein(sample1.split(), sample2.split()), 3)
self.assertEqual(levenshtein(sample2.split(), sample1.split()), 3)
sample1 = 'london is the capital of great britain'
sample2 = 'london capital gret britain'
self.assertEqual(levenshtein(sample1.split(), sample2.split()), 4)
self.assertEqual(levenshtein(sample2.split(), sample1.split()), 4)
self.assertEqual(levenshtein(sample1, sample2), 11)
self.assertEqual(levenshtein(sample2, sample1), 11)
def maybe_functions_test(self):
train_config, eval_config = self.prepare_config()
with tf.Graph().as_default():
# pylint: disable=not-callable
model = self.base_model(params=train_config, mode="train", hvd=None)
model.compile()
model._gpu_ids = range(5)
model.params['batch_size_per_gpu'] = 2
char2idx = model.get_data_layer().params['char2idx']
inputs = [
['this is a great day', 'london is the capital of great britain'],
['ooo', 'lll'],
['a b c\' asdf', 'blah blah bblah'],
['this is great day', 'london capital gret britain'],
['aaaaaaaasdfdasdf', 'df d sdf asd fd f sdf df blah\' blah'],
]
outputs = [
['this is great a day', 'london capital gret britain'],
['ooo', 'lll'],
['aaaaaaaasdfdasdf', 'df d sdf asd fd f sdf df blah blah'],
['this is a great day', 'london is the capital of great britain'],
['a b c\' asdf', 'blah blah\' bblah'],
]
y = [None] * len(inputs)
len_y = [None] * len(inputs)
indices, values, dense_shape = [], [], []
num_gpus = len(inputs)
for gpu_id in range(num_gpus):
num_samples = len(inputs[gpu_id])
max_len = np.max(list(map(len, inputs[gpu_id])))
y[gpu_id] = np.zeros((num_samples, max_len), dtype=np.int)
len_y[gpu_id] = np.zeros(num_samples, dtype=np.int)
for sample_id in range(num_samples):
num_letters = len(inputs[gpu_id][sample_id])
len_y[gpu_id][sample_id] = num_letters
for letter_id in range(num_letters):
y[gpu_id][sample_id, letter_id] = char2idx[
inputs[gpu_id][sample_id][letter_id]
]
num_gpus = len(outputs)
for gpu_id in range(num_gpus):
num_samples = len(outputs[gpu_id])
max_len = np.max(list(map(len, outputs[gpu_id])))
dense_shape.append(np.array((num_samples, max_len)))
values.append([])
indices.append([])
for sample_id in range(num_samples):
num_letters = len(outputs[gpu_id][sample_id])
for letter_id in range(num_letters):
values[gpu_id].append(
char2idx[outputs[gpu_id][sample_id][letter_id]]
)
indices[gpu_id].append(np.array([sample_id, letter_id]))
values[gpu_id] = np.array(values[gpu_id], dtype=np.int)
indices[gpu_id] = np.array(indices[gpu_id], dtype=np.int)
x = [np.empty(2)] * len(y)
len_x = [None] * len(y)
input_values = list(zip(x, len_x, y, len_y))
output_values = [
[tf.SparseTensorValue(indices[i], values[i], dense_shape[i])]
for i in range(num_gpus)
]
results = []
for inp, out in zip(input_values, output_values):
inp_dict = {'source_tensors': [inp[0], inp[1]],
'target_tensors': [inp[2], inp[3]]}
results.append(model.evaluate(inp_dict, out))
for inp, out in zip(input_values, output_values):
inp_dict = {'source_tensors': [inp[0], inp[1]],
'target_tensors': [inp[2], inp[3]]}
results.append(model.evaluate(inp_dict, out))
output_dict = model.finalize_evaluation(results)
w_lev = 0.0
w_len = 0.0
for batch_id in range(len(inputs)):
for sample_id in range(len(inputs[batch_id])):
input_sample = inputs[batch_id][sample_id]
output_sample = outputs[batch_id][sample_id]
w_lev += levenshtein(input_sample.split(), output_sample.split())
w_len += len(input_sample.split())
self.assertEqual(output_dict['Eval WER'], w_lev / w_len)
self.assertEqual(output_dict['Eval WER'], 37 / 40.0)
inp_dict = {'source_tensors': [input_values[0][0], input_values[0][1]],
'target_tensors': [input_values[0][2], input_values[0][3]]}
output_dict = model.maybe_print_logs(inp_dict, output_values[0], 0)
self.assertEqual(output_dict['Sample WER'], 0.4)
| OpenSeq2Seq-master | open_seq2seq/models/speech2text_test.py |
# Copyright (c) 2017 NVIDIA Corporation
"""All base models available in OpenSeq2Seq."""
from .model import Model
from .text2text import Text2Text
from .speech2text import Speech2Text
from .image2label import Image2Label
from .lstm_lm import LSTMLM
from .text2speech_tacotron import Text2SpeechTacotron
from .text2speech_wavenet import Text2SpeechWavenet
from .text2speech_centaur import Text2SpeechCentaur
| OpenSeq2Seq-master | open_seq2seq/models/__init__.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.test_utils.test_speech_configs.ds2_test_config import \
base_params, train_params, eval_params, base_model
from .speech2text_test import Speech2TextModelTests
class DS2ModelTests(Speech2TextModelTests):
def setUp(self):
self.base_model = base_model
self.base_params = base_params
self.train_params = train_params
self.eval_params = eval_params
def tearDown(self):
pass
def test_regularizer(self):
return self.regularizer_test()
def test_convergence(self):
return self.convergence_test(5.0, 30.0, 0.1)
def test_convergence_with_iter_size(self):
return self.convergence_with_iter_size_test()
def test_infer(self):
return self.infer_test()
def test_mp_collection(self):
return self.mp_collection_test(14, 7)
def test_levenshtein(self):
return self.levenshtein_test()
def test_maybe_functions(self):
return self.maybe_functions_test()
if __name__ == '__main__':
tf.test.main()
| OpenSeq2Seq-master | open_seq2seq/models/speech2text_ds2_test.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from six.moves import range
import abc
import six
import tensorflow as tf
import numpy as np
import copy
import time
import re
try:
from inspect import signature
except ImportError:
from funcsigs import signature
from open_seq2seq.utils.utils import deco_print, clip_last_batch
from open_seq2seq.optimizers import optimize_loss, get_regularization_loss
from open_seq2seq.utils.utils import check_params
@six.add_metaclass(abc.ABCMeta)
class Model:
"""Abstract class that any model should inherit from.
It automatically enables multi-GPU (or Horovod) computation,
has mixed precision support, logs training summaries, etc.
"""
@staticmethod
def get_required_params():
"""Static method with description of required parameters.
Returns:
dict:
Dictionary containing all the parameters that **have to** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {
'use_horovod': bool,
'batch_size_per_gpu': int,
'data_layer': None, # could be any user defined class
}
@staticmethod
def get_optional_params():
"""Static method with description of optional parameters.
Returns:
dict:
Dictionary containing all the parameters that **can** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {
'logdir': str,
'num_gpus': int, # cannot be used when gpu_ids is specified
'gpu_ids': list, # cannot be used when num_gpus is specified
'load_model': str,
'save_summaries_steps': None, # could be int or None
'print_loss_steps': None, # could be int or None
'print_samples_steps': None, # could be int or None
'print_bench_info_steps': None, # could be int or None
'save_checkpoint_steps': None, # could be int or None
'num_checkpoints': int, # maximum number of last checkpoints to keep
      'restore_best_checkpoint': bool,  # if True, restore best checkpoint instead of latest checkpoint
'eval_steps': int,
'finetune': bool,
'eval_batch_size_per_gpu': int,
'hooks': list,
'random_seed': int,
'num_epochs': int,
'max_steps': int,
'bench_start': int,
'data_layer_params': dict,
'optimizer': None, # could be class or string
'optimizer_params': dict,
'freeze_variables_regex' : None, # could be str or None
'initializer': None, # any valid TensorFlow initializer
'initializer_params': dict,
'regularizer': None, # any valid TensorFlow regularizer
'regularizer_params': dict,
'dtype': [tf.float16, tf.float32, 'mixed'],
'lr_policy': None, # any valid learning rate policy function
'lr_policy_params': dict,
'max_grad_norm': float,
'larc_params': dict,
'loss_scaling': None, # float, "Backoff" or "LogMax"
'loss_scaling_params': dict,
'summaries': list,
'iter_size': int,
      'lm_vocab_file': str,  # TODO: move this parameter to lstm_lm.py
'processed_data_folder': str,
# Parameters for TensorRT (infer mode only)
'use_trt': bool,
'trt_precision_mode': str,
'trt_max_workspace_size_bytes': int,
'trt_minimum_segment_size': int,
'trt_is_dynamic_op': bool,
'trt_maximum_cached_engines': int,
# Parameters for XLA
'use_xla_jit' : bool,
}
def __init__(self, params, mode="train", hvd=None):
"""Model constructor.
The TensorFlow graph should not be created here, but rather in the
:meth:`self.compile() <compile>` method.
Args:
params (dict): parameters describing the model.
All supported parameters are listed in :meth:`get_required_params`,
:meth:`get_optional_params` functions.
mode (string, optional): "train", "eval" or "infer".
If mode is "train" all parts of the graph will be built
(model, loss, optimizer).
If mode is "eval", only model and loss will be built.
If mode is "infer", only model will be built.
hvd (optional): if Horovod is used, this should be
``horovod.tensorflow`` module.
If Horovod is not used, it should be None.
Config parameters:
* **random_seed** (int) --- random seed to use.
* **use_horovod** (bool) --- whether to use Horovod for distributed
execution.
* **num_gpus** (int) --- number of GPUs to use. This parameter cannot be
used if ``gpu_ids`` is specified. When ``use_horovod`` is True
this parameter is ignored.
* **gpu_ids** (list of ints) --- GPU ids to use. This parameter cannot be
used if ``num_gpus`` is specified. When ``use_horovod`` is True
this parameter is ignored.
* **batch_size_per_gpu** (int) --- batch size to use for each GPU.
* **eval_batch_size_per_gpu** (int) --- batch size to use for each GPU during
inference. This is for when training and inference have different computation
and memory requirements, such as when training uses sampled softmax and
inference uses full softmax. If not specified, it's set
to ``batch_size_per_gpu``.
* **restore_best_checkpoint** (bool) --- if set to True, when doing evaluation
and inference, the model will load the best checkpoint instead of the latest
checkpoint. Best checkpoint is evaluated based on evaluation results, so
      it's only available when the model is trained under ``train_eval`` mode.
Default to False.
* **load_model** (str) --- points to the location of the pretrained model for
transfer learning. If specified, during training, the system will look
into the checkpoint in this folder and restore all variables whose names and
shapes match a variable in the new model.
* **num_epochs** (int) --- number of epochs to run training for.
This parameter cannot be used if ``max_steps`` is specified.
* **max_steps** (int) --- number of steps to run training for.
This parameter cannot be used if ``num_epochs`` is specified.
* **save_summaries_steps** (int or None) --- how often to save summaries.
Setting it to None disables summaries saving.
* **print_loss_steps** (int or None) --- how often to print loss during
training. Setting it to None disables loss printing.
* **print_samples_steps** (int or None) --- how often to print training
samples (input sequences, correct answers and model predictions).
Setting it to None disables samples printing.
* **print_bench_info_steps** (int or None) --- how often to print training
benchmarking information (average number of objects processed per step).
Setting it to None disables intermediate benchmarking printing, but
the average information across the whole training will always be printed
after the last iteration.
* **save_checkpoint_steps** (int or None) --- how often to save model
checkpoints. Setting it to None disables checkpoint saving.
* **num_checkpoints** (int) --- number of last checkpoints to keep.
* **eval_steps** (int) --- how often to run evaluation during training.
This parameter is only checked if ``--mode`` argument of ``run.py`` is
"train\_eval". If no evaluation is needed you should use "train" mode.
* **logdir** (string) --- path to the log directory where all checkpoints
and summaries will be saved.
* **data_layer** (any class derived from
:class:`DataLayer <data.data_layer.DataLayer>`) --- data layer class
to use.
* **data_layer_params** (dict) --- dictionary with data layer
configuration.
For complete list of possible parameters see the corresponding
class docs.
* **optimizer** (string or TensorFlow optimizer class) --- optimizer to
use for training. Could be either "Adam", "Adagrad", "Ftrl", "Momentum",
"RMSProp", "SGD" or any valid TensorFlow optimizer class.
* **optimizer_params** (dict) --- dictionary that will be passed to
optimizer ``__init__`` method.
* **initializer** --- any valid TensorFlow initializer.
* **initializer_params** (dict) --- dictionary that will be passed to
initializer ``__init__`` method.
* **freeze_variables_regex** (str or None) --- if zero or more characters
at the beginning of the name of a trainable variable match this
pattern, then this variable will be frozen during training.
Setting it to None disables freezing of variables.
    * **regularizer** --- any valid TensorFlow regularizer.
* **regularizer_params** (dict) --- dictionary that will be passed to
regularizer ``__init__`` method.
* **dtype** --- model dtype. Could be either ``tf.float16``,
``tf.float32`` or "mixed". For details see
:ref:`mixed precision training <mixed_precision>` section in docs.
* **lr_policy** --- any valid learning rate policy function. For examples,
see :any:`optimizers.lr_policies` module.
* **lr_policy_params** (dict) --- dictionary containing lr_policy
parameters.
* **max_grad_norm** (float) --- maximum value of gradient norm. Clipping
will be performed if some gradients exceed this value (this is checked
for each variable independently).
* **loss_scaling** --- could be float or string. If float, static loss
scaling is applied. If string, the corresponding automatic
loss scaling algorithm is used. Must be one of 'Backoff'
      or 'LogMax' (case insensitive). Only used when dtype="mixed". For details
see :ref:`mixed precision training <mixed_precision>` section in docs.
* **loss_scaling_params** (dict) --- dictionary containing loss scaling
parameters.
* **summaries** (list) --- which summaries to log. Could contain
"learning_rate", "gradients", "gradient_norm", "global_gradient_norm",
"variables", "variable_norm", "loss_scale".
* **iter_size** (int) --- use this parameter to emulate large batches.
The gradients will be accumulated for ``iter_size`` number of steps before
applying update.
* **larc_params** --- dictionary with parameters for LARC (or LARS)
optimization algorithms. Can contain the following parameters:
* **larc_mode** --- Could be either "scale" (LARS) or "clip" (LARC).
Note that it works in addition to any other optimization algorithm
since we treat
it as adaptive gradient clipping and learning rate adjustment.
* **larc_eta** (float) --- LARC or LARS scaling parameter.
* **min_update** (float) --- minimal value of the LARC (LARS) update.
* **epsilon** (float) --- small number added to gradient norm in
denominator for numerical stability.
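    Example of a minimal config (illustrative only; the data layer and model
    classes and the values below are placeholders, not defaults)::
        params = {
            "use_horovod": False,
            "num_gpus": 1,
            "batch_size_per_gpu": 32,
            "num_epochs": 10,
            "logdir": "experiments/my_model",
            "optimizer": "Adam",
            "optimizer_params": {},
            "dtype": tf.float32,
            "data_layer": MyDataLayer,  # any DataLayer subclass
            "data_layer_params": {},
        }
        model = MyModel(params=params, mode="train", hvd=None)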
"""
check_params(params, self.get_required_params(), self.get_optional_params())
self._params = copy.deepcopy(params)
if self._params.get('iter_size', 1) > 1 and hvd is None:
raise ValueError("iter_size is only supported in Horovod mode")
# parameter checks
self._mode = mode
self._interactive = False
if self._mode == "interactive_infer":
self._mode = "infer"
self._interactive = True
if self._mode not in ["train", "infer", "eval"]:
raise ValueError("Mode has to be one of ['train', 'infer', 'eval']")
if "use_trt" in params and self._mode != "infer":
raise ValueError("TensorRT can only be used in inference mode.")
if "max_steps" in params and "num_epochs" in params:
raise ValueError("You can't provide both max_steps and num_epochs. "
"Please, remove one of them from the config.")
if mode == "train":
if "max_steps" not in params and "num_epochs" not in params:
raise ValueError("For training mode either max_steps or "
"num_epochs has to be provided")
if 'print_samples_steps' not in self._params:
self._params['print_samples_steps'] = None
if 'print_loss_steps' not in self._params:
self._params['print_loss_steps'] = None
if 'save_checkpoint_steps' not in self._params:
self._params['save_checkpoint_steps'] = None
if 'save_summaries_steps' not in self._params:
self._params['save_summaries_steps'] = None
if 'print_bench_info_steps' not in self._params:
self._params['print_bench_info_steps'] = None
self._params['num_checkpoints'] = self._params.get('num_checkpoints', 5)
self._params['finetune'] = self._params.get('finetune', False)
# self._params['base_logdir'] = self._params.get('base_logdir', None)
self._params['load_model'] = self._params.get('load_model', None)
self._params['load_fc'] = self._params.get('load_fc', False)
self._params['eval_batch_size_per_gpu'] = self._params.get(
'eval_batch_size_per_gpu',
self._params['batch_size_per_gpu']
)
# checking that frequencies of samples and loss are aligned
s_fr = self._params['print_samples_steps']
l_fr = self._params['print_loss_steps']
if s_fr is not None and l_fr is not None and s_fr % l_fr != 0:
raise ValueError("print_samples_steps has to be a multiple of "
"print_loss_steps.")
self._hvd = hvd
if self._hvd:
self._gpu_ids = range(1)
else:
if 'gpu_ids' in self._params:
self._gpu_ids = self._params['gpu_ids']
elif 'num_gpus' in self._params:
self._gpu_ids = range(self._params['num_gpus'])
else:
raise ValueError('Either "gpu_ids" or "num_gpus" has to '
'be specified in the config')
if self._interactive and len(self._gpu_ids) > 1:
raise ValueError("Interactive infer is meant to be used with 1 gpu")
# setting random seed
rs = self._params.get('random_seed', int(time.time()))
if self.on_horovod:
rs += hvd.rank()
tf.set_random_seed(rs)
np.random.seed(rs)
if 'dtype' not in self._params:
self._params['dtype'] = tf.float32
dl_params = self._params.get('data_layer_params', {})
if mode == 'train':
dl_params['batch_size'] = self._params['batch_size_per_gpu']
else:
dl_params['batch_size'] = self._params['eval_batch_size_per_gpu']
if 'lm_vocab_file' in self._params:
dl_params['lm_vocab_file'] = self._params['lm_vocab_file']
if 'processed_data_folder' in self._params:
dl_params['processed_data_folder'] = self._params['processed_data_folder']
dl_params['mode'] = self._mode
dl_params['interactive'] = self._interactive
if self.on_horovod:
self._data_layer = self._params['data_layer'](
params=dl_params, model=self,
num_workers=self._hvd.size(), worker_id=self._hvd.rank(),
)
else:
self._data_layers = []
for worker_id in range(self.num_gpus):
self._data_layers.append(self._params['data_layer'](
params=dl_params, model=self,
num_workers=self.num_gpus, worker_id=worker_id,
))
if self._mode == "train":
if "max_steps" in self._params:
self._last_step = self._params["max_steps"]
self._steps_in_epoch = None
else:
# doing a few less steps if data size is not divisible by the batch size
self._steps_in_epoch = self.get_data_layer().get_size_in_samples() // \
self.get_data_layer().params['batch_size']
if self._steps_in_epoch is None:
raise ValueError('The data_layer is not compatible with '
'epoch execution, since it does not provide '
'get_size_in_samples() method. Either update the '
'data layer or switch to using "max_steps" '
'parameter.')
if self.on_horovod:
self._steps_in_epoch //= self._hvd.size()
else:
self._steps_in_epoch //= self.num_gpus
self._steps_in_epoch //= self._params.get('iter_size', 1)
if self._steps_in_epoch == 0:
raise ValueError("Overall batch size is too big for this dataset.")
self._last_step = self._params['num_epochs'] * self._steps_in_epoch
if self.on_horovod:
self._output = None
else:
self._outputs = [None] * self.num_gpus
self.loss = None
self.train_op = None
self.eval_losses = None
self._num_objects_per_step = None
self.skip_update_ph = None
def compile(self, force_var_reuse=False, checkpoint=None):
"""TensorFlow graph is built here."""
if 'initializer' not in self.params:
initializer = None
else:
init_dict = self.params.get('initializer_params', {})
initializer = self.params['initializer'](**init_dict)
if not self.on_horovod: # not using Horovod
# below we follow data parallelism for multi-GPU training
losses = []
for gpu_cnt, gpu_id in enumerate(self._gpu_ids):
with tf.device("/gpu:{}".format(gpu_id)), tf.variable_scope(
name_or_scope=tf.get_variable_scope(),
# re-using variables across GPUs.
reuse=force_var_reuse or (gpu_cnt > 0),
initializer=initializer,
dtype=self.get_tf_dtype(),
):
deco_print("Building graph on GPU:{}".format(gpu_id))
if self._interactive:
self.get_data_layer(gpu_cnt).create_interactive_placeholders()
else:
self.get_data_layer(gpu_cnt).build_graph()
input_tensors = self.get_data_layer(gpu_cnt).input_tensors
if self.params.get("use_trt", False):
# Build TF-TRT graph
loss, self._outputs[gpu_cnt] = self.build_trt_forward_pass_graph(
input_tensors,
gpu_id=gpu_cnt,
checkpoint=checkpoint
)
else:
# Build regular TF graph
loss, self._outputs[gpu_cnt] = self._build_forward_pass_graph(
input_tensors,
gpu_id=gpu_cnt
)
if self._outputs[gpu_cnt] is not None and \
not isinstance(self._outputs[gpu_cnt], list):
raise ValueError('Decoder outputs have to be either None or list')
if self._mode == "train" or self._mode == "eval":
losses.append(loss)
# end of the per-GPU loop
if self._mode == "train":
self.loss = tf.reduce_mean(losses)
if self._mode == "eval":
self.eval_losses = losses
else: # is using Horovod
# gpu_id should always be zero, since Horovod takes care of isolating
# different processes to 1 GPU only
with tf.device("/gpu:0"), tf.variable_scope(
name_or_scope=tf.get_variable_scope(),
reuse=force_var_reuse,
initializer=initializer,
dtype=self.get_tf_dtype(),
):
deco_print(
"Building graph in Horovod rank: {}".format(self._hvd.rank())
)
self.get_data_layer().build_graph()
input_tensors = self.get_data_layer().input_tensors
if self.params.get("use_trt", False):
# Build TF-TRT graph
all_loss, self._output = self.build_trt_forward_pass_graph(
input_tensors,
gpu_id=0,
checkpoint=checkpoint
)
else:
# Build regular TF graph
all_loss, self._output = self._build_forward_pass_graph(
input_tensors,
gpu_id=0
)
if isinstance(all_loss, (dict,)):
loss = all_loss['loss']
else:
loss = all_loss
if self._output is not None and not isinstance(self._output, list):
raise ValueError('Decoder outputs have to be either None or list')
if self._mode == "train":
self.loss = loss
if self._mode == "eval":
self.eval_losses = [loss]
try:
self._num_objects_per_step = [self._get_num_objects_per_step(worker_id)
for worker_id in range(self.num_gpus)]
except NotImplementedError:
pass
if self._mode == "train":
if 'lr_policy' not in self.params:
lr_policy = None
else:
lr_params = self.params.get('lr_policy_params', {})
# adding default decay_steps = max_steps if lr_policy supports it and
# different value is not provided
func_params = signature(self.params['lr_policy']).parameters
if 'decay_steps' in func_params and 'decay_steps' not in lr_params:
lr_params['decay_steps'] = self._last_step
if 'begin_decay_at' in func_params:
if 'warmup_steps' in func_params:
lr_params['begin_decay_at'] = max(
lr_params.get('begin_decay_at', 0),
lr_params.get('warmup_steps', 0)
)
lr_params['decay_steps'] -= lr_params.get('begin_decay_at', 0)
if 'steps_per_epoch' in func_params and \
'steps_per_epoch' not in lr_params and 'num_epochs' in self.params:
lr_params['steps_per_epoch'] = self.steps_in_epoch
lr_policy = lambda gs: self.params['lr_policy'](global_step=gs,
**lr_params)
if self.params.get('iter_size', 1) > 1:
self.skip_update_ph = tf.placeholder(tf.bool)
var_list = tf.trainable_variables()
freeze_variables_regex = self.params.get('freeze_variables_regex', None)
if freeze_variables_regex is not None:
pattern = re.compile(freeze_variables_regex)
var_list = [var for var in tf.trainable_variables()
if not pattern.match(var.name)]
self.train_op = optimize_loss(
loss=tf.cast(self.loss, tf.float32) + get_regularization_loss(),
dtype=self.params['dtype'],
optimizer=self.params['optimizer'],
optimizer_params=self.params.get('optimizer_params', {}),
var_list=var_list,
clip_gradients=self.params.get('max_grad_norm', None),
learning_rate_decay_fn=lr_policy,
summaries=self.params.get('summaries', None),
larc_params=self.params.get('larc_params', None),
loss_scaling=self.params.get('loss_scaling', 1.0),
loss_scaling_params=self.params.get('loss_scaling_params', None),
on_horovod=self.on_horovod,
iter_size=self.params.get('iter_size', 1),
skip_update_ph=self.skip_update_ph,
model=self
)
tf.summary.scalar(name="train_loss", tensor=self.loss)
if self.steps_in_epoch:
tf.summary.scalar(
name="epoch",
tensor=tf.floor(tf.train.get_global_step() /
tf.constant(self.steps_in_epoch, dtype=tf.int64)),
)
if not self.on_horovod or self._hvd.rank() == 0:
if freeze_variables_regex is not None:
deco_print('Complete list of variables:')
for var in tf.trainable_variables():
deco_print('{}'.format(var.name), offset=2)
deco_print("Trainable variables:")
total_params = 0
unknown_shape = False
for var in var_list:
var_params = 1
deco_print('{}'.format(var.name), offset=2)
deco_print('shape: {}, {}'.format(var.get_shape(), var.dtype),
offset=4)
if var.get_shape():
for dim in var.get_shape():
var_params *= dim.value
total_params += var_params
else:
unknown_shape = True
if unknown_shape:
deco_print("Encountered unknown variable shape, can't compute total "
"number of parameters.")
else:
deco_print('Total trainable parameters: {}'.format(total_params))
def build_trt_forward_pass_graph(self, input_tensors, gpu_id=0,
checkpoint=None):
"""Wrapper around _build_forward_pass_graph which converts graph using
TF-TRT"""
import tensorflow.contrib.tensorrt as trt
# Default parameters
trt_params = {
"batch_size_per_gpu": 64,
"trt_max_workspace_size_bytes": (4096 << 20) - 1000,
"trt_precision_mode": "FP32",
"trt_minimum_segment_size": 10,
"trt_is_dynamic_op": True,
"trt_maximum_cached_engines": 1
}
# Update params from user config
for key in trt_params:
if key in self.params:
trt_params[key] = self.params[key]
# Create temporary graph which will contain the native TF graph
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
temp_graph = tf.Graph()
input_map = {}
# We have to deconstruct SparseTensors into their 3 internal tensors
# (indices, values, dense_shape). This maps each tensor name to a list of
# all 3 tensor names in its SparseTensor.
output_sparse_tensor_map = {}
with temp_graph.as_default() as tf_graph:
with tf.Session(config=tf_config) as tf_sess:
# Create temporary input placeholders used to build native TF graph
input_placeholders = {'source_tensors': []}
for i, original_input in enumerate(input_tensors['source_tensors']):
name = 'input_map_%d' % i
input_placeholders['source_tensors'].append(
tf.placeholder(shape=original_input.shape,
dtype=original_input.dtype,
name=name))
# And map it back to original input
input_map[name] = original_input
# Build native graph
loss, outputs = self._build_forward_pass_graph(
input_placeholders,
gpu_id=gpu_id
)
# Gather output tensors
output_node_names = []
output_node_names_and_ports = []
for x in outputs:
if isinstance(x, tf.SparseTensor):
components = [x.indices.name, x.values.name, x.dense_shape.name]
fetch_names = [tensor.split(':')[0] for tensor in components]
# Remove duplicates (i.e. if SparseTensor is output of one node)
fetch_names = list(set(fetch_names))
output_node_names.extend(fetch_names)
output_node_names_and_ports.extend(components)
# Add all components to map so SparseTensor can be reconstructed
# from tensor components which will be outputs of new graph
for tensor in components:
output_sparse_tensor_map[tensor] = components
else:
output_node_names.append(x.name.split(':')[0])
output_node_names_and_ports.append(x.name)
# Restore checkpoint here because we have to freeze the graph
tf_saver = tf.train.Saver()
tf_saver.restore(save_path=checkpoint, sess=tf_sess)
frozen_graph = tf.graph_util.convert_variables_to_constants(
tf_sess,
tf_sess.graph_def,
output_node_names=output_node_names
)
num_nodes = len(frozen_graph.node)
print('Converting graph using TensorFlow-TensorRT...')
frozen_graph = trt.create_inference_graph(
input_graph_def=frozen_graph,
outputs=output_node_names,
max_batch_size=trt_params["batch_size_per_gpu"],
max_workspace_size_bytes=trt_params["trt_max_workspace_size_bytes"],
precision_mode=trt_params["trt_precision_mode"],
minimum_segment_size=trt_params["trt_minimum_segment_size"],
is_dynamic_op=trt_params["trt_is_dynamic_op"],
maximum_cached_engines=trt_params["trt_maximum_cached_engines"]
)
# Remove unused inputs from input_map.
inputs_to_remove = []
for k in input_map:
if k not in [node.name for node in frozen_graph.node]:
inputs_to_remove.append(k)
for k in inputs_to_remove:
del input_map[k]
print('Total node count before and after TF-TRT conversion:',
num_nodes, '->', len(frozen_graph.node))
print('TRT node count:',
len([1 for n in frozen_graph.node if str(n.op) == 'TRTEngineOp']))
# Perform calibration for INT8 precision mode
if self.params.get("trt_precision_mode", "FP32").upper() == 'INT8':
with tf.Session(config=tf_config) as tf_sess:
calib_graph = frozen_graph
num_iterations = 10
print('Calibrating INT8...')
outputs = tf.import_graph_def(
calib_graph,
input_map=input_map,
return_elements=output_node_names_and_ports,
name='')
self._num_objects_per_step = [self._get_num_objects_per_step(worker_id)
for worker_id in range(self.num_gpus)]
results_per_batch = iterate_data(
self, tf_sess, compute_loss=False, mode='infer', verbose=False,
num_steps=num_iterations
)
frozen_graph = trt.calib_graph_to_infer_graph(calib_graph)
del calib_graph
print('INT8 graph created.')
print('Nodes INT8:', len(frozen_graph.node))
# Import TRT converted graph to default graph, mapping it to the original
# input tensors.
outputs = tf.import_graph_def(
frozen_graph,
input_map=input_map,
return_elements=output_node_names_and_ports,
name='')
# Reconstruct SparseTensors
final_outputs = []
for tensor in outputs:
if tensor.name in output_sparse_tensor_map:
component_names = output_sparse_tensor_map[tensor.name]
# Find tensors in outputs for components
component_tensors = [[x for x in outputs if x.name == name][0]
for name in component_names]
# Remove all components from outputs so we don't create duplicates of
# this SparseTensor
for x in component_tensors:
if x in outputs:
outputs.remove(x)
final_outputs.append(tf.SparseTensor(*component_tensors))
else:
final_outputs.append(tensor)
return loss, final_outputs
@abc.abstractmethod
def _build_forward_pass_graph(self, input_tensors, gpu_id=0):
"""Abstract method. Should create the graph of the forward pass of the model.
Args:
input_tensors: ``input_tensors`` defined by the data_layer class.
gpu_id (int, optional): id of the GPU where the current copy of the model
is constructed. For Horovod this is always zero.
Returns:
tuple: tuple containing loss tensor and list of outputs tensors.
Loss tensor will be automatically provided to the optimizer and
corresponding :attr:`train_op` will be created.
Samples tensors are stored in the :attr:`_outputs` attribute and can be
accessed by calling :meth:`get_output_tensors` function. For example,
this happens inside :class:`utils.hooks.RunEvaluationHook`
to fetch output values for evaluation.
Both loss and outputs can be None when corresponding part of the graph
is not built.
"""
pass
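# Hedged sketch (not part of the original class): a minimal subclass override
# of _build_forward_pass_graph might look like the commented code below,
# assuming a single dense source tensor and a regression-style target.
#
#   def _build_forward_pass_graph(self, input_tensors, gpu_id=0):
#     source = input_tensors['source_tensors'][0]
#     logits = tf.layers.dense(source, units=1, name="toy_logits")
#     if self.mode == "infer":
#       return None, [logits]
#     targets = input_tensors['target_tensors'][0]
#     loss = tf.losses.mean_squared_error(labels=targets, predictions=logits)
#     return loss, [logits]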
def maybe_print_logs(self, input_values, output_values, training_step):
"""This method can be used to print logs that help to visualize training.
For example, you can print sample input sequences and their corresponding
predictions. This method will be called every ``print_samples_steps``
(config parameter) iterations and input/output values will be populated
automatically by calling ``sess.run`` on corresponding tensors. Note that
this method is not abstract and does not have to be implemented in
derived classes. But if additional printing functionality is required,
overwriting this method can be a useful way to add it.
Args:
input_values: evaluation of
:meth:`self.get_data_layer(0).input_tensors
<data.data_layer.DataLayer.input_tensors>`, that is, input tensors
for one batch on the *first* GPU.
output_values: evaluation of
:meth:`self.get_output_tensors(0) <get_output_tensors>`,
that is, output tensors for one batch on the *first* GPU.
training_step (int): Current training step.
Returns:
dict: dictionary with values that need to be logged to TensorBoard
(can be empty).
"""
# by default return an empty dictionary and do nothing
return {}
def evaluate(self, input_values, output_values):
"""This method can be used in conjunction with
:meth:`self.finalize_evaluation()<finalize_evaluation>` to calculate
evaluation metrics.
For example, for speech-to-text models these methods can calculate
word-error-rate on the validation data. For text-to-text models, these
methods can compute BLEU score. Look at the corresponding derived classes
for examples of this. These methods will be called every
``eval_steps`` (config parameter) iterations and
input/output values will be populated automatically by calling ``sess.run``
on corresponding tensors (using evaluation model).
The :meth:`self.evaluate()<evaluate>` method is called on each batch data
and its results will be collected and provided to
:meth:`self.finalize_evaluation()<finalize_evaluation>` for finalization.
Note that
this function is not abstract and does not have to be implemented in
derived classes. But if evaluation functionality is required,
overwriting this function can be a useful way to add it.
Args:
input_values: evaluation of
:meth:`self.get_data_layer().input_tensors
<data.data_layer.DataLayer.input_tensors>` concatenated across
all workers. That is, input tensors for one batch combined
from *all* GPUs.
output_values: evaluation of
:meth:`self.get_output_tensors() <get_output_tensors>` concatenated
across all workers. That is, output tensors for one batch combined
from *all* GPUs.
Returns:
list: all necessary values for evaluation finalization (e.g. accuracy on
current batch, which will then be averaged in finalization method).
"""
return []
def finalize_evaluation(self, results_per_batch, training_step=None):
"""This method can be used in conjunction with
:meth:`self.evaluate()<evaluate>` to calculate
evaluation metrics.
For example, for speech-to-text models these methods can calculate
word-error-rate on the validation data. For text-to-text models, these
methods can compute BLEU score. Look at the corresponding derived classes
for examples of this. These methods will be called every
``eval_steps`` (config parameter) iterations and
input/output values will be populated automatically by calling ``sess.run``
on corresponding tensors (using evaluation model).
The :meth:`self.evaluate()<evaluate>` method is called on each batch data
and its results will be collected and provided to
:meth:`self.finalize_evaluation()<finalize_evaluation>` for finalization.
Note that
these methods are not abstract and do not have to be implemented in
derived classes. But if evaluation functionality is required,
overwriting these methods can be a useful way to add it.
Args:
results_per_batch (list): aggregation of values returned from all calls
to :meth:`self.evaluate()<evaluate>` method (number of calls will be
equal to number of evaluation batches).
training_step (int): current training step. Will only be passed if mode
is "train_eval".
Returns:
dict: dictionary with values that need to be logged to TensorBoard
(can be empty).
"""
# by default return an empty dictionary and do nothing
return {}
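# Hedged sketch (illustrative only): a derived model could pair evaluate() and
# finalize_evaluation() to average a per-batch accuracy, assuming numpy is
# available as np and outputs/targets are aligned class-id arrays.
#
#   def evaluate(self, input_values, output_values):
#     correct = np.sum(output_values[0] == input_values['target_tensors'][0])
#     return [float(correct), float(len(output_values[0]))]
#
#   def finalize_evaluation(self, results_per_batch, training_step=None):
#     correct = sum(r[0] for r in results_per_batch)
#     total = sum(r[1] for r in results_per_batch)
#     return {'Eval accuracy': correct / total}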
def infer(self, input_values, output_values):
"""This method is analogous to :meth:`self.evaluate()<evaluate>`, but used
in conjunction with :meth:`self.finalize_inference()<finalize_inference>`
to perform inference.
Args:
input_values: evaluation of
:meth:`self.get_data_layer().input_tensors
<data.data_layer.DataLayer.input_tensors>` concatenated across
all workers. That is, input tensors for one batch combined
from *all* GPUs.
output_values: evaluation of
:meth:`self.get_output_tensors() <get_output_tensors>` concatenated
across all workers. That is, output tensors for one batch combined
from *all* GPUs.
Returns:
list: all necessary values for inference finalization (e.g. this method
can return final generated sequences for each batch which will then be
saved to file in :meth:`self.finalize_inference()<finalize_inference>`
method).
"""
return []
def finalize_inference(self, results_per_batch, output_file):
"""This method should be implemented if the model support inference mode.
For example for speech-to-text and text-to-text models, this method will
log the corresponding input-output pair to the output_file.
Args:
results_per_batch (list): aggregation of values returned from all calls
to :meth:`self.infer()<infer>` method (number of calls will be
equal to number of inference batches).
output_file (str): name of the output file that inference results should
be saved to.
"""
pass
def clip_last_batch(self, last_batch, true_size):
"""This method performs last batch clipping.
Used in cases when dataset is not divisible by the batch size and model
does not support dynamic batch sizes. In those cases, the last batch will
contain some data from the "next epoch" and this method can be used
to remove that data. This method works for both
dense and sparse tensors. In most cases you will not need to overwrite this
method.
Args:
last_batch (list): list with elements that could be either ``np.array``
or ``tf.SparseTensorValue`` containing data for last batch. The
assumption is that the first axis of all data tensors will correspond
to the current batch size.
true_size (int): true size that the last batch should be cut to.
"""
return clip_last_batch(last_batch, true_size)
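# Illustrative example: if batch_size is 8 but only 5 samples remain in the
# dataset, the final batch is filled up with 3 samples from the next epoch and
# clip_last_batch(last_batch, true_size=5) trims every tensor in last_batch
# back to its first 5 entries along the batch axis.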
def get_output_tensors(self, worker_id=0):
"""Returns output tensors generated by :meth:`_build_forward_pass_graph.`
When using Horovod, ``worker_id`` parameter is ignored. When using
tower-based multi-GPU approach, ``worker_id`` can be used to select tensors
for corresponding tower/GPU.
Args:
worker_id (int): id of the worker to get tensors from
(not used for Horovod).
Returns:
output tensors.
"""
if self.on_horovod:
return self._output
else:
return self._outputs[worker_id]
def get_data_layer(self, worker_id=0):
"""Returns model data layer.
When using Horovod, ``worker_id`` parameter is ignored. When using
tower-based multi-GPU approach, ``worker_id`` can be used to select
data layer for corresponding tower/GPU.
Args:
worker_id (int): id of the worker to get data layer from
(not used for Horovod).
Returns:
model data layer.
"""
if self.on_horovod:
return self._data_layer
else:
return self._data_layers[worker_id]
def get_tf_dtype(self):
"""Returns actual TensorFlow dtype that will be used as variables dtype."""
if self.params['dtype'] == "mixed":
return tf.float16
else:
return self.params['dtype']
def _get_num_objects_per_step(self, worker_id=0):
"""Define this method if you need benchmarking functionality.
For example, for translation models this method should return the number of
tokens in the current batch; for image recognition models it should return
the number of images in the current batch.
Args:
worker_id (int): id of the worker to get data layer from
(not used for Horovod).
Returns:
tf.Tensor with number of objects in batch.
"""
raise NotImplementedError()
def get_num_objects_per_step(self, worker_id=0):
if self._num_objects_per_step:
return self._num_objects_per_step[worker_id]
else:
raise NotImplementedError()
@property
def params(self):
"""Parameters used to construct the model (dictionary)."""
return self._params
@property
def steps_in_epoch(self):
"""Number of steps in epoch.
This parameter is only populated if ``num_epochs`` was specified in the
config (otherwise it is None).
It is used in training hooks to correctly print epoch number.
"""
return self._steps_in_epoch
@property
def last_step(self):
"""Number of steps the training should be run for."""
return self._last_step
@property
def num_gpus(self):
"""Number of GPUs the model will be run on.
For Horovod this is always 1 and actual number of GPUs is controlled by
Open-MPI parameters.
"""
return len(self._gpu_ids)
@property
def mode(self):
"""Mode the model is executed in ("train", "eval" or "infer")."""
return self._mode
@property
def on_horovod(self):
"""Whether the model is run on Horovod or not."""
return self._hvd is not None
@property
def hvd(self):
"""horovod.tensorflow module"""
return self._hvd
| OpenSeq2Seq-master | open_seq2seq/models/model.py |
# Copyright (c) 2019 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import librosa
import matplotlib as mpl
import numpy as np
from scipy.io.wavfile import write
from six import BytesIO
from six.moves import range
mpl.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
from .encoder_decoder import EncoderDecoderModel
def plot_spectrograms(
specs,
titles,
stop_token_pred,
audio_length,
logdir,
train_step,
stop_token_target=None,
number=0,
append=False,
save_to_tensorboard=False
):
"""
Helper function to create an image to be logged to disk or a tf.Summary to be
logged to tensorboard.
Args:
specs (array): array of images to show
titles (array): array of titles. Must match lengths of specs array
stop_token_pred (np.array): np.array of size [time, 1] containing the stop
token predictions from the model.
audio_length (int): length of the predicted spectrogram
logdir (str): dir to save the image file to if save_to_tensorboard is disabled.
train_step (int): current training step
stop_token_target (np.array): np.array of size [time, 1] containing the stop
token target.
number (int): Current sample number (used if evaluating more than 1 sample
from a batch)
append (str): Optional string to append to file name e.g. train, eval, infer
save_to_tensorboard (bool): If False, the created image is saved to the
logdir as a png file. If True, the function returns a tf.Summary object
containing the image and will be logged to the current tensorboard file.
Returns:
tf.Summary or None
"""
num_figs = len(specs) + 1
fig, ax = plt.subplots(nrows=num_figs, figsize=(8, num_figs * 3))
for i, (spec, title) in enumerate(zip(specs, titles)):
spec = np.pad(spec, ((1, 1), (1, 1)), "constant", constant_values=0.)
spec = spec.astype(float)
colour = ax[i].imshow(
spec.T, cmap='viridis', interpolation=None, aspect='auto'
)
ax[i].invert_yaxis()
ax[i].set_title(title)
fig.colorbar(colour, ax=ax[i])
if stop_token_target is not None:
stop_token_target = stop_token_target.astype(float)
ax[-1].plot(stop_token_target, 'r.')
stop_token_pred = stop_token_pred.astype(float)
ax[-1].plot(stop_token_pred, 'g.')
ax[-1].axvline(x=audio_length)
ax[-1].set_xlim(0, len(specs[0]))
ax[-1].set_title("stop token")
plt.xlabel('time')
plt.tight_layout()
cb = fig.colorbar(colour, ax=ax[-1])
cb.remove()
if save_to_tensorboard:
tag = "{}_image".format(append)
iostream = BytesIO()
fig.savefig(iostream, dpi=300)
summary = tf.Summary.Image(
encoded_image_string=iostream.getvalue(),
height=int(fig.get_figheight() * 300),
width=int(fig.get_figwidth() * 300)
)
summary = tf.Summary.Value(tag=tag, image=summary)
plt.close(fig)
return summary
else:
if append:
name = '{}/Output_step{}_{}_{}.png'.format(
logdir, train_step, number, append
)
else:
name = '{}/Output_step{}_{}.png'.format(logdir, train_step, number)
if logdir[0] != '/':
name = "./" + name
# save
fig.savefig(name, dpi=300)
plt.close(fig)
return None
def save_audio(
magnitudes,
logdir,
step,
sampling_rate,
n_fft=1024,
mode="train",
number=0,
save_format="tensorboard",
power=1.5,
gl_iters=50,
verbose=True,
max_normalization=False
):
"""
Helper function to create a wav file to be logged to disk or a tf.Summary to
be logged to tensorboard.
Args:
magnitudes (np.array): np.array of size [time, n_fft/2 + 1] containing the
energy spectrogram.
logdir (str): dir to save the audio file to if save_format is "disk".
step (int): current training step
n_fft (int): number of filters for fft and ifft.
sampling_rate (int): sampling rate in Hz of the audio to be saved.
number (int): Current sample number (used if evaluating more than 1 sample
from a batch).
mode (str): Optional string to append to file name e.g. train, eval, infer.
save_format: save_audio can either return the np.array containing the
generated sound, log the wav file to the disk, or return a tensorboard
summary object. Each method can be enabled by passing save_format as
"np.array", "tensorboard", or "disk" respectively.
Returns:
tf.Summary or None
"""
# Clip signal max and min
if np.min(magnitudes) < 0 or np.max(magnitudes) > 255:
if verbose:
print("WARNING: {} audio was clipped at step {}".format(mode.capitalize(), step))
magnitudes = np.clip(magnitudes, a_min=0, a_max=255)
signal = griffin_lim(magnitudes.T ** power, n_iters=gl_iters, n_fft=n_fft)
if max_normalization:
signal /= np.max(np.abs(signal))
if save_format == "np.array":
return signal
elif save_format == "tensorboard":
tag = "{}_audio".format(mode)
iostream = BytesIO()
write(iostream, sampling_rate, signal)
summary = tf.Summary.Audio(encoded_audio_string=iostream.getvalue())
summary = tf.Summary.Value(tag=tag, audio=summary)
return summary
elif save_format == "disk":
file_name = '{}/sample_step{}_{}_{}.wav'.format(logdir, step, number, mode)
if logdir[0] != '/':
file_name = "./" + file_name
write(file_name, sampling_rate, signal)
return None
else:
print((
"WARN: The save format passed to save_audio was not understood. No "
"sound files will be saved for the current step. "
"Received '{}'."
"Expected one of 'np.array', 'tensorboard', or 'disk'"
).format(save_format))
return None
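# Hedged usage sketch (values are illustrative): logging a predicted magnitude
# spectrogram of shape [time, n_fft // 2 + 1] as audio to TensorBoard:
#
#   summary = save_audio(mag_spec, logdir="logs", step=1000,
#                        sampling_rate=22050, n_fft=1024,
#                        mode="eval", save_format="tensorboard")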
def griffin_lim(magnitudes, n_iters=50, n_fft=1024):
"""
Griffin-Lim algorithm to convert magnitude spectrograms to audio signals
"""
phase = np.exp(2j * np.pi * np.random.rand(*magnitudes.shape))
complex_spec = magnitudes * phase
signal = librosa.istft(complex_spec)
if not np.isfinite(signal).all():
print("WARNING: audio was not finite, skipping audio saving")
return np.array([0])
for _ in range(n_iters):
_, phase = librosa.magphase(librosa.stft(signal, n_fft=n_fft))
complex_spec = magnitudes * phase
signal = librosa.istft(complex_spec)
return signal
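# Illustrative sketch: griffin_lim expects a linear-scale magnitude spectrogram
# shaped like librosa.stft output, i.e. [n_fft // 2 + 1, time] (save_audio
# above passes magnitudes.T for exactly that reason).
#
#   magnitudes = np.abs(librosa.stft(wav, n_fft=1024))
#   reconstructed = griffin_lim(magnitudes, n_iters=50, n_fft=1024)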
class Text2Speech(EncoderDecoderModel):
"""
Base text-to-speech model.
"""
@staticmethod
def get_required_params():
return dict(
EncoderDecoderModel.get_required_params(), **{
"save_to_tensorboard": bool,
}
)
def __init__(self, params, mode="train", hvd=None):
super(Text2Speech, self).__init__(params, mode=mode, hvd=hvd)
self._save_to_tensorboard = self.params["save_to_tensorboard"]
def print_logs(self,
mode,
specs,
titles,
stop_token_pred,
stop_target,
audio_length,
step,
predicted_final_spec,
predicted_mag_spec=None):
"""
Save audio files and plots.
Args:
mode: "train" or "eval".
specs: spectrograms to plot.
titles: spectrogram titles.
stop_token_pred: stop token prediction.
stop_target: stop target.
audio_length: length of the audio.
step: current step.
predicted_final_spec: predicted mel spectrogram.
predicted_mag_spec: predicted magnitude spectrogram.
Returns:
Dictionary to log.
"""
dict_to_log = {}
im_summary = plot_spectrograms(
specs,
titles,
stop_token_pred,
audio_length,
self.params["logdir"],
step,
append=mode,
save_to_tensorboard=self._save_to_tensorboard,
stop_token_target=stop_target
)
dict_to_log['image'] = im_summary
if audio_length < 3:
return {}
if self._save_to_tensorboard:
save_format = "tensorboard"
else:
save_format = "disk"
if predicted_mag_spec is not None:
predicted_mag_spec = predicted_mag_spec[:audio_length - 1, :]
if self.get_data_layer()._exp_mag is False:
predicted_mag_spec = np.exp(predicted_mag_spec)
predicted_mag_spec = self.get_data_layer().get_magnitude_spec(predicted_mag_spec)
wav_summary = save_audio(
predicted_mag_spec,
self.params["logdir"],
step,
n_fft=self.get_data_layer().n_fft,
sampling_rate=self.get_data_layer().sampling_rate,
mode=mode + "_mag",
save_format=save_format
)
dict_to_log['audio_mag'] = wav_summary
predicted_final_spec = predicted_final_spec[:audio_length - 1, :]
predicted_final_spec = self.get_data_layer().get_magnitude_spec(
predicted_final_spec,
is_mel=True
)
wav_summary = save_audio(
predicted_final_spec,
self.params["logdir"],
step,
n_fft=self.get_data_layer().n_fft,
sampling_rate=self.get_data_layer().sampling_rate,
mode=mode,
save_format=save_format,
max_normalization=self.get_data_layer().max_normalization
)
dict_to_log['audio'] = wav_summary
if self._save_to_tensorboard:
return dict_to_log
return {}
def infer(self, input_values, output_values):
if self.on_horovod:
raise ValueError("Inference is not supported on horovod")
return [input_values, output_values]
def evaluate(self, input_values, output_values):
# Need to reduce amount of data sent for horovod
# Use last element
idx = -1
output_values = [(item[idx]) for item in output_values]
input_values = {
key: [value[0][idx], value[1][idx]] for key, value in input_values.items()
}
return [input_values, output_values]
def get_alignments(self, attention_mask):
"""
Get attention alignment plots.
Args:
attention_mask: attention alignment.
Returns:
Specs and titles to plot.
"""
raise NotImplementedError()
def finalize_inference(self, results_per_batch, output_file):
print("output_file is ignored for tts")
print("results are logged to the logdir")
batch_size = len(results_per_batch[0][0]["source_tensors"][0])
for i, sample in enumerate(results_per_batch):
output_values = sample[1]
predicted_final_specs = output_values[1]
attention_mask = output_values[2]
stop_tokens = output_values[3]
sequence_lengths = output_values[4]
for j in range(len(predicted_final_specs)):
predicted_final_spec = predicted_final_specs[j]
attention_mask_sample = attention_mask[j]
stop_tokens_sample = stop_tokens[j]
specs = [predicted_final_spec]
titles = ["final spectrogram"]
audio_length = sequence_lengths[j]
alignment_specs, alignment_titles = self.get_alignments(attention_mask_sample)
specs += alignment_specs
titles += alignment_titles
if "mel" in self.get_data_layer().params["output_type"]:
mag_spec = self.get_data_layer().get_magnitude_spec(predicted_final_spec)
log_mag_spec = np.log(np.clip(mag_spec, a_min=1e-5, a_max=None))
specs.append(log_mag_spec)
titles.append("magnitude spectrogram")
elif "both" in self.get_data_layer().params["output_type"]:
mag_spec = self.get_data_layer().get_magnitude_spec(predicted_final_spec, is_mel=True)
specs.append(mag_spec)
titles.append("mag spectrogram from mel basis")
specs.append(output_values[5][j])
titles.append("mag spectrogram from proj layer")
im_summary = plot_spectrograms(
specs,
titles,
stop_tokens_sample,
audio_length,
self.params["logdir"],
0,
number=i * batch_size + j,
append="infer"
)
if audio_length > 2:
if "both" in self.get_data_layer().params["output_type"]:
predicted_mag_spec = output_values[5][j][:audio_length - 1, :]
wav_summary = save_audio(
predicted_mag_spec,
self.params["logdir"],
0,
n_fft=self.get_data_layer().n_fft,
sampling_rate=self.get_data_layer().sampling_rate,
mode="infer_mag",
number=i * batch_size + j,
save_format="disk",
max_normalization=self.get_data_layer().max_normalization
)
predicted_final_spec = predicted_final_spec[:audio_length - 1, :]
predicted_final_spec = self.get_data_layer().get_magnitude_spec(predicted_final_spec, is_mel=True)
wav_summary = save_audio(
predicted_final_spec,
self.params["logdir"],
0,
n_fft=self.get_data_layer().n_fft,
sampling_rate=self.get_data_layer().sampling_rate,
mode="infer",
number=i * batch_size + j,
save_format="disk",
max_normalization=self.get_data_layer().max_normalization
)
def finalize_evaluation(self, results_per_batch, training_step=None, samples_count=1):
sample = results_per_batch[0]
input_values = sample[0]
output_values = sample[1]
y_sample, stop_target = input_values["target_tensors"]
predicted_spec = output_values[0]
predicted_final_spec = output_values[1]
attention_mask = output_values[2]
stop_token_pred = output_values[3]
audio_length = output_values[4]
max_length = np.max([
y_sample.shape[0],
predicted_final_spec.shape[0],
])
predictions_pad = np.zeros(
[max_length - np.shape(predicted_final_spec)[0], np.shape(predicted_final_spec)[-1]]
)
stop_token_pred_pad = np.zeros(
[max_length - np.shape(predicted_final_spec)[0], 1]
)
spec_pad = np.zeros([max_length - np.shape(y_sample)[0], np.shape(y_sample)[-1]])
stop_token_pad = np.zeros([max_length - np.shape(y_sample)[0]])
predicted_spec = np.concatenate(
[predicted_spec, predictions_pad], axis=0
)
predicted_final_spec = np.concatenate(
[predicted_final_spec, predictions_pad], axis=0
)
stop_token_pred = np.concatenate(
[stop_token_pred, stop_token_pred_pad], axis=0
)
y_sample = np.concatenate([y_sample, spec_pad], axis=0)
stop_target = np.concatenate([stop_target, stop_token_pad], axis=0)
specs = [
y_sample,
predicted_spec,
predicted_final_spec
]
titles = [
"training data",
"decoder results",
"post net results"
]
alignment_specs, alignment_titles = self.get_alignments(attention_mask)
specs += alignment_specs
titles += alignment_titles
predicted_mag_spec = None
if "both" in self.get_data_layer().params["output_type"]:
n_feats = self.get_data_layer().params["num_audio_features"]
predicted_mag_spec = output_values[5]
mag_pred_pad = np.zeros(
[max_length - np.shape(predicted_mag_spec)[0], n_feats["magnitude"]]
)
predicted_mag_spec = np.concatenate([predicted_mag_spec, mag_pred_pad], axis=0)
specs.append(predicted_mag_spec)
titles.append("magnitude spectrogram")
mel, mag = np.split(
y_sample,
[n_feats["mel"]],
axis=1
)
specs.insert(0, mel)
specs[1] = mag
titles.insert(0, "target mel")
titles[1] = "target mag"
return self.print_logs(
mode="eval",
specs=specs,
titles=titles,
stop_token_pred=stop_token_pred,
stop_target=stop_target[0],
audio_length=audio_length,
step=training_step,
predicted_final_spec=predicted_final_spec,
predicted_mag_spec=predicted_mag_spec
)
def maybe_print_logs(self, input_values, output_values, training_step):
spec, stop_target, _ = input_values['target_tensors']
predicted_decoder_spec = output_values[0]
predicted_final_spec = output_values[1]
attention_mask = output_values[2]
stop_token_pred = output_values[3]
y_sample = spec[0]
stop_target = stop_target[0]
predicted_spec = predicted_decoder_spec[0]
predicted_final_spec = predicted_final_spec[0]
alignment = attention_mask[0]
stop_token_pred = stop_token_pred[0]
audio_length = output_values[4][0]
specs = [
y_sample,
predicted_spec,
predicted_final_spec
]
titles = [
"training data",
"decoder results",
"post net results"
]
alignment_specs, alignment_titles = self.get_alignments(alignment)
specs += alignment_specs
titles += alignment_titles
predicted_mag_spec = None
if "both" in self.get_data_layer().params["output_type"]:
predicted_mag_spec = output_values[5][0]
specs.append(predicted_mag_spec)
titles.append("magnitude spectrogram")
n_feats = self.get_data_layer().params["num_audio_features"]
mel, mag = np.split(
y_sample,
[n_feats["mel"]],
axis=1
)
specs.insert(0, mel)
specs[1] = mag
titles.insert(0, "target mel")
titles[1] = "target mag"
return self.print_logs(
mode="train",
specs=specs,
titles=titles,
stop_token_pred=stop_token_pred,
stop_target=stop_target,
audio_length=audio_length,
step=training_step,
predicted_final_spec=predicted_final_spec,
predicted_mag_spec=predicted_mag_spec
)
| OpenSeq2Seq-master | open_seq2seq/models/text2speech.py |
# Copyright (c) 2019 NVIDIA Corporation
from .text2speech import Text2Speech
class Text2SpeechTacotron(Text2Speech):
"""
Text-to-speech model for Tacotron.
"""
def get_alignments(self, attention_mask):
specs = [attention_mask]
titles = ["alignments"]
return specs, titles
| OpenSeq2Seq-master | open_seq2seq/models/text2speech_tacotron.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import tensorflow as tf
from six.moves import range
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from io import BytesIO
from open_seq2seq.utils.utils import deco_print
from .encoder_decoder import EncoderDecoderModel
import pickle
def sparse_tensor_to_chars(tensor, idx2char):
text = [''] * tensor.dense_shape[0]
for idx_tuple, value in zip(tensor.indices, tensor.values):
text[idx_tuple[0]] += idx2char[value]
return text
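# Illustrative example (hypothetical values): for a tf.SparseTensorValue with
# dense_shape=[2, T], indices [[0, 0], [0, 1], [1, 0]], values [5, 12, 7] and
# idx2char = {5: 'c', 12: 'a', 7: 't'}, this returns ['ca', 't'] -- one decoded
# string per batch element.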
def sparse_tensor_to_chars_bpe(tensor):
idx = [[] for _ in range(tensor.dense_shape[0])]
for idx_tuple, value in zip(tensor.indices, tensor.values):
idx[idx_tuple[0]].append(int(value))
return idx
def dense_tensor_to_chars(tensor, idx2char, startindex, endindex):
batch_size = len(tensor)
text = [''] * batch_size
for batch_num in range(batch_size):
'''text[batch_num] = "".join([idx2char[idx] for idx in tensor[batch_num]
if idx not in [startindex, endindex]])'''
text[batch_num] = ""
for idx in tensor[batch_num]:
if idx == endindex:
break
text[batch_num] += idx2char[idx]
return text
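# Illustrative example (hypothetical values): with idx2char = {1: 'h', 2: 'i'},
# startindex=0 and endindex=3, dense_tensor_to_chars([[1, 2, 3, 2]], idx2char,
# 0, 3) returns ['hi'] -- decoding stops at the first end-of-sequence token.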
def levenshtein(a, b):
"""Calculates the Levenshtein distance between a and b.
The code was copied from: http://hetland.org/coding/python/levenshtein.py
"""
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = list(range(n + 1))
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
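# Illustrative example: levenshtein("the cat sat".split(),
# "the cat sat down".split()) == 1; dividing by the number of words in the
# reference gives the word error rate computed in maybe_print_logs below.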
def plot_attention(alignments, pred_text, encoder_len, training_step):
alignments = alignments[:len(pred_text), :encoder_len]
fig = plt.figure(figsize=(15, 10))
ax = fig.add_subplot(1, 1, 1)
img = ax.imshow(alignments, interpolation='nearest', cmap='Blues')
ax.grid()
#fig.savefig('/home/rgadde/Desktop/OpenSeq2Seq/plots/file{}.png'.format(training_step), dpi=300)
sbuffer = BytesIO()
fig.savefig(sbuffer, dpi=300)
summary = tf.Summary.Image(
encoded_image_string=sbuffer.getvalue(),
height=int(fig.get_figheight() * 2),
width=int(fig.get_figwidth() * 2)
)
summary = tf.Summary.Value(
tag="attention_summary_step_{}".format(int(training_step / 2200)), image=summary)
plt.close(fig)
return summary
class Speech2Text(EncoderDecoderModel):
def _create_decoder(self):
data_layer = self.get_data_layer()
self.params['decoder_params']['tgt_vocab_size'] = (
data_layer.params['tgt_vocab_size']
)
self.dump_outputs = self.params['decoder_params'].get('infer_logits_to_pickle', False)
self.is_bpe = data_layer.params.get('bpe', False)
self.tensor_to_chars = sparse_tensor_to_chars
self.tensor_to_char_params = {}
self.autoregressive = data_layer.params.get('autoregressive', False)
if self.autoregressive:
self.params['decoder_params']['GO_SYMBOL'] = data_layer.start_index
self.params['decoder_params']['END_SYMBOL'] = data_layer.end_index
self.tensor_to_chars = dense_tensor_to_chars
self.tensor_to_char_params['startindex'] = data_layer.start_index
self.tensor_to_char_params['endindex'] = data_layer.end_index
return super(Speech2Text, self)._create_decoder()
def _create_loss(self):
if self.get_data_layer().params.get('autoregressive', False):
self.params['loss_params'][
'batch_size'] = self.params['batch_size_per_gpu']
self.params['loss_params']['tgt_vocab_size'] = (
self.get_data_layer().params['tgt_vocab_size']
)
return super(Speech2Text, self)._create_loss()
def _build_forward_pass_graph(self, input_tensors, gpu_id=0):
"""TensorFlow graph for speech2text model is created here.
This function connects encoder, decoder and loss together. As an input for
encoder it will specify source tensors (as returned from
the data layer). As an input for decoder it will specify target tensors
as well as all output returned from encoder. For loss it
will also specify target tensors and all output returned from
decoder. Note that loss will only be built for mode == "train" or "eval".
Args:
input_tensors (dict): ``input_tensors`` dictionary that has to contain
``source_tensors`` key with the list of all source tensors, and
``target_tensors`` with the list of all target tensors. Note that
``target_tensors`` only need to be provided if mode is
"train" or "eval".
gpu_id (int, optional): id of the GPU where the current copy of the model
is constructed. For Horovod this is always zero.
Returns:
tuple: tuple containing loss tensor as returned from
``loss.compute_loss()`` and list of outputs tensors, which is taken from
``decoder.decode()['outputs']``. When ``mode == 'infer'``, loss will
be None.
"""
if not isinstance(input_tensors, dict) or \
'source_tensors' not in input_tensors:
raise ValueError('Input tensors should be a dict containing '
'"source_tensors" key')
if not isinstance(input_tensors['source_tensors'], list):
raise ValueError('source_tensors should be a list')
source_tensors = input_tensors['source_tensors']
if self.mode == "train" or self.mode == "eval":
if 'target_tensors' not in input_tensors:
raise ValueError('Input tensors should contain "target_tensors" key '
'when mode != "infer"')
if not isinstance(input_tensors['target_tensors'], list):
raise ValueError('target_tensors should be a list')
target_tensors = input_tensors['target_tensors']
with tf.variable_scope("ForwardPass"):
encoder_input = {"source_tensors": source_tensors}
encoder_output = self.encoder.encode(input_dict=encoder_input)
decoder_input = {"encoder_output": encoder_output}
if self.mode == "train" or self.mode == "eval":
decoder_input['target_tensors'] = target_tensors
decoder_output = self.decoder.decode(input_dict=decoder_input)
model_outputs = decoder_output.get("outputs", None)
if self.mode == "train" or self.mode == "eval":
with tf.variable_scope("Loss"):
loss_input_dict = {
"decoder_output": decoder_output,
"target_tensors": target_tensors,
}
loss = self.loss_computator.compute_loss(loss_input_dict)
else:
deco_print("Inference Mode. Loss part of graph isn't built.")
loss = None
if self.dump_outputs:
model_logits = decoder_output.get("logits", None)
return loss, [model_logits]
return loss, model_outputs
def maybe_print_logs(self, input_values, output_values, training_step):
y, len_y = input_values['target_tensors']
decoded_sequence = output_values
y_one_sample = y[0]
len_y_one_sample = len_y[0]
decoded_sequence_one_batch = decoded_sequence[0]
if self.is_bpe:
dec_list = sparse_tensor_to_chars_bpe(decoded_sequence_one_batch)[0]
true_text = self.get_data_layer().sp.DecodeIds(y_one_sample[:len_y_one_sample].tolist())
pred_text = self.get_data_layer().sp.DecodeIds(dec_list)
else:
# we also clip the sample by the correct length
true_text = "".join(map(
self.get_data_layer().params['idx2char'].get,
y_one_sample[:len_y_one_sample],
))
pred_text = "".join(self.tensor_to_chars(
decoded_sequence_one_batch,
self.get_data_layer().params['idx2char'],
**self.tensor_to_char_params
)[0])
sample_wer = levenshtein(true_text.split(), pred_text.split()) / \
len(true_text.split())
self.autoregressive = self.get_data_layer().params.get('autoregressive', False)
self.plot_attention = False # (output_values[1] != None).all()
if self.plot_attention:
attention_summary = plot_attention(
output_values[1][0], pred_text, output_values[2][0], training_step)
deco_print("Sample WER: {:.4f}".format(sample_wer), offset=4)
deco_print("Sample target: " + true_text, offset=4)
deco_print("Sample prediction: " + pred_text, offset=4)
if self.plot_attention:
return {
'Sample WER': sample_wer,
'Attention Summary': attention_summary,
}
else:
return {
'Sample WER': sample_wer,
}
def finalize_evaluation(self, results_per_batch, training_step=None):
total_word_lev = 0.0
total_word_count = 0.0
for word_lev, word_count in results_per_batch:
total_word_lev += word_lev
total_word_count += word_count
total_wer = 1.0 * total_word_lev / total_word_count
deco_print("Validation WER: {:.4f}".format(total_wer), offset=4)
return {
"Eval WER": total_wer,
}
def evaluate(self, input_values, output_values):
total_word_lev = 0.0
total_word_count = 0.0
decoded_sequence = output_values[0]
if self.is_bpe:
decoded_texts = sparse_tensor_to_chars_bpe(decoded_sequence)
else:
decoded_texts = self.tensor_to_chars(
decoded_sequence,
self.get_data_layer().params['idx2char'],
**self.tensor_to_char_params
)
batch_size = input_values['source_tensors'][0].shape[0]
for sample_id in range(batch_size):
# y is the first element of target_tensors, len_y is the second
y = input_values['target_tensors'][0][sample_id]
len_y = input_values['target_tensors'][1][sample_id]
if self.is_bpe:
true_text = self.get_data_layer().sp.DecodeIds(y[:len_y].tolist())
pred_text = self.get_data_layer().sp.DecodeIds(decoded_texts[sample_id])
else:
true_text = "".join(map(self.get_data_layer().params['idx2char'].get,
y[:len_y]))
pred_text = "".join(decoded_texts[sample_id])
if self.get_data_layer().params.get('autoregressive', False):
true_text = true_text[:-4]
# print('TRUE_TEXT: "{}"'.format(true_text))
# print('PRED_TEXT: "{}"'.format(pred_text))
total_word_lev += levenshtein(true_text.split(), pred_text.split())
total_word_count += len(true_text.split())
return total_word_lev, total_word_count
def infer(self, input_values, output_values):
preds = []
decoded_sequence = output_values[0]
if self.dump_outputs:
# decoded_sequence has 'time_major' shape: [T, B, C]
for i in range(decoded_sequence.shape[0]):
preds.append(decoded_sequence[i, :, :].squeeze())
else:
decoded_texts = self.tensor_to_chars(
decoded_sequence,
self.get_data_layer().params['idx2char'],
**self.tensor_to_char_params
)
for decoded_text in decoded_texts:
preds.append("".join(decoded_text))
return preds, input_values['source_ids']
def finalize_inference(self, results_per_batch, output_file):
preds = []
ids = []
for result, idx in results_per_batch:
preds.extend(result)
ids.extend(idx)
preds = np.array(preds)
ids = np.hstack(ids)
# restoring the correct order
preds = preds[np.argsort(ids)]
if self.dump_outputs:
dump_out = {}
dump_results = {}
files = self.get_data_layer().all_files
for i, f in enumerate(files):
dump_results[f] = preds[i]
dump_out["logits"] = dump_results
step_size = self.get_data_layer().params["window_stride"]
scale = 1
# check strides in convolutional layers
for layers in ['convnet_layers', 'conv_layers', 'cnn_layers']:
convs = self.encoder.params.get(layers)
if convs:
for c in convs:
scale *= c["stride"][0]
dump_out["step_size"] = scale*step_size
dump_out["vocab"] = self.get_data_layer().params['idx2char']
with open(output_file, 'wb') as f:
pickle.dump(dump_out, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
else:
pd.DataFrame(
{
'wav_filename': self.get_data_layer().all_files,
'predicted_transcript': preds,
},
columns=['wav_filename', 'predicted_transcript'],
).to_csv(output_file, index=False)
def _get_num_objects_per_step(self, worker_id=0):
"""Returns number of audio frames in current batch."""
data_layer = self.get_data_layer(worker_id)
num_frames = tf.reduce_sum(data_layer.input_tensors['source_tensors'][1])
return num_frames
| OpenSeq2Seq-master | open_seq2seq/models/speech2text.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import runpy
import tensorflow as tf
from open_seq2seq.test_utils.create_reversed_examples import create_data, \
remove_data
class BasicText2TextWithAttentionTest(tf.test.TestCase):
def setUp(self):
print("Setting Up BasicSeq2SeqWithAttention")
create_data(train_corpus_size=500, data_path='tmp2')
def tearDown(self):
print("Tear down BasicSeq2SeqWithAttention")
remove_data(data_path='tmp2')
def test_train(self):
config_module = runpy.run_path(
"./example_configs/text2text/toy-reversal/nmt-reversal-RR.py"
)
train_config = config_module['base_params']
if 'train_params' in config_module:
train_config.update(config_module['train_params'])
# TODO: should we maybe have just a single directory parameter?
train_config['data_layer_params']['src_vocab_file'] = (
"tmp2/vocab/source.txt"
)
train_config['data_layer_params']['tgt_vocab_file'] = (
"tmp2/vocab/target.txt"
)
train_config['data_layer_params']['source_file'] = (
"tmp2/train/source.txt"
)
train_config['data_layer_params']['target_file'] = (
"tmp2/train/target.txt"
)
step = 0
with tf.Graph().as_default():
model = config_module['base_model'](train_config, "train", None)
model.compile()
with self.test_session(use_gpu=True) as sess:
tf.global_variables_initializer().run()
sess.run(model.get_data_layer().iterator.initializer)
while True:
try:
loss, _ = sess.run([model.loss, model.train_op])
except tf.errors.OutOfRangeError:
break
step += 1
if step >= 25:
break
class BasicText2TextWithAttentionTestOnHorovod(tf.test.TestCase):
def setUp(self):
print("Setting Up BasicSeq2SeqWithAttention on Horovod")
create_data(train_corpus_size=500, data_path='tmp3')
def tearDown(self):
print("Tear down BasicSeq2SeqWithAttention on Horovod")
remove_data(data_path='tmp3')
def test_train(self):
try:
import horovod.tensorflow as hvd
except ImportError:
print("Could not test on Horovod. Is it installed?")
return
print("Attempting BasicSeq2SeqWithAttention on Horovod")
hvd.init()
config_module = runpy.run_path(
"./example_configs/text2text/toy-reversal/nmt-reversal-RR.py"
)
train_config = config_module['base_params']
if 'train_params' in config_module:
train_config.update(config_module['train_params'])
train_config['data_layer_params']['src_vocab_file'] = (
"tmp3/vocab/source.txt"
)
train_config['data_layer_params']['tgt_vocab_file'] = (
"tmp3/vocab/target.txt"
)
train_config['data_layer_params']['source_file'] = (
"tmp3/train/source.txt"
)
train_config['data_layer_params']['target_file'] = (
"tmp3/train/target.txt"
)
train_config["use_horovod"] = True
step = 0
with tf.Graph().as_default():
model = config_module['base_model'](train_config, "train", None)
model.compile()
with self.test_session(use_gpu=True) as sess:
tf.global_variables_initializer().run()
sess.run(model.get_data_layer().iterator.initializer)
while True:
try:
loss, _ = sess.run(
[model.loss, model.train_op]
)
except tf.errors.OutOfRangeError:
break
step += 1
if step >= 25:
break
if __name__ == '__main__':
tf.test.main()
| OpenSeq2Seq-master | open_seq2seq/models/text2text_test.py |
# Copyright (c) 2019 NVIDIA Corporation
from six.moves import range
from .text2speech import Text2Speech
class Text2SpeechCentaur(Text2Speech):
"""
Text-to-speech model for Centaur.
"""
def get_alignments(self, attention_mask):
alignments_name = ["dec_enc_alignment"]
specs = []
titles = []
for name, alignment in zip(alignments_name, attention_mask):
for layer in range(len(alignment)):
for head in range(alignment.shape[1]):
specs.append(alignment[layer][head])
titles.append("{}_layer_{}_head_{}".format(name, layer, head))
return specs, titles
| OpenSeq2Seq-master | open_seq2seq/models/text2speech_centaur.py |
# Copyright (c) 2018 NVIDIA Corporation
import numpy as np
from scipy.io.wavfile import write
from .encoder_decoder import EncoderDecoderModel
def save_audio(signal, logdir, step, sampling_rate, mode):
signal = np.float32(signal)
file_name = '{}/sample_step{}_{}.wav'.format(logdir, step, mode)
if logdir[0] != '/':
file_name = "./" + file_name
write(file_name, sampling_rate, signal)
class Text2SpeechWavenet(EncoderDecoderModel):
@staticmethod
def get_required_params():
return dict(
EncoderDecoderModel.get_required_params(), **{}
)
def __init__(self, params, mode="train", hvd=None):
super(Text2SpeechWavenet, self).__init__(params, mode=mode, hvd=hvd)
def maybe_print_logs(self, input_values, output_values, training_step):
save_audio(
output_values[1][-1],
self.params["logdir"],
training_step,
sampling_rate=22050,
mode="train"
)
return {}
def evaluate(self, input_values, output_values):
return output_values[1][-1]
def finalize_evaluation(self, results_per_batch, training_step=None):
save_audio(
results_per_batch[0],
self.params["logdir"],
training_step,
sampling_rate=22050,
mode="eval"
)
return {}
def infer(self, input_values, output_values):
return output_values[1][-1]
def finalize_inference(self, results_per_batch, output_file):
return {}
| OpenSeq2Seq-master | open_seq2seq/models/text2speech_wavenet.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from open_seq2seq.utils.utils import deco_print
from .encoder_decoder import EncoderDecoderModel
class Image2Label(EncoderDecoderModel):
def maybe_print_logs(self, input_values, output_values, training_step):
labels = input_values['target_tensors'][0]
logits = output_values[0]
labels = np.where(labels == 1)[1]
total = logits.shape[0]
top1 = np.sum(np.argmax(logits, axis=1) == labels)
top5 = np.sum(labels[:, np.newaxis] == np.argpartition(logits, -5)[:, -5:])
top1 = 1.0 * top1 / total
top5 = 1.0 * top5 / total
deco_print("Train batch top-1: {:.4f}".format(top1), offset=4)
deco_print("Train batch top-5: {:.4f}".format(top5), offset=4)
return {
"Train batch top-1": top1,
"Train batch top-5": top5,
}
def finalize_evaluation(self, results_per_batch, training_step=None):
top1 = 0.0
top5 = 0.0
total = 0.0
for cur_total, cur_top1, cur_top5 in results_per_batch:
top1 += cur_top1
top5 += cur_top5
total += cur_total
top1 = 1.0 * top1 / total
top5 = 1.0 * top5 / total
deco_print("Validation top-1: {:.4f}".format(top1), offset=4)
deco_print("Validation top-5: {:.4f}".format(top5), offset=4)
return {
"Eval top-1": top1,
"Eval top-5": top5,
}
def evaluate(self, input_values, output_values):
logits = output_values[0]
labels = input_values['target_tensors'][0]
labels = np.where(labels == 1)[1]
total = logits.shape[0]
top1 = np.sum(np.equal(np.argmax(logits, axis=1), labels))
top5 = np.sum(np.equal(labels[:, np.newaxis],
np.argpartition(logits, -5)[:, -5:]))
return total, top1, top5
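# Worked example (illustrative): with logits = [[0.1, 0.9, 0.05, 0.3, 0.2, 0.4]]
# and labels = [1], np.argmax picks class 1 so top1 = 1, and
# np.argpartition(logits, -5)[:, -5:] returns the indices of the five largest
# logits (order unspecified), which contain 1, so top5 = 1. Note this assumes
# at least 5 classes.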
def _get_num_objects_per_step(self, worker_id=0):
"""Returns number of images in current batch, i.e. batch size."""
data_layer = self.get_data_layer(worker_id)
num_images = tf.shape(data_layer.input_tensors['source_tensors'][0])[0]
return num_images
| OpenSeq2Seq-master | open_seq2seq/models/image2label.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.test_utils.test_speech_configs.w2l_test_config import \
base_params, train_params, eval_params, base_model
from .speech2text_test import Speech2TextModelTests
class W2LModelTests(Speech2TextModelTests):
def setUp(self):
self.base_model = base_model
self.base_params = base_params
self.train_params = train_params
self.eval_params = eval_params
def tearDown(self):
pass
def test_convergence(self):
return self.convergence_test(5.0, 30.0, 0.1)
def test_mp_collection(self):
return self.mp_collection_test(14, 6)
if __name__ == '__main__':
tf.test.main()
| OpenSeq2Seq-master | open_seq2seq/models/speech2text_w2l_test.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
import numpy.testing as npt
import tensorflow as tf
from six.moves import range
from open_seq2seq.optimizers import optimize_loss
from open_seq2seq.optimizers.mp_wrapper import mp_regularizer_wrapper, \
MixedPrecisionOptimizerWrapper
from .lr_policies import fixed_lr
class MixedPrecisionOptimizerTests(tf.test.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_regularization_normal(self):
n_samples = 3
n_hid = 2
scale_init = 1e-4
wd = 1e-4
X = np.ones((n_samples, n_hid)) / n_hid
y = np.ones((n_samples, 1)) * scale_init
for dtype in [tf.float16, tf.float32]:
# pylint: disable=no-member
regularizer = tf.contrib.layers.l2_regularizer(wd)
with tf.Graph().as_default() as g:
x_ph = tf.placeholder(dtype, [n_samples, n_hid])
y_ph = tf.placeholder(dtype, [n_samples, 1])
y_pred = tf.layers.dense(
x_ph, 1, kernel_regularizer=regularizer,
use_bias=False,
kernel_initializer=tf.constant_initializer(scale_init, dtype=dtype),
)
loss = tf.reduce_mean((y_ph - y_pred) ** 2)
reg_loss = tf.losses.get_regularization_loss()
loss += reg_loss
opt = tf.train.AdamOptimizer()
grad = opt.compute_gradients(loss)[0][0]
with self.test_session(g, use_gpu=True) as sess:
sess.run(tf.global_variables_initializer())
reg_loss_val, grad_val = sess.run([reg_loss, grad],
{x_ph: X, y_ph: y})
if dtype == tf.float16:
self.assertEqual(reg_loss_val, 0.0)
npt.assert_allclose(grad_val, np.zeros((2, 1), dtype=np.float16))
else:
self.assertAlmostEqual(reg_loss_val, 1e-12)
npt.assert_allclose(grad_val, np.ones((2, 1)) * 1e-8)
def test_regularization_mixed(self):
n_samples = 3
n_hid = 2
scale_init = 1e-4
wd = 1e-4
X = np.ones((n_samples, n_hid)) / n_hid
y = np.ones((n_samples, 1)) * scale_init
dtype = tf.float16
# pylint: disable=no-member
regularizer = mp_regularizer_wrapper(tf.contrib.layers.l2_regularizer(wd))
with tf.Graph().as_default() as g:
x_ph = tf.placeholder(dtype, [n_samples, n_hid])
y_ph = tf.placeholder(dtype, [n_samples, 1])
y_pred = tf.layers.dense(
x_ph, 1, kernel_regularizer=regularizer,
use_bias=False,
kernel_initializer=tf.constant_initializer(scale_init, dtype=dtype),
)
loss = tf.reduce_mean((y_ph - y_pred) ** 2)
reg_loss = tf.losses.get_regularization_loss()
loss += tf.cast(reg_loss, loss.dtype)
opt = MixedPrecisionOptimizerWrapper(tf.train.AdamOptimizer())
grad = opt.compute_gradients(loss)[0][0]
with self.test_session(g, use_gpu=True) as sess:
sess.run(tf.global_variables_initializer())
reg_loss_val, grad_val = sess.run([reg_loss, grad],
{x_ph: X, y_ph: y})
self.assertAlmostEqual(reg_loss_val, 0.0)
self.assertEqual(reg_loss.name, "Const_1:0")
npt.assert_allclose(grad_val, np.ones((2, 1)) * 1e-8, atol=1e-11)
def test_convergence(self):
for dtype in ['mixed', tf.float32]:
with tf.Graph().as_default() as g:
n_samples = 10
n_hid = 10
var_dtype = tf.float32 if dtype == tf.float32 else tf.float16
np.random.seed(0)
X = np.random.rand(n_samples, n_hid)
y = np.random.rand(n_samples, 1)
w = np.linalg.solve(X.T.dot(X), X.T.dot(y))
x_ph = tf.placeholder(var_dtype, [n_samples, n_hid])
y_ph = tf.placeholder(var_dtype, [n_samples, 1])
y_pred = tf.layers.dense(x_ph, 1, use_bias=False)
loss = tf.losses.mean_squared_error(y_ph, y_pred)
loss += tf.losses.get_regularization_loss()
train_op = optimize_loss(loss, "Adam", {},
lambda gs: fixed_lr(gs, 0.05), dtype=dtype)
with self.test_session(g, use_gpu=True) as sess:
sess.run(tf.global_variables_initializer())
for i in range(6000):
sess.run(train_op, {x_ph: X, y_ph: y})
w_learned = sess.run(tf.trainable_variables()[0])
npt.assert_allclose(w_learned, w, atol=0.01)
if __name__ == '__main__':
tf.test.main()
| OpenSeq2Seq-master | open_seq2seq/optimizers/mp_wrapper_test.py |
# Copyright (c) 2017 NVIDIA Corporation
"""
Module containing various learning rate policies. Learning rate policy can
be any function that takes arbitrary arguments from the config (with additional
``global_step`` variable provided automatically) and returns learning rate
value for the current step.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import math
import tensorflow as tf
from tensorflow.python.framework import ops
def fixed_lr(global_step, learning_rate):
"""Fixed learning rate policy.
This function always returns ``learning_rate``, ignoring ``global_step``
value.
Args:
global_step: global step TensorFlow tensor (ignored for this policy).
learning_rate (float): fixed learning rate to use.
Returns:
learning rate at step ``global_step``.
"""
return learning_rate
def piecewise_constant(global_step, learning_rate, boundaries,
decay_rates, steps_per_epoch=None):
"""Piecewise constant learning rate decay.
When defined in the config, only ``boundaries`` and ``decay_rates`` need to
be provided (other parameters are automatically populated by
:class:`Model<models.model.Model>` class). ``boundaries`` are treated as
epochs if ``num_epochs`` is provided in the config, otherwise treated as
steps.
Args:
global_step: global step TensorFlow tensor.
learning_rate (float): initial learning rate to use.
boundaries (list): could be either defined in steps
      (if ``steps_per_epoch=None``) or in epochs if the ``steps_per_epoch``
      parameter is defined.
decay_rates: multiplier of the initial learning rate for each boundary.
steps_per_epoch: number of batches in one training epoch. If provided,
boundaries are treated as epochs, otherwise as steps.
Returns:
learning rate at step ``global_step``.
"""
if steps_per_epoch is not None:
boundaries = [steps_per_epoch * epoch for epoch in boundaries]
decay_rates = [1.0] + decay_rates
vals = [learning_rate * decay for decay in decay_rates]
return tf.train.piecewise_constant(global_step, boundaries, vals)
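# Illustrative sketch (hypothetical values, not part of the library): with
# learning_rate=0.1, boundaries=[10000, 20000], decay_rates=[0.5, 0.1] and
# steps_per_epoch=None (so boundaries are in steps), the schedule above is
#   step <= 10000          -> lr = 0.1
#   10000 < step <= 20000  -> lr = 0.1 * 0.5 = 0.05
#   step > 20000           -> lr = 0.1 * 0.1 = 0.01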
def exp_decay(global_step, learning_rate, decay_steps, decay_rate,
use_staircase_decay, begin_decay_at=0, min_lr=0.0):
"""Exponential decay learning rate policy.
This function is equivalent to ``tensorflow.train.exponential_decay`` with
some additional functionality. Namely, it adds ``begin_decay_at`` parameter
and ``min_lr`` parameter which are the first step to start decaying learning
rate and minimal value of the learning rate correspondingly.
Args:
global_step: global step TensorFlow tensor.
learning_rate (float): initial learning rate to use.
decay_steps (int): number of steps to apply decay for.
decay_rate (float): the rate of the decay.
use_staircase_decay (bool): whether to use staircase decay.
begin_decay_at (int): the first step to start decaying learning rate.
min_lr (float): minimal value of the learning rate.
Returns:
learning rate at step ``global_step``.
"""
new_lr = tf.cond(
global_step < begin_decay_at,
lambda: learning_rate,
lambda: tf.train.exponential_decay(
learning_rate=learning_rate,
global_step=global_step-begin_decay_at,
decay_steps=decay_steps,
decay_rate=decay_rate,
staircase=use_staircase_decay),
name="learning_rate",
)
final_lr = tf.maximum(min_lr, new_lr)
return final_lr
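# Illustrative sketch (hypothetical values): with learning_rate=0.1,
# begin_decay_at=1000, decay_steps=500, decay_rate=0.5 and
# use_staircase_decay=True, the lr stays at 0.1 for the first 1000 steps and
# is then halved every 500 steps, clipped from below at min_lr.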
def poly_decay(global_step, learning_rate, decay_steps, power=1.0,
begin_decay_at=0, min_lr=0.0, warmup_steps=0):
"""Polynomial decay learning rate policy.
This function is equivalent to ``tensorflow.train.polynomial_decay`` with
some additional functionality. Namely, it adds ``begin_decay_at`` parameter
which is the first step to start decaying learning rate.
Args:
global_step: global step TensorFlow tensor.
learning_rate (float): initial learning rate to use.
decay_steps (int): number of steps to apply decay for.
power (float): power for polynomial decay.
begin_decay_at (int): the first step to start decaying learning rate.
    min_lr (float): minimal value of the learning rate
      (same as ``end_learning_rate`` TensorFlow parameter).
    warmup_steps (int): number of steps of linear learning rate warm-up
      applied before the decay schedule starts.
Returns:
learning rate at step ``global_step``.
"""
if warmup_steps > 0:
learning_rate = tf.cond(
global_step < warmup_steps,
        lambda: (learning_rate * tf.cast(global_step, tf.float32) /
                 tf.cast(warmup_steps, tf.float32)),
lambda: learning_rate,
)
lr = tf.cond(
global_step < begin_decay_at,
lambda: learning_rate,
lambda: tf.train.polynomial_decay(
learning_rate=learning_rate,
global_step=global_step-begin_decay_at,
decay_steps=decay_steps,
end_learning_rate=min_lr,
power=power),
name="learning_rate"
)
return lr
def cosine_decay(global_step, learning_rate, decay_steps, power=1.0,
begin_decay_at=0, min_lr=0.0, warmup_steps=0):
"""cosine decay learning rate policy.
This function is equivalent to ``tensorflow.train.cosine_decay`` with
some additional functionality. Namely, it adds ``begin_decay_at`` parameter
which is the first step to start decaying learning rate.
Args:
global_step: global step TensorFlow tensor.
learning_rate (float): initial learning rate to use.
decay_steps (int): number of steps to apply decay for.
    power (float): unused for cosine decay (kept for config compatibility).
begin_decay_at (int): the first step to start decaying learning rate.
    min_lr (float): minimal value of the learning rate, passed as the
      ``alpha`` parameter of ``tensorflow.train.cosine_decay``.
    warmup_steps (int): number of steps of linear learning rate warm-up
      applied before the decay schedule starts.
Returns:
learning rate at step ``global_step``.
"""
if warmup_steps > 0:
learning_rate = tf.cond(
global_step < warmup_steps,
        lambda: (learning_rate * tf.cast(global_step, tf.float32) /
                 tf.cast(warmup_steps, tf.float32)),
lambda: learning_rate,
)
lr = tf.cond(
global_step < begin_decay_at,
lambda: learning_rate,
lambda: tf.train.cosine_decay(
learning_rate=learning_rate,
global_step=global_step-begin_decay_at,
decay_steps=decay_steps,
alpha=min_lr
),
name="learning_rate"
)
return lr
def transformer_policy(global_step, learning_rate, d_model, warmup_steps,
max_lr=None, coefficient=1.0, dtype=tf.float32):
"""Transformer's learning rate policy from
https://arxiv.org/pdf/1706.03762.pdf
with a hat (max_lr) (also called "noam" learning rate decay scheme).
Args:
    global_step: global step TensorFlow tensor.
learning_rate (float): initial learning rate to use.
d_model (int): model dimensionality.
warmup_steps (int): number of warm-up steps.
max_lr (float): maximal learning rate, i.e. hat.
coefficient (float): optimizer adjustment.
Recommended 0.002 if using "Adam" else 1.0.
dtype: dtype for this policy.
Returns:
learning rate at step ``global_step``.
"""
step_num = tf.cast(global_step, dtype=dtype)
ws = tf.cast(warmup_steps, dtype=dtype)
decay = coefficient * d_model ** -0.5 * tf.minimum(
(step_num + 1) * ws ** -1.5, (step_num + 1) ** -0.5
)
new_lr = decay * learning_rate
if max_lr is not None:
return tf.minimum(max_lr, new_lr)
return new_lr
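# Illustrative sketch (hypothetical values): with learning_rate=2.0,
# d_model=512, warmup_steps=4000 and coefficient=1.0, the schedule above grows
# roughly linearly from ~3.5e-7 at step 0 to a peak of
#   2.0 * 512**-0.5 * 4000**-0.5 ~= 1.4e-3
# around step 4000, and then decays proportionally to (step + 1)**-0.5
# (optionally capped at max_lr).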
def inv_poly_decay(global_step, learning_rate, decay_steps, min_lr,
power=1.0, begin_decay_at=0, warmup_steps=0,
name="learning_rate"):
"""Inverse poly decay learning rate policy.
  lr = initial_lr / (1 + decay * t)^power
  This function is similar to ``tensorflow.train.inv_time_decay`` with
  some additional functionality. Namely, it adds:
  ``min_lr`` --- minimal (end) value of the learning rate,
  ``power`` --- power of the decay,
  ``begin_decay_at`` --- the first step to start decaying the learning rate.
Args:
global_step: global step TensorFlow tensor.
learning_rate (float): initial learning rate to use.
decay_steps (int): number of steps to apply decay for.
power (float): power for inv_time_decay.
begin_decay_at (int): the first step to start decaying learning rate.
min_lr (float): minimal value of the learning rate
(same as ``end_learning_rate`` TensorFlow parameter).
Returns:
learning rate at step ``global_step``.
"""
  min_lr = max(min_lr, 1e-8)
  min_lr = min(min_lr, learning_rate)
if power <= 0.:
raise ValueError("Inv poly decay requires power > 0.")
if global_step is None:
raise ValueError("Inv poly decay requires global_step")
with ops.name_scope(name, "InvDecay",
[learning_rate, global_step]) as name:
scale = (math.pow(learning_rate / min_lr, 1./power) - 1.) / decay_steps
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
decay_steps = tf.cast(decay_steps, tf.float32)
global_step = tf.cast(global_step, tf.float32)
denom = tf.pow(1. + scale * global_step , power)
lr = tf.div(learning_rate, denom, name=name)
return lr
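# Illustrative sketch (hypothetical values): for inv_poly_decay with
# learning_rate=0.1, min_lr=0.001, power=1.0 and decay_steps=10000, the scale
# computed above is ((0.1 / 0.001)**(1 / 1.0) - 1) / 10000 = 0.0099, so
#   lr(t) = 0.1 / (1 + 0.0099 * t)**1.0
# which gives lr(0) = 0.1 and lr(10000) = 0.001.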
| OpenSeq2Seq-master | open_seq2seq/optimizers/lr_policies.py |
# Copyright (c) 2018 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from .automatic_loss_scaler import AutomaticLossScaler
# pylint: disable=abstract-method
class MixedPrecisionOptimizerWrapper(tf.train.Optimizer):
def __init__(self, optimizer, loss_scale=None):
super(MixedPrecisionOptimizerWrapper, self).__init__(
optimizer._use_locking,
optimizer._name + '-MP',
)
self._optimizer = optimizer
self._fp32_to_fp16 = {}
self._loss_scaler = None
if loss_scale is None:
self._loss_scale = 1.0
elif isinstance(loss_scale, float):
self._loss_scale = loss_scale
elif isinstance(loss_scale, AutomaticLossScaler):
self._loss_scaler = loss_scale
self._loss_scale = self._loss_scaler.loss_scale
def compute_gradients(self, loss, var_list=None,
gate_gradients=tf.train.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
loss *= self._loss_scale
grads_and_vars_fp16 = self._optimizer.compute_gradients(
loss, var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss,
)
# collecting regularization functions
reg_var_funcs = tf.get_collection('REGULARIZATION_FUNCTIONS')
reg_funcs = dict(map(lambda x: (x[0].name, x[1]), reg_var_funcs))
# creating FP-32 variables and filling the fp32 dict
grads_and_vars_fp32 = []
with tf.variable_scope('FP32-master-copy'):
for grad, var in grads_and_vars_fp16:
if var.dtype.base_dtype == tf.float16:
fp32_var = tf.Variable(
initial_value=tf.cast(var.initialized_value(), tf.float32),
name=var.name.split(':')[0],
expected_shape=var.shape,
dtype=tf.float32,
trainable=False,
# necessary for cudnn_rnn layers which have unknown shape
validate_shape=bool(var.get_shape()),
collections=[tf.GraphKeys.GLOBAL_VARIABLES,
"FP32_MASTER_COPIES"],
)
self._fp32_to_fp16[fp32_var.name] = var
fp32_grad = tf.cast(grad, tf.float32)
# adding regularization part with respect to fp32 copy
if var.name in reg_funcs:
fp32_grad += self._loss_scale * tf.gradients(
# pylint: disable=no-member
tf.contrib.layers.apply_regularization(
reg_funcs[var.name],
[fp32_var],
),
fp32_var,
)[0]
grads_and_vars_fp32.append((fp32_grad, fp32_var))
else:
grads_and_vars_fp32.append((grad, var))
grads_and_vars_fp32 = _scale_grads(grads_and_vars_fp32,
1.0 / self._loss_scale)
return grads_and_vars_fp32
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
def apply_ops_wrapper():
update_op = self._optimizer.apply_gradients(grads_and_vars,
global_step, name)
apply_ops = []
with tf.control_dependencies([update_op]):
for grad, var in grads_and_vars:
if var.name in self._fp32_to_fp16:
dst_var = self._fp32_to_fp16[var.name]
apply_ops.append(
tf.assign(dst_var, tf.saturate_cast(var, tf.float16))
)
if apply_ops:
return tf.group(apply_ops)
return update_op
if self._loss_scaler:
grad_has_nans, grad_amax = AutomaticLossScaler.check_grads(grads_and_vars)
should_skip_update = tf.logical_or(tf.is_inf(grad_amax), grad_has_nans)
loss_scale_update_op = self._loss_scaler.update_op(grad_has_nans,
grad_amax)
with tf.control_dependencies([loss_scale_update_op]):
return tf.cond(should_skip_update, tf.no_op, apply_ops_wrapper)
else:
return apply_ops_wrapper()
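# Illustrative usage sketch (variable names are hypothetical): wrapping a
# regular optimizer for mixed-precision training with automatic loss scaling.
#
#   base_opt = tf.train.AdamOptimizer(learning_rate=1e-3)
#   scaler = AutomaticLossScaler(algorithm='Backoff')
#   opt = MixedPrecisionOptimizerWrapper(base_opt, loss_scale=scaler)
#   grads_and_vars = opt.compute_gradients(loss)  # grads w.r.t. fp32 copies
#   train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)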
def mp_regularizer_wrapper(regularizer):
def func_wrapper(weights):
if weights.dtype.base_dtype == tf.float16:
tf.add_to_collection('REGULARIZATION_FUNCTIONS', (weights, regularizer))
# disabling the inner regularizer
return None
return regularizer(weights)
return func_wrapper
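# Illustrative usage sketch (hypothetical layer): for float16 variables the
# wrapped regularizer is not applied directly; instead it is registered in the
# 'REGULARIZATION_FUNCTIONS' collection and later applied to the fp32 master
# copy inside MixedPrecisionOptimizerWrapper.compute_gradients.
#
#   regularizer = mp_regularizer_wrapper(tf.contrib.layers.l2_regularizer(1e-4))
#   outputs = tf.layers.dense(inputs, 128, kernel_regularizer=regularizer)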
def _scale_grads(grads_and_vars, scale):
scaled_grads_and_vars = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values * scale
grad = tf.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
else:
grad *= scale
scaled_grads_and_vars.append((grad, var))
return scaled_grads_and_vars
| OpenSeq2Seq-master | open_seq2seq/optimizers/mp_wrapper.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
import numpy.testing as npt
import tensorflow as tf
from six.moves import range
from open_seq2seq.optimizers import optimize_loss
from .lr_policies import fixed_lr
class IterSizeTests(tf.test.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_updates(self):
try:
import horovod.tensorflow as hvd
hvd.init()
except ImportError:
print("Horovod not installed skipping test_updates")
return
dtype = tf.float32
with tf.Graph().as_default() as g:
n_samples = 10
n_hid = 10
var_dtype = tf.float32 if dtype == tf.float32 else tf.float16
np.random.seed(0)
X = np.random.rand(n_samples, n_hid)
y = np.random.rand(n_samples, 1)
w = np.linalg.solve(X.T.dot(X), X.T.dot(y))
x_ph = tf.placeholder(var_dtype, [n_samples, n_hid])
y_ph = tf.placeholder(var_dtype, [n_samples, 1])
y_pred = tf.layers.dense(x_ph, 1, use_bias=False)
loss = tf.losses.mean_squared_error(y_ph, y_pred)
loss += tf.losses.get_regularization_loss()
skip_update_ph = tf.placeholder(tf.bool)
iter_size = 8
train_op = optimize_loss(loss, "SGD", {},
lambda gs: fixed_lr(gs, 0.1), dtype=dtype,
iter_size=iter_size, on_horovod=True,
skip_update_ph=skip_update_ph)
grad_accum = [var for var in tf.global_variables() if 'accum' in var.name][0]
var = tf.trainable_variables()[0]
with self.test_session(g, use_gpu=True) as sess:
sess.run(tf.global_variables_initializer())
for _ in range(3):
g, v = sess.run([grad_accum, var])
npt.assert_allclose(g, np.zeros(g.shape))
true_g = 2 * (X.T.dot(X).dot(v) - X.T.dot(y)) / X.shape[0] / iter_size
sess.run(train_op, {x_ph: X, y_ph: y, skip_update_ph: True})
g_new, v_new = sess.run([grad_accum, var])
npt.assert_allclose(g_new, true_g, atol=1e-7)
npt.assert_allclose(v_new, v)
sess.run(train_op, {x_ph: X, y_ph: y, skip_update_ph: True})
g_new, v_new = sess.run([grad_accum, var])
npt.assert_allclose(g_new, true_g * 2, atol=1e-7)
npt.assert_allclose(v_new, v)
sess.run(train_op, {x_ph: X, y_ph: y, skip_update_ph: True})
g_new, v_new = sess.run([grad_accum, var])
npt.assert_allclose(g_new, true_g * 3, atol=1e-7)
npt.assert_allclose(v_new, v)
sess.run(train_op, {x_ph: X, y_ph: y, skip_update_ph: False})
g_new, v_new = sess.run([grad_accum, var])
npt.assert_allclose(g_new, np.zeros(g.shape))
npt.assert_allclose(v_new, v - 0.1 * true_g * 4, atol=1e-7)
if __name__ == '__main__':
tf.test.main()
| OpenSeq2Seq-master | open_seq2seq/optimizers/optimizers_test.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.utils.utils import check_params
class AutomaticLossScaler(object):
SUPPORTED_ALGOS = ['backoff', 'logmax']
def __init__(self, algorithm='Backoff', params=None):
algorithm = algorithm.lower().strip()
if algorithm == 'backoff':
self.scaler = BackoffScaler(params)
elif algorithm == 'logmax':
self.scaler = LogMaxScaler(params) # ppf(.999)
else:
raise ValueError('Unknown scaling algorithm: {}'.format(algorithm))
def update_op(self, has_nan, amax):
return self.scaler.update_op(has_nan, amax)
@property
def loss_scale(self):
return self.scaler.loss_scale
@staticmethod
def check_grads(grads_and_vars):
has_nan_ops = []
amax_ops = []
for grad, _ in grads_and_vars:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
x = grad.values
else:
x = grad
has_nan_ops.append(tf.reduce_any(tf.is_nan(x)))
amax_ops.append(tf.reduce_max(tf.abs(x)))
has_nan = tf.reduce_any(has_nan_ops)
amax = tf.reduce_max(amax_ops)
return has_nan, amax
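# Illustrative sketch (hypothetical values): the scaler is typically built by
# algorithm name from the config and passed as ``loss_scale`` to
# MixedPrecisionOptimizerWrapper, e.g.
#
#   scaler = AutomaticLossScaler(
#       algorithm='Backoff',
#       params={'scale_min': 1.0, 'scale_max': 2.**14,
#               'step_factor': 2.0, 'step_window': 2000},
#   )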
class BackoffScaler(object):
def __init__(self, params):
if params is None:
params = {}
check_params(
config=params,
required_dict={},
optional_dict={
'scale_min': float,
'scale_max': float,
'step_factor': float,
'step_window': int
},
)
self.scale_min = params.get('scale_min', 1.0)
self.scale_max = params.get('scale_max', 2.**14)
self.step_factor = params.get('step_factor', 2.0)
self.step_window = params.get('step_window', 2000)
self.iteration = tf.Variable(initial_value=0,
trainable=False,
dtype=tf.int64)
self.last_overflow_iteration = tf.Variable(initial_value=-1,
trainable=False,
dtype=tf.int64)
self.scale = tf.Variable(initial_value=self.scale_max,
trainable=False)
def update_op(self, has_nan, amax):
def overflow_case():
new_scale_val = tf.clip_by_value(self.scale / self.step_factor,
self.scale_min, self.scale_max)
scale_assign = tf.assign(self.scale, new_scale_val)
overflow_iter_assign = tf.assign(self.last_overflow_iteration,
self.iteration)
with tf.control_dependencies([scale_assign, overflow_iter_assign]):
return tf.identity(self.scale)
def scale_case():
since_overflow = self.iteration - self.last_overflow_iteration
should_update = tf.equal(since_overflow % self.step_window, 0)
def scale_update_fn():
new_scale_val = tf.clip_by_value(self.scale * self.step_factor,
self.scale_min, self.scale_max)
return tf.assign(self.scale, new_scale_val)
return tf.cond(should_update,
scale_update_fn,
lambda: self.scale)
iter_update = tf.assign_add(self.iteration, 1)
overflow = tf.logical_or(has_nan, tf.is_inf(amax))
update_op = tf.cond(overflow,
overflow_case,
scale_case)
with tf.control_dependencies([update_op]):
return tf.identity(iter_update)
@property
def loss_scale(self):
return self.scale
class LogMaxScaler(object):
def __init__(self, params):
if params is None:
params = {}
check_params(
config=params,
required_dict={},
optional_dict={
'scale_min': float,
'scale_max': float,
'log_max': float,
'beta1': float,
'beta2': float,
'overflow_std_dev': float
},
)
self.scale_min = params.get('scale_min', 1.0)
self.scale_max = params.get('scale_max', 2.**14)
self.log_max = params.get('log_max', 16.)
self.beta1 = params.get('beta1', 0.99)
self.beta2 = params.get('beta2', 0.999)
self.overflow_std_dev = params.get('overflow_std_dev', 3.09)
self.iteration = tf.Variable(initial_value=0,
trainable=False,
dtype=tf.int64)
self.scale = tf.Variable(initial_value=1.0,
trainable=False)
self.x_hat = tf.Variable(initial_value=0,
trainable=False,
dtype=tf.float32)
self.slow_x_hat = tf.Variable(initial_value=0,
trainable=False,
dtype=tf.float32)
self.xsquared_hat = tf.Variable(initial_value=0,
trainable=False,
dtype=tf.float32)
self.b1_correction = tf.Variable(initial_value=1.,
trainable=False,
dtype=tf.float32)
self.b2_correction = tf.Variable(initial_value=1.,
trainable=False,
dtype=tf.float32)
  # NB: assumes that `amax` has already been downscaled
def update_op(self, has_nan, amax):
is_nonfinite = tf.logical_or(has_nan, tf.is_inf(amax))
x = tf.cond(is_nonfinite,
lambda: tf.pow(2., self.log_max),
lambda: tf.log(amax) / tf.log(tf.constant(2.)))
x_hat_assn = tf.assign(self.x_hat, self.beta1 * self.x_hat +
(1 - self.beta1) * x)
b1_corr_assn = tf.assign(self.b1_correction,
self.b1_correction * self.beta1)
with tf.control_dependencies([x_hat_assn, b1_corr_assn]):
mu = self.x_hat.read_value() / (1 - self.b1_correction.read_value())
slow_x_hat_assn = tf.assign(self.slow_x_hat, self.beta2 * self.slow_x_hat +
(1 - self.beta2) * x)
xsquared_hat_assn = tf.assign(
self.xsquared_hat,
self.beta2 * self.xsquared_hat + (1 - self.beta2) * (x * x),
)
b2_corr_assn = tf.assign(self.b2_correction,
self.b2_correction * self.beta2)
with tf.control_dependencies([slow_x_hat_assn, xsquared_hat_assn,
b2_corr_assn]):
e_xsquared = self.xsquared_hat.read_value() / \
(1 - self.b2_correction.read_value())
slow_mu = self.slow_x_hat.read_value() / \
(1 - self.b2_correction.read_value())
sigma2 = e_xsquared - (slow_mu * slow_mu)
sigma = tf.sqrt(tf.maximum(sigma2, tf.constant(0.)))
log_cutoff = sigma * self.overflow_std_dev + mu
log_difference = 16 - log_cutoff
proposed_scale = tf.pow(2., log_difference)
scale_update = tf.assign(
self.scale,
tf.clip_by_value(proposed_scale, self.scale_min, self.scale_max),
)
iter_update = tf.assign_add(self.iteration, 1)
with tf.control_dependencies([scale_update]):
return tf.identity(iter_update)
@property
def loss_scale(self):
return self.scale
| OpenSeq2Seq-master | open_seq2seq/optimizers/automatic_loss_scaler.py |
# Copyright (c) 2017 NVIDIA Corporation
from .optimizers import optimize_loss, get_regularization_loss
| OpenSeq2Seq-master | open_seq2seq/optimizers/__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
# This file was copy-pasted from TF repo on 10/04/2017 by Oleksii Kuchaiev
# The following changes were made:
# LARC support to "optimize_loss" function
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import six
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from open_seq2seq.utils.utils import mask_nans, check_params
from .automatic_loss_scaler import AutomaticLossScaler
from .mp_wrapper import MixedPrecisionOptimizerWrapper
OPTIMIZER_CLS_NAMES = {
"Adagrad": tf.train.AdagradOptimizer,
"Adam": tf.train.AdamOptimizer,
"Ftrl": tf.train.FtrlOptimizer,
"Momentum": tf.train.MomentumOptimizer,
"RMSProp": tf.train.RMSPropOptimizer,
"SGD": tf.train.GradientDescentOptimizer,
"AdamW": tf.contrib.opt.AdamWOptimizer,
}
OPTIMIZER_SUMMARIES = [
"learning_rate",
"gradients",
"gradient_norm",
"global_gradient_norm",
"variables",
"variable_norm",
"larc_summaries",
"loss_scale"
]
# necessary to redefine this function for pure float16 support
def get_regularization_loss(scope=None, name="total_regularization_loss"):
"""Gets the total regularization loss.
Args:
scope: An optional scope name for filtering the losses to return.
name: The name of the returned tensor.
Returns:
A scalar regularization loss.
"""
losses = tf.losses.get_regularization_losses(scope)
if losses:
return tf.add_n(list(map(lambda x: tf.cast(x, tf.float32), losses)),
name=name)
else:
return tf.constant(0.0)
def reduce_gradients(grads_and_vars, on_horovod, model=None):
if on_horovod:
from horovod.tensorflow import allreduce, size
if size() > 1:
averaged_grads_and_vars = []
with tf.name_scope("all_reduce"):
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
if model._decoder.params.get('shared_embed', False):
from tensorflow.python.training.optimizer import _deduplicate_indexed_slices
summed_values, unique_indices = _deduplicate_indexed_slices(
values=grad.values, indices=grad.indices)
gradient_no_duplicate_indices = tf.IndexedSlices(
indices=unique_indices,
values=summed_values,
dense_shape=grad.dense_shape)
grad = tf.convert_to_tensor(gradient_no_duplicate_indices)
avg_grad = allreduce(grad)
averaged_grads_and_vars.append((avg_grad, var))
else:
averaged_grads_and_vars.append((None, var))
return averaged_grads_and_vars
else:
return grads_and_vars
else:
raise NotImplementedError("Reduce in tower-mode is not implemented.")
def optimize_loss(loss,
optimizer,
optimizer_params,
learning_rate_decay_fn,
var_list=None,
dtype=tf.float32,
clip_gradients=None,
summaries=None,
larc_params=None,
loss_scaling=1.0,
loss_scaling_params=None,
on_horovod=False,
iter_size=1,
skip_update_ph=None,
model=None):
"""Given loss and parameters for optimizer, returns a training op.
Args:
loss: Scalar `Tensor`.
optimizer: string or class of optimizer, used as trainer.
string should be name of optimizer, like 'SGD',
'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
class should be sub-class of `tf.Optimizer` that implements
`compute_gradients` and `apply_gradients` functions.
optimizer_params: parameters of the optimizer.
var_list: List of trainable variables. Can be used to freeze
certain trainable variables by excluding them from this list.
If set to None, all trainable variables will be optimized.
dtype: model dtype (tf.float16, tf.float32 or "mixed").
learning_rate_decay_fn: function, takes `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
functions.
For example: `tf.train.exponential_decay`.
clip_gradients: float, max gradient norm to clip to.
summaries: List of internal quantities to visualize on tensorboard. If not
set only the loss and the learning rate will be reported. The
complete list is in OPTIMIZER_SUMMARIES.
larc_params: If not None, LARC re-scaling will
be applied with corresponding parameters.
loss_scaling: could be float or string. If float, static loss scaling
is applied. If string, the corresponding automatic
loss scaling algorithm is used. Must be one of 'Backoff'
      or 'LogMax' (case insensitive). Only used when dtype="mixed".
    on_horovod: whether the model is run on Horovod.
    iter_size: number of batches to accumulate gradients over before applying
      an update (gradient accumulation is only implemented for Horovod mode).
    skip_update_ph: boolean placeholder; when it is True the step only
      accumulates gradients, when False the accumulated update is applied.
    model: optional model object, used to reduce sparse gradients of shared
      embeddings when running on Horovod.
Returns:
training op.
"""
if summaries is None:
summaries = ["learning_rate", "global_gradient_norm", "loss_scale"]
else:
for summ in summaries:
if summ not in OPTIMIZER_SUMMARIES:
raise ValueError(
"Summaries should be one of [{}], you provided {}.".format(
", ".join(OPTIMIZER_SUMMARIES), summ,
)
)
if clip_gradients is not None and larc_params is not None:
raise AttributeError(
"LARC and gradient norm clipping should not be used together"
)
global_step = tf.train.get_or_create_global_step()
lr = learning_rate_decay_fn(global_step)
if "learning_rate" in summaries:
tf.summary.scalar("learning_rate", lr)
with tf.variable_scope("Loss_Optimization"):
update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
loss = control_flow_ops.with_dependencies(list(update_ops), loss)
if optimizer=="AdamW":
optimizer_params["weight_decay"] = optimizer_params["weight_decay"]*lr
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [{}], you provided {}.".format(
", ".join(OPTIMIZER_CLS_NAMES), optimizer
)
)
optimizer = OPTIMIZER_CLS_NAMES[optimizer]
opt = optimizer(learning_rate=lr, **optimizer_params)
if isinstance(loss_scaling, six.string_types):
loss_scaling = AutomaticLossScaler(
algorithm=loss_scaling,
params=loss_scaling_params
)
if "loss_scale" in summaries:
tf.summary.scalar("loss_scale", loss_scaling.loss_scale)
if dtype == 'mixed':
opt = MixedPrecisionOptimizerWrapper(opt, loss_scale=loss_scaling)
# Compute gradients.
grads_and_vars = opt.compute_gradients(
loss, colocate_gradients_with_ops=True, var_list=var_list
)
if on_horovod:
if iter_size > 1:
grads_and_vars_accum = []
accum_ops = []
for grad, var in grads_and_vars:
# necessary to use tf.Variable directly to instantiate cudnn rnn cells
# which don't have explicit shape.
grad_accum = tf.Variable(
initial_value=tf.zeros_like(var),
name=grad.name.split(":")[0] + "_accum",
expected_shape=var.shape,
dtype=grad.dtype,
trainable=False,
validate_shape=bool(var.get_shape())
)
if isinstance(grad, tf.IndexedSlices):
add_grads = tf.scatter_nd_add(grad_accum, grad.indices,
grad.values / iter_size)
else:
add_grads = grad_accum + grad / iter_size
accum_ops.append(tf.assign(grad_accum, add_grads))
grads_and_vars_accum.append((grad_accum, var))
accum_op = tf.group(accum_ops)
def update_and_clear_op():
with tf.control_dependencies([accum_op]):
red_grad_updates = opt.apply_gradients(
post_process_gradients(
reduce_gradients(grads_and_vars_accum, on_horovod=True, model=model),
lr=lr,
clip_gradients=clip_gradients,
larc_params=larc_params,
summaries=summaries,
),
global_step=global_step,
)
with tf.control_dependencies([red_grad_updates]):
return tf.group([tf.assign(g, tf.zeros_like(g))
for g, v in grads_and_vars_accum])
grad_updates = tf.cond(
pred=skip_update_ph,
true_fn=lambda: accum_op,
false_fn=update_and_clear_op,
)
else:
grad_updates = opt.apply_gradients(
post_process_gradients(
reduce_gradients(grads_and_vars, on_horovod=True, model=model),
lr=lr,
clip_gradients=clip_gradients,
larc_params=larc_params,
summaries=summaries,
),
global_step=global_step,
)
else:
grad_updates = opt.apply_gradients(
post_process_gradients(
grads_and_vars,
lr=lr,
clip_gradients=clip_gradients,
larc_params=larc_params,
summaries=summaries,
),
global_step=global_step,
)
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)
return train_tensor
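# Illustrative sketch (hypothetical values): a minimal call to optimize_loss
# for a mixed-precision model with a fixed learning rate (fixed_lr comes from
# open_seq2seq.optimizers.lr_policies).
#
#   train_op = optimize_loss(
#       loss=loss,
#       optimizer="Momentum",
#       optimizer_params={"momentum": 0.9},
#       learning_rate_decay_fn=lambda gs: fixed_lr(gs, 0.05),
#       dtype="mixed",
#       loss_scaling="Backoff",
#       clip_gradients=5.0,
#   )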
def post_process_gradients(grads_and_vars, summaries, lr,
clip_gradients, larc_params):
"""Applies post processing to gradients, i.e. clipping, LARC, summaries."""
if "global_gradient_norm" in summaries:
tf.summary.scalar(
"global_gradient_norm",
_global_norm_with_cast(grads_and_vars),
)
# Optionally clip gradients by global norm.
if clip_gradients is not None:
grads_and_vars = _clip_gradients_by_norm(grads_and_vars, clip_gradients)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in grads_and_vars:
if isinstance(gradient, tf.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if isinstance(variable, tf.IndexedSlices):
var_values = variable.values
else:
var_values = variable
if grad_values is not None:
var_name = variable.name.replace(":", "_")
if "gradients" in summaries:
# need to mask nans for automatic loss scaling
tf.summary.histogram("gradients/%s" % var_name, mask_nans(grad_values))
if "gradient_norm" in summaries:
tf.summary.scalar("gradient_norm/%s" % var_name, tf.norm(grad_values))
if "variables" in summaries:
tf.summary.histogram("variables/%s" % var_name, var_values)
if "variable_norm" in summaries:
tf.summary.scalar("variable_norm/%s" % var_name, tf.norm(var_values))
if clip_gradients is not None and "global_gradient_norm" in summaries:
tf.summary.scalar(
"global_clipped_gradient_norm",
_global_norm_with_cast(grads_and_vars),
)
# LARC gradient re-scaling
if larc_params is not None:
check_params(
config=larc_params,
required_dict={'larc_eta': float},
optional_dict={
'larc_mode': ['clip', 'scale'],
'min_update': float,
'epsilon': float
},
)
larc_eta = larc_params['larc_eta']
larc_mode = larc_params.get('larc_mode', 'clip')
min_update = larc_params.get('min_update', 1e-7)
eps = larc_params.get('epsilon', 1e-7)
grads_and_vars_larc = [None] * len(grads_and_vars)
for idx, (g, v) in enumerate(grads_and_vars):
var_dtype = v.dtype
v_norm = tf.norm(tensor=tf.cast(v, tf.float32), ord=2)
g_norm = tf.norm(tensor=tf.cast(g, tf.float32), ord=2)
if larc_mode == 'clip':
larc_grad_update = tf.maximum(
larc_eta * v_norm / (lr * (g_norm + eps)),
min_update,
)
if "larc_summaries" in summaries:
tf.summary.scalar('larc_clip_on/{}'.format(v.name),
tf.cast(tf.less(larc_grad_update, 1.0), tf.int32))
larc_grad_update = tf.minimum(larc_grad_update, 1.0)
else:
larc_grad_update = tf.maximum(
larc_eta * v_norm / (g_norm + eps),
min_update,
)
larc_grad_update = tf.saturate_cast(larc_grad_update, var_dtype)
grads_and_vars_larc[idx] = (larc_grad_update * g, v)
# adding additional summary
if "larc_summaries" in summaries:
tf.summary.scalar('larc_grad_update/{}'.format(v.name),
larc_grad_update)
tf.summary.scalar("larc_final_lr/{}".format(v.name),
tf.cast(lr, var_dtype) * larc_grad_update)
grads_and_vars = grads_and_vars_larc
return grads_and_vars
def _global_norm_with_cast(grads_and_vars):
return tf.global_norm(list(map(
lambda x: tf.cast(x, tf.float32),
list(zip(*grads_and_vars))[0]
)))
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
dtypes = [var.dtype for var in variables]
# Clip gradients in float32
clipped_gradients, _ = _clip_by_global_norm(
gradients,
clip_gradients,
use_norm=_global_norm_with_cast(grads_and_vars)
)
# Convert gradients back to the proper dtype
clipped_gradients = [
tf.cast(grad, dtype)
for grad, dtype in zip(clipped_gradients, dtypes)
]
return list(zip(clipped_gradients, variables))
def _clip_by_global_norm(t_list, clip_norm, use_norm, name=None):
"""Clips values of multiple tensors by the ratio of the sum of their norms.
Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,
this operation returns a list of clipped tensors `list_clipped`
and the global norm (`global_norm`) of all tensors in `t_list`. The global
norm is expected to be pre-computed and passed as use_norm.
To perform the clipping, the values `t_list[i]` are set to:
t_list[i] * clip_norm / max(global_norm, clip_norm)
where:
global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))
If `clip_norm > global_norm` then the entries in `t_list` remain as they are,
otherwise they're all shrunk by the global ratio.
Any of the entries of `t_list` that are of type `None` are ignored.
This is the correct way to perform gradient clipping (for example, see
[Pascanu et al., 2012](http://arxiv.org/abs/1211.5063)
([pdf](http://arxiv.org/pdf/1211.5063.pdf))).
However, it is slower than `clip_by_norm()` because all the parameters must be
ready before the clipping operation can be performed.
Args:
t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.
use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global
norm to use. If not provided, `global_norm()` is used to compute the norm.
name: A name for the operation (optional).
Returns:
list_clipped: A list of `Tensors` of the same type as `list_t`.
global_norm: A 0-D (scalar) `Tensor` representing the global norm.
Raises:
TypeError: If `t_list` is not a sequence.
"""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
# Removed as use_norm should always be passed
# if use_norm is None:
# use_norm = global_norm(t_list, name)
with tf.name_scope(name, "clip_by_global_norm",
t_list + [clip_norm]) as name:
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
scale = clip_norm * tf.minimum(
1.0 / use_norm,
tf.ones([1], dtype=use_norm.dtype) / clip_norm)
values = [
tf.cast(
tf.convert_to_tensor(
t.values if isinstance(t, tf.IndexedSlices) else t,
name="t_%d" % i),
dtype=tf.float32
)
if t is not None else t
for i, t in enumerate(t_list)]
values_clipped = []
for i, v in enumerate(values):
if v is None:
values_clipped.append(None)
else:
with tf.colocate_with(v):
values_clipped.append(
tf.identity(v * scale, name="%s_%d" % (name, i)))
list_clipped = [
tf.IndexedSlices(c_v, t.indices, t.dense_shape)
if isinstance(t, tf.IndexedSlices)
else c_v
for (c_v, t) in zip(values_clipped, t_list)]
return list_clipped, use_norm
| OpenSeq2Seq-master | open_seq2seq/optimizers/optimizers.py |
# Copyright (c) 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.train import MomentumOptimizer
import tensorflow as tf
class NovoGrad(MomentumOptimizer):
"""
Optimizer that implements SGD with layer-wise normalized gradients,
when normalization is done by sqrt(ema(sqr(grads))), similar to Adam
```
Second moment = ema of Layer-wise sqr of grads:
v_t <-- beta2*v_{t-1} + (1-beta2)*(g_t)^2
  First moment has two modes:
  1. moment of grads normalized by sqrt(v_t):
     m_t <- beta1*m_{t-1} + lr_t * [g_t/sqrt(v_t+epsilon)]
  2. moment similar to Adam: ema of grads normalized by sqrt(v_t):
     m_t <- beta1*m_{t-1} + lr_t * [(1-beta1)*(g_t/sqrt(v_t+epsilon))]
  If weight decay is used, the wd term is added after grads are rescaled
  by 1/sqrt(v_t):
     m_t <- beta1*m_{t-1} + lr_t * [g_t/sqrt(v_t+epsilon) + wd*w_{t-1}]
  Weight update:
     w_t <- w_{t-1} - m_t
```
"""
def __init__(self,
learning_rate=1.0,
beta1=0.95,
beta2=0.98,
epsilon=1e-8,
weight_decay=0.0,
grad_averaging=False,
use_locking=False,
name='NovoGrad'):
"""Constructor:
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
      beta1: A `Tensor` or a float, used in ema for momentum. Default = 0.95.
      beta2: A `Tensor` or a float, used in ema for grad norms. Default = 0.98.
epsilon: a float. Default = 1e-8.
weight_decay: A `Tensor` or a float, Default = 0.0.
grad_averaging: switch between Momentum and SAG, Default = False,
use_locking: If `True` use locks for update operations.
name: Optional, name prefix for the ops created when applying
gradients. Defaults to "NovoGrad".
"""
super(NovoGrad, self).__init__(learning_rate, momentum=beta1,
use_locking=use_locking, name=name,
use_nesterov=False)
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._wd = weight_decay
self._grad_averaging = grad_averaging
self._grads_ema = None
# Tensor versions, converted to tensors in apply_gradients
# self._beta1_t = None
# self._beta2_t = None
# self._wd_t = None
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
# self._beta1_t = ops.convert_to_tensor(self._beta1, name='beta1', dtype = tf.float32)
# self._beta2_t = ops.convert_to_tensor(self._beta2, name='beta2', dtype = tf.float32)
# init ema variables if required
len_vars = len(grads_and_vars)
if self._grads_ema is None:
self._grads_ema = [None] * len_vars
for i in range(len_vars):
self._grads_ema[i] = tf.get_variable(name="nvgrad2_ema" + str(i),
shape=[], dtype=tf.float32,
initializer=tf.keras.initializers.Zeros(),
trainable=False)
# compute ema for grads^2 for each layer
for i, (grad, var) in enumerate(grads_and_vars):
g_2 = tf.reduce_sum(tf.square(x=tf.cast(grad, tf.float32)))
self._grads_ema[i] = tf.cond(tf.equal(self._grads_ema[i], 0.),
lambda: g_2,
lambda: self._grads_ema[i]*self._beta2 + g_2*(1.-self._beta2)
)
grad *= 1.0 / tf.sqrt(self._grads_ema[i] + self._epsilon)
# weight decay
if (self._wd > 0.):
grad += (self._wd * var)
# Momentum --> SAG
if self._grad_averaging:
grad *= (1.-self._beta1)
grads_and_vars[i] = (grad, var)
# call Momentum to do update
return super(NovoGrad, self).apply_gradients(
grads_and_vars, global_step=global_step, name=name)
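# Illustrative usage sketch (hypothetical values): NovoGrad is used like any
# other tf.train optimizer.
#
#   opt = NovoGrad(learning_rate=0.02, beta1=0.95, beta2=0.98,
#                  weight_decay=0.001, grad_averaging=False)
#   train_op = opt.apply_gradients(opt.compute_gradients(loss),
#                                  global_step=tf.train.get_global_step())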
| OpenSeq2Seq-master | open_seq2seq/optimizers/novograd.py |
# This code is heavily based on the code from MLPerf
# https://github.com/mlperf/reference/tree/master/translation/tensorflow
# /transformer
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from six.moves import range
from open_seq2seq.encoders import Encoder
from open_seq2seq.parts.transformer import attention_layer, ffn_layer, utils, \
embedding_layer
from open_seq2seq.parts.transformer.common import PrePostProcessingWrapper, \
LayerNormalization, Transformer_BatchNorm
class TransformerEncoder(Encoder):
"""Transformer model encoder"""
@staticmethod
def get_required_params():
"""Static method with description of required parameters.
Returns:
dict:
Dictionary containing all the parameters that **have to** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return dict(Encoder.get_required_params(), **{
"encoder_layers": int,
"hidden_size": int,
"num_heads": int,
"attention_dropout": float,
"filter_size": int,
"src_vocab_size": int,
"relu_dropout": float,
"layer_postprocess_dropout": float,
"remove_padding": bool,
})
@staticmethod
def get_optional_params():
"""Static method with description of optional parameters.
Returns:
dict:
Dictionary containing all the parameters that **can** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return dict(Encoder.get_optional_params(), **{
'regularizer': None, # any valid TensorFlow regularizer
'regularizer_params': dict,
'initializer': None, # any valid TensorFlow initializer
'initializer_params': dict,
'pad_embeddings_2_eight': bool,
'norm_params': dict,
})
  def __init__(self, params, model, name="transformer_encoder", mode='train'):
super(TransformerEncoder, self).__init__(
params, model, name=name, mode=mode,
)
self.layers = []
self.output_normalization = None
self._mode = mode
self.embedding_softmax_layer = None
self.norm_params = self.params.get("norm_params", {"type": "layernorm_L2"})
self.regularizer = self.params.get("regularizer", None)
    if self.regularizer is not None:
self.regularizer_params = params.get("regularizer_params", {'scale': 0.0})
self.regularizer=self.regularizer(self.regularizer_params['scale']) \
if self.regularizer_params['scale'] > 0.0 else None
def _call(self, encoder_inputs, attention_bias, inputs_padding):
for n, layer in enumerate(self.layers):
# Run inputs through the sublayers.
self_attention_layer = layer[0]
feed_forward_network = layer[1]
with tf.variable_scope("layer_%d" % n):
with tf.variable_scope("self_attention"):
encoder_inputs = self_attention_layer(encoder_inputs, attention_bias)
with tf.variable_scope("ffn"):
encoder_inputs = feed_forward_network(encoder_inputs, inputs_padding)
return self.output_normalization(encoder_inputs)
def _encode(self, input_dict):
training = (self.mode == "train")
if len(self.layers) == 0:
# prepare encoder graph
self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights(
self.params["src_vocab_size"], self.params["hidden_size"],
pad_vocab_to_eight=self.params.get('pad_embeddings_2_eight', False),
)
for _ in range(self.params['encoder_layers']):
# Create sublayers for each layer.
self_attention_layer = attention_layer.SelfAttention(
hidden_size=self.params["hidden_size"],
num_heads=self.params["num_heads"],
attention_dropout=self.params["attention_dropout"],
train=training,
regularizer=self.regularizer
)
feed_forward_network = ffn_layer.FeedFowardNetwork(
hidden_size=self.params["hidden_size"],
filter_size=self.params["filter_size"],
relu_dropout=self.params["relu_dropout"],
train=training,
regularizer=self.regularizer
)
self.layers.append([
PrePostProcessingWrapper(self_attention_layer, self.params,
training),
PrePostProcessingWrapper(feed_forward_network, self.params,
training)
])
# final normalization layer.
print("Encoder:", self.norm_params["type"], self.mode)
if self.norm_params["type"] =="batch_norm":
self.output_normalization = Transformer_BatchNorm(
training=training,
params=self.norm_params)
else:
self.output_normalization = LayerNormalization(
hidden_size=self.params["hidden_size"], params=self.norm_params)
# actual encoder part
with tf.name_scope("encode"):
inputs = input_dict['source_tensors'][0]
# Prepare inputs to the layer stack by adding positional encodings and
# applying dropout.
embedded_inputs = self.embedding_softmax_layer(inputs)
if self.params["remove_padding"]:
inputs_padding = utils.get_padding(inputs)
#inputs_padding = utils.get_padding(inputs,dtype=self._params["dtype"])
else:
inputs_padding = None
inputs_attention_bias = utils.get_padding_bias(inputs)
# inputs_attention_bias = utils.get_padding_bias(inputs, dtype=self._params["dtype"])
with tf.name_scope("add_pos_encoding"):
length = tf.shape(embedded_inputs)[1]
pos_encoding = utils.get_position_encoding(
length, self.params["hidden_size"],
)
encoder_inputs = embedded_inputs + tf.cast(x=pos_encoding,
dtype=embedded_inputs.dtype)
if self.mode == "train":
encoder_inputs = tf.nn.dropout(encoder_inputs,
keep_prob = 1.0 - self.params["layer_postprocess_dropout"],
)
encoded = self._call(encoder_inputs, inputs_attention_bias,
inputs_padding)
return {'outputs': encoded,
'inputs_attention_bias': inputs_attention_bias,
'state': None,
'src_lengths': input_dict['source_tensors'][1],
'embedding_softmax_layer': self.embedding_softmax_layer,
'encoder_input': inputs}
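# Illustrative sketch (hypothetical values): a minimal ``params`` dict covering
# the required keys from get_required_params() for this encoder.
#
#   encoder_params = {
#       "encoder_layers": 6, "hidden_size": 512, "num_heads": 8,
#       "attention_dropout": 0.1, "filter_size": 2048,
#       "src_vocab_size": 32000, "relu_dropout": 0.1,
#       "layer_postprocess_dropout": 0.1, "remove_padding": True,
#   }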
| OpenSeq2Seq-master | open_seq2seq/encoders/transformer_encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from .encoder import Encoder
from open_seq2seq.data.speech2text.speech2text import Speech2TextDataLayer
from open_seq2seq.parts.cnns.conv_blocks import conv_actv, conv_bn_actv,\
conv_ln_actv, conv_in_actv,\
conv_bn_res_bn_actv
class TDNNEncoder(Encoder):
"""General time delay neural network (TDNN) encoder. Fully convolutional model
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'dropout_keep_prob': float,
'convnet_layers': list,
'activation_fn': None, # any valid callable
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'data_format': ['channels_first', 'channels_last'],
'normalization': [None, 'batch_norm', 'layer_norm', 'instance_norm'],
'bn_momentum': float,
'bn_epsilon': float,
'use_conv_mask': bool,
'drop_block_prob': float,
'drop_block_index': int,
})
def __init__(self, params, model, name="w2l_encoder", mode='train'):
"""TDNN encoder constructor.
See parent class for arguments description.
Config parameters:
* **dropout_keep_prob** (float) --- keep probability for dropout.
* **convnet_layers** (list) --- list with the description of convolutional
layers. For example::
"convnet_layers": [
          {
            "type": "conv1d", "repeat": 5,
            "kernel_size": [7], "stride": [1],
            "num_channels": 250, "padding": "SAME",
            "dilation": [1]
          },
          {
            "type": "conv1d", "repeat": 3,
            "kernel_size": [11], "stride": [1],
            "num_channels": 500, "padding": "SAME",
            "dilation": [1]
          },
          {
            "type": "conv1d", "repeat": 1,
            "kernel_size": [32], "stride": [1],
            "num_channels": 1000, "padding": "SAME",
            "dilation": [1]
          },
          {
            "type": "conv1d", "repeat": 1,
            "kernel_size": [1], "stride": [1],
            "num_channels": 1000, "padding": "SAME",
            "dilation": [1]
          },
        ]
* **activation_fn** --- activation function to use.
* **data_format** (string) --- could be either "channels_first" or
"channels_last". Defaults to "channels_last".
    * **normalization** --- normalization to use. Accepts [None, 'batch_norm',
      'layer_norm', 'instance_norm']. Use None if you don't want to use
      normalization. Defaults to 'batch_norm'.
* **bn_momentum** (float) --- momentum for batch norm. Defaults to 0.90.
* **bn_epsilon** (float) --- epsilon for batch norm. Defaults to 1e-3.
* **drop_block_prob** (float) --- probability of dropping encoder blocks.
Defaults to 0.0 which corresponds to training without dropping blocks.
* **drop_block_index** (int) -- index of the block to drop on inference.
Defaults to -1 which corresponds to keeping all blocks.
* **use_conv_mask** (bool) --- whether to apply a sequence mask prior to
convolution operations. Defaults to False for backwards compatibility.
Recommended to set as True
"""
super(TDNNEncoder, self).__init__(params, model, name, mode)
def _encode(self, input_dict):
"""Creates TensorFlow graph for Wav2Letter like encoder.
Args:
input_dict (dict): input dictionary that has to contain
the following fields::
input_dict = {
"source_tensors": [
src_sequence (shape=[batch_size, sequence length, num features]),
src_length (shape=[batch_size])
]
}
Returns:
dict: dictionary with the following tensors::
{
'outputs': hidden state, shape=[batch_size, sequence length, n_hidden]
'src_length': tensor, shape=[batch_size]
}
"""
source_sequence, src_length = input_dict['source_tensors']
num_pad = tf.constant(0)
if isinstance(self._model.get_data_layer(), Speech2TextDataLayer):
pad_to = 0
if self._model.get_data_layer().params.get('backend', 'psf') == 'librosa':
pad_to = self._model.get_data_layer().params.get("pad_to", 8)
if pad_to > 0:
num_pad = tf.mod(pad_to - tf.mod(tf.reduce_max(src_length), pad_to), pad_to)
else:
print("WARNING: TDNNEncoder is currently meant to be used with the",
"Speech2Text data layer. Assuming that this data layer does not",
"do additional padding past padded_batch.")
max_len = tf.reduce_max(src_length) + num_pad
training = (self._mode == "train")
dropout_keep_prob = self.params['dropout_keep_prob'] if training else 1.0
regularizer = self.params.get('regularizer', None)
data_format = self.params.get('data_format', 'channels_last')
normalization = self.params.get('normalization', 'batch_norm')
drop_block_prob = self.params.get('drop_block_prob', 0.0)
drop_block_index = self.params.get('drop_block_index', -1)
normalization_params = {}
if self.params.get("use_conv_mask", False):
mask = tf.sequence_mask(
lengths=src_length, maxlen=max_len,
dtype=source_sequence.dtype
)
mask = tf.expand_dims(mask, 2)
if normalization is None:
conv_block = conv_actv
elif normalization == "batch_norm":
conv_block = conv_bn_actv
normalization_params['bn_momentum'] = self.params.get(
'bn_momentum', 0.90)
normalization_params['bn_epsilon'] = self.params.get('bn_epsilon', 1e-3)
elif normalization == "layer_norm":
conv_block = conv_ln_actv
elif normalization == "instance_norm":
conv_block = conv_in_actv
else:
raise ValueError("Incorrect normalization")
conv_inputs = source_sequence
if data_format == 'channels_last':
conv_feats = conv_inputs # B T F
else:
conv_feats = tf.transpose(conv_inputs, [0, 2, 1]) # B F T
residual_aggregation = []
# ----- Convolutional layers ---------------------------------------------
convnet_layers = self.params['convnet_layers']
for idx_convnet in range(len(convnet_layers)):
layer_type = convnet_layers[idx_convnet]['type']
layer_repeat = convnet_layers[idx_convnet]['repeat']
ch_out = convnet_layers[idx_convnet]['num_channels']
kernel_size = convnet_layers[idx_convnet]['kernel_size']
strides = convnet_layers[idx_convnet]['stride']
padding = convnet_layers[idx_convnet]['padding']
dilation = convnet_layers[idx_convnet]['dilation']
dropout_keep = convnet_layers[idx_convnet].get(
'dropout_keep_prob', dropout_keep_prob) if training else 1.0
residual = convnet_layers[idx_convnet].get('residual', False)
residual_dense = convnet_layers[idx_convnet].get('residual_dense', False)
# For the first layer in the block, apply a mask
if self.params.get("use_conv_mask", False):
conv_feats = conv_feats * mask
if residual:
layer_res = conv_feats
if residual_dense:
residual_aggregation.append(layer_res)
layer_res = residual_aggregation
for idx_layer in range(layer_repeat):
if padding == "VALID":
src_length = (src_length - kernel_size[0]) // strides[0] + 1
max_len = (max_len - kernel_size[0]) // strides[0] + 1
else:
src_length = (src_length + strides[0] - 1) // strides[0]
max_len = (max_len + strides[0] - 1) // strides[0]
# For all layers other than first layer, apply mask
if idx_layer > 0 and self.params.get("use_conv_mask", False):
conv_feats = conv_feats * mask
        # If this layer changes the time dimension (VALID padding or
        # stride > 1), the mask has to be updated for subsequent operations
if (self.params.get("use_conv_mask", False) and
(padding == "VALID" or strides[0] > 1)):
mask = tf.sequence_mask(
lengths=src_length,
maxlen=max_len,
dtype=conv_feats.dtype
)
mask = tf.expand_dims(mask, 2)
if residual and idx_layer == layer_repeat - 1:
conv_feats = conv_bn_res_bn_actv(
layer_type=layer_type,
name="conv{}{}".format(
idx_convnet + 1, idx_layer + 1),
inputs=conv_feats,
res_inputs=layer_res,
filters=ch_out,
kernel_size=kernel_size,
activation_fn=self.params['activation_fn'],
strides=strides,
padding=padding,
dilation=dilation,
regularizer=regularizer,
training=training,
data_format=data_format,
drop_block_prob=drop_block_prob,
drop_block=(drop_block_index == idx_convnet),
**normalization_params
)
else:
conv_feats = conv_block(
layer_type=layer_type,
name="conv{}{}".format(
idx_convnet + 1, idx_layer + 1),
inputs=conv_feats,
filters=ch_out,
kernel_size=kernel_size,
activation_fn=self.params['activation_fn'],
strides=strides,
padding=padding,
dilation=dilation,
regularizer=regularizer,
training=training,
data_format=data_format,
**normalization_params
)
conv_feats = tf.nn.dropout(x=conv_feats, keep_prob=dropout_keep)
outputs = conv_feats
if data_format == 'channels_first':
outputs = tf.transpose(outputs, [0, 2, 1])
return {
'outputs': outputs,
'src_length': src_length,
}
| OpenSeq2Seq-master | open_seq2seq/encoders/tdnn_encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
Conv-based encoder
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
import math
from .encoder import Encoder
from open_seq2seq.parts.transformer import embedding_layer
from open_seq2seq.parts.transformer.utils import get_padding_bias, get_padding
from open_seq2seq.parts.convs2s import ffn_wn_layer, conv_wn_layer
from open_seq2seq.parts.convs2s.utils import gated_linear_units
# Default value used if max_input_length is not given
MAX_INPUT_LENGTH = 128
class ConvS2SEncoder(Encoder):
"""
Fully convolutional Encoder of ConvS2S
"""
@staticmethod
def get_required_params():
return dict(
Encoder.get_required_params(), **{
"src_emb_size": int,
"src_vocab_size": int,
"pad_embeddings_2_eight": bool,
"conv_nchannels_kwidth": list,
"embedding_dropout_keep_prob": float,
"hidden_dropout_keep_prob": float,
})
@staticmethod
def get_optional_params():
return dict(
Encoder.get_optional_params(), **{
"att_layer_num": int,
'max_input_length': int,
'PAD_SYMBOL': int,
'conv_activation': None,
'normalization_type': str,
'scaling_factor': float,
'init_var': None,
})
def __init__(self,
params,
model,
name="convs2s_encoder_with_emb",
mode='train'):
super(ConvS2SEncoder, self).__init__(params, model, name=name, mode=mode)
self._src_vocab_size = self.params['src_vocab_size']
self._src_emb_size = self.params['src_emb_size']
self.layers = []
self._mode = mode
self._pad_sym = self.params.get('PAD_SYMBOL', 0)
self._pad2eight = params.get('pad_embeddings_2_eight', False)
self.scaling_factor = self.params.get("scaling_factor", math.sqrt(0.5))
self.normalization_type = self.params.get("normalization_type", "weight_norm")
self.conv_activation = self.params.get("conv_activation", gated_linear_units)
self.regularizer = self.params.get('regularizer', None)
self.init_var = self.params.get('init_var', None)
def _encode(self, input_dict):
inputs = input_dict['source_tensors'][0]
source_length = input_dict['source_tensors'][1]
with tf.variable_scope("encode"):
# prepare encoder graph
if len(self.layers) == 0:
nchannels_kwidth = self.params["conv_nchannels_kwidth"]
knum_list = list(zip(*nchannels_kwidth))[0]
kwidth_list = list(zip(*nchannels_kwidth))[1]
with tf.variable_scope("embedding"):
self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights(
vocab_size=self._src_vocab_size,
hidden_size=self._src_emb_size,
pad_vocab_to_eight=self._pad2eight,
init_var=0.1,
embed_scale=False,
pad_sym=self._pad_sym,
mask_paddings=True)
with tf.variable_scope("pos_embedding"):
self.position_embedding_layer = embedding_layer.EmbeddingSharedWeights(
vocab_size=self.params.get("max_input_length", MAX_INPUT_LENGTH),
hidden_size=self._src_emb_size,
pad_vocab_to_eight=self._pad2eight,
init_var=0.1,
embed_scale=False,
pad_sym=self._pad_sym,
mask_paddings=True)
# linear projection before cnn layers
self.layers.append(
ffn_wn_layer.FeedFowardNetworkNormalized(
self._src_emb_size,
knum_list[0],
dropout=self.params["embedding_dropout_keep_prob"],
var_scope_name="linear_mapping_before_cnn_layers",
mode=self.mode,
normalization_type=self.normalization_type,
regularizer=self.regularizer,
init_var=self.init_var))
for i in range(len(knum_list)):
in_dim = knum_list[i] if i == 0 else knum_list[i - 1]
out_dim = knum_list[i]
# linear projection is needed for residual connections if
# input and output of a cnn layer do not match
if in_dim != out_dim:
linear_proj = ffn_wn_layer.FeedFowardNetworkNormalized(
in_dim,
out_dim,
var_scope_name="linear_mapping_cnn_" + str(i + 1),
dropout=1.0,
mode=self.mode,
normalization_type=self.normalization_type,
regularizer=self.regularizer,
init_var=self.init_var)
else:
linear_proj = None
conv_layer = conv_wn_layer.Conv1DNetworkNormalized(
in_dim,
out_dim,
kernel_width=kwidth_list[i],
mode=self.mode,
layer_id=i + 1,
hidden_dropout=self.params["hidden_dropout_keep_prob"],
conv_padding="SAME",
decode_padding=False,
activation=self.conv_activation,
normalization_type=self.normalization_type,
regularizer=self.regularizer,
init_var=self.init_var)
self.layers.append([linear_proj, conv_layer])
# linear projection after cnn layers
self.layers.append(
ffn_wn_layer.FeedFowardNetworkNormalized(
knum_list[-1],
self._src_emb_size,
dropout=1.0,
var_scope_name="linear_mapping_after_cnn_layers",
mode=self.mode,
normalization_type=self.normalization_type,
regularizer=self.regularizer,
init_var=self.init_var))
encoder_inputs = self.embedding_softmax_layer(inputs)
inputs_attention_bias = get_padding_bias(
inputs, res_rank=3, pad_sym=self._pad_sym)
with tf.name_scope("add_pos_encoding"):
pos_input = tf.range(
0,
tf.shape(encoder_inputs)[1],
delta=1,
dtype=tf.int32,
name='range')
pos_encoding = self.position_embedding_layer(pos_input)
encoder_inputs = encoder_inputs + tf.cast(
x=pos_encoding, dtype=encoder_inputs.dtype)
if self.mode == "train":
encoder_inputs = tf.nn.dropout(
encoder_inputs, self.params["embedding_dropout_keep_prob"])
# mask the paddings in the input given to cnn layers
inputs_padding = get_padding(
inputs, self._pad_sym, dtype=encoder_inputs.dtype)
padding_mask = tf.expand_dims(1 - inputs_padding, 2)
encoder_inputs *= padding_mask
outputs, outputs_b, final_state = self._call(encoder_inputs, padding_mask)
return {
'outputs': outputs,
'outputs_b': outputs_b,
'inputs_attention_bias_cs2s': inputs_attention_bias,
'state': final_state,
'src_lengths': source_length, # should it include paddings or not?
'embedding_softmax_layer': self.embedding_softmax_layer,
'encoder_input': inputs
}
def _call(self, encoder_inputs, padding_mask):
# Run inputs through the sublayers.
with tf.variable_scope("linear_layer_before_cnn_layers"):
outputs = self.layers[0](encoder_inputs)
for i in range(1, len(self.layers) - 1):
linear_proj, conv_layer = self.layers[i]
with tf.variable_scope("layer_%d" % i):
if linear_proj is not None:
res_inputs = linear_proj(outputs)
else:
res_inputs = outputs
if padding_mask is not None:
outputs *= padding_mask
outputs = conv_layer(outputs)
outputs = (outputs + res_inputs) * self.scaling_factor
with tf.variable_scope("linear_layer_after_cnn_layers"):
outputs = self.layers[-1](outputs)
if padding_mask is not None:
outputs *= padding_mask
# Gradients are scaled because the gradients from
# all decoder attention layers enter the encoder
scale = 1.0 / (
2.0 * self.params.get("att_layer_num", 1))
outputs = (1.0 - scale) * tf.stop_gradient(outputs) + scale * outputs
outputs_b = (outputs + encoder_inputs) * self.scaling_factor
if padding_mask is not None:
outputs_b *= padding_mask
# The average of the encoder outputs is used as the final state of the encoder;
# it can be consumed by decoders that only accept a final state
final_state = tf.reduce_mean(outputs_b, 1)
return outputs, outputs_b, final_state
@property
def src_vocab_size(self):
return self._src_vocab_size
@property
def src_emb_size(self):
return self._src_emb_size
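# Hedged usage sketch (hypothetical values): "conv_nchannels_kwidth" is a list
# of (num_channels, kernel_width) pairs; the encoder splits it into per-layer
# channel and kernel-width lists as in _encode() above.
conv_nchannels_kwidth = [(512, 3)] * 9 + [(768, 3)] * 4 + [(2048, 1)] * 2
knum_list = list(zip(*conv_nchannels_kwidth))[0]    # channels of each conv layer
kwidth_list = list(zip(*conv_nchannels_kwidth))[1]  # kernel width of each conv layer
assert len(knum_list) == len(kwidth_list) == 15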
| OpenSeq2Seq-master | open_seq2seq/encoders/convs2s_encoder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for Residual Networks.
Residual networks ('v1' ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant was introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow as tf
################################################################################
# Convenience functions for building the ResNet model.
################################################################################
def batch_norm(inputs, training, data_format, regularizer, momentum, epsilon):
"""Performs a batch normalization using a standard set of parameters."""
# We set fused=True for a significant performance boost. See
# https://www.tensorflow.org/performance/performance_guide#common_fused_ops
return tf.layers.batch_normalization(
inputs=inputs, axis=1 if data_format == 'channels_first' else 3,
momentum=momentum, epsilon=epsilon, center=True,
scale=True, training=training, fused=True, gamma_regularizer=regularizer)
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
def conv2d_fixed_padding(inputs, filters, kernel_size, strides,
data_format, regularizer):
"""Strided 2-D convolution with explicit padding."""
# The padding is consistent and is based only on `kernel_size`, not on the
# dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
data_format=data_format, kernel_regularizer=regularizer)
################################################################################
# ResNet block definitions.
################################################################################
def building_block_v1(inputs, filters, training, projection_shortcut, strides,
data_format, regularizer, bn_regularizer,
bn_momentum, bn_epsilon):
"""A single block for ResNet v1, without a bottleneck.
Convolution then batch normalization then ReLU as described by:
Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts
(typically a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block; shape should match inputs.
"""
shortcut = inputs
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
shortcut = batch_norm(inputs=shortcut, training=training,
data_format=data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format, regularizer=regularizer)
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=1,
data_format=data_format, regularizer=regularizer)
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs += shortcut
inputs = tf.nn.relu(inputs)
return inputs
def building_block_v2(inputs, filters, training, projection_shortcut, strides,
data_format, regularizer, bn_regularizer,
bn_momentum, bn_epsilon):
"""A single block for ResNet v2, without a bottleneck.
Batch normalization then ReLu then convolution as described by:
Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts
(typically a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block; shape should match inputs.
"""
shortcut = inputs
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format, regularizer=regularizer)
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=1,
data_format=data_format, regularizer=regularizer)
return inputs + shortcut
def bottleneck_block_v1(inputs, filters, training, projection_shortcut,
strides, data_format, regularizer, bn_regularizer,
bn_momentum, bn_epsilon):
"""A single block for ResNet v1, with a bottleneck.
Similar to _building_block_v1(), except using the "bottleneck" blocks
described in:
Convolution then batch normalization then ReLU as described by:
Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts
(typically a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block; shape should match inputs.
"""
shortcut = inputs
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
shortcut = batch_norm(inputs=shortcut, training=training,
data_format=data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=1, strides=1,
data_format=data_format, regularizer=regularizer)
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format, regularizer=regularizer)
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
data_format=data_format, regularizer=regularizer)
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs += shortcut
inputs = tf.nn.relu(inputs)
return inputs
def bottleneck_block_v2(inputs, filters, training, projection_shortcut,
strides, data_format, regularizer, bn_regularizer,
bn_momentum, bn_epsilon):
"""A single block for ResNet v2, without a bottleneck.
Similar to _building_block_v2(), except using the "bottleneck" blocks
described in:
Convolution then batch normalization then ReLU as described by:
Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.
Adapted to the ordering conventions of:
Batch normalization then ReLu then convolution as described by:
Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts
(typically a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block; shape should match inputs.
"""
shortcut = inputs
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=1, strides=1,
data_format=data_format, regularizer=regularizer)
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format, regularizer=regularizer)
inputs = batch_norm(inputs, training, data_format, regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
data_format=data_format, regularizer=regularizer)
return inputs + shortcut
def block_layer(inputs, filters, bottleneck, block_fn, blocks, strides,
training, name, data_format, regularizer, bn_regularizer,
bn_momentum, bn_epsilon):
"""Creates one layer of blocks for the ResNet model.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first convolution of the layer.
bottleneck: Is the block created a bottleneck block.
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
blocks: The number of blocks contained in the layer.
strides: The stride to use for the first convolution of the layer. If
greater than 1, this layer will ultimately downsample the input.
training: Either True or False, whether we are currently training the
model. Needed for batch norm.
name: A string name for the tensor output of the block layer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block layer.
"""
# Bottleneck blocks end with 4x the number of filters as they start with
filters_out = filters * 4 if bottleneck else filters
def projection_shortcut(inputs):
return conv2d_fixed_padding(
inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,
data_format=data_format, regularizer=regularizer)
# Only the first block per block_layer uses projection_shortcut and strides
inputs = block_fn(inputs, filters, training, projection_shortcut, strides,
data_format, regularizer=regularizer,
bn_regularizer=bn_regularizer,
bn_momentum=bn_momentum, bn_epsilon=bn_epsilon)
for _ in range(1, blocks):
inputs = block_fn(inputs, filters, training, None, 1, data_format,
regularizer=regularizer, bn_regularizer=bn_regularizer,
bn_momentum=bn_momentum, bn_epsilon=bn_epsilon)
return tf.identity(inputs, name)
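# Hedged illustration (not part of the model code): fixed_padding() pads only as
# a function of kernel_size, so strided convolutions behave like SAME padding
# regardless of input size. For kernel_size=7 the padding is 3 on each side.
kernel_size_example = 7
pad_total = kernel_size_example - 1
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2
assert (pad_beg, pad_end) == (3, 3)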
| OpenSeq2Seq-master | open_seq2seq/encoders/resnet_blocks.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from .resnet_blocks import conv2d_fixed_padding, batch_norm, block_layer, \
bottleneck_block_v1, bottleneck_block_v2, \
building_block_v1, building_block_v2
from .encoder import Encoder
class ResNetEncoder(Encoder):
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'resnet_size': int,
'block_sizes': list,
'block_strides': list,
'version': [1, 2],
'bottleneck': bool,
'final_size': int,
'first_num_filters': int,
'first_kernel_size': int,
'first_conv_stride': int,
'first_pool_size': int,
'first_pool_stride': int,
'data_format': ['channels_first', 'channels_last'],
'regularize_bn': bool,
'bn_momentum': float,
'bn_epsilon': float,
})
def __init__(self, params, model, name="resnet_encoder", mode='train'):
super(ResNetEncoder, self).__init__(params, model, name, mode)
def _encode(self, input_dict):
inputs = input_dict['source_tensors'][0]
if 'resnet_size' not in self.params and 'block_sizes' not in self.params:
raise ValueError('Either "resnet_size" or "block_sizes" '
'have to be specified in the config')
if 'resnet_size' in self.params and 'block_sizes' in self.params:
raise ValueError('"resnet_size" and "block_sizes" cannot '
'be specified together')
if 'resnet_size' in self.params:
if self.params['resnet_size'] < 50:
bottleneck = self.params.get('bottleneck', False)
final_size = self.params.get('final_size', 512)
else:
bottleneck = self.params.get('bottleneck', True)
final_size = self.params.get('final_size', 2048)
block_sizes_dict = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3],
}
block_sizes = block_sizes_dict[self.params['resnet_size']]
else:
if 'bottleneck' not in self.params:
raise ValueError('If "resnet_size" not specified you have to provide '
'"bottleneck" parameter')
if 'final_size' not in self.params:
raise ValueError('If "resnet_size" not specified you have to provide '
'"final_size" parameter')
bottleneck = self.params['bottleneck']
final_size = self.params['final_size']
block_sizes = self.params['block_sizes']
num_filters = self.params.get('first_num_filters', 64)
kernel_size = self.params.get('first_kernel_size', 7)
conv_stride = self.params.get('first_conv_stride', 2)
first_pool_size = self.params.get('first_pool_size', 3)
first_pool_stride = self.params.get('first_pool_stride', 2)
block_strides = self.params.get('block_strides', [1, 2, 2, 2])
version = self.params.get('version', 2)
data_format = self.params.get('data_format', 'channels_first')
bn_momentum = self.params.get('bn_momentum', 0.997)
bn_epsilon = self.params.get('bn_epsilon', 1e-5)
if bottleneck:
if version == 1:
block_fn = bottleneck_block_v1
else:
block_fn = bottleneck_block_v2
else:
if version == 1:
block_fn = building_block_v1
else:
block_fn = building_block_v2
training = self.mode == 'train'
regularizer = self.params.get('regularizer', None)
regularize_bn = self.params.get('regularize_bn', True)
bn_regularizer = regularizer if regularize_bn else None
if data_format == 'channels_first':
inputs = tf.transpose(inputs, [0, 3, 1, 2])
inputs = conv2d_fixed_padding(
inputs=inputs, filters=num_filters, kernel_size=kernel_size,
strides=conv_stride, data_format=data_format, regularizer=regularizer,
)
inputs = tf.identity(inputs, 'initial_conv')
if version == 1:
inputs = batch_norm(inputs, training, data_format,
regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
if first_pool_size:
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=first_pool_size,
strides=first_pool_stride, padding='SAME',
data_format=data_format,
)
inputs = tf.identity(inputs, 'initial_max_pool')
for i, num_blocks in enumerate(block_sizes):
cur_num_filters = num_filters * (2**i)
inputs = block_layer(
inputs=inputs, filters=cur_num_filters, bottleneck=bottleneck,
block_fn=block_fn, blocks=num_blocks,
strides=block_strides[i], training=training,
name='block_layer{}'.format(i + 1), data_format=data_format,
regularizer=regularizer, bn_regularizer=bn_regularizer,
bn_momentum=bn_momentum, bn_epsilon=bn_epsilon,
)
if version == 2:
inputs = batch_norm(inputs, training, data_format,
regularizer=bn_regularizer,
momentum=bn_momentum, epsilon=bn_epsilon)
inputs = tf.nn.relu(inputs)
# The current top layer has shape
# `batch_size x pool_size x pool_size x final_size`.
# ResNet does an Average Pooling layer over pool_size,
# but that is the same as doing a reduce_mean. We do a reduce_mean
# here because it performs better than AveragePooling2D.
axes = [2, 3] if data_format == 'channels_first' else [1, 2]
inputs = tf.reduce_mean(inputs, axes, keepdims=True)
inputs = tf.identity(inputs, 'final_reduce_mean')
outputs = tf.reshape(inputs, [-1, final_size])
return {'outputs': outputs}
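# Hedged config sketch (hypothetical values): either specify "resnet_size"
# alone (block sizes, bottleneck and final_size are then derived as above),
# or specify "block_sizes" together with "bottleneck" and "final_size".
resnet50_like = {"resnet_size": 50}  # -> bottleneck blocks, final_size=2048
custom_resnet = {
    "block_sizes": [2, 2, 2, 2],  # ResNet-18-like layout
    "bottleneck": False,
    "final_size": 512,
}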
| OpenSeq2Seq-master | open_seq2seq/encoders/resnet_encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from six.moves import range
import inspect
import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.python.framework import ops
from open_seq2seq.parts.cnns.conv_blocks import conv_bn_actv
from open_seq2seq.parts.rnns.utils import single_cell
from open_seq2seq.parts.transformer import attention_layer
from .encoder import Encoder
class Tacotron2Encoder(Encoder):
"""Tacotron-2 like encoder.
Consists of an embedding layer followed by a convolutional layer followed by
a recurrent layer.
"""
@staticmethod
def get_required_params():
return dict(
Encoder.get_required_params(),
**{
'cnn_dropout_prob': float,
'rnn_dropout_prob': float,
'src_emb_size': int,
'conv_layers': list,
'activation_fn': None, # any valid callable
'num_rnn_layers': int,
'rnn_cell_dim': int,
'use_cudnn_rnn': bool,
'rnn_type': None,
'rnn_unidirectional': bool,
}
)
@staticmethod
def get_optional_params():
return dict(
Encoder.get_optional_params(), **{
'data_format': ['channels_first', 'channels_last'],
'bn_momentum': float,
'bn_epsilon': float,
'zoneout_prob': float,
'style_embedding_enable': bool,
'style_embedding_params': dict,
}
)
def __init__(self, params, model, name="tacotron2_encoder", mode='train'):
"""Tacotron-2 like encoder constructor.
See parent class for arguments description.
Config parameters:
* **cnn_dropout_prob** (float) --- dropout probability for cnn layers.
* **rnn_dropout_prob** (float) --- dropout probability for rnn layers.
* **src_emb_size** (int) --- dimensionality of character embedding.
* **conv_layers** (list) --- list with the description of convolutional
layers. For example::
"conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
}
]
* **activation_fn** (callable) --- activation function to use for conv
layers.
* **num_rnn_layers** --- number of RNN layers to use.
* **rnn_cell_dim** (int) --- dimension of RNN cells.
* **rnn_type** (callable) --- any valid RNN cell class. The suggested class is
LSTM.
* **rnn_unidirectional** (bool) --- whether to use uni-directional or
bi-directional RNNs.
* **zoneout_prob** (float) --- zoneout probability. Defaults to 0.
* **use_cudnn_rnn** (bool) --- needs to be enabled if rnn_type is a Cudnn
class.
* **data_format** (string) --- could be either "channels_first" or
"channels_last". Defaults to "channels_last".
* **bn_momentum** (float) --- momentum for batch norm. Defaults to 0.1.
* **bn_epsilon** (float) --- epsilon for batch norm. Defaults to 1e-5.
* **style_embedding_enable** (bool) --- Whether to enable GST. Defaults to
False.
* **style_embedding_params** (dict) --- Parameters for GST layer. See
_embed_style documentation.
"""
super(Tacotron2Encoder, self).__init__(params, model, name, mode)
def _encode(self, input_dict):
"""Creates TensorFlow graph for Tacotron-2 like encoder.
Args:
input_dict (dict): dictionary with inputs.
Must define:
source_tensors - array containing [
* source_sequence: tensor of shape [batch_size, sequence length]
* src_length: tensor of shape [batch_size]
]
Returns:
dict: A python dictionary containing:
* outputs - tensor containing the encoded text to be passed to the
attention layer
* src_length - the length of the encoded text
"""
text = input_dict['source_tensors'][0]
text_len = input_dict['source_tensors'][1]
training = (self._mode == "train")
regularizer = self.params.get('regularizer', None)
data_format = self.params.get('data_format', 'channels_last')
src_vocab_size = self._model.get_data_layer().params['src_vocab_size']
zoneout_prob = self.params.get('zoneout_prob', 0.)
# if src_vocab_size % 8 != 0:
# src_vocab_size += 8 - (src_vocab_size % 8)
# ----- Embedding layer -----------------------------------------------
enc_emb_w = tf.get_variable(
name="EncoderEmbeddingMatrix",
shape=[src_vocab_size, self.params['src_emb_size']],
dtype=self.params['dtype'],
# initializer=tf.random_normal_initializer()
)
embedded_inputs = tf.cast(
tf.nn.embedding_lookup(
enc_emb_w,
text,
), self.params['dtype']
)
# ----- Style layer ---------------------------------------------------
if self.params.get("style_embedding_enable", False):
if "style_embedding_params" not in self.params:
raise ValueError(
"style_embedding_params must be passed if style embedding is enabled"
)
with tf.variable_scope("style_encoder"):
if (self._model.get_data_layer().params.get("style_input", None)
== "wav"):
style_spec = input_dict['source_tensors'][2]
style_len = input_dict['source_tensors'][3]
style_embedding = self._embed_style(style_spec, style_len)
else:
raise ValueError("The data layer's style input parameter must be set.")
style_embedding = tf.expand_dims(style_embedding, 1)
style_embedding = tf.tile(
style_embedding,
[1, tf.reduce_max(text_len), 1]
)
# ----- Convolutional layers -----------------------------------------------
input_layer = embedded_inputs
if data_format == 'channels_last':
top_layer = input_layer
else:
top_layer = tf.transpose(input_layer, [0, 2, 1])
for i, conv_params in enumerate(self.params['conv_layers']):
ch_out = conv_params['num_channels']
kernel_size = conv_params['kernel_size'] # [time, freq]
strides = conv_params['stride']
padding = conv_params['padding']
if padding == "VALID":
text_len = (text_len - kernel_size[0] + strides[0]) // strides[0]
else:
text_len = (text_len + strides[0] - 1) // strides[0]
top_layer = conv_bn_actv(
layer_type="conv1d",
name="conv{}".format(i + 1),
inputs=top_layer,
filters=ch_out,
kernel_size=kernel_size,
activation_fn=self.params['activation_fn'],
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=data_format,
bn_momentum=self.params.get('bn_momentum', 0.1),
bn_epsilon=self.params.get('bn_epsilon', 1e-5),
)
top_layer = tf.layers.dropout(
top_layer, rate=self.params["cnn_dropout_prob"], training=training
)
if data_format == 'channels_first':
top_layer = tf.transpose(top_layer, [0, 2, 1])
# ----- RNN ---------------------------------------------------------------
num_rnn_layers = self.params['num_rnn_layers']
if num_rnn_layers > 0:
cell_params = {}
cell_params["num_units"] = self.params['rnn_cell_dim']
rnn_type = self.params['rnn_type']
rnn_input = top_layer
rnn_vars = []
if self.params["use_cudnn_rnn"]:
if self._mode == "infer":
cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(
cell_params["num_units"]
)
cells_fw = [cell() for _ in range(1)]
cells_bw = [cell() for _ in range(1)]
(top_layer, _, _) = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
cells_fw, cells_bw, rnn_input,
sequence_length=text_len,
dtype=rnn_input.dtype,
time_major=False)
else:
all_cudnn_classes = [
i[1]
for i in inspect.getmembers(tf.contrib.cudnn_rnn, inspect.isclass)
]
if not rnn_type in all_cudnn_classes:
raise TypeError("rnn_type must be a Cudnn RNN class")
if zoneout_prob != 0.:
raise ValueError(
"Zoneout is currently not supported for cudnn rnn classes"
)
rnn_input = tf.transpose(top_layer, [1, 0, 2])
if self.params['rnn_unidirectional']:
direction = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
else:
direction = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
rnn_block = rnn_type(
num_layers=num_rnn_layers,
num_units=cell_params["num_units"],
direction=direction,
dtype=rnn_input.dtype,
name="cudnn_rnn"
)
rnn_block.build(rnn_input.get_shape())
top_layer, _ = rnn_block(rnn_input)
top_layer = tf.transpose(top_layer, [1, 0, 2])
rnn_vars += rnn_block.trainable_variables
else:
multirnn_cell_fw = tf.nn.rnn_cell.MultiRNNCell(
[
single_cell(
cell_class=rnn_type,
cell_params=cell_params,
zoneout_prob=zoneout_prob,
training=training,
residual_connections=False
) for _ in range(num_rnn_layers)
]
)
rnn_vars += multirnn_cell_fw.trainable_variables
if self.params['rnn_unidirectional']:
top_layer, _ = tf.nn.dynamic_rnn(
cell=multirnn_cell_fw,
inputs=rnn_input,
sequence_length=text_len,
dtype=rnn_input.dtype,
time_major=False,
)
else:
multirnn_cell_bw = tf.nn.rnn_cell.MultiRNNCell(
[
single_cell(
cell_class=rnn_type,
cell_params=cell_params,
zoneout_prob=zoneout_prob,
training=training,
residual_connections=False
) for _ in range(num_rnn_layers)
]
)
top_layer, _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=multirnn_cell_fw,
cell_bw=multirnn_cell_bw,
inputs=rnn_input,
sequence_length=text_len,
dtype=rnn_input.dtype,
time_major=False
)
# concat 2 tensors [B, T, n_cell_dim] --> [B, T, 2*n_cell_dim]
top_layer = tf.concat(top_layer, 2)
rnn_vars += multirnn_cell_bw.trainable_variables
if regularizer and training:
cell_weights = []
cell_weights += rnn_vars
cell_weights += [enc_emb_w]
for weights in cell_weights:
if "bias" not in weights.name:
# print("Added regularizer to {}".format(weights.name))
if weights.dtype.base_dtype == tf.float16:
tf.add_to_collection(
'REGULARIZATION_FUNCTIONS', (weights, regularizer)
)
else:
tf.add_to_collection(
ops.GraphKeys.REGULARIZATION_LOSSES, regularizer(weights)
)
# -- end of rnn------------------------------------------------------------
top_layer = tf.layers.dropout(
top_layer, rate=self.params["rnn_dropout_prob"], training=training
)
outputs = top_layer
if self.params.get("style_embedding_enable", False):
outputs = tf.concat([outputs, style_embedding], axis=-1)
return {
'outputs': outputs,
'src_length': text_len
}
def _embed_style(self, style_spec, style_len):
"""
Code that implements the reference encoder as described in "Towards
end-to-end prosody transfer for expressive speech synthesis with Tacotron",
and "Style Tokens: Unsupervised Style Modeling, Control and Transfer in
End-to-End Speech Synthesis"
Config parameters:
* **conv_layers** (list) --- See the conv_layers parameter for the
Tacotron-2 model.
* **num_rnn_layers** (int) --- Number of rnn layers in the reference encoder
* **rnn_cell_dim** (int) --- Size of rnn layer
* **rnn_unidirectional** (bool) --- Uni- or bi-directional rnn.
* **rnn_type** --- Must be a valid tf rnn cell class
* **emb_size** (int) --- Size of gst
* **attention_layer_size** (int) --- Size of linear layers in attention
* **num_tokens** (int) --- Number of tokens for gst
* **num_heads** (int) --- Number of attention heads
"""
training = (self._mode == "train")
regularizer = self.params.get('regularizer', None)
data_format = self.params.get('data_format', 'channels_last')
batch_size = style_spec.get_shape().as_list()[0]
top_layer = tf.expand_dims(style_spec, -1)
params = self.params['style_embedding_params']
if "conv_layers" in params:
for i, conv_params in enumerate(params['conv_layers']):
ch_out = conv_params['num_channels']
kernel_size = conv_params['kernel_size'] # [time, freq]
strides = conv_params['stride']
padding = conv_params['padding']
if padding == "VALID":
style_len = (style_len - kernel_size[0] + strides[0]) // strides[0]
else:
style_len = (style_len + strides[0] - 1) // strides[0]
top_layer = conv_bn_actv(
layer_type="conv2d",
name="conv{}".format(i + 1),
inputs=top_layer,
filters=ch_out,
kernel_size=kernel_size,
activation_fn=self.params['activation_fn'],
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=data_format,
bn_momentum=self.params.get('bn_momentum', 0.1),
bn_epsilon=self.params.get('bn_epsilon', 1e-5),
)
if data_format == 'channels_first':
top_layer = tf.transpose(top_layer, [0, 2, 1])
top_layer = tf.concat(tf.unstack(top_layer, axis=2), axis=-1)
num_rnn_layers = params['num_rnn_layers']
if num_rnn_layers > 0:
cell_params = {}
cell_params["num_units"] = params['rnn_cell_dim']
rnn_type = params['rnn_type']
rnn_input = top_layer
rnn_vars = []
multirnn_cell_fw = tf.nn.rnn_cell.MultiRNNCell(
[
single_cell(
cell_class=rnn_type,
cell_params=cell_params,
training=training,
residual_connections=False
) for _ in range(num_rnn_layers)
]
)
rnn_vars += multirnn_cell_fw.trainable_variables
if params['rnn_unidirectional']:
top_layer, final_state = tf.nn.dynamic_rnn(
cell=multirnn_cell_fw,
inputs=rnn_input,
sequence_length=style_len,
dtype=rnn_input.dtype,
time_major=False,
)
final_state = final_state[0]
else:
multirnn_cell_bw = tf.nn.rnn_cell.MultiRNNCell(
[
single_cell(
cell_class=rnn_type,
cell_params=cell_params,
training=training,
residual_connections=False
) for _ in range(num_rnn_layers)
]
)
top_layer, final_state = tf.nn.bidirectional_dynamic_rnn(
cell_fw=multirnn_cell_fw,
cell_bw=multirnn_cell_bw,
inputs=rnn_input,
sequence_length=style_len,
dtype=rnn_input.dtype,
time_major=False
)
# concat 2 tensors [B, T, n_cell_dim] --> [B, T, 2*n_cell_dim]
final_state = tf.concat((final_state[0][0].h, final_state[1][0].h), 1)
rnn_vars += multirnn_cell_bw.trainable_variables
top_layer = final_state
# Apply linear layer
top_layer = tf.layers.dense(
top_layer,
128,
activation=tf.nn.tanh,
kernel_regularizer=regularizer,
name="reference_activation"
)
if regularizer and training:
cell_weights = rnn_vars
for weights in cell_weights:
if "bias" not in weights.name:
# print("Added regularizer to {}".format(weights.name))
if weights.dtype.base_dtype == tf.float16:
tf.add_to_collection(
'REGULARIZATION_FUNCTIONS', (weights, regularizer)
)
else:
tf.add_to_collection(
ops.GraphKeys.REGULARIZATION_LOSSES, regularizer(weights)
)
num_units = params["num_tokens"]
att_size = params["attention_layer_size"]
# Randomly initialized tokens
gst_embedding = tf.get_variable(
"token_embeddings",
shape=[num_units, params["emb_size"]],
dtype=self.params["dtype"],
initializer=tf.random_uniform_initializer(
minval=-1.,
maxval=1.,
dtype=self.params["dtype"]
),
trainable=False
)
attention = attention_layer.Attention(
params["attention_layer_size"], params["num_heads"],
0.,
training,
mode="bahdanau"
)
top_layer = tf.expand_dims(top_layer, 1)
gst_embedding = tf.nn.tanh(gst_embedding)
gst_embedding = tf.expand_dims(gst_embedding, 0)
gst_embedding = tf.tile(gst_embedding, [batch_size, 1, 1])
token_embeddings = attention(top_layer, gst_embedding, None)
token_embeddings = tf.squeeze(token_embeddings, 1)
return token_embeddings
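# Hedged sketch of "style_embedding_params" (hypothetical values), matching the
# keys read in _embed_style() above: a reference-encoder conv/rnn stack plus the
# global-style-token (GST) and attention sizes.
example_style_embedding_params = {
    "conv_layers": [{"kernel_size": [3, 3], "stride": [2, 2],
                     "num_channels": 32, "padding": "SAME"}],
    "num_rnn_layers": 1, "rnn_cell_dim": 128,
    "rnn_unidirectional": True, "rnn_type": tf.nn.rnn_cell.GRUCell,
    "emb_size": 512, "attention_layer_size": 128,
    "num_tokens": 32, "num_heads": 8,
}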
| OpenSeq2Seq-master | open_seq2seq/encoders/tacotron2_encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
This module contains classes and functions to build "general" convolutional
neural networks from the description of arbitrary "layers".
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import copy
import tensorflow as tf
try:
from inspect import signature
except ImportError:
from funcsigs import signature
from open_seq2seq.utils.utils import deco_print
from .encoder import Encoder
def build_layer(inputs, layer, layer_params, data_format,
regularizer, training, verbose=True):
"""This function builds a layer from the layer function and it's parameters.
It will automatically add regularizer parameter to the layer_params if the
layer supports regularization. To check this, it will look for the
"regularizer", "kernel_regularizer" and "gamma_regularizer" names in this
order in the ``layer`` call signature. If one of these parameters is supported,
the regularizer object will be passed as its value. The same signature check
is used to add the "data_format" and "training" parameters when supported.
Finally, the "axis" parameter will be set to
``1 if data_format == 'channels_first' else 3`` when supported, which is
required to build batch normalization layers automatically.
Args:
inputs: input Tensor that will be passed to the layer. Note that layer has
to accept input as the first parameter.
layer: layer function or class with ``__call__`` method defined.
layer_params (dict): parameters passed to the ``layer``.
data_format (string): data format ("channels_first" or "channels_last")
that will be tried to be passed as an additional argument.
regularizer: regularizer instance that will be tried to be passed as an
additional argument.
training (bool): whether layer is built in training mode. Will be tried to
be passed as an additional argument.
verbose (bool): whether to print information about built layers.
Returns:
Tensor with layer output.
"""
layer_params_cp = copy.deepcopy(layer_params)
for reg_name in ['regularizer', 'kernel_regularizer', 'gamma_regularizer']:
if reg_name not in layer_params_cp and \
reg_name in signature(layer).parameters:
layer_params_cp.update({reg_name: regularizer})
if 'data_format' not in layer_params_cp and \
'data_format' in signature(layer).parameters:
layer_params_cp.update({'data_format': data_format})
# necessary to check axis for correct batch normalization processing
if 'axis' not in layer_params_cp and \
'axis' in signature(layer).parameters:
layer_params_cp.update({'axis': 1 if data_format == 'channels_first' else 3})
if 'training' not in layer_params_cp and \
'training' in signature(layer).parameters:
layer_params_cp.update({'training': training})
outputs = layer(inputs, **layer_params_cp)
if verbose:
if hasattr(layer, '_tf_api_names'):
layer_name = layer._tf_api_names[0]
else:
layer_name = layer
deco_print("Building layer: {}(inputs, {})".format(
layer_name,
", ".join("{}={}".format(key, value)
for key, value in layer_params_cp.items())
))
return outputs
class CNNEncoder(Encoder):
"""General CNN encoder that can be used to construct various different models.
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'cnn_layers': list,
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'data_format': ['channels_first', 'channels_last'],
'fc_layers': list,
})
def __init__(self, params, model, name="cnn_encoder", mode='train'):
"""CNN Encoder constructor.
See parent class for arguments description.
Config parameters:
* **cnn_layers** (list) --- list with the description of "convolutional"
layers. For example::
"conv_layers": [
(tf.layers.conv2d, {
'filters': 64, 'kernel_size': (11, 11),
'strides': (4, 4), 'padding': 'VALID',
'activation': tf.nn.relu,
}),
(tf.layers.max_pooling2d, {
'pool_size': (3, 3), 'strides': (2, 2),
}),
(tf.layers.conv2d, {
'filters': 192, 'kernel_size': (5, 5),
'strides': (1, 1), 'padding': 'SAME',
}),
(tf.layers.batch_normalization, {'momentum': 0.9, 'epsilon': 0.0001}),
(tf.nn.relu, {}),
]
Note that you don't need to provide "regularizer", "training",
"data_format" and "axis" parameters since they will be
automatically added. "axis" will be derived from "data_format" and will
be ``1 if data_format == "channels_first" else 3``.
* **fc_layers** (list) --- list with the description of "fully-connected"
layers. The only difference from convolutional layers is that the input
will be automatically reshaped to 2D (batch size x num features).
For example::
'fc_layers': [
(tf.layers.dense, {'units': 4096, 'activation': tf.nn.relu}),
(tf.layers.dropout, {'rate': 0.5}),
(tf.layers.dense, {'units': 4096, 'activation': tf.nn.relu}),
(tf.layers.dropout, {'rate': 0.5}),
],
Note that you don't need to provide "regularizer", "training",
"data_format" and "axis" parameters since they will be
automatically added. "axis" will be derived from "data_format" and will
be ``1 if data_format == "channels_first" else 3``.
* **data_format** (string) --- could be either "channels_first" or
"channels_last". Defaults to "channels_first".
"""
super(CNNEncoder, self).__init__(params, model, name, mode)
def _encode(self, input_dict):
regularizer = self.params.get('regularizer', None)
data_format = self.params.get('data_format', 'channels_first')
x = input_dict['source_tensors'][0]
if data_format == 'channels_first':
x = tf.transpose(x, [0, 3, 1, 2])
for layer, layer_params in self.params['cnn_layers']:
x = build_layer(x, layer, layer_params, data_format,
regularizer, self.mode == 'train')
if data_format == 'channels_first':
x = tf.transpose(x, [0, 2, 3, 1])
fc_layers = self.params.get('fc_layers', [])
# if fully connected layers exist, flattening the output and applying them
if fc_layers:
input_shape = x.get_shape().as_list()
num_inputs = input_shape[1] * input_shape[2] * input_shape[3]
x = tf.reshape(x, [-1, num_inputs])
for layer, layer_params in fc_layers:
x = build_layer(x, layer, layer_params, data_format, regularizer,
self.mode == 'train')
else:
# if there are no fully connected layers, doing average pooling
x = tf.reduce_mean(x, [1, 2])
return {'outputs': x}
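# Hedged usage sketch (hypothetical values): build_layer() inspects each
# callable's signature, so a plain tf.layers.conv2d entry below automatically
# receives kernel_regularizer and data_format, while batch_normalization also
# receives axis and training; tf.nn.relu receives none of them.
example_cnn_layers = [
    (tf.layers.conv2d, {"filters": 64, "kernel_size": 3, "padding": "SAME"}),
    (tf.layers.batch_normalization, {"momentum": 0.9}),
    (tf.nn.relu, {}),
    (tf.layers.max_pooling2d, {"pool_size": 2, "strides": 2}),
]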
| OpenSeq2Seq-master | open_seq2seq/encoders/cnn_encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
This package contains various encoders.
An encoder typically takes data and produces representation.
"""
from .encoder import Encoder
from .rnn_encoders import UnidirectionalRNNEncoderWithEmbedding, \
BidirectionalRNNEncoderWithEmbedding, \
GNMTLikeEncoderWithEmbedding,\
GNMTLikeEncoderWithEmbedding_cuDNN
from .transformer_encoder import TransformerEncoder
from .ds2_encoder import DeepSpeech2Encoder
from .resnet_encoder import ResNetEncoder
from .tacotron2_encoder import Tacotron2Encoder
from .tdnn_encoder import TDNNEncoder
from .las_encoder import ListenAttendSpellEncoder
from .convs2s_encoder import ConvS2SEncoder
from .lm_encoders import LMEncoder
from .wavenet_encoder import WavenetEncoder
from .centaur_encoder import CentaurEncoder | OpenSeq2Seq-master | open_seq2seq/encoders/__init__.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import abc
import copy
import six
import tensorflow as tf
from open_seq2seq.optimizers.mp_wrapper import mp_regularizer_wrapper
from open_seq2seq.utils.utils import check_params, cast_types
@six.add_metaclass(abc.ABCMeta)
class Encoder:
"""Abstract class from which all encoders must inherit.
"""
@staticmethod
def get_required_params():
"""Static method with description of required parameters.
Returns:
dict:
Dictionary containing all the parameters that **have to** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {}
@staticmethod
def get_optional_params():
"""Static method with description of optional parameters.
Returns:
dict:
Dictionary containing all the parameters that **can** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {
'regularizer': None, # any valid TensorFlow regularizer
'regularizer_params': dict,
'initializer': None, # any valid TensorFlow initializer
'initializer_params': dict,
'dtype': [tf.float32, tf.float16, 'mixed'],
}
def __init__(self, params, model, name="encoder", mode='train'):
"""Encoder constructor.
Note that encoder constructors should not modify TensorFlow graph, all
graph construction should happen in the :meth:`self._encode() <_encode>`
method.
Args:
params (dict): parameters describing the encoder.
All supported parameters are listed in :meth:`get_required_params`,
:meth:`get_optional_params` functions.
model (instance of a class derived from :class:`Model<models.model.Model>`):
parent model that created this encoder.
Could be None if no model access is required for the use case.
name (str): name for encoder variable scope.
mode (str): mode encoder is going to be run in.
Could be "train", "eval" or "infer".
Config parameters:
* **initializer** --- any valid TensorFlow initializer. If no initializer
is provided, model initializer will be used.
* **initializer_params** (dict) --- dictionary that will be passed to
initializer ``__init__`` method.
* **regularizer** --- any valid TensorFlow regularizer. If no regularizer
is provided, model regularizer will be used.
* **regularizer_params** (dict) --- dictionary that will be passed to
regularizer ``__init__`` method.
* **dtype** --- model dtype. Could be either ``tf.float16``, ``tf.float32``
or "mixed". For details see
:ref:`mixed precision training <mixed_precision>` section in docs. If no
dtype is provided, model dtype will be used.
"""
check_params(params, self.get_required_params(), self.get_optional_params())
self._params = copy.deepcopy(params)
self._model = model
if 'dtype' not in self._params:
if self._model:
self._params['dtype'] = self._model.params['dtype']
else:
self._params['dtype'] = tf.float32
self._name = name
self._mode = mode
self._compiled = False
def encode(self, input_dict):
"""Wrapper around :meth:`self._encode() <_encode>` method.
Here name, initializer and dtype are set in the variable scope and then
:meth:`self._encode() <_encode>` method is called.
Args:
input_dict (dict): see :meth:`self._encode() <_encode>` docs.
Returns:
see :meth:`self._encode() <_encode>` docs.
"""
if not self._compiled:
if 'regularizer' not in self._params:
if self._model and 'regularizer' in self._model.params:
self._params['regularizer'] = copy.deepcopy(
self._model.params['regularizer']
)
self._params['regularizer_params'] = copy.deepcopy(
self._model.params['regularizer_params']
)
if 'regularizer' in self._params:
init_dict = self._params.get('regularizer_params', {})
if self._params['regularizer'] is not None:
self._params['regularizer'] = self._params['regularizer'](**init_dict)
if self._params['dtype'] == 'mixed':
self._params['regularizer'] = mp_regularizer_wrapper(
self._params['regularizer'],
)
if self._params['dtype'] == 'mixed':
self._params['dtype'] = tf.float16
if 'initializer' in self.params:
init_dict = self.params.get('initializer_params', {})
initializer = self.params['initializer'](**init_dict)
else:
initializer = None
self._compiled = True
with tf.variable_scope(self._name, initializer=initializer,
dtype=self.params['dtype']):
return self._encode(self._cast_types(input_dict))
def _cast_types(self, input_dict):
"""This function performs automatic cast of all inputs to encoder dtype.
Args:
input_dict (dict): dictionary passed to :meth:`self._encode() <_encode>`
method.
Returns:
dict: same as input_dict, but with all Tensors cast to encoder dtype.
"""
return cast_types(input_dict, self.params['dtype'])
@abc.abstractmethod
def _encode(self, input_dict):
"""This is the main function which should construct encoder graph.
Typically, encoder will take raw input sequence as an input and
produce some hidden representation as an output.
Args:
input_dict (dict): dictionary containing encoder inputs.
If the encoder is used with :class:`models.encoder_decoder` class,
``input_dict`` will have the following content::
{
"source_tensors": data_layer.input_tensors['source_tensors']
}
Returns:
dict:
dictionary of encoder outputs. Return all necessary outputs.
Typically this will be just::
{
"outputs": outputs,
"state": state,
}
"""
pass
@property
def params(self):
"""Parameters used to construct the encoder (dictionary)."""
return self._params
@property
def mode(self):
"""Mode encoder is run in."""
return self._mode
@property
def name(self):
"""Encoder name."""
return self._name
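# Hedged illustration (not part of the library): a minimal concrete Encoder
# subclass only has to implement _encode() and return a dict of output tensors.
# The class and key names below are hypothetical.
class PassThroughEncoder(Encoder):
    def _encode(self, input_dict):
        source, src_length = input_dict['source_tensors']
        return {'outputs': source, 'src_length': src_length}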
| OpenSeq2Seq-master | open_seq2seq/encoders/encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from .encoder import Encoder
from open_seq2seq.parts.cnns.conv_blocks import conv_actv, conv_bn_actv
cells_dict = {
"lstm": tf.nn.rnn_cell.BasicLSTMCell,
"gru": tf.nn.rnn_cell.GRUCell,
}
def rnn_layer(layer_type, num_layers, name, inputs, src_length, hidden_dim,
regularizer, training, dropout_keep_prob=1.0):
"""Helper function that applies convolution and activation.
Args:
layer_type: the following types are supported
'lstm', 'gru'
"""
rnn_cell = cells_dict[layer_type]
dropout = tf.nn.rnn_cell.DropoutWrapper
multirnn_cell_fw = tf.nn.rnn_cell.MultiRNNCell(
[dropout(rnn_cell(hidden_dim),
output_keep_prob=dropout_keep_prob)
for _ in range(num_layers)]
)
multirnn_cell_bw = tf.nn.rnn_cell.MultiRNNCell(
[dropout(rnn_cell(hidden_dim),
output_keep_prob=dropout_keep_prob)
for _ in range(num_layers)]
)
output, _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=multirnn_cell_fw, cell_bw=multirnn_cell_bw,
inputs=inputs,
sequence_length=src_length,
dtype=inputs.dtype,
scope=name,
)
output = tf.concat(output, 2)
return output
class ListenAttendSpellEncoder(Encoder):
"""Listen Attend Spell like encoder with support for reduction in time dimension of the input.
Can use convolutional layers, recurrent layers or both.
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'dropout_keep_prob': float,
'recurrent_layers': list,
'convnet_layers': list,
'activation_fn': None, # any valid callable
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'data_format': ['channels_first', 'channels_last'],
'normalization': [None, 'batch_norm'],
'bn_momentum': float,
'bn_epsilon': float,
})
def __init__(self, params, model, name="las_encoder", mode='train'):
"""Listen Attend Spell like encoder constructor.
See parent class for arguments description.
Config parameters:
* **dropout_keep_prob** (float) --- keep probability for dropout.
* **convnet_layers** (list) --- list with the description of convolutional
layers. For example::
"convnet_layers": [
{
"type": "conv1d", "repeat" : 5,
"kernel_size": [7], "stride": [1],
"num_channels": 250, "padding": "SAME"
},
{
"type": "conv1d", "repeat" : 1,
"kernel_size": [1], "stride": [2],
"num_channels": 1000, "padding": "SAME"
},
]
* **recurrent_layers** (list) --- list with the description of recurrent
layers. For example::
"recurrent_layers": [
{
"type": "lstm", "num_layers": 1,
"hidden_dim": 512, "dropout_keep_prob": 0.8,
"pool": True, "pool_size":[2], "stride": [2],
},
{
"type": "lstm", "num_layers": 3,
"hidden_dim": 512, "dropout_keep_prob": 0.8,
"pool": False, "pool_size":[-1], "stride": [-1],
},
],
* **activation_fn** --- activation function to use.
* **data_format** (string) --- could be either "channels_first" or
"channels_last". Defaults to "channels_last".
* **normalization** --- normalization to use. Accepts [None, 'batch_norm'].
Use None if you don't want to use normalization. Defaults to 'batch_norm'.
* **bn_momentum** (float) --- momentum for batch norm. Defaults to 0.90.
* **bn_epsilon** (float) --- epsilon for batch norm. Defaults to 1e-3.
"""
super(ListenAttendSpellEncoder, self).__init__(params, model, name, mode)
def _encode(self, input_dict):
"""Creates TensorFlow graph for Wav2Letter like encoder.
Args:
input_dict (dict): input dictionary that has to contain
the following fields::
input_dict = {
"source_tensors": [
src_sequence (shape=[batch_size, sequence length, num features]),
src_length (shape=[batch_size])
]
}
Returns:
dict: dictionary with the following tensors::
{
'outputs': hidden state, shape=[batch_size, sequence length, n_hidden]
'src_length': tensor, shape=[batch_size]
}
"""
source_sequence, src_length = input_dict['source_tensors']
training = (self._mode == "train")
dropout_keep_prob = self.params['dropout_keep_prob'] if training else 1.0
regularizer = self.params.get('regularizer', None)
normalization = self.params.get('normalization', 'batch_norm')
data_format = self.params.get('data_format', 'channels_last')
normalization_params = {}
if normalization is None:
conv_block = conv_actv
elif normalization == "batch_norm":
conv_block = conv_bn_actv
normalization_params['bn_momentum'] = self.params.get(
'bn_momentum', 0.90)
normalization_params['bn_epsilon'] = self.params.get('bn_epsilon', 1e-3)
else:
raise ValueError("Incorrect normalization")
conv_feats = source_sequence
# ----- Convolutional layers ---------------------------------------------
convnet_layers = self.params['convnet_layers']
for idx_convnet in range(len(convnet_layers)):
layer_type = convnet_layers[idx_convnet]['type']
layer_repeat = convnet_layers[idx_convnet]['repeat']
ch_out = convnet_layers[idx_convnet]['num_channels']
kernel_size = convnet_layers[idx_convnet]['kernel_size']
strides = convnet_layers[idx_convnet]['stride']
padding = convnet_layers[idx_convnet]['padding']
dropout_keep = convnet_layers[idx_convnet].get(
'dropout_keep_prob', dropout_keep_prob) if training else 1.0
if padding == "VALID":
src_length = (src_length - kernel_size[0]) // strides[0] + 1
else:
src_length = (src_length + strides[0] - 1) // strides[0]
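      # Worked example of the length arithmetic above (values are illustrative,
      # not taken from any config): with src_length=100, kernel_size=[7] and
      # stride=[1], "VALID" gives (100 - 7) // 1 + 1 = 94 output steps, while
      # "SAME" with stride=[2] gives ceil(100 / 2) = (100 + 2 - 1) // 2 = 50.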
for idx_layer in range(layer_repeat):
conv_feats = conv_block(
layer_type=layer_type,
name="conv{}{}".format(
idx_convnet + 1, idx_layer + 1),
inputs=conv_feats,
filters=ch_out,
kernel_size=kernel_size,
activation_fn=self.params['activation_fn'],
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=data_format,
**normalization_params
)
conv_feats = tf.nn.dropout(x=conv_feats, keep_prob=dropout_keep)
rnn_feats = conv_feats
rnn_block = rnn_layer
# ----- Recurrent layers ---------------------------------------------
recurrent_layers = self.params['recurrent_layers']
for idx_rnn in range(len(recurrent_layers)):
layer_type = recurrent_layers[idx_rnn]['type']
num_layers = recurrent_layers[idx_rnn]['num_layers']
hidden_dim = recurrent_layers[idx_rnn]['hidden_dim']
dropout_keep = recurrent_layers[idx_rnn].get(
'dropout_keep_prob', dropout_keep_prob) if training else 1.0
use_pool = recurrent_layers[idx_rnn]['pool']
pool_size = recurrent_layers[idx_rnn]['pool_size']
strides = recurrent_layers[idx_rnn]['stride']
rnn_feats = rnn_block(
layer_type=layer_type,
num_layers=num_layers,
name="rnn{}".format(
idx_rnn + 1),
inputs=rnn_feats,
src_length=src_length,
hidden_dim=hidden_dim,
regularizer=regularizer,
training=training,
dropout_keep_prob=dropout_keep,
)
if use_pool:
rnn_feats = tf.layers.max_pooling1d(
inputs=rnn_feats,
pool_size=pool_size,
strides=strides,
)
src_length = (src_length - pool_size[0]) // strides[0] + 1
outputs = rnn_feats
return {
'outputs': outputs,
'src_length': src_length,
}
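# A minimal, illustrative params sketch for ListenAttendSpellEncoder (not part
# of the original file). The layer descriptions reuse the docstring examples
# above; the activation choice and any omitted base Encoder params are
# assumptions, so adjust them for a real model.
_EXAMPLE_LAS_ENCODER_PARAMS = {
    "dropout_keep_prob": 0.8,
    "activation_fn": tf.nn.relu,
    "convnet_layers": [
        {
            "type": "conv1d", "repeat": 5,
            "kernel_size": [7], "stride": [1],
            "num_channels": 250, "padding": "SAME",
        },
        {
            "type": "conv1d", "repeat": 1,
            "kernel_size": [1], "stride": [2],
            "num_channels": 1000, "padding": "SAME",
        },
    ],
    "recurrent_layers": [
        {
            "type": "lstm", "num_layers": 1,
            "hidden_dim": 512, "dropout_keep_prob": 0.8,
            "pool": True, "pool_size": [2], "stride": [2],
        },
        {
            "type": "lstm", "num_layers": 3,
            "hidden_dim": 512, "dropout_keep_prob": 0.8,
            "pool": False, "pool_size": [-1], "stride": [-1],
        },
    ],
}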
| OpenSeq2Seq-master | open_seq2seq/encoders/las_encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
RNN-based encoders
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from open_seq2seq.parts.rnns.utils import single_cell
from .encoder import Encoder
class UnidirectionalRNNEncoderWithEmbedding(Encoder):
"""
  Uni-directional RNN encoder with embeddings.
Can support various RNN cell types.
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'src_vocab_size': int,
'src_emb_size': int,
'core_cell': None,
'core_cell_params': dict,
'encoder_layers': int,
'encoder_use_skip_connections': bool,
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'encoder_dp_input_keep_prob': float,
'encoder_dp_output_keep_prob': float,
'time_major': bool,
'use_swap_memory': bool,
'proj_size': int,
'num_groups': int,
})
def __init__(self, params, model,
name="unidir_rnn_encoder_with_emb", mode='train'):
"""Initializes uni-directional encoder with embeddings.
Args:
params (dict): dictionary with encoder parameters
Must define:
* src_vocab_size - data vocabulary size
* src_emb_size - size of embedding to use
          * core_cell - RNN cell class to use
          * core_cell_params - dict of cell parameters (e.g. num_units)
* encoder_layers - number of layers
* encoder_dp_input_keep_prob -
* encoder_dp_output_keep_prob -
* encoder_use_skip_connections - true/false
* time_major (optional)
* use_swap_memory (optional)
* mode - train or infer
... add any cell-specific parameters here as well
"""
super(UnidirectionalRNNEncoderWithEmbedding, self).__init__(
params,
model,
name=name,
mode=mode,
)
self._src_vocab_size = self.params['src_vocab_size']
self._src_emb_size = self.params['src_emb_size']
self._enc_emb_w = None
self._encoder_cell_fw = None
def _encode(self, input_dict):
"""Encodes data into representation.
Args:
input_dict: a Python dictionary.
Must define:
* src_inputs - a Tensor of shape [batch_size, time] or
[time, batch_size]
(depending on time_major param)
* src_lengths - a Tensor of shape [batch_size]
Returns:
a Python dictionary with:
* encoder_outputs - a Tensor of shape
[batch_size, time, representation_dim]
or [time, batch_size, representation_dim]
* encoder_state - a Tensor of shape [batch_size, dim]
* src_lengths - (copy ref from input) a Tensor of shape [batch_size]
"""
# TODO: make a separate level of config for cell_params?
source_sequence = input_dict['source_tensors'][0]
source_length = input_dict['source_tensors'][1]
self._enc_emb_w = tf.get_variable(
name="EncoderEmbeddingMatrix",
shape=[self._src_vocab_size, self._src_emb_size],
dtype=tf.float32,
)
if self._mode == "train":
dp_input_keep_prob = self.params['encoder_dp_input_keep_prob']
dp_output_keep_prob = self.params['encoder_dp_output_keep_prob']
else:
dp_input_keep_prob = 1.0
dp_output_keep_prob = 1.0
fwd_cells = [
single_cell(
cell_class=self.params['core_cell'],
cell_params=self.params.get('core_cell_params', {}),
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
residual_connections=self.params['encoder_use_skip_connections']
) for _ in range(self.params['encoder_layers'])
]
# pylint: disable=no-member
self._encoder_cell_fw = tf.contrib.rnn.MultiRNNCell(fwd_cells)
time_major = self.params.get("time_major", False)
use_swap_memory = self.params.get("use_swap_memory", False)
embedded_inputs = tf.cast(
tf.nn.embedding_lookup(
self.enc_emb_w,
source_sequence,
),
self.params['dtype'],
)
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
cell=self._encoder_cell_fw,
inputs=embedded_inputs,
sequence_length=source_length,
time_major=time_major,
swap_memory=use_swap_memory,
dtype=embedded_inputs.dtype,
)
return {'outputs': encoder_outputs,
'state': encoder_state,
'src_lengths': source_length,
'encoder_input': source_sequence}
@property
def src_vocab_size(self):
return self._src_vocab_size
@property
def src_emb_size(self):
return self._src_emb_size
@property
def enc_emb_w(self):
return self._enc_emb_w
class BidirectionalRNNEncoderWithEmbedding(Encoder):
"""
Bi-directional RNN-based encoder with embeddings.
Can support various RNN cell types.
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'src_vocab_size': int,
'src_emb_size': int,
'encoder_layers': int,
'encoder_use_skip_connections': bool,
'core_cell': None,
'core_cell_params': dict,
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'encoder_dp_input_keep_prob': float,
'encoder_dp_output_keep_prob': float,
'time_major': bool,
'use_swap_memory': bool,
'proj_size': int,
'num_groups': int,
})
def __init__(self, params, model,
name="bidir_rnn_encoder_with_emb", mode='train'):
"""Initializes bi-directional encoder with embeddings.
Args:
params (dict): dictionary with encoder parameters
Must define:
* src_vocab_size - data vocabulary size
* src_emb_size - size of embedding to use
          * core_cell - RNN cell class to use
          * core_cell_params - dict of cell parameters (e.g. num_units)
* encoder_layers - number of layers
* encoder_dp_input_keep_prob -
* encoder_dp_output_keep_prob -
* encoder_use_skip_connections - true/false
* time_major (optional)
* use_swap_memory (optional)
* mode - train or infer
... add any cell-specific parameters here as well
Returns:
encoder_params
"""
super(BidirectionalRNNEncoderWithEmbedding, self).__init__(
params, model, name=name, mode=mode,
)
self._src_vocab_size = self.params['src_vocab_size']
self._src_emb_size = self.params['src_emb_size']
self._enc_emb_w = None
self._encoder_cell_fw = None
self._encoder_cell_bw = None
def _encode(self, input_dict):
"""Encodes data into representation.
Args:
input_dict: a Python dictionary.
Must define:
*src_inputs - a Tensor of shape [batch_size, time] or
[time, batch_size]
(depending on time_major param)
* src_lengths - a Tensor of shape [batch_size]
Returns:
a Python dictionary with:
* encoder_outputs - a Tensor of shape
[batch_size, time, representation_dim]
or [time, batch_size, representation_dim]
* encoder_state - a Tensor of shape [batch_size, dim]
* src_lengths - (copy ref from input) a Tensor of shape [batch_size]
"""
source_sequence = input_dict['source_tensors'][0]
source_length = input_dict['source_tensors'][1]
time_major = self.params.get("time_major", False)
use_swap_memory = self.params.get("use_swap_memory", False)
self._enc_emb_w = tf.get_variable(
name="EncoderEmbeddingMatrix",
shape=[self._src_vocab_size, self._src_emb_size],
dtype=tf.float32
)
if self._mode == "train":
dp_input_keep_prob = self.params['encoder_dp_input_keep_prob']
dp_output_keep_prob = self.params['encoder_dp_output_keep_prob']
else:
dp_input_keep_prob = 1.0
dp_output_keep_prob = 1.0
fwd_cells = [
single_cell(
cell_class=self.params['core_cell'],
cell_params=self.params.get('core_cell_params', {}),
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
residual_connections=self.params['encoder_use_skip_connections'],
) for _ in range(self.params['encoder_layers'])
]
bwd_cells = [
single_cell(
cell_class=self.params['core_cell'],
cell_params=self.params.get('core_cell_params', {}),
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
residual_connections=self.params['encoder_use_skip_connections'],
) for _ in range(self.params['encoder_layers'])
]
with tf.variable_scope("FW"):
# pylint: disable=no-member
self._encoder_cell_fw = tf.contrib.rnn.MultiRNNCell(fwd_cells)
with tf.variable_scope("BW"):
# pylint: disable=no-member
self._encoder_cell_bw = tf.contrib.rnn.MultiRNNCell(bwd_cells)
embedded_inputs = tf.cast(
tf.nn.embedding_lookup(
self.enc_emb_w,
source_sequence,
),
self.params['dtype']
)
encoder_output, encoder_state = tf.nn.bidirectional_dynamic_rnn(
cell_fw=self._encoder_cell_fw,
cell_bw=self._encoder_cell_bw,
inputs=embedded_inputs,
sequence_length=source_length,
time_major=time_major,
swap_memory=use_swap_memory,
dtype=embedded_inputs.dtype,
)
encoder_outputs = tf.concat(encoder_output, 2)
return {'outputs': encoder_outputs,
'state': encoder_state,
'src_lengths': source_length,
'encoder_input': source_sequence}
@property
def src_vocab_size(self):
return self._src_vocab_size
@property
def src_emb_size(self):
return self._src_emb_size
@property
def enc_emb_w(self):
return self._enc_emb_w
class GNMTLikeEncoderWithEmbedding(Encoder):
"""
Encoder similar to the one used in
GNMT model: https://arxiv.org/abs/1609.08144.
Must have at least 2 layers
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'src_vocab_size': int,
'src_emb_size': int,
'core_cell': None,
'core_cell_params': dict,
'encoder_layers': int,
'encoder_use_skip_connections': bool,
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'encoder_dp_input_keep_prob': float,
'encoder_dp_output_keep_prob': float,
'time_major': bool,
'use_swap_memory': bool,
'proj_size': int,
'num_groups': int,
})
def __init__(self, params, model,
name="gnmt_encoder_with_emb", mode='train'):
"""Encodes data into representation.
Args:
params (dict): a Python dictionary.
Must define:
* src_inputs - a Tensor of shape [batch_size, time] or
[time, batch_size]
(depending on time_major param)
* src_lengths - a Tensor of shape [batch_size]
Returns:
a Python dictionary with:
* encoder_outputs - a Tensor of shape
[batch_size, time, representation_dim]
or [time, batch_size, representation_dim]
* encoder_state - a Tensor of shape [batch_size, dim]
* src_lengths - (copy ref from input) a Tensor of shape [batch_size]
"""
super(GNMTLikeEncoderWithEmbedding, self).__init__(
params, model, name=name, mode=mode,
)
self._src_vocab_size = self.params['src_vocab_size']
self._src_emb_size = self.params['src_emb_size']
self._encoder_l1_cell_fw = None
self._encoder_l1_cell_bw = None
self._encoder_cells = None
self._enc_emb_w = None
def _encode(self, input_dict):
source_sequence = input_dict['source_tensors'][0]
source_length = input_dict['source_tensors'][1]
self._enc_emb_w = tf.get_variable(
name="EncoderEmbeddingMatrix",
shape=[self._src_vocab_size, self._src_emb_size],
dtype=tf.float32,
)
if self.params['encoder_layers'] < 2:
raise ValueError("GNMT encoder must have at least 2 layers")
with tf.variable_scope("Level1FW"):
self._encoder_l1_cell_fw = single_cell(
cell_class=self.params['core_cell'],
cell_params=self.params.get('core_cell_params', {}),
dp_input_keep_prob=1.0,
dp_output_keep_prob=1.0,
residual_connections=False,
)
with tf.variable_scope("Level1BW"):
self._encoder_l1_cell_bw = single_cell(
cell_class=self.params['core_cell'],
cell_params=self.params.get('core_cell_params', {}),
dp_input_keep_prob=1.0,
dp_output_keep_prob=1.0,
residual_connections=False,
)
if self._mode == "train":
dp_input_keep_prob = self.params['encoder_dp_input_keep_prob']
dp_output_keep_prob = self.params['encoder_dp_output_keep_prob']
else:
dp_input_keep_prob = 1.0
dp_output_keep_prob = 1.0
with tf.variable_scope("UniDirLevel"):
self._encoder_cells = [
single_cell(
cell_class=self.params['core_cell'],
cell_params=self.params.get('core_cell_params', {}),
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
residual_connections=False,
) for _ in range(self.params['encoder_layers'] - 1)
]
      # add residual connections starting from the third layer of the encoder
      # (the first unidirectional layer, which sits on top of the bidirectional
      # layer, gets no residual connection)
for idx, cell in enumerate(self._encoder_cells):
if idx > 0:
# pylint: disable=no-member
self._encoder_cells[idx] = tf.contrib.rnn.ResidualWrapper(cell)
time_major = self.params.get("time_major", False)
use_swap_memory = self.params.get("use_swap_memory", False)
embedded_inputs = tf.cast(
tf.nn.embedding_lookup(
self.enc_emb_w,
source_sequence,
),
self.params['dtype'],
)
# first bi-directional layer
_encoder_output, _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=self._encoder_l1_cell_fw,
cell_bw=self._encoder_l1_cell_bw,
inputs=embedded_inputs,
sequence_length=source_length,
swap_memory=use_swap_memory,
time_major=time_major,
dtype=embedded_inputs.dtype,
)
encoder_l1_outputs = tf.concat(_encoder_output, 2)
# stack of unidirectional layers
# pylint: disable=no-member
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.MultiRNNCell(self._encoder_cells),
inputs=encoder_l1_outputs,
sequence_length=source_length,
swap_memory=use_swap_memory,
time_major=time_major,
dtype=encoder_l1_outputs.dtype,
)
return {'outputs': encoder_outputs,
'state': encoder_state,
'src_lengths': source_length,
'encoder_input': source_sequence}
@property
def src_vocab_size(self):
return self._src_vocab_size
@property
def src_emb_size(self):
return self._src_emb_size
@property
def enc_emb_w(self):
return self._enc_emb_w
class GNMTLikeEncoderWithEmbedding_cuDNN(Encoder):
"""
Encoder similar to the one used in
GNMT model: https://arxiv.org/abs/1609.08144.
Must have at least 2 layers. Uses cuDNN RNN blocks for efficiency
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'src_vocab_size': int,
'src_emb_size': int,
'encoder_cell_units': int,
'encoder_cell_type': ['lstm', 'gru'],
'encoder_layers': int,
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'encoder_dp_output_keep_prob': float,
})
def __init__(self, params, model,
name="gnmt_encoder_with_emb_cudnn", mode='train'):
"""Encodes data into representation
Args:
params (dict): a Python dictionary.
Must define:
* src_inputs - a Tensor of shape [batch_size, time] or
[time, batch_size]
(depending on time_major param)
* src_lengths - a Tensor of shape [batch_size]
Returns:
a Python dictionary with:
* encoder_outputs - a Tensor of shape
[batch_size, time, representation_dim]
or [time, batch_size, representation_dim]
* encoder_state - a Tensor of shape [batch_size, dim]
* src_lengths - (copy ref from input) a Tensor of shape [batch_size]
"""
super(GNMTLikeEncoderWithEmbedding_cuDNN, self).__init__(
params, model, name=name, mode=mode,
)
self._src_vocab_size = self.params['src_vocab_size']
self._src_emb_size = self.params['src_emb_size']
self._enc_emb_w = None
def _encode(self, input_dict):
source_sequence = input_dict['source_tensors'][0]
source_length = input_dict['source_tensors'][1]
self._enc_emb_w = tf.get_variable(
name="EncoderEmbeddingMatrix",
shape=[self._src_vocab_size, self._src_emb_size],
dtype=tf.float32
)
if self.params['encoder_layers'] < 2:
raise ValueError("GNMT encoder must have at least 2 layers")
if self._mode == "train":
dp_output_keep_prob = self.params['encoder_dp_output_keep_prob']
else:
dp_output_keep_prob = 1.0
# source_sequence is of [batch, time] shape
embedded_inputs = tf.cast(
tf.nn.embedding_lookup(
self.enc_emb_w,
tf.transpose(source_sequence), # cudnn wants [time, batch, ...]
),
self.params['dtype'],
)
with tf.variable_scope("Bi_Directional_Layer"):
direction = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
if self.params['encoder_cell_type'] == "gru":
# pylint: disable=no-member
bidirectional_block = tf.contrib.cudnn_rnn.CudnnGRU(
num_layers=1,
num_units=self.params['encoder_cell_units'],
direction=direction,
dropout=0.0,
dtype=self.params['dtype'],
name="cudnn_gru_bidi",
)
elif self.params['encoder_cell_type'] == "lstm":
# pylint: disable=no-member
bidirectional_block = tf.contrib.cudnn_rnn.CudnnLSTM(
num_layers=1,
num_units=self.params['encoder_cell_units'],
direction=direction,
dropout=0.0,
dtype=self.params['dtype'],
name="cudnn_lstm_bidi",
)
else:
raise ValueError(
"{} is not a valid rnn_type for cudnn_rnn layers".format(
            self.params['encoder_cell_type']
)
)
bidi_output, bidi_state = bidirectional_block(embedded_inputs)
with tf.variable_scope("Uni_Directional_Layer"):
direction = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
layer_input = bidi_output
for ind in range(self.params['encoder_layers'] - 1):
with tf.variable_scope("uni_layer_{}".format(ind)):
if self.params['encoder_cell_type'] == "gru":
# pylint: disable=no-member
unidirectional_block = tf.contrib.cudnn_rnn.CudnnGRU(
num_layers=1,
num_units=self.params['encoder_cell_units'],
direction=direction,
dropout=1.0 - dp_output_keep_prob,
dtype=self.params['dtype'],
name="cudnn_gru_uni_{}".format(ind),
)
elif self.params['encoder_cell_type'] == "lstm":
# pylint: disable=no-member
unidirectional_block = tf.contrib.cudnn_rnn.CudnnLSTM(
num_layers=1,
num_units=self.params['encoder_cell_units'],
direction=direction,
dropout=1.0 - dp_output_keep_prob,
dtype=self.params['dtype'],
name="cudnn_lstm_uni_{}".format(ind),
)
layer_output, encoder_state = unidirectional_block(layer_input)
if ind > 0: # add residual connection
layer_output = layer_input + layer_output
layer_input = layer_output
return {'outputs': tf.transpose(layer_input, perm=[1, 0, 2]),
'state': None,
'src_lengths': source_length,
'encoder_input': source_sequence}
@property
def src_vocab_size(self):
return self._src_vocab_size
@property
def src_emb_size(self):
return self._src_emb_size
@property
def enc_emb_w(self):
return self._enc_emb_w
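# A minimal, illustrative params sketch for BidirectionalRNNEncoderWithEmbedding
# (not part of the original file). The cell class and the sizes below are
# assumptions chosen only to show the expected structure of the params dict.
_EXAMPLE_BIDIR_RNN_ENCODER_PARAMS = {
    "src_vocab_size": 32000,
    "src_emb_size": 512,
    "encoder_layers": 2,
    "encoder_use_skip_connections": False,
    "core_cell": tf.nn.rnn_cell.LSTMCell,
    "core_cell_params": {"num_units": 512},
    "encoder_dp_input_keep_prob": 0.8,
    "encoder_dp_output_keep_prob": 0.8,
}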
| OpenSeq2Seq-master | open_seq2seq/encoders/rnn_encoders.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
RNN-based encoders
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import copy, inspect
import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from open_seq2seq.optimizers.mp_wrapper import mp_regularizer_wrapper
from open_seq2seq.parts.rnns.utils import single_cell
from .encoder import Encoder
# from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
class LMEncoder(Encoder):
"""
RNN-based encoder with embeddings for language modeling
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'vocab_size': int,
'emb_size': int,
'encoder_layers': int,
'encoder_use_skip_connections': bool,
'core_cell': None,
'core_cell_params': dict,
'end_token': int,
"batch_size": int,
"use_cudnn_rnn": bool,
"cudnn_rnn_type": None
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'encoder_dp_input_keep_prob': float,
'encoder_dp_output_keep_prob': float,
"encoder_last_input_keep_prob": float,
"encoder_last_output_keep_prob": float,
'encoder_emb_keep_prob': float,
'variational_recurrent': bool,
'time_major': bool,
'use_swap_memory': bool,
'proj_size': int,
'num_groups': int,
'num_tokens_gen': int,
'fc_use_bias': bool,
'seed_tokens': list,
'sampling_prob': float,
'schedule_learning': bool,
'weight_tied': bool,
'awd_initializer': bool,
"recurrent_keep_prob": float,
"input_weight_keep_prob": float,
"recurrent_weight_keep_prob": float,
"weight_variational": bool,
"dropout_seed": int,
"num_sampled": int,
"fc_dim": int,
"use_cell_state": bool,
})
def __init__(self, params, model,
name="rnn_encoder_awd", mode='train'):
"""
Initializes bi-directional encoder with embeddings
:param params: dictionary with encoder parameters
    Many of the techniques in this implementation are taken from the paper
"Regularizing and Optimizing LSTM Language Models" (Merity et al., 2017)
https://arxiv.org/pdf/1708.02182.pdf
Must define:
* vocab_size - data vocabulary size
* emb_size - size of embedding to use
      * core_cell - RNN cell class to use
      * core_cell_params - dict of cell parameters (e.g. num_units)
* encoder_layers - number of layers
* encoder_use_skip_connections - true/false
* time_major (optional)
* use_swap_memory (optional)
* mode - train or infer
* input_weight_keep_prob: keep probability for dropout of W
(kernel used to multiply with the input tensor)
* recurrent_weight_keep_prob: keep probability for dropout of U
(kernel used to multiply with last hidden state tensor)
* recurrent_keep_prob: keep probability for dropout
when applying tanh for the input transform step
* weight_variational: whether to keep the same weight dropout mask
at every timestep. This feature is not yet implemented.
      * encoder_emb_keep_prob: keep probability for dropout of the embedding matrix
* encoder_dp_input_keep_prob: keep probability for dropout on input of a LSTM cell
in the layer which is not the last layer
* encoder_dp_output_keep_prob: keep probability for dropout on output of a LSTM cell
in the layer which is not the last layer
* encoder_last_input_keep_prob: like ``encoder_dp_input_keep_prob`` but for the
cell in the last layer
      * encoder_last_output_keep_prob: like ``encoder_dp_output_keep_prob`` but for the
        cell in the last layer
* weight_tied: whether to tie the embedding matrix to the last linear layer.
can only do so if the dimension of the last output layer is
the same as the vocabulary size
* use_cell_state: if set to True, concat the last hidden state and
the last cell state to input into the last output layer.
This only works for the text classification task, not the
language modeling phase.
For different ways to do dropout for LSTM cells, please read this article:
https://medium.com/@bingobee01/a-review-of-dropout-as-applied-to-rnns-72e79ecd5b7b
:param encoder_params:
"""
super(LMEncoder, self).__init__(
params, model, name=name, mode=mode,
)
self._vocab_size = self.params['vocab_size']
self._emb_size = self.params['emb_size']
self._sampling_prob = self.params.get('sampling_prob', 0.0)
self._schedule_learning = self.params.get('schedule_learning', False)
self._weight_tied = self.params.get('weight_tied', False)
self.params['encoder_last_input_keep_prob'] = self.params.get('encoder_last_input_keep_prob', 1.0)
self.params['encoder_last_output_keep_prob'] = self.params.get('encoder_last_output_keep_prob', 1.0)
self.params['encoder_emb_keep_prob'] = self.params.get('encoder_emb_keep_prob', 1.0)
self.params['variational_recurrent'] = self.params.get('variational_recurrent', False)
self.params['awd_initializer'] = self.params.get('awd_initializer', False)
self.params['recurrent_keep_prob'] = self.params.get('recurrent_keep_prob', 1.0)
self.params['input_weight_keep_prob'] = self.params.get('input_weight_keep_prob', 1.0)
self.params['recurrent_weight_keep_prob'] = self.params.get('recurrent_weight_keep_prob', 1.0)
self.params['weight_variational'] = self.params.get('weight_variational', False)
self.params['dropout_seed'] = self.params.get('dropout_seed', 1822)
self._fc_dim = self.params.get('fc_dim', self._vocab_size)
self._num_sampled = self.params.get('num_sampled', self._fc_dim) # if num_sampled not defined, take full softmax
self._lm_phase = self._fc_dim == self._vocab_size
self._num_tokens_gen = self.params.get('num_tokens_gen', 200)
self._batch_size = self.params['batch_size']
if mode == 'infer' and self._lm_phase:
self._batch_size = len(self.params['seed_tokens'])
self._use_cell_state = self.params.get('use_cell_state', False)
def encode(self, input_dict):
"""Wrapper around :meth:`self._encode() <_encode>` method.
Here name, initializer and dtype are set in the variable scope and then
:meth:`self._encode() <_encode>` method is called.
Args:
input_dict (dict): see :meth:`self._encode() <_encode>` docs.
Returns:
see :meth:`self._encode() <_encode>` docs.
"""
if not self._compiled:
if 'regularizer' not in self._params:
if self._model and 'regularizer' in self._model.params:
self._params['regularizer'] = copy.deepcopy(
self._model.params['regularizer']
)
self._params['regularizer_params'] = copy.deepcopy(
self._model.params['regularizer_params']
)
if 'regularizer' in self._params:
init_dict = self._params.get('regularizer_params', {})
self._params['regularizer'] = self._params['regularizer'](**init_dict)
if self._params['dtype'] == 'mixed':
self._params['regularizer'] = mp_regularizer_wrapper(
self._params['regularizer'],
)
if self._params['dtype'] == 'mixed':
self._params['dtype'] = tf.float16
self._compiled = True
with tf.variable_scope(self._name, dtype=self.params['dtype']):
return self._encode(self._cast_types(input_dict))
def _encode(self, input_dict):
"""
Encodes data into representation
:param input_dict: a Python dictionary.
Must define:
* src_inputs - a Tensor of shape [batch_size, time] or [time, batch_size]
(depending on time_major param)
* src_lengths - a Tensor of shape [batch_size]
:return: a Python dictionary with:
* encoder_outputs - a Tensor of shape
[batch_size, time, representation_dim]
or [time, batch_size, representation_dim]
* encoder_state - a Tensor of shape [batch_size, dim]
* src_lengths - (copy ref from input) a Tensor of shape [batch_size]
"""
time_major = self.params.get("time_major", False)
use_swap_memory = self.params.get("use_swap_memory", False)
regularizer = self.params.get('regularizer', None)
fc_use_bias = self.params.get('fc_use_bias', True)
use_cudnn_rnn = self.params.get("use_cudnn_rnn", False)
cudnn_rnn_type = self.params.get("cudnn_rnn_type", None)
if 'initializer' in self.params:
init_dict = self.params.get('initializer_params', {})
initializer = self.params['initializer'](**init_dict)
else:
initializer = None
if self._mode == "train":
dp_input_keep_prob = self.params['encoder_dp_input_keep_prob']
dp_output_keep_prob = self.params['encoder_dp_output_keep_prob']
last_input_keep_prob = self.params['encoder_last_input_keep_prob']
last_output_keep_prob = self.params['encoder_last_output_keep_prob']
emb_keep_prob = self.params['encoder_emb_keep_prob']
recurrent_keep_prob = self.params['recurrent_keep_prob']
input_weight_keep_prob = self.params['input_weight_keep_prob']
recurrent_weight_keep_prob = self.params['recurrent_weight_keep_prob']
else:
dp_input_keep_prob, dp_output_keep_prob = 1.0, 1.0
last_input_keep_prob, last_output_keep_prob = 1.0, 1.0
emb_keep_prob, recurrent_keep_prob = 1.0, 1.0
input_weight_keep_prob, recurrent_weight_keep_prob = 1.0, 1.0
self._output_layer = tf.layers.Dense(
self._fc_dim,
kernel_regularizer=regularizer,
kernel_initializer=initializer,
use_bias=fc_use_bias,
dtype=self._params['dtype']
)
if self._weight_tied:
last_cell_params = copy.deepcopy(self.params['core_cell_params'])
last_cell_params['num_units'] = self._emb_size
else:
last_cell_params = self.params['core_cell_params']
last_output_dim = last_cell_params['num_units']
if self._use_cell_state:
last_output_dim = 2 * last_output_dim
fake_input = tf.zeros(shape=(1, last_output_dim),
dtype=self._params['dtype'])
fake_output = self._output_layer.apply(fake_input)
with tf.variable_scope("dense", reuse=True):
dense_weights = tf.get_variable("kernel")
dense_biases = tf.get_variable("bias")
if self._weight_tied and self._lm_phase:
enc_emb_w = tf.transpose(dense_weights)
else:
enc_emb_w = tf.get_variable(
name="EncoderEmbeddingMatrix",
shape=[self._vocab_size, self._emb_size],
dtype=self._params['dtype']
)
self._enc_emb_w = tf.nn.dropout(enc_emb_w, keep_prob=emb_keep_prob)
if use_cudnn_rnn:
if self._mode == 'train' or self._mode == 'eval':
all_cudnn_classes = [
i[1]
for i in inspect.getmembers(tf.contrib.cudnn_rnn, inspect.isclass)
]
        if cudnn_rnn_type not in all_cudnn_classes:
raise TypeError("rnn_type must be a Cudnn RNN class")
rnn_block = cudnn_rnn_type(
num_layers=self.params['encoder_layers'],
num_units=self._emb_size,
dtype=self._params['dtype'],
name="cudnn_rnn"
)
else:
# Transferring weights from model trained with CudnnLSTM/CudnnGRU
# to CudnnCompatibleLSTMCell/CudnnCompatibleGRUCell for inference
if 'CudnnLSTM' in str(cudnn_rnn_type):
cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(num_units=self._emb_size)
elif 'CudnnGRU' in str(cudnn_rnn_type):
cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(num_units=self._emb_size)
fwd_cells = [cell() for _ in range(self.params['encoder_layers'])]
self._encoder_cell_fw = tf.nn.rnn_cell.MultiRNNCell(fwd_cells)
else:
fwd_cells = [
single_cell(cell_class=self.params['core_cell'],
cell_params=self.params['core_cell_params'],
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
recurrent_keep_prob=recurrent_keep_prob,
input_weight_keep_prob=input_weight_keep_prob,
recurrent_weight_keep_prob=recurrent_weight_keep_prob,
weight_variational=self.params['weight_variational'],
dropout_seed=self.params['dropout_seed'],
residual_connections=self.params['encoder_use_skip_connections'],
awd_initializer=self.params['awd_initializer'],
dtype=self._params['dtype']
) for _ in range(self.params['encoder_layers'] - 1)]
fwd_cells.append(
single_cell(cell_class=self.params['core_cell'],
cell_params=last_cell_params,
dp_input_keep_prob=last_input_keep_prob,
dp_output_keep_prob=last_output_keep_prob,
recurrent_keep_prob=recurrent_keep_prob,
input_weight_keep_prob=input_weight_keep_prob,
recurrent_weight_keep_prob=recurrent_weight_keep_prob,
weight_variational=self.params['weight_variational'],
dropout_seed=self.params['dropout_seed'],
residual_connections=self.params['encoder_use_skip_connections'],
awd_initializer=self.params['awd_initializer'],
dtype=self._params['dtype']
)
)
self._encoder_cell_fw = tf.contrib.rnn.MultiRNNCell(fwd_cells)
time_major = self.params.get("time_major", False)
use_swap_memory = self.params.get("use_swap_memory", False)
source_sequence = input_dict['source_tensors'][0]
source_length = input_dict['source_tensors'][1]
# Inference for language modeling requires a different graph
if (not self._lm_phase) or self._mode == 'train' or self._mode == 'eval':
embedded_inputs = tf.cast(tf.nn.embedding_lookup(
self.enc_emb_w,
source_sequence,
), self.params['dtype'])
if use_cudnn_rnn:
        # CudnnLSTM returns encoder_state as a tuple of hidden and cell
        # values; the hidden and cell tensors are stored for each LSTM layer.
# reshape to [B, T, C] --> [T, B, C]
if time_major == False:
embedded_inputs = tf.transpose(embedded_inputs, [1, 0, 2])
rnn_block.build(embedded_inputs.get_shape())
encoder_outputs, encoder_state = rnn_block(embedded_inputs)
encoder_outputs = tf.transpose(encoder_outputs, [1, 0, 2])
else:
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
cell=self._encoder_cell_fw,
inputs=embedded_inputs,
sequence_length=source_length,
time_major=time_major,
swap_memory=use_swap_memory,
dtype=self._params['dtype'],
scope='decoder',
)
if not self._lm_phase:
# CudnnLSTM stores cell and hidden state differently
if use_cudnn_rnn:
if self._use_cell_state:
encoder_outputs = tf.concat([encoder_state[0][-1], encoder_state[1][-1]], axis=1)
else:
encoder_outputs = encoder_state[0][-1]
else:
if self._use_cell_state:
encoder_outputs = tf.concat([encoder_state[-1].h, encoder_state[-1].c], axis=1)
else:
encoder_outputs = encoder_state[-1].h
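      # When num_sampled < fc_dim, the raw hidden states plus the tied
      # weights/bias are returned so the loss can compute a sampled softmax;
      # otherwise the full projection layer is applied and plain logits are
      # returned.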
if self._mode == 'train' and self._num_sampled < self._fc_dim: # sampled softmax
output_dict = {'weights': enc_emb_w,
'bias': dense_biases,
'inputs': encoder_outputs,
'logits': encoder_outputs,
'outputs': [encoder_outputs],
'num_sampled': self._num_sampled}
else: # full softmax
logits = self._output_layer.apply(encoder_outputs)
output_dict = {'logits': logits, 'outputs': [logits]}
else: # infer in LM phase
# This portion of graph is required to restore weights from CudnnLSTM to
# CudnnCompatibleLSTMCell/CudnnCompatibleGRUCell
if use_cudnn_rnn:
embedded_inputs = tf.cast(tf.nn.embedding_lookup(
self.enc_emb_w,
source_sequence,
), self.params['dtype'])
# Scope must remain unset to restore weights
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
cell=self._encoder_cell_fw,
inputs=embedded_inputs,
sequence_length=source_length,
time_major=time_major,
swap_memory=use_swap_memory,
dtype=self._params['dtype']
)
embedding_fn = lambda ids: tf.cast(tf.nn.embedding_lookup(
self.enc_emb_w,
ids,
), self.params['dtype'])
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
          embedding=embedding_fn,  # self._dec_emb_w
          start_tokens=tf.constant(self.params['seed_tokens']),
end_token=self.params['end_token'])
decoder = tf.contrib.seq2seq.BasicDecoder(
cell=self._encoder_cell_fw,
helper=helper,
initial_state=self._encoder_cell_fw.zero_state(
batch_size=self._batch_size, dtype=self._params['dtype'],
),
output_layer=self._output_layer,
)
maximum_iterations = tf.constant(self._num_tokens_gen)
final_outputs, final_state, final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(
decoder=decoder,
impute_finished=False,
maximum_iterations=maximum_iterations,
swap_memory=use_swap_memory,
output_time_major=time_major,
)
output_dict = {'logits': final_outputs.rnn_output,
'outputs': [tf.argmax(final_outputs.rnn_output, axis=-1)],
'final_state': final_state,
'final_sequence_lengths': final_sequence_lengths}
return output_dict
@property
def vocab_size(self):
return self._vocab_size
@property
def emb_size(self):
return self._emb_size
@property
def enc_emb_w(self):
return self._enc_emb_w | OpenSeq2Seq-master | open_seq2seq/encoders/lm_encoders.py |
import tensorflow as tf
from open_seq2seq.encoders import Encoder
from open_seq2seq.parts.centaur import ConvBlock
from open_seq2seq.parts.transformer import embedding_layer
from open_seq2seq.parts.transformer import utils
class CentaurEncoder(Encoder):
"""
Centaur encoder that consists of convolutional layers.
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
"src_vocab_size": int,
"embedding_size": int,
"output_size": int,
"conv_layers": list
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
"pad_embeddings_2_eight": bool,
"regularizer": None,
"bn_momentum": float,
"bn_epsilon": float,
"cnn_dropout_prob": float,
"norm_type": str
})
def __init__(self, params, model, name="centaur_encoder", mode="train"):
"""
Centaur encoder constructor.
See parent class for arguments description.
Config parameters:
* **src_vocab_size** (int) --- number of symbols in alphabet.
* **embedding_size** (int) --- dimensionality of character embedding.
* **output_size** (int) --- dimensionality of output embedding.
* **conv_layers** (list) --- list with the description of convolutional
layers. For example::
"conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME"
}
]
* **bn_momentum** (float) --- momentum for batch norm. Defaults to 0.95.
* **bn_epsilon** (float) --- epsilon for batch norm. Defaults to 1e-8.
    * **cnn_dropout_prob** (float) --- dropout probability for cnn layers.
Defaults to 0.5.
"""
super(CentaurEncoder, self).__init__(params, model, name=name, mode=mode)
self.training = mode == "train"
self.layers = []
def _build_layers(self):
regularizer = self._params.get("regularizer", None)
embedding = embedding_layer.EmbeddingSharedWeights(
vocab_size=self._params["src_vocab_size"],
hidden_size=self._params["embedding_size"],
pad_vocab_to_eight=self.params.get("pad_embeddings_2_eight", False),
regularizer=regularizer
)
self.layers.append(embedding)
cnn_dropout_prob = self._params.get("cnn_dropout_prob", 0.5)
bn_momentum = self._params.get("bn_momentum", 0.95)
bn_epsilon = self._params.get("bn_epsilon", -1e8)
for index, params in enumerate(self._params["conv_layers"]):
layer = ConvBlock.create(
index=index,
conv_params=params,
regularizer=regularizer,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
cnn_dropout_prob=cnn_dropout_prob,
training=self.training
)
self.layers.append(layer)
linear_projection = tf.layers.Dense(
name="linear_projection",
units=self._params["output_size"],
use_bias=False,
kernel_regularizer=regularizer
)
self.layers.append(linear_projection)
def _encode(self, input_dict):
if not self.layers:
self._build_layers()
x = input_dict["source_tensors"][0]
text_len = input_dict["source_tensors"][1]
# Apply all layers
y = x
for layer in self.layers:
y = layer(y)
inputs_attention_bias = utils.get_padding_bias(x)
return {
"outputs": y,
"inputs_attention_bias": inputs_attention_bias,
"src_lengths": text_len
}
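# A minimal, illustrative params sketch for CentaurEncoder (not part of the
# original file). The conv_layers entries reuse the docstring example above;
# the vocabulary, embedding and output sizes are assumptions for illustration.
_EXAMPLE_CENTAUR_ENCODER_PARAMS = {
    "src_vocab_size": 128,
    "embedding_size": 256,
    "output_size": 256,
    "conv_layers": [
        {"kernel_size": [5], "stride": [1], "num_channels": 512, "padding": "SAME"},
        {"kernel_size": [5], "stride": [1], "num_channels": 512, "padding": "SAME"},
        {"kernel_size": [5], "stride": [1], "num_channels": 512, "padding": "SAME"},
    ],
}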
| OpenSeq2Seq-master | open_seq2seq/encoders/centaur_encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from six.moves import range
from open_seq2seq.parts.cnns.conv_blocks import conv_bn_actv
from .encoder import Encoder
def rnn_cell(rnn_cell_dim, layer_type, dropout_keep_prob=1.0):
"""Helper function that creates RNN cell."""
if layer_type == "layernorm_lstm":
# pylint: disable=no-member
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units=rnn_cell_dim, dropout_keep_prob=dropout_keep_prob)
else:
if layer_type == "lstm":
cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_cell_dim)
elif layer_type == "gru":
cell = tf.nn.rnn_cell.GRUCell(rnn_cell_dim)
elif layer_type == "cudnn_gru":
# pylint: disable=no-member
cell = tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(rnn_cell_dim)
elif layer_type == "cudnn_lstm":
# pylint: disable=no-member
cell = tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(rnn_cell_dim)
else:
raise ValueError("Error: not supported rnn type:{}".format(layer_type))
cell = tf.nn.rnn_cell.DropoutWrapper(
cell, output_keep_prob=dropout_keep_prob)
return cell
def row_conv(name, input_layer, batch, channels, width, activation_fn,
regularizer, training, data_format, bn_momentum, bn_epsilon):
"""Helper function that applies "row" or "in plane" convolution."""
if width < 2:
return input_layer
if data_format == 'channels_last':
x = tf.reshape(input_layer, [batch, -1, 1, channels])
else:
input_layer = tf.transpose(input_layer, [0, 2, 1]) # B C T
x = tf.reshape(input_layer, [batch, channels, -1, 1])
cast_back = False
if x.dtype.base_dtype == tf.float16:
x = tf.cast(x, tf.float32)
cast_back = True
filters = tf.get_variable(
name + '/w',
shape=[width, 1, channels, 1],
regularizer=regularizer,
dtype=tf.float32,
)
strides = [1, 1, 1, 1]
y = tf.nn.depthwise_conv2d(
name=name + '/conv',
input=x,
filter=filters,
strides=strides,
padding='SAME',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW',
)
bn = tf.layers.batch_normalization(
name="{}/bn".format(name),
inputs=y,
gamma_regularizer=regularizer,
training=training,
axis=-1 if data_format == 'channels_last' else 1,
momentum=bn_momentum,
epsilon=bn_epsilon,
)
output = activation_fn(bn)
if data_format == 'channels_first':
output = tf.transpose(output, [0, 2, 3, 1])
output = tf.reshape(output, [batch, -1, channels])
if cast_back:
output = tf.cast(output, tf.float16)
return output
class DeepSpeech2Encoder(Encoder):
"""DeepSpeech-2 like encoder."""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'dropout_keep_prob': float,
'conv_layers': list,
'activation_fn': None, # any valid callable
'num_rnn_layers': int,
'row_conv': bool,
'n_hidden': int,
'use_cudnn_rnn': bool,
'rnn_cell_dim': int,
'rnn_type': ['layernorm_lstm', 'lstm', 'gru',
'cudnn_gru', 'cudnn_lstm'],
'rnn_unidirectional': bool,
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'row_conv_width': int,
'data_format': ['channels_first', 'channels_last', 'BCTF', 'BTFC', 'BCFT', 'BFTC'],
'bn_momentum': float,
'bn_epsilon': float,
})
def __init__(self, params, model, name="ds2_encoder", mode='train'):
"""DeepSpeech-2 like encoder constructor.
See parent class for arguments description.
Config parameters:
    * **dropout_keep_prob** (float) --- keep probability for dropout.
* **conv_layers** (list) --- list with the description of convolutional
layers. For example::
"conv_layers": [
{
"kernel_size": [11, 41], "stride": [2, 2],
"num_channels": 32, "padding": "SAME",
},
{
"kernel_size": [11, 21], "stride": [1, 2],
"num_channels": 64, "padding": "SAME",
},
{
"kernel_size": [11, 21], "stride": [1, 2],
"num_channels": 96, "padding": "SAME",
},
]
* **activation_fn** --- activation function to use.
* **num_rnn_layers** --- number of RNN layers to use.
* **rnn_type** (string) --- could be "lstm", "gru", "cudnn_gru",
"cudnn_lstm" or "layernorm_lstm".
* **rnn_unidirectional** (bool) --- whether to use uni-directional or
bi-directional RNNs.
* **rnn_cell_dim** (int) --- dimension of RNN cells.
* **row_conv** (bool) --- whether to use a "row" ("in plane") convolutional
layer after RNNs.
* **row_conv_width** (int) --- width parameter for "row"
convolutional layer.
* **n_hidden** (int) --- number of hidden units for the last fully connected
layer.
* **data_format** (string) --- could be either
"channels_first", "channels_last", "BCTF", "BTFC", "BCFT", "BFTC".
Defaults to "channels_last".
* **bn_momentum** (float) --- momentum for batch norm. Defaults to 0.99.
* **bn_epsilon** (float) --- epsilon for batch norm. Defaults to 1e-3.
"""
super(DeepSpeech2Encoder, self).__init__(params, model, name, mode)
def _encode(self, input_dict):
"""Creates TensorFlow graph for DeepSpeech-2 like encoder.
Args:
input_dict (dict): input dictionary that has to contain
the following fields::
input_dict = {
"source_tensors": [
src_sequence (shape=[batch_size, sequence length, num features]),
src_length (shape=[batch_size])
]
}
Returns:
dict: dictionary with the following tensors::
{
'outputs': hidden state, shape=[batch_size, sequence length, n_hidden]
'src_length': tensor, shape=[batch_size]
}
"""
source_sequence, src_length = input_dict['source_tensors']
training = (self._mode == "train")
dropout_keep_prob = self.params['dropout_keep_prob'] if training else 1.0
regularizer = self.params.get('regularizer', None)
data_format = self.params.get('data_format', 'channels_last')
bn_momentum = self.params.get('bn_momentum', 0.99)
bn_epsilon = self.params.get('bn_epsilon', 1e-3)
input_layer = tf.expand_dims(source_sequence, axis=-1) # BTFC
# print("<<< input :", input_layer.get_shape().as_list())
batch_size = input_layer.get_shape().as_list()[0]
freq = input_layer.get_shape().as_list()[2]
# supported data_formats:
# BTFC = channel_last (legacy)
# BCTF = channel_first(legacy)
# BFTC
# BCFT
if data_format=='channels_last' or data_format=='BTFC':
layout = 'BTFC'
dformat = 'channels_last'
elif data_format=='channels_first' or data_format=='BCTF':
layout = 'BCTF'
dformat = 'channels_first'
elif data_format=='BFTC':
layout = 'BFTC'
dformat = 'channels_last'
elif data_format=='BCFT':
layout = 'BCFT'
dformat = 'channels_first'
else:
print("WARNING: unsupported data format: will use channels_last (BTFC) instead")
layout = 'BTFC'
dformat = 'channels_last'
#input_layer is BTFC
if layout == 'BCTF':
top_layer = tf.transpose(input_layer, [0, 3, 1, 2])
elif layout == 'BFTC':
top_layer = tf.transpose(input_layer, [0, 2, 1, 3])
elif layout == 'BCFT':
top_layer = tf.transpose(input_layer, [0, 3, 2, 1])
else:
top_layer = input_layer
# print("<<< pre-conv:", top_layer.get_shape().as_list())
# ----- Convolutional layers ---------------------------------------------
conv_layers = self.params['conv_layers']
for idx_conv in range(len(conv_layers)):
ch_out = conv_layers[idx_conv]['num_channels']
kernel_size = conv_layers[idx_conv]['kernel_size'] # [T,F] format
strides = conv_layers[idx_conv]['stride'] # [T,F] format
padding = conv_layers[idx_conv]['padding']
if padding == "VALID":
src_length = (src_length - kernel_size[0] + strides[0]) // strides[0]
freq = (freq - kernel_size[1] + strides[1]) // strides[1]
else:
src_length = (src_length + strides[0] - 1) // strides[0]
        freq = (freq + strides[1] - 1) // strides[1]
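      # Example of the arithmetic above (illustrative values): a 200-frame,
      # 161-bin spectrogram through a "SAME" conv with stride [2, 2] yields
      # ceil(200 / 2) = 100 frames and ceil(161 / 2) = 81 frequency bins.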
if layout == 'BFTC' or layout == 'BCFT':
kernel_size = kernel_size[::-1]
strides = strides[::-1]
# print(kernel_size, strides)
top_layer = conv_bn_actv(
layer_type="conv2d",
name="conv{}".format(idx_conv + 1),
inputs=top_layer,
filters=ch_out,
kernel_size=kernel_size,
activation_fn=self.params['activation_fn'],
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=dformat,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
)
# print(idx_conv, "++++", top_layer.get_shape().as_list())
# convert layout --> BTFC
# if data_format == 'channels_first':
# top_layer = tf.transpose(top_layer, [0, 2, 3, 1])
if layout == 'BCTF': # BCTF --> BTFC
top_layer = tf.transpose(top_layer, [0, 2, 3, 1])
elif layout == 'BFTC': # BFTC --> BTFC
top_layer = tf.transpose(top_layer, [0, 2, 1, 3])
elif layout == 'BCFT': # BCFT --> BTFC
top_layer = tf.transpose(top_layer, [0, 3, 2, 1])
# print(">>> post-conv:", top_layer.get_shape().as_list())
# reshape to [B, T, FxC]
f = top_layer.get_shape().as_list()[2]
c = top_layer.get_shape().as_list()[3]
fc = f * c
top_layer = tf.reshape(top_layer, [batch_size, -1, fc])
# ----- RNN ---------------------------------------------------------------
num_rnn_layers = self.params['num_rnn_layers']
if num_rnn_layers > 0:
rnn_cell_dim = self.params['rnn_cell_dim']
rnn_type = self.params['rnn_type']
if self.params['use_cudnn_rnn']:
# reshape to [B, T, C] --> [T, B, C]
rnn_input = tf.transpose(top_layer, [1, 0, 2])
if self.params['rnn_unidirectional']:
direction = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
else:
direction = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
if rnn_type == "cudnn_gru" or rnn_type == "gru":
# pylint: disable=no-member
rnn_block = tf.contrib.cudnn_rnn.CudnnGRU(
num_layers=num_rnn_layers,
num_units=rnn_cell_dim,
direction=direction,
dropout=1.0 - dropout_keep_prob,
dtype=rnn_input.dtype,
name="cudnn_gru",
)
elif rnn_type == "cudnn_lstm" or rnn_type == "lstm":
# pylint: disable=no-member
rnn_block = tf.contrib.cudnn_rnn.CudnnLSTM(
num_layers=num_rnn_layers,
num_units=rnn_cell_dim,
direction=direction,
dropout=1.0 - dropout_keep_prob,
dtype=rnn_input.dtype,
name="cudnn_lstm",
)
else:
raise ValueError(
"{} is not a valid rnn_type for cudnn_rnn layers".format(
rnn_type)
)
top_layer, state = rnn_block(rnn_input)
top_layer = tf.transpose(top_layer, [1, 0, 2])
else:
rnn_input = top_layer
multirnn_cell_fw = tf.nn.rnn_cell.MultiRNNCell(
[rnn_cell(rnn_cell_dim=rnn_cell_dim, layer_type=rnn_type,
dropout_keep_prob=dropout_keep_prob)
for _ in range(num_rnn_layers)]
)
if self.params['rnn_unidirectional']:
top_layer, state = tf.nn.dynamic_rnn(
cell=multirnn_cell_fw,
inputs=rnn_input,
sequence_length=src_length,
dtype=rnn_input.dtype,
time_major=False,
)
else:
multirnn_cell_bw = tf.nn.rnn_cell.MultiRNNCell(
[rnn_cell(rnn_cell_dim=rnn_cell_dim, layer_type=rnn_type,
dropout_keep_prob=dropout_keep_prob)
for _ in range(num_rnn_layers)]
)
top_layer, state = tf.nn.bidirectional_dynamic_rnn(
cell_fw=multirnn_cell_fw, cell_bw=multirnn_cell_bw,
inputs=rnn_input,
sequence_length=src_length,
dtype=rnn_input.dtype,
time_major=False
)
# concat 2 tensors [B, T, n_cell_dim] --> [B, T, 2*n_cell_dim]
top_layer = tf.concat(top_layer, 2)
# -- end of rnn------------------------------------------------------------
if self.params['row_conv']:
channels = top_layer.get_shape().as_list()[-1]
top_layer = row_conv(
name="row_conv",
input_layer=top_layer,
batch=batch_size,
channels=channels,
activation_fn=self.params['activation_fn'],
width=self.params['row_conv_width'],
regularizer=regularizer,
training=training,
data_format=data_format,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
)
# Reshape [B, T, C] --> [B*T, C]
c = top_layer.get_shape().as_list()[-1]
top_layer = tf.reshape(top_layer, [-1, c])
# --- hidden layer with clipped ReLU activation and dropout---------------
top_layer = tf.layers.dense(
inputs=top_layer,
units=self.params['n_hidden'],
kernel_regularizer=regularizer,
activation=self.params['activation_fn'],
name='fully_connected',
)
outputs = tf.nn.dropout(x=top_layer, keep_prob=dropout_keep_prob)
# reshape from [B*T,A] --> [B, T, A].
# Output shape: [batch_size, n_steps, n_hidden]
outputs = tf.reshape(
outputs,
[batch_size, -1, self.params['n_hidden']],
)
return {
'outputs': outputs,
'src_length': src_length,
}
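# A minimal, illustrative params sketch for DeepSpeech2Encoder (not part of the
# original file). The conv_layers reuse part of the docstring example above;
# the RNN sizes, activation and remaining values are assumptions chosen only to
# show the expected structure of the params dict.
_EXAMPLE_DS2_ENCODER_PARAMS = {
    "dropout_keep_prob": 0.95,
    "activation_fn": tf.nn.relu,
    "conv_layers": [
        {"kernel_size": [11, 41], "stride": [2, 2],
         "num_channels": 32, "padding": "SAME"},
        {"kernel_size": [11, 21], "stride": [1, 2],
         "num_channels": 64, "padding": "SAME"},
    ],
    "num_rnn_layers": 5,
    "rnn_type": "cudnn_gru",
    "rnn_cell_dim": 800,
    "rnn_unidirectional": False,
    "use_cudnn_rnn": True,
    "row_conv": False,
    "n_hidden": 1600,
}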
| OpenSeq2Seq-master | open_seq2seq/encoders/ds2_encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
import tensorflow as tf
from math import ceil
from open_seq2seq.parts.cnns.conv_blocks import conv_actv, conv_bn_actv
from .encoder import Encoder
def _get_receptive_field(kernel_size, blocks, layers_per_block):
dilations = [2 ** i for i in range(layers_per_block)]
return (kernel_size - 1) * blocks * sum(dilations) + 1
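# Example (illustrative values): with kernel_size=2, blocks=4 and
# layers_per_block=8, the dilations in each block are 1, 2, ..., 128
# (summing to 255), so the receptive field is (2 - 1) * 4 * 255 + 1 = 1021.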
def _mu_law_encode(signal, channels, dtype):
mu = tf.saturate_cast(channels - 1, dtype)
safe_audio_abs = tf.minimum(tf.abs(signal), 1.0)
magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
signal = tf.sign(signal) * magnitude
return tf.cast((signal + 1) / 2 * mu + 0.5, tf.int32)
def _mu_law_decode(output, channels):
mu = channels - 1
signal = 2 * (tf.to_float(output) / mu) - 1
magnitude = (1 / mu) * ((1 + mu)**abs(signal) - 1)
return tf.sign(signal) * magnitude
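# _mu_law_encode quantizes float audio in [-1, 1] into the integer bins
# {0, ..., channels - 1} with finer resolution near zero; _mu_law_decode is its
# approximate inverse. For example, with channels=256 a silent sample (0.0)
# encodes to bin 128 and decodes back to (approximately) 0.0.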
def conv_1x1(
layer_type, name, inputs, filters, strides, regularizer, training,
data_format):
"""
Defines a single 1x1 convolution for convenience
"""
return conv_actv(
layer_type=layer_type,
name=name,
inputs=inputs,
filters=filters,
kernel_size=1,
activation_fn=None,
strides=strides,
padding="SAME",
regularizer=regularizer,
training=training,
data_format=data_format,
)
def causal_conv_bn_actv(
layer_type, name, inputs, filters, kernel_size, activation_fn, strides,
padding, regularizer, training, data_format, bn_momentum, bn_epsilon,
dilation=1):
"""
Defines a single dilated causal convolutional layer with batch norm
"""
block = conv_bn_actv(
layer_type=layer_type,
name=name,
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
activation_fn=activation_fn,
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=data_format,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
dilation=dilation
)
# pad the left side of the time-series with an amount of zeros based on the
# dilation rate
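  # (e.g. kernel_size=2 with dilation=4 prepends 4 zero frames, so the output
  # at time t never depends on inputs later than t)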
block = tf.pad(block, [[0, 0], [dilation * (kernel_size - 1), 0], [0, 0]])
return block
def wavenet_conv_block(
layer_type, name, inputs, condition_filter, condition_gate, filters,
kernel_size, strides, padding, regularizer, training, data_format,
bn_momentum, bn_epsilon, layers_per_block):
"""
Defines a single WaveNet block using the architecture specified in the
original paper, including skip and residual connections
"""
skips = None
for layer in range(layers_per_block):
# split source along channels
source_shape = inputs.get_shape().as_list()
source_filter = inputs[:, :, 0:int(source_shape[2] / 2)]
source_gate = inputs[:, :, int(source_shape[2] / 2):]
dilation = 2 ** layer
source_filter = causal_conv_bn_actv(
layer_type=layer_type,
name="filter_{}_{}".format(name, layer),
inputs=source_filter,
filters=filters,
kernel_size=kernel_size,
activation_fn=None,
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=data_format,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
dilation=dilation
)
source_gate = causal_conv_bn_actv(
layer_type=layer_type,
name="gate_{}_{}".format(name, layer),
inputs=source_gate,
filters=filters,
kernel_size=kernel_size,
activation_fn=None,
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=data_format,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
dilation=dilation
)
if condition_filter is not None and condition_gate is not None:
source_filter = tf.tanh(tf.add(source_filter, condition_filter))
source_gate = tf.sigmoid(tf.add(source_gate, condition_gate))
else:
source_filter = tf.tanh(source_filter)
source_gate = tf.sigmoid(source_gate)
conv_feats = tf.multiply(source_filter, source_gate)
residual = conv_1x1(
layer_type=layer_type,
name="residual_1x1_{}_{}".format(name, layer),
inputs=conv_feats,
filters=filters,
strides=strides,
regularizer=regularizer,
training=training,
data_format=data_format
)
inputs = tf.add(inputs, residual)
skip = conv_1x1(
layer_type=layer_type,
name="skip_1x1_{}_{}".format(name, layer),
inputs=conv_feats,
filters=filters,
strides=strides,
regularizer=regularizer,
training=training,
data_format=data_format
)
if skips is None:
skips = skip
else:
skips = tf.add(skips, skip)
return inputs, skips
class WavenetEncoder(Encoder):
"""
WaveNet like encoder.
Consists of several blocks of dilated causal convolutions.
"""
@staticmethod
def get_required_params():
return dict(
Encoder.get_required_params(),
**{
"layer_type": str,
"kernel_size": int,
"strides": int,
"padding": str,
"blocks": int,
"layers_per_block": int,
"filters": int,
"quantization_channels": int
}
)
@staticmethod
def get_optional_params():
return dict(
Encoder.get_optional_params(),
**{
"data_format": str,
"bn_momentum": float,
"bn_epsilon": float
}
)
def __init__(self, params, model, name="wavenet_encoder", mode="train"):
"""
WaveNet like encoder constructor.
Config parameters:
* **layer_type** (str) --- type of convolutional layer, currently only
supports "conv1d"
* **kernel_size** (int) --- size of kernel
* **strides** (int) --- size of stride
* **padding** (str) --- padding, can be "SAME" or "VALID"
* **blocks** (int) --- number of dilation cycles
* **layers_per_block** (int) --- number of dilated convolutional layers in
each block
* **filters** (int) --- number of output channels
* **quantization_channels** (int) --- depth of mu-law quantized input
* **data_format** (string) --- could be either "channels_first" or
"channels_last". Defaults to "channels_last".
* **bn_momentum** (float) --- momentum for batch norm. Defaults to 0.1.
* **bn_epsilon** (float) --- epsilon for batch norm. Defaults to 1e-5.
"""
super(WavenetEncoder, self).__init__(params, model, name, mode)
def _encode(self, input_dict):
"""
Creates TensorFlow graph for WaveNet like encoder.
...
"""
training = (self._mode == "train" or self._mode == "eval")
if training:
source, src_length, condition, spec_length = input_dict["source_tensors"]
spec_offset = 0
else:
source, src_length, condition, spec_length, spec_offset = \
input_dict["source_tensors"]
regularizer = self.params.get("regularizer", None)
data_format = self.params.get("data_format", "channels_last")
if data_format != "channels_last":
source = tf.transpose(source, [0, 2, 1])
condition = tf.transpose(condition, [0, 2, 1])
dtype = self.params["dtype"]
layer_type = self.params["layer_type"]
kernel_size = self.params["kernel_size"]
strides = self.params["strides"]
padding = self.params["padding"]
blocks = self.params["blocks"]
layers_per_block = self.params["layers_per_block"]
filters = self.params["filters"]
quantization_channels = self.params["quantization_channels"]
bn_momentum = self.params.get("bn_momentum", 0.1)
bn_epsilon = self.params.get("bn_epsilon", 1e-5)
local_conditioning = self.params.get("local_conditioning", True)
receptive_field = _get_receptive_field(
kernel_size, blocks, layers_per_block
)
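# Note: assuming the standard WaveNet pattern of doubling dilations within each
# block (1, 2, 4, ...), the receptive field of the dilation stack grows roughly
# as blocks * (kernel_size - 1) * (2 ** layers_per_block - 1) samples; the exact
# value used here is whatever _get_receptive_field computes.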
# ----- Preprocessing -----------------------------------------------
encoded_inputs = _mu_law_encode(source, quantization_channels, dtype)
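# Mu-law companding maps each sample x in [-1, 1] to
#   f(x) = sign(x) * ln(1 + mu * |x|) / ln(1 + mu),  with mu = quantization_channels - 1,
# and then discretizes f(x) into quantization_channels integer bins, so the model
# predicts a categorical distribution over amplitude levels instead of a raw float.
# (This describes the standard mu-law transform; the exact implementation lives in
# _mu_law_encode.)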
if training:
# remove last sample to maintain causality
inputs = tf.slice(
encoded_inputs, [0, 0], [-1, tf.shape(encoded_inputs)[1] - 1]
)
else:
inputs = encoded_inputs
inputs = tf.one_hot(inputs, depth=quantization_channels, axis=-1)
inputs = tf.saturate_cast(inputs, dtype)
if local_conditioning:
# split condition along channels
condition_shape = condition.get_shape().as_list()
condition_filter = condition[:, :, 0:int(condition_shape[2] / 2)]
condition_gate = condition[:, :, int(condition_shape[2] / 2):]
condition_filter = conv_1x1(
layer_type=layer_type,
name="filter_condition",
inputs=condition_filter,
filters=filters,
strides=strides,
regularizer=regularizer,
training=training,
data_format=data_format
)
condition_gate = conv_1x1(
layer_type=layer_type,
name="gate_condition",
inputs=condition_gate,
filters=filters,
strides=strides,
regularizer=regularizer,
training=training,
data_format=data_format
)
if training:
# remove last sample to maintain causality
condition_filter = condition_filter[:, :-1, :]
condition_gate = condition_gate[:, :-1, :]
else:
# pad with zeros to align the condition to the source for
# autoregressive inference
zeros = tf.saturate_cast(
tf.zeros([condition_shape[0], receptive_field, filters]),
dtype
)
condition_filter = tf.concat([zeros, condition_filter], axis=1)
condition_gate = tf.concat([zeros, condition_gate], axis=1)
condition_filter = condition_filter[
:, spec_offset:spec_offset + receptive_field, :
]
condition_gate = condition_gate[
:, spec_offset:spec_offset + receptive_field, :
]
else:
condition_filter = None
condition_gate = None
# ----- Convolutional layers -----------------------------------------------
# first causal convolutional layer
inputs = causal_conv_bn_actv(
layer_type=layer_type,
name="preprocess",
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
activation_fn=None,
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=data_format,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
dilation=1
)
# dilation stack
skips = None
for block in range(blocks):
inputs, skip = wavenet_conv_block(
layer_type=layer_type,
name=block,
inputs=inputs,
condition_filter=condition_filter,
condition_gate=condition_gate,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=data_format,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
layers_per_block=layers_per_block
)
if skips is None:
skips = skip
else:
skips = tf.add(skips, skip)
outputs = tf.add(skips, inputs)
# postprocessing
outputs = tf.nn.relu(outputs)
outputs = conv_1x1(
layer_type=layer_type,
name="postprocess_1",
inputs=outputs,
filters=filters,
strides=strides,
regularizer=regularizer,
training=training,
data_format=data_format
)
outputs = tf.nn.relu(outputs)
outputs = conv_1x1(
layer_type=layer_type,
name="postprocess_2",
inputs=outputs,
filters=quantization_channels,
strides=strides,
regularizer=regularizer,
training=training,
data_format=data_format
)
if training:
# remove samples that would be predicted without the full receptive field
prediction = tf.slice(outputs, [0, receptive_field - 1, 0], [-1, -1, -1])
target_output = tf.slice(encoded_inputs, [0, receptive_field], [-1, -1])
else:
prediction = outputs
target_output = encoded_inputs
# decode the predicted signal as audio
audio = tf.argmax(tf.nn.softmax(outputs), axis=-1, output_type=tf.int32)
audio = tf.expand_dims(audio, -1)
audio = _mu_law_decode(audio, self.params["quantization_channels"])
audio = tf.cast(audio, tf.float32)
return { "logits": prediction, "outputs": [target_output, audio] }
| OpenSeq2Seq-master | open_seq2seq/encoders/wavenet_encoder.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import abc
import copy
import six
import tensorflow as tf
from open_seq2seq.optimizers.mp_wrapper import mp_regularizer_wrapper
from open_seq2seq.utils.utils import check_params, cast_types
@six.add_metaclass(abc.ABCMeta)
class Decoder:
"""Abstract class from which all decoders must inherit.
"""
@staticmethod
def get_required_params():
"""Static method with description of required parameters.
Returns:
dict:
Dictionary containing all the parameters that **have to** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {}
@staticmethod
def get_optional_params():
"""Static method with description of optional parameters.
Returns:
dict:
Dictionary containing all the parameters that **can** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {
'regularizer': None, # any valid TensorFlow regularizer
'regularizer_params': dict,
'initializer': None, # any valid TensorFlow initializer
'initializer_params': dict,
'dtype': [tf.float32, tf.float16, 'mixed'],
}
def __init__(self, params, model, name="decoder", mode='train'):
"""Decoder constructor.
Note that decoder constructors should not modify the TensorFlow graph; all
graph construction should happen in the :meth:`self._decode() <_decode>`
method.
Args:
params (dict): parameters describing the decoder.
All supported parameters are listed in :meth:`get_required_params`,
:meth:`get_optional_params` functions.
model (instance of a class derived from :class:`Model<models.model.Model>`):
parent model that created this decoder.
Could be None if no model access is required for the use case.
name (str): name for decoder variable scope.
mode (str): mode decoder is going to be run in.
Could be "train", "eval" or "infer".
Config parameters:
* **initializer** --- any valid TensorFlow initializer. If no initializer
is provided, model initializer will be used.
* **initializer_params** (dict) --- dictionary that will be passed to
initializer ``__init__`` method.
* **regularizer** --- any valid TensorFlow regularizer. If no regularizer
is provided, model regularizer will be used.
* **regularizer_params** (dict) --- dictionary that will be passed to
regularizer ``__init__`` method.
* **dtype** --- model dtype. Could be either ``tf.float16``, ``tf.float32``
or "mixed". For details see
:ref:`mixed precision training <mixed_precision>` section in docs. If no
dtype is provided, model dtype will be used.
"""
check_params(params, self.get_required_params(), self.get_optional_params())
self._params = copy.deepcopy(params)
self._model = model
if 'dtype' not in self._params:
if self._model:
self._params['dtype'] = self._model.params['dtype']
else:
self._params['dtype'] = tf.float32
self._name = name
self._mode = mode
self._compiled = False
def decode(self, input_dict):
"""Wrapper around :meth:`self._decode() <_decode>` method.
Here name, initializer and dtype are set in the variable scope and then
:meth:`self._decode() <_decode>` method is called.
Args:
input_dict (dict): see :meth:`self._decode() <_decode>` docs.
Returns:
see :meth:`self._decode() <_decode>` docs.
"""
if not self._compiled:
if 'regularizer' not in self._params:
if self._model and 'regularizer' in self._model.params:
self._params['regularizer'] = copy.deepcopy(
self._model.params['regularizer']
)
self._params['regularizer_params'] = copy.deepcopy(
self._model.params['regularizer_params']
)
if 'regularizer' in self._params:
init_dict = self._params.get('regularizer_params', {})
if self._params['regularizer'] is not None:
self._params['regularizer'] = self._params['regularizer'](**init_dict)
if self._params['dtype'] == 'mixed':
self._params['regularizer'] = mp_regularizer_wrapper(
self._params['regularizer'],
)
if self._params['dtype'] == 'mixed':
self._params['dtype'] = tf.float16
if 'initializer' in self.params:
init_dict = self.params.get('initializer_params', {})
initializer = self.params['initializer'](**init_dict)
else:
initializer = None
self._compiled = True
with tf.variable_scope(self._name, initializer=initializer,
dtype=self.params['dtype']):
return self._decode(self._cast_types(input_dict))
def _cast_types(self, input_dict):
"""This function performs automatic cast of all inputs to decoder dtype.
Args:
input_dict (dict): dictionary passed to :meth:`self._decode() <_decode>`
method.
Returns:
dict: same as input_dict, but with all Tensors cast to decoder dtype.
"""
return cast_types(input_dict, self.params['dtype'])
@abc.abstractmethod
def _decode(self, input_dict):
"""This is the main function which should construct decoder graph.
Typically, a decoder will take the hidden representation from the encoder as an input
and produce some output sequence as an output.
Args:
input_dict (dict): dictionary containing decoder inputs.
If the decoder is used with :class:`models.encoder_decoder` class,
``input_dict`` will have the following content::
{
"encoder_output": dictionary returned from encoder.encode() method
"target_tensors": data_layer.input_tensors['target_tensors']
}
Returns:
dict: dictionary of decoder outputs. Typically this will be just::
{
"logits": logits that will be passed to Loss
"outputs": list with actual decoded outputs, e.g. characters
instead of logits
}
"""
pass
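# A minimal, purely illustrative subclass sketch of the contract described above;
# the class and tensor names are placeholders, not part of the library:
#
#   class PassThroughDecoder(Decoder):
#     def _decode(self, input_dict):
#       logits = input_dict["encoder_output"]["outputs"]
#       return {"logits": logits, "outputs": [tf.argmax(logits, axis=-1)]}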
@property
def params(self):
"""Parameters used to construct the decoder (dictionary)"""
return self._params
@property
def mode(self):
"""Mode decoder is run in."""
return self._mode
@property
def name(self):
"""Decoder name."""
return self._name
| OpenSeq2Seq-master | open_seq2seq/decoders/decoder.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.parts.rnns.attention_wrapper import BahdanauAttention, \
LuongAttention, \
LocationSensitiveAttention, \
AttentionWrapper
from open_seq2seq.parts.rnns.rnn_beam_search_decoder import BeamSearchDecoder
from open_seq2seq.parts.rnns.utils import single_cell
from open_seq2seq.parts.rnns.helper import TrainingHelper, GreedyEmbeddingHelper
from .decoder import Decoder
cells_dict = {
"lstm": tf.nn.rnn_cell.BasicLSTMCell,
"gru": tf.nn.rnn_cell.GRUCell,
}
class FullyConnected(tf.layers.Layer):
"""Fully connected layer
"""
def __init__(
self,
hidden_dims,
dropout_keep_prob=1.0,
mode='train',
name="fully_connected",
):
"""See parent class for arguments description.
Config parameters:
* **hidden_dims** (list) --- list of integers describing the hidden dimensions of a fully connected layer.
* **dropout_keep_prob** (float, optional) - dropout input keep probability.
"""
super(FullyConnected, self).__init__(name=name)
self.dense_layers = []
i = -1
for i in range(len(hidden_dims) - 1):
self.dense_layers.append(tf.layers.Dense(
name="{}_{}".format(name, i), units=hidden_dims[i], use_bias=True, activation=tf.nn.relu)
)
self.dense_layers.append(tf.layers.Dense(
name="{}_{}".format(name, i + 1), units=hidden_dims[i + 1], use_bias=True)
)
self.output_dim = hidden_dims[i + 1]
self.mode = mode
self.dropout_keep_prob = dropout_keep_prob
def call(self, inputs):
"""
Args:
inputs: Similar to tf.layers.Dense layer inputs. Internally calls a stack of dense layers.
"""
training = (self.mode == "train")
dropout_keep_prob = self.dropout_keep_prob if training else 1.0
for layer in self.dense_layers:
inputs = tf.nn.dropout(x=inputs, keep_prob=dropout_keep_prob)
inputs = layer(inputs)
return inputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
return tf.TensorShape([input_shape[0], self.output_dim])
class ListenAttendSpellDecoder(Decoder):
"""Listen Attend Spell like decoder with attention mechanism.
"""
@staticmethod
def get_required_params():
return dict(Decoder.get_required_params(), **{
'GO_SYMBOL': int, # symbol id
'END_SYMBOL': int, # symbol id
'tgt_vocab_size': int,
'tgt_emb_size': int,
'attention_params': dict,
'rnn_type': None,
'hidden_dim': int,
'num_layers': int,
})
@staticmethod
def get_optional_params():
return dict(Decoder.get_optional_params(), **{
'dropout_keep_prob': float,
'pos_embedding': bool,
'beam_width': int,
'use_language_model': bool,
})
def __init__(self, params, model, name='las_decoder', mode='train'):
"""Initializes decoder with embedding.
See parent class for arguments description.
Config parameters:
* **GO_SYMBOL** (int) --- GO symbol id, must be the same as used in
data layer.
* **END_SYMBOL** (int) --- END symbol id, must be the same as used in
data layer.
* **tgt_vocab_size** (int) --- vocabulary size of the targets to use for final softmax.
* **tgt_emb_size** (int) --- embedding size to use.
* **attention_params** (dict) - parameters for attention mechanism.
* **rnn_type** (String) - String indicating the rnn type. Accepts ['lstm', 'gru'].
* **hidden_dim** (int) - Hidden dimension to be used in the RNN decoder.
* **num_layers** (int) - Number of decoder RNN layers.
* **dropout_keep_prob** (float, optional) - dropout input keep probability.
* **pos_embedding** (bool, optional) - Whether to use encoder and decoder positional embedding. Default is False.
* **beam_width** (int, optional) - Beam width used while decoding with beam search. Uses greedy decoding if the value is set to 1. Default is 1.
* **use_language_model** (bool, optional) - Boolean indicating whether to use language model for decoding. Default is False.
"""
super(ListenAttendSpellDecoder, self).__init__(params, model, name, mode)
self.GO_SYMBOL = self.params['GO_SYMBOL']
self.END_SYMBOL = self.params['END_SYMBOL']
self._tgt_vocab_size = self.params['tgt_vocab_size']
self._tgt_emb_size = self.params['tgt_emb_size']
def _decode(self, input_dict):
"""Decodes representation into data.
Args:
input_dict (dict): Python dictionary with inputs to decoder.
Config parameters:
* **src_inputs** --- Decoder input Tensor of shape [batch_size, time, dim]
or [time, batch_size, dim].
* **src_lengths** --- Decoder input lengths Tensor of shape [batch_size]
* **tgt_inputs** --- Only during training. labels Tensor of the
shape [batch_size, time] or [time, batch_size].
* **tgt_lengths** --- Only during training. labels lengths
Tensor of the shape [batch_size].
Returns:
dict: Python dictionary with:
* outputs - [predictions, alignments, enc_src_lengths].
predictions are the final predictions of the model. tensor of shape [batch_size, time].
alignments are the attention probabilities if attention is used. None if 'plot_attention' in attention_params is set to False.
enc_src_lengths are the lengths of the input. tensor of shape [batch_size].
* logits - logits with the shape=[batch_size, output_dim].
* tgt_length - tensor of shape [batch_size] indicating the predicted sequence lengths.
"""
encoder_outputs = input_dict['encoder_output']['outputs']
enc_src_lengths = input_dict['encoder_output']['src_length']
self._batch_size = int(encoder_outputs.get_shape()[0])
self._beam_width = self.params.get("beam_width", 1)
tgt_inputs = None
tgt_lengths = None
if 'target_tensors' in input_dict:
tgt_inputs = input_dict['target_tensors'][0]
tgt_lengths = input_dict['target_tensors'][1]
tgt_inputs = tf.concat(
[tf.fill([self._batch_size, 1], self.GO_SYMBOL), tgt_inputs[:, :-1]], -1)
layer_type = self.params['rnn_type']
num_layers = self.params['num_layers']
attention_params = self.params['attention_params']
hidden_dim = self.params['hidden_dim']
dropout_keep_prob = self.params.get(
'dropout_keep_prob', 1.0) if self._mode == "train" else 1.0
# TODO: Separate encoder and decoder position embeddings
use_positional_embedding = self.params.get("pos_embedding", False)
use_language_model = self.params.get("use_language_model", False)
use_beam_search_decoder = (
self._beam_width != 1) and (self._mode == "infer")
self._target_emb_layer = tf.get_variable(
name='TargetEmbeddingMatrix',
shape=[self._tgt_vocab_size, self._tgt_emb_size],
dtype=tf.float32,
)
if use_positional_embedding:
self.enc_pos_emb_size = int(encoder_outputs.get_shape()[-1])
self.enc_pos_emb_layer = tf.get_variable(
name='EncoderPositionEmbeddingMatrix',
shape=[1024, self.enc_pos_emb_size],
dtype=tf.float32,
)
encoder_output_positions = tf.range(
0,
tf.shape(encoder_outputs)[1],
delta=1,
dtype=tf.int32,
name='positional_inputs'
)
encoder_position_embeddings = tf.cast(
tf.nn.embedding_lookup(
self.enc_pos_emb_layer, encoder_output_positions),
dtype=encoder_outputs.dtype
)
encoder_outputs += encoder_position_embeddings
self.dec_pos_emb_size = self._tgt_emb_size
self.dec_pos_emb_layer = tf.get_variable(
name='DecoderPositionEmbeddingMatrix',
shape=[1024, self.dec_pos_emb_size],
dtype=tf.float32,
)
output_projection_layer = FullyConnected(
[self._tgt_vocab_size],
dropout_keep_prob=dropout_keep_prob,
mode=self._mode,
)
rnn_cell = cells_dict[layer_type]
dropout = tf.nn.rnn_cell.DropoutWrapper
multirnn_cell = tf.nn.rnn_cell.MultiRNNCell(
[dropout(rnn_cell(hidden_dim),
output_keep_prob=dropout_keep_prob)
for _ in range(num_layers)]
)
if use_beam_search_decoder:
encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs,
multiplier=self._beam_width,
)
enc_src_lengths = tf.contrib.seq2seq.tile_batch(
enc_src_lengths,
multiplier=self._beam_width,
)
attention_dim = attention_params["attention_dim"]
attention_type = attention_params["attention_type"]
num_heads = attention_params["num_heads"]
plot_attention = attention_params["plot_attention"]
if plot_attention:
if use_beam_search_decoder:
plot_attention = False
print("Plotting Attention is disabled for Beam Search Decoding")
if num_heads != 1:
plot_attention = False
print("Plotting Attention is disabled for Multi Head Attention")
if self.params['dtype'] != tf.float32:
plot_attention = False
print("Plotting Attention is disabled for Mixed Precision Mode")
attention_params_dict = {}
if attention_type == "bahadanu":
AttentionMechanism = BahdanauAttention
attention_params_dict["normalize"] = False,
elif attention_type == "chorowski":
AttentionMechanism = LocationSensitiveAttention
attention_params_dict["use_coverage"] = attention_params["use_coverage"]
attention_params_dict["location_attn_type"] = attention_type
attention_params_dict["location_attention_params"] = {
'filters': 10, 'kernel_size': 101}
elif attention_type == "zhaopeng":
AttentionMechanism = LocationSensitiveAttention
attention_params_dict["use_coverage"] = attention_params["use_coverage"]
attention_params_dict["query_dim"] = hidden_dim
attention_params_dict["location_attn_type"] = attention_type
attention_mechanism = []
for head in range(num_heads):
attention_mechanism.append(
AttentionMechanism(
num_units=attention_dim,
memory=encoder_outputs,
memory_sequence_length=enc_src_lengths,
probability_fn=tf.nn.softmax,
dtype=tf.get_variable_scope().dtype,
**attention_params_dict
)
)
multirnn_cell_with_attention = AttentionWrapper(
cell=multirnn_cell,
attention_mechanism=attention_mechanism,
attention_layer_size=[hidden_dim for i in range(num_heads)],
output_attention=True,
alignment_history=plot_attention,
)
if self._mode == "train":
decoder_output_positions = tf.range(
0,
tf.shape(tgt_inputs)[1],
delta=1,
dtype=tf.int32,
name='positional_inputs'
)
tgt_input_vectors = tf.nn.embedding_lookup(
self._target_emb_layer, tgt_inputs)
if use_positional_embedding:
tgt_input_vectors += tf.nn.embedding_lookup(self.dec_pos_emb_layer,
decoder_output_positions)
tgt_input_vectors = tf.cast(
tgt_input_vectors,
dtype=self.params['dtype'],
)
# helper = tf.contrib.seq2seq.TrainingHelper(
helper = TrainingHelper(
inputs=tgt_input_vectors,
sequence_length=tgt_lengths,
)
elif self._mode == "infer" or self._mode == "eval":
embedding_fn = lambda ids: tf.cast(
tf.nn.embedding_lookup(self._target_emb_layer, ids),
dtype=self.params['dtype'],
)
pos_embedding_fn = None
if use_positional_embedding:
pos_embedding_fn = lambda ids: tf.cast(
tf.nn.embedding_lookup(self.dec_pos_emb_layer, ids),
dtype=self.params['dtype'],
)
# helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
helper = GreedyEmbeddingHelper(
embedding=embedding_fn,
start_tokens=tf.fill([self._batch_size], self.GO_SYMBOL),
end_token=self.END_SYMBOL,
positional_embedding=pos_embedding_fn
)
if self._mode != "infer":
maximum_iterations = tf.reduce_max(tgt_lengths)
else:
maximum_iterations = tf.reduce_max(enc_src_lengths)
if not use_beam_search_decoder:
decoder = tf.contrib.seq2seq.BasicDecoder(
cell=multirnn_cell_with_attention,
helper=helper,
initial_state=multirnn_cell_with_attention.zero_state(
batch_size=self._batch_size, dtype=encoder_outputs.dtype,
),
output_layer=output_projection_layer,
)
else:
batch_size_tensor = tf.constant(self._batch_size)
decoder = BeamSearchDecoder(
cell=multirnn_cell_with_attention,
embedding=embedding_fn,
start_tokens=tf.tile([self.GO_SYMBOL], [self._batch_size]),
end_token=self.END_SYMBOL,
initial_state=multirnn_cell_with_attention.zero_state(
dtype=encoder_outputs.dtype,
batch_size=batch_size_tensor * self._beam_width,
),
beam_width=self._beam_width,
output_layer=output_projection_layer,
length_penalty_weight=0.0,
)
final_outputs, final_state, final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(
decoder=decoder,
impute_finished=self.mode != "infer",
maximum_iterations=maximum_iterations,
)
if plot_attention:
alignments = tf.transpose(
final_state.alignment_history[0].stack(), [1, 0, 2]
)
else:
alignments = None
if not use_beam_search_decoder:
outputs = tf.argmax(final_outputs.rnn_output, axis=-1)
logits = final_outputs.rnn_output
return_outputs = [outputs, alignments, enc_src_lengths]
else:
outputs = final_outputs.predicted_ids[:, :, 0]
logits = final_outputs.predicted_ids[:, :, 0]
return_outputs = [outputs, enc_src_lengths]
if self.mode == "eval":
max_len = tf.reduce_max(tgt_lengths)
logits = tf.while_loop(
lambda logits: max_len > tf.shape(logits)[1],
lambda logits: tf.concat([logits, tf.fill(
[tf.shape(logits)[0], 1, tf.shape(logits)[2]], tf.cast(1.0, self.params['dtype']))], 1),
loop_vars=[logits],
back_prop=False,
)
return {
'outputs': return_outputs,
'logits': logits,
'tgt_length': final_sequence_lengths,
}
| OpenSeq2Seq-master | open_seq2seq/decoders/las_decoder.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
Tacotron2 decoder
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from tensorflow.python.framework import ops
from open_seq2seq.parts.rnns.utils import single_cell
from open_seq2seq.parts.rnns.attention_wrapper import BahdanauAttention, \
LocationSensitiveAttention, \
AttentionWrapper
from open_seq2seq.parts.tacotron.tacotron_helper import TacotronHelper, \
TacotronTrainingHelper
from open_seq2seq.parts.tacotron.tacotron_decoder import TacotronDecoder
from open_seq2seq.parts.cnns.conv_blocks import conv_bn_actv
from .decoder import Decoder
class Prenet():
"""
Fully connected prenet used in the decoder
"""
def __init__(
self,
num_units,
num_layers,
activation_fn=None,
dtype=None
):
"""Prenet initializer
Args:
num_units (int): number of units in the fully connected layer
num_layers (int): number of fully connected layers
activation_fn (callable): any valid activation function
dtype (dtype): the data format for this layer
"""
assert (
num_layers > 0
), "If the prenet is enabled, there must be at least 1 layer"
self.prenet_layers = []
self._output_size = num_units
for idx in range(num_layers):
self.prenet_layers.append(
tf.layers.Dense(
name="prenet_{}".format(idx + 1),
units=num_units,
activation=activation_fn,
use_bias=True,
dtype=dtype
)
)
def __call__(self, inputs):
"""
Applies the prenet to the inputs
"""
for layer in self.prenet_layers:
inputs = tf.layers.dropout(layer(inputs), rate=0.5, training=True)
return inputs
@property
def output_size(self):
return self._output_size
def add_regularization(self, regularizer):
"""
Adds regularization to all prenet kernels
"""
for layer in self.prenet_layers:
for weights in layer.trainable_variables:
if "bias" not in weights.name:
# print("Added regularizer to {}".format(weights.name))
if weights.dtype.base_dtype == tf.float16:
tf.add_to_collection(
'REGULARIZATION_FUNCTIONS', (weights, regularizer)
)
else:
tf.add_to_collection(
ops.GraphKeys.REGULARIZATION_LOSSES, regularizer(weights)
)
class Tacotron2Decoder(Decoder):
"""
Tacotron 2 Decoder
"""
@staticmethod
def get_required_params():
return dict(
Decoder.get_required_params(), **{
'attention_layer_size': int,
'attention_type': ['bahdanau', 'location', None],
'decoder_cell_units': int,
'decoder_cell_type': None,
'decoder_layers': int,
}
)
@staticmethod
def get_optional_params():
return dict(
Decoder.get_optional_params(), **{
'bahdanau_normalize': bool,
'time_major': bool,
'use_swap_memory': bool,
'enable_prenet': bool,
'prenet_layers': int,
'prenet_units': int,
'prenet_activation': None,
'enable_postnet': bool,
'postnet_conv_layers': list,
'postnet_bn_momentum': float,
'postnet_bn_epsilon': float,
'postnet_data_format': ['channels_first', 'channels_last'],
'postnet_keep_dropout_prob': float,
'mask_decoder_sequence': bool,
'attention_bias': bool,
'zoneout_prob': float,
'dropout_prob': float,
'parallel_iterations': int,
}
)
def __init__(self, params, model, name='tacotron_2_decoder', mode='train'):
"""Tacotron-2 like decoder constructor. A lot of optional configurations are
currently for testing. Not all configurations are supported. Use of the
default config is recommended.
See parent class for arguments description.
Config parameters:
* **attention_layer_size** (int) --- size of attention layer.
* **attention_type** (string) --- Determines whether attention mechanism to
use, should be one of 'bahdanau', 'location', or None.
Use of 'location'-sensitive attention is strongly recommended.
* **bahdanau_normalize** (bool) --- Whether to enable weight norm on the
attention parameters. Defaults to False.
* **decoder_cell_units** (int) --- dimension of decoder RNN cells.
* **decoder_layers** (int) --- number of decoder RNN layers to use.
* **decoder_cell_type** (callable) --- could be "lstm", "gru", "glstm", or
"slstm". Currently, only 'lstm' has been tested. Defaults to 'lstm'.
* **time_major** (bool) --- whether to output as time major or batch major.
Default is False for batch major.
* **use_swap_memory** (bool) --- default is False.
* **enable_prenet** (bool) --- whether to use the fully-connected prenet in
the decoder. Defaults to True
* **prenet_layers** (int) --- number of fully-connected layers to use.
Defaults to 2.
* **prenet_units** (int) --- number of units in each layer. Defaults to 256.
* **prenet_activation** (callable) --- activation function to use for the
prenet layers. Defaults to relu.
* **enable_postnet** (bool) --- whether to use the convolutional postnet in
the decoder. Defaults to True
* **postnet_conv_layers** (bool) --- list with the description of
convolutional layers. Must be passed if postnet is enabled
For example::
"postnet_conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "SAME",
"activation_fn": tf.nn.tanh
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 80, "padding": "SAME",
"activation_fn": None
}
]
* **postnet_bn_momentum** (float) --- momentum for batch norm.
Defaults to 0.1.
* **postnet_bn_epsilon** (float) --- epsilon for batch norm.
Defaults to 1e-5.
* **postnet_data_format** (string) --- could be either "channels_first" or
"channels_last". Defaults to "channels_last".
* **postnet_keep_dropout_prob** (float) --- keep probability for dropout in
the postnet conv layers. Defaults to 0.5.
* **mask_decoder_sequence** (bool) --- Defaults to True.
* **attention_bias** (bool) --- Whether to use a bias term when calculating
the attention. Only works for "location" attention. Defaults to False.
* **zoneout_prob** (float) --- zoneout probability for rnn layers.
Defaults to 0.
* **dropout_prob** (float) --- dropout probability for rnn layers.
Defaults to 0.1
* **parallel_iterations** (int) --- Number of parallel_iterations for
tf.while loop inside dynamic_decode. Defaults to 32.
"""
super(Tacotron2Decoder, self).__init__(params, model, name, mode)
self._model = model
self._n_feats = self._model.get_data_layer().params['num_audio_features']
if "both" in self._model.get_data_layer().params['output_type']:
self._both = True
if not self.params.get('enable_postnet', True):
raise ValueError(
"postnet must be enabled for both mode"
)
else:
self._both = False
def _build_attention(
self,
encoder_outputs,
encoder_sequence_length,
attention_bias,
):
"""
Builds Attention part of the graph.
Currently supports "bahdanau", and "location"
"""
with tf.variable_scope("AttentionMechanism"):
attention_depth = self.params['attention_layer_size']
if self.params['attention_type'] == 'location':
attention_mechanism = LocationSensitiveAttention(
num_units=attention_depth,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
probability_fn=tf.nn.softmax,
dtype=tf.get_variable_scope().dtype,
use_bias=attention_bias,
)
elif self.params['attention_type'] == 'bahdanau':
bah_normalize = self.params.get('bahdanau_normalize', False)
attention_mechanism = BahdanauAttention(
num_units=attention_depth,
memory=encoder_outputs,
normalize=bah_normalize,
memory_sequence_length=encoder_sequence_length,
probability_fn=tf.nn.softmax,
dtype=tf.get_variable_scope().dtype
)
else:
raise ValueError('Unknown Attention Type')
return attention_mechanism
def _decode(self, input_dict):
"""
Decodes representation into data
Args:
input_dict (dict): Python dictionary with inputs to decoder. Must define:
* src_inputs - decoder input Tensor of shape [batch_size, time, dim]
or [time, batch_size, dim]
* src_lengths - decoder input lengths Tensor of shape [batch_size]
* tgt_inputs - Only during training. labels Tensor of the
shape [batch_size, time, num_features] or
[time, batch_size, num_features]
* stop_token_inputs - Only during training. labels Tensor of the
shape [batch_size, time, 1] or [time, batch_size, 1]
* tgt_lengths - Only during training. labels lengths
Tensor of the shape [batch_size]
Returns:
dict:
A python dictionary containing:
* outputs - array containing:
* decoder_output - tensor of shape [batch_size, time,
num_features] or [time, batch_size, num_features]. Spectrogram
representation learned by the decoder rnn
* spectrogram_prediction - tensor of shape [batch_size, time,
num_features] or [time, batch_size, num_features]. Spectrogram
containing the residual corrections from the postnet if enabled
* alignments - tensor of shape [batch_size, time, memory_size]
or [time, batch_size, memory_size]. The alignments learned by
the attention layer
* stop_token_prediction - tensor of shape [batch_size, time, 1]
or [time, batch_size, 1]. The stop token predictions
* final_sequence_lengths - tensor of shape [batch_size]
* stop_token_predictions - tensor of shape [batch_size, time, 1]
or [time, batch_size, 1]. The stop token predictions for use inside
the loss function.
"""
encoder_outputs = input_dict['encoder_output']['outputs']
enc_src_lengths = input_dict['encoder_output']['src_length']
if self._mode == "train":
spec = input_dict['target_tensors'][0] if 'target_tensors' in \
input_dict else None
spec_length = input_dict['target_tensors'][2] if 'target_tensors' in \
input_dict else None
_batch_size = encoder_outputs.get_shape().as_list()[0]
training = (self._mode == "train")
regularizer = self.params.get('regularizer', None)
if self.params.get('enable_postnet', True):
if "postnet_conv_layers" not in self.params:
raise ValueError(
"postnet_conv_layers must be passed from config file if postnet is"
"enabled"
)
if self._both:
num_audio_features = self._n_feats["mel"]
if self._mode == "train":
spec, _ = tf.split(
spec,
[self._n_feats['mel'], self._n_feats['magnitude']],
axis=2
)
else:
num_audio_features = self._n_feats
output_projection_layer = tf.layers.Dense(
name="output_proj",
units=num_audio_features,
use_bias=True,
)
stop_token_projection_layer = tf.layers.Dense(
name="stop_token_proj",
units=1,
use_bias=True,
)
prenet = None
if self.params.get('enable_prenet', True):
prenet = Prenet(
self.params.get('prenet_units', 256),
self.params.get('prenet_layers', 2),
self.params.get("prenet_activation", tf.nn.relu),
self.params["dtype"]
)
cell_params = {}
cell_params["num_units"] = self.params['decoder_cell_units']
decoder_cells = [
single_cell(
cell_class=self.params['decoder_cell_type'],
cell_params=cell_params,
zoneout_prob=self.params.get("zoneout_prob", 0.),
dp_output_keep_prob=1.-self.params.get("dropout_prob", 0.1),
training=training,
) for _ in range(self.params['decoder_layers'])
]
if self.params['attention_type'] is not None:
attention_mechanism = self._build_attention(
encoder_outputs, enc_src_lengths,
self.params.get("attention_bias", False)
)
attention_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)
attentive_cell = AttentionWrapper(
cell=attention_cell,
attention_mechanism=attention_mechanism,
alignment_history=True,
output_attention="both",
)
decoder_cell = attentive_cell
if self.params['attention_type'] is None:
decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)
if self._mode == "train":
train_and_not_sampling = True
helper = TacotronTrainingHelper(
inputs=spec,
sequence_length=spec_length,
prenet=None,
model_dtype=self.params["dtype"],
mask_decoder_sequence=self.params.get("mask_decoder_sequence", True)
)
elif self._mode == "eval" or self._mode == "infer":
train_and_not_sampling = False
inputs = tf.zeros(
(_batch_size, 1, num_audio_features), dtype=self.params["dtype"]
)
helper = TacotronHelper(
inputs=inputs,
prenet=None,
mask_decoder_sequence=self.params.get("mask_decoder_sequence", True)
)
else:
raise ValueError("Unknown mode for decoder: {}".format(self._mode))
decoder = TacotronDecoder(
decoder_cell=decoder_cell,
helper=helper,
initial_decoder_state=decoder_cell.zero_state(
_batch_size, self.params["dtype"]
),
attention_type=self.params["attention_type"],
spec_layer=output_projection_layer,
stop_token_layer=stop_token_projection_layer,
prenet=prenet,
dtype=self.params["dtype"],
train=train_and_not_sampling
)
if self._mode == 'train':
maximum_iterations = tf.reduce_max(spec_length)
else:
maximum_iterations = tf.reduce_max(enc_src_lengths) * 10
outputs, final_state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(
# outputs, final_state, sequence_lengths, final_inputs = dynamic_decode(
decoder=decoder,
impute_finished=False,
maximum_iterations=maximum_iterations,
swap_memory=self.params.get("use_swap_memory", False),
output_time_major=self.params.get("time_major", False),
parallel_iterations=self.params.get("parallel_iterations", 32)
)
decoder_output = outputs.rnn_output
stop_token_logits = outputs.stop_token_output
with tf.variable_scope("decoder"):
# If we are in train and doing sampling, we need to do the projections
if train_and_not_sampling:
decoder_spec_output = output_projection_layer(decoder_output)
stop_token_logits = stop_token_projection_layer(decoder_spec_output)
decoder_output = decoder_spec_output
## Add the post net ##
if self.params.get('enable_postnet', True):
dropout_keep_prob = self.params.get('postnet_keep_dropout_prob', 0.5)
top_layer = decoder_output
for i, conv_params in enumerate(self.params['postnet_conv_layers']):
ch_out = conv_params['num_channels']
kernel_size = conv_params['kernel_size'] # [time, freq]
strides = conv_params['stride']
padding = conv_params['padding']
activation_fn = conv_params['activation_fn']
if ch_out == -1:
if self._both:
ch_out = self._n_feats["mel"]
else:
ch_out = self._n_feats
top_layer = conv_bn_actv(
layer_type="conv1d",
name="conv{}".format(i + 1),
inputs=top_layer,
filters=ch_out,
kernel_size=kernel_size,
activation_fn=activation_fn,
strides=strides,
padding=padding,
regularizer=regularizer,
training=training,
data_format=self.params.get('postnet_data_format', 'channels_last'),
bn_momentum=self.params.get('postnet_bn_momentum', 0.1),
bn_epsilon=self.params.get('postnet_bn_epsilon', 1e-5),
)
top_layer = tf.layers.dropout(
top_layer, rate=1. - dropout_keep_prob, training=training
)
else:
top_layer = tf.zeros(
[
_batch_size, maximum_iterations,
outputs.rnn_output.get_shape()[-1]
],
dtype=self.params["dtype"]
)
if regularizer and training:
vars_to_regularize = []
vars_to_regularize += attentive_cell.trainable_variables
vars_to_regularize += attention_mechanism.memory_layer.trainable_variables
vars_to_regularize += output_projection_layer.trainable_variables
vars_to_regularize += stop_token_projection_layer.trainable_variables
for weights in vars_to_regularize:
if "bias" not in weights.name:
# print("Added regularizer to {}".format(weights.name))
if weights.dtype.base_dtype == tf.float16:
tf.add_to_collection(
'REGULARIZATION_FUNCTIONS', (weights, regularizer)
)
else:
tf.add_to_collection(
ops.GraphKeys.REGULARIZATION_LOSSES, regularizer(weights)
)
if self.params.get('enable_prenet', True):
prenet.add_regularization(regularizer)
if self.params['attention_type'] is not None:
alignments = tf.transpose(
final_state.alignment_history.stack(), [1, 0, 2]
)
else:
alignments = tf.zeros([_batch_size, _batch_size, _batch_size])
spectrogram_prediction = decoder_output + top_layer
if self._both:
mag_spec_prediction = spectrogram_prediction
mag_spec_prediction = conv_bn_actv(
layer_type="conv1d",
name="conv_0",
inputs=mag_spec_prediction,
filters=256,
kernel_size=4,
activation_fn=tf.nn.relu,
strides=1,
padding="SAME",
regularizer=regularizer,
training=training,
data_format=self.params.get('postnet_data_format', 'channels_last'),
bn_momentum=self.params.get('postnet_bn_momentum', 0.1),
bn_epsilon=self.params.get('postnet_bn_epsilon', 1e-5),
)
mag_spec_prediction = conv_bn_actv(
layer_type="conv1d",
name="conv_1",
inputs=mag_spec_prediction,
filters=512,
kernel_size=4,
activation_fn=tf.nn.relu,
strides=1,
padding="SAME",
regularizer=regularizer,
training=training,
data_format=self.params.get('postnet_data_format', 'channels_last'),
bn_momentum=self.params.get('postnet_bn_momentum', 0.1),
bn_epsilon=self.params.get('postnet_bn_epsilon', 1e-5),
)
if self._model.get_data_layer()._exp_mag:
mag_spec_prediction = tf.exp(mag_spec_prediction)
mag_spec_prediction = tf.layers.conv1d(
mag_spec_prediction,
self._n_feats["magnitude"],
1,
name="post_net_proj",
use_bias=False,
)
else:
mag_spec_prediction = tf.zeros([_batch_size, _batch_size, _batch_size])
stop_token_prediction = tf.sigmoid(stop_token_logits)
outputs = [
decoder_output, spectrogram_prediction, alignments,
stop_token_prediction, sequence_lengths, mag_spec_prediction
]
return {
'outputs': outputs,
'stop_token_prediction': stop_token_logits,
}
| OpenSeq2Seq-master | open_seq2seq/decoders/tacotron2_decoder.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from .decoder import Decoder
class JointCTCAttentionDecoder(Decoder):
"""Joint CTC Attention like decoder.
Combines CTC and Attention based decoder.
Use only outputs from the Attention decoder during inference.
"""
@staticmethod
def get_required_params():
return dict(Decoder.get_required_params(), **{
'ctc_decoder': None,
'attn_decoder': None,
'attn_decoder_params': dict,
'ctc_decoder_params': dict,
'beam_search_params': dict,
'language_model_params': dict,
'GO_SYMBOL': int, # symbol id
'END_SYMBOL': int, # symbol id
'tgt_vocab_size': int,
})
@staticmethod
def get_optional_params():
return dict(Decoder.get_optional_params(), **{
})
def __init__(self, params, model, name='jca_decoder', mode='train'):
"""Initializes RNN decoder with embedding.
See parent class for arguments description.
Config parameters:
* **ctc_decoder** (any class derived from
:class:`Decoder <decoders.decoder.Decoder>`) --- CTC decoder class to use.
* **attn_decoder** (any class derived from
:class:`Decoder <decoders.decoder.Decoder>`) --- Attention decoder class to use.
* **attn_decoder_params** (dict) --- parameters for the attention decoder.
* **ctc_decoder_params** (dict) --- parameters for the ctc decoder.
* **beam_search_params** (dict) --- beam search parameters for decoding using the attention based decoder.
* **language_model_params** (dict) --- language model parameters for decoding with an external language model.
* **GO_SYMBOL** (int) --- GO symbol id, must be the same as used in
data layer.
* **END_SYMBOL** (int) --- END symbol id, must be the same as used in
data layer.
* **tgt_vocab_size** (int) --- vocabulary size of the targets to use for final softmax.
"""
super(JointCTCAttentionDecoder, self).__init__(params, model, name, mode)
self.ctc_params = self.params['ctc_decoder_params']
self.attn_params = self.params['attn_decoder_params']
self.beam_search_params = self.params['beam_search_params']
self.lang_model_params = self.params['language_model_params']
self.attn_params.update(self.beam_search_params)
self.attn_params.update(self.lang_model_params)
self.ctc_params['tgt_vocab_size'] = self.params['tgt_vocab_size'] - 1
self.attn_params['tgt_vocab_size'] = self.params['tgt_vocab_size']
self.attn_params['GO_SYMBOL'] = self.params['GO_SYMBOL']
self.attn_params['END_SYMBOL'] = self.params['END_SYMBOL']
self.ctc_decoder = self.params['ctc_decoder'](
params=self.ctc_params, mode=mode, model=model)
self.attn_decoder = self.params['attn_decoder'](
params=self.attn_params, mode=mode, model=model)
def _decode(self, input_dict):
"""Joint decoder that combines Attention and CTC outputs.
Args:
input_dict (dict): Python dictionary with inputs to decoder.
Config parameters:
* **src_inputs** --- Decoder input Tensor of shape [batch_size, time, dim]
* **src_lengths** --- Decoder input lengths Tensor of shape [batch_size]
* **tgt_inputs** --- Only during training. labels Tensor of the
shape [batch_size, time].
* **tgt_lengths** --- Only during training. label lengths
Tensor of the shape [batch_size].
Returns:
dict: Python dictionary with:
* outputs - tensor of shape [batch_size, time] from the Attention decoder
* seq_outputs - output dictionary from the Attention decoder
* ctc_outputs - output dictionary from the CTC decoder
"""
seq_outputs = self.attn_decoder.decode(input_dict=input_dict)
ctc_outputs = self.ctc_decoder.decode(input_dict=input_dict)
return {
'outputs': seq_outputs['outputs'],
'seq_outputs': seq_outputs,
'ctc_outputs': ctc_outputs,
}
| OpenSeq2Seq-master | open_seq2seq/decoders/jca_decoder.py |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
import math
from .decoder import Decoder
from open_seq2seq.parts.transformer import beam_search
from open_seq2seq.parts.transformer import embedding_layer
from open_seq2seq.parts.transformer.utils import get_padding
from open_seq2seq.parts.convs2s import ffn_wn_layer, conv_wn_layer, attention_wn_layer
from open_seq2seq.parts.convs2s.utils import gated_linear_units
# Default value used if max_input_length is not given
MAX_INPUT_LENGTH = 128
class ConvS2SDecoder(Decoder):
@staticmethod
def get_required_params():
"""Static method with description of required parameters.
Returns:
dict:
Dictionary containing all the parameters that **have to** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return dict(
Decoder.get_required_params(), **{
'batch_size': int,
'tgt_emb_size': int,
'tgt_vocab_size': int,
'shared_embed': bool,
'embedding_dropout_keep_prob': float,
'conv_nchannels_kwidth': list,
'hidden_dropout_keep_prob': float,
'out_dropout_keep_prob': float,
'beam_size': int,
'alpha': float,
'extra_decode_length': int,
'EOS_ID': int,
})
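# Note on "conv_nchannels_kwidth": as unpacked in _decode below, it is a list of
# (num_channels, kernel_width) pairs, one per convolutional layer, e.g.
# [(512, 3)] * 8 for eight layers with 512 channels and kernel width 3
# (the numbers are only an illustration).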
@staticmethod
def get_optional_params():
"""Static method with description of optional parameters.
Returns:
dict:
Dictionary containing all the parameters that **can** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return dict(
Decoder.get_optional_params(),
**{
'pad_embeddings_2_eight': bool,
# set the default to False later.
"pos_embed": bool,
# if not provided, tgt_emb_size is used as the default value
'out_emb_size': int,
'max_input_length': int,
'GO_SYMBOL': int,
'PAD_SYMBOL': int,
'END_SYMBOL': int,
'conv_activation': None,
'normalization_type': str,
'scaling_factor': float,
'init_var': None,
})
def _cast_types(self, input_dict):
return input_dict
def __init__(self, params, model, name="convs2s_decoder", mode='train'):
super(ConvS2SDecoder, self).__init__(params, model, name, mode)
self.embedding_softmax_layer = None
self.position_embedding_layer = None
self.layers = []
self._tgt_vocab_size = self.params['tgt_vocab_size']
self._tgt_emb_size = self.params['tgt_emb_size']
self._mode = mode
self._pad_sym = self.params.get('PAD_SYMBOL', 0)
self._pad2eight = params.get('pad_embeddings_2_eight', False)
self.scaling_factor = self.params.get("scaling_factor", math.sqrt(0.5))
self.normalization_type = self.params.get("normalization_type", "weight_norm")
self.conv_activation = self.params.get("conv_activation", gated_linear_units)
self.max_input_length = self.params.get("max_input_length", MAX_INPUT_LENGTH)
self.init_var = self.params.get('init_var', None)
self.regularizer = self.params.get('regularizer', None)
def _decode(self, input_dict):
targets = input_dict['target_tensors'][0] \
if 'target_tensors' in input_dict else None
encoder_outputs = input_dict['encoder_output']['outputs']
encoder_outputs_b = input_dict['encoder_output'].get(
'outputs_b', encoder_outputs)
inputs_attention_bias = input_dict['encoder_output'].get(
'inputs_attention_bias_cs2s', None)
with tf.name_scope("decode"):
# prepare decoder layers
if len(self.layers) == 0:
knum_list = list(zip(*self.params.get("conv_nchannels_kwidth")))[0]
kwidth_list = list(zip(*self.params.get("conv_nchannels_kwidth")))[1]
# preparing embedding layers
with tf.variable_scope("embedding"):
if 'embedding_softmax_layer' in input_dict['encoder_output'] \
and self.params['shared_embed']:
self.embedding_softmax_layer = \
input_dict['encoder_output']['embedding_softmax_layer']
else:
self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights(
vocab_size=self._tgt_vocab_size,
hidden_size=self._tgt_emb_size,
pad_vocab_to_eight=self._pad2eight,
init_var=0.1,
embed_scale=False,
pad_sym=self._pad_sym,
mask_paddings=True)
if self.params.get("pos_embed", True):
with tf.variable_scope("pos_embedding"):
if 'position_embedding_layer' in input_dict['encoder_output'] \
and self.params['shared_embed']:
self.position_embedding_layer = \
input_dict['encoder_output']['position_embedding_layer']
else:
self.position_embedding_layer = embedding_layer.EmbeddingSharedWeights(
vocab_size=self.max_input_length,
hidden_size=self._tgt_emb_size,
pad_vocab_to_eight=self._pad2eight,
init_var=0.1,
embed_scale=False,
pad_sym=self._pad_sym,
mask_paddings=True)
else:
self.position_embedding_layer = None
# linear projection before cnn layers
self.layers.append(
ffn_wn_layer.FeedFowardNetworkNormalized(
self._tgt_emb_size,
knum_list[0],
dropout=self.params["embedding_dropout_keep_prob"],
var_scope_name="linear_mapping_before_cnn_layers",
mode=self.mode,
normalization_type=self.normalization_type,
regularizer=self.regularizer,
init_var=self.init_var)
)
for i in range(len(knum_list)):
in_dim = knum_list[i] if i == 0 else knum_list[i - 1]
out_dim = knum_list[i]
# linear projection is needed for residual connections if
# input and output of a cnn layer do not match
if in_dim != out_dim:
linear_proj = ffn_wn_layer.FeedFowardNetworkNormalized(
in_dim,
out_dim,
var_scope_name="linear_mapping_cnn_" + str(i + 1),
dropout=1.0,
mode=self.mode,
normalization_type=self.normalization_type,
regularizer=self.regularizer,
init_var=self.init_var,
)
else:
linear_proj = None
conv_layer = conv_wn_layer.Conv1DNetworkNormalized(
in_dim,
out_dim,
kernel_width=kwidth_list[i],
mode=self.mode,
layer_id=i + 1,
hidden_dropout=self.params["hidden_dropout_keep_prob"],
conv_padding="VALID",
decode_padding=True,
activation=self.conv_activation,
normalization_type=self.normalization_type,
regularizer=self.regularizer,
init_var=self.init_var
)
att_layer = attention_wn_layer.AttentionLayerNormalized(
out_dim,
embed_size=self._tgt_emb_size,
layer_id=i + 1,
add_res=True,
mode=self.mode,
normalization_type=self.normalization_type,
scaling_factor=self.scaling_factor,
regularizer=self.regularizer,
init_var=self.init_var
)
self.layers.append([linear_proj, conv_layer, att_layer])
# linear projection after cnn layers
self.layers.append(
ffn_wn_layer.FeedFowardNetworkNormalized(
knum_list[-1],
self.params.get("out_emb_size", self._tgt_emb_size),
dropout=1.0,
var_scope_name="linear_mapping_after_cnn_layers",
mode=self.mode,
normalization_type=self.normalization_type,
regularizer=self.regularizer,
init_var=self.init_var))
if not self.params['shared_embed']:
self.layers.append(
ffn_wn_layer.FeedFowardNetworkNormalized(
self.params.get("out_emb_size", self._tgt_emb_size),
self._tgt_vocab_size,
dropout=self.params["out_dropout_keep_prob"],
var_scope_name="linear_mapping_to_vocabspace",
mode=self.mode,
normalization_type=self.normalization_type,
regularizer=self.regularizer,
init_var=self.init_var))
else:
# if embedding is shared,
# the shared embedding is used as the final linear projection to vocab space
self.layers.append(None)
if targets is None:
return self.predict(encoder_outputs, encoder_outputs_b,
inputs_attention_bias)
else:
logits = self.decode_pass(targets, encoder_outputs, encoder_outputs_b,
inputs_attention_bias)
return {
"logits": logits,
"outputs": [tf.argmax(logits, axis=-1)],
"final_state": None,
"final_sequence_lengths": None
}
def decode_pass(self, targets, encoder_outputs, encoder_outputs_b,
inputs_attention_bias):
"""Generate logits for each value in the target sequence.
Args:
targets: target values for the output sequence.
int tensor with shape [batch_size, target_length]
encoder_outputs: continuous representation of input sequence.
float tensor with shape [batch_size, input_length, hidden_size]
encoder_outputs_b: continuous representation of input sequence
which includes the source embeddings.
float tensor with shape [batch_size, input_length, hidden_size]
inputs_attention_bias: float tensor with shape [batch_size, 1, input_length]
Returns:
float32 tensor with shape [batch_size, target_length, vocab_size]
"""
# Prepare inputs to decoder layers by applying embedding
# and adding positional encoding.
decoder_inputs = self.embedding_softmax_layer(targets)
if self.position_embedding_layer is not None:
with tf.name_scope("add_pos_encoding"):
pos_input = tf.range(
0,
tf.shape(decoder_inputs)[1],
delta=1,
dtype=tf.int32,
name='range')
pos_encoding = self.position_embedding_layer(pos_input)
decoder_inputs = decoder_inputs + tf.cast(
x=pos_encoding, dtype=decoder_inputs.dtype)
if self.mode == "train":
decoder_inputs = tf.nn.dropout(decoder_inputs,
self.params["embedding_dropout_keep_prob"])
# mask the paddings in the target
inputs_padding = get_padding(
targets, padding_value=self._pad_sym, dtype=decoder_inputs.dtype)
decoder_inputs *= tf.expand_dims(1.0 - inputs_padding, 2)
# do decode
logits = self._call(
decoder_inputs=decoder_inputs,
encoder_outputs_a=encoder_outputs,
encoder_outputs_b=encoder_outputs_b,
input_attention_bias=inputs_attention_bias)
return logits
def _call(self, decoder_inputs, encoder_outputs_a, encoder_outputs_b,
input_attention_bias):
# run input into the decoder layers and returns the logits
target_embed = decoder_inputs
with tf.variable_scope("linear_layer_before_cnn_layers"):
outputs = self.layers[0](decoder_inputs)
for i in range(1, len(self.layers) - 2):
linear_proj, conv_layer, att_layer = self.layers[i]
with tf.variable_scope("layer_%d" % i):
if linear_proj is not None:
res_inputs = linear_proj(outputs)
else:
res_inputs = outputs
with tf.variable_scope("conv_layer"):
outputs = conv_layer(outputs)
with tf.variable_scope("attention_layer"):
outputs = att_layer(outputs, target_embed, encoder_outputs_a,
encoder_outputs_b, input_attention_bias)
outputs = (outputs + res_inputs) * self.scaling_factor
with tf.variable_scope("linear_layer_after_cnn_layers"):
outputs = self.layers[-2](outputs)
if self.mode == "train":
outputs = tf.nn.dropout(outputs, self.params["out_dropout_keep_prob"])
with tf.variable_scope("pre_softmax_projection"):
if self.layers[-1] is None:
logits = self.embedding_softmax_layer.linear(outputs)
else:
logits = self.layers[-1](outputs)
return tf.cast(logits, dtype=tf.float32)
def predict(self, encoder_outputs, encoder_outputs_b, inputs_attention_bias):
"""Return predicted sequence."""
batch_size = tf.shape(encoder_outputs)[0]
input_length = tf.shape(encoder_outputs)[1]
max_decode_length = input_length + self.params["extra_decode_length"]
symbols_to_logits_fn = self._get_symbols_to_logits_fn()
# Create initial set of IDs that will be passed into symbols_to_logits_fn.
initial_ids = tf.zeros(
[batch_size], dtype=tf.int32) + self.params["GO_SYMBOL"]
cache = {}
# Add encoder outputs and attention bias to the cache.
cache["encoder_outputs"] = encoder_outputs
cache["encoder_outputs_b"] = encoder_outputs_b
if inputs_attention_bias is not None:
cache["inputs_attention_bias"] = inputs_attention_bias
# Use beam search to find the top beam_size sequences and scores.
decoded_ids, scores = beam_search.sequence_beam_search(
symbols_to_logits_fn=symbols_to_logits_fn,
initial_ids=initial_ids,
initial_cache=cache,
vocab_size=self.params["tgt_vocab_size"],
beam_size=self.params["beam_size"],
alpha=self.params["alpha"],
max_decode_length=max_decode_length,
eos_id=self.params["EOS_ID"])
# Get the top sequence for each batch element
top_decoded_ids = decoded_ids[:, 0, :]
top_scores = scores[:, 0]
# this isn't particularly efficient
logits = self.decode_pass(top_decoded_ids, encoder_outputs,
encoder_outputs_b, inputs_attention_bias)
return {
"logits": logits,
"outputs": [top_decoded_ids],
"final_state": None,
"final_sequence_lengths": None
}
def _get_symbols_to_logits_fn(self):
"""Returns a decoding function that calculates logits of the next tokens."""
def symbols_to_logits_fn(ids, i, cache):
"""Generate logits for next potential IDs.
Args:
ids: Current decoded sequences.
int tensor with shape [batch_size * beam_size, i - 1]
i: Loop index
cache: dictionary of values storing the encoder output, encoder-decoder
attention bias, and previous decoder attention values.
Returns:
Tuple of
(logits with shape [batch_size * beam_size, vocab_size],
updated cache values)
"""
# pass the decoded ids from the beginning up to the current step into the decoder
# not efficient
decoder_outputs = self.decode_pass(ids, cache.get("encoder_outputs"),
cache.get("encoder_outputs_b"),
cache.get("inputs_attention_bias"))
logits = decoder_outputs[:, i, :]
return logits, cache
return symbols_to_logits_fn
| OpenSeq2Seq-master | open_seq2seq/decoders/convs2s_decoder.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
This package contains various decoders.
A Decoder typically takes representation and produces data.
"""
from .decoder import Decoder
from .fc_decoders import FullyConnectedCTCDecoder, FullyConnectedDecoder, FullyConnectedSCDecoder
from .rnn_decoders import RNNDecoderWithAttention, \
BeamSearchRNNDecoderWithAttention
from .transformer_decoder import TransformerDecoder
from .convs2s_decoder import ConvS2SDecoder
from .lm_decoders import FakeDecoder
from .tacotron2_decoder import Tacotron2Decoder
from .las_decoder import ListenAttendSpellDecoder
from .jca_decoder import JointCTCAttentionDecoder
from .centaur_decoder import CentaurDecoder | OpenSeq2Seq-master | open_seq2seq/decoders/__init__.py |
# Copyright (c) 2018 NVIDIA Corporation
"""This module defines various fully-connected decoders (consisting of one
fully connected layer).
These classes are usually used for models that are not really
sequence-to-sequence and thus should be artificially split into encoder and
decoder by cutting, for example, on the last fully-connected layer.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import os
import tensorflow as tf
from .decoder import Decoder
class FullyConnectedDecoder(Decoder):
"""Simple decoder consisting of one fully-connected layer.
"""
@staticmethod
def get_required_params():
return dict(Decoder.get_required_params(), **{
'output_dim': int,
})
def __init__(self, params, model,
name="fully_connected_decoder", mode='train'):
"""Fully connected decoder constructor.
See parent class for arguments description.
Config parameters:
* **output_dim** (int) --- output dimension.
"""
super(FullyConnectedDecoder, self).__init__(params, model, name, mode)
def _decode(self, input_dict):
"""This method performs linear transformation of input.
Args:
input_dict (dict): input dictionary that has to contain
the following fields::
input_dict = {
'encoder_output': {
'outputs': output of encoder (shape=[batch_size, num_features])
}
}
Returns:
dict: dictionary with the following tensors::
{
'logits': logits with the shape=[batch_size, output_dim]
'outputs': [logits] (same as logits but wrapped in list)
}
"""
inputs = input_dict['encoder_output']['outputs']
regularizer = self.params.get('regularizer', None)
# activation is linear by default
logits = tf.layers.dense(
inputs=inputs,
units=self.params['output_dim'],
kernel_regularizer=regularizer,
name='fully_connected',
)
return {'logits': logits, 'outputs': [logits]}
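# A minimal, hypothetical config snippet showing how FullyConnectedDecoder
# could be selected in a model description; the surrounding keys follow the
# usual OpenSeq2Seq convention but are an illustration, not taken from a
# shipped config:
#
#   "decoder": FullyConnectedDecoder,
#   "decoder_params": {
#       "output_dim": 10,
#   },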
class FullyConnectedTimeDecoder(Decoder):
"""Fully connected decoder that operates on inputs with time dimension.
That is, input shape should be ``[batch size, time length, num features]``.
"""
@staticmethod
def get_required_params():
return dict(Decoder.get_required_params(), **{
'tgt_vocab_size': int,
})
@staticmethod
def get_optional_params():
return dict(Decoder.get_optional_params(), **{
'logits_to_outputs_func': None, # user defined function
'infer_logits_to_pickle': bool,
})
def __init__(self, params, model,
name="fully_connected_time_decoder", mode='train'):
"""Fully connected time decoder constructor.
See parent class for arguments description.
Config parameters:
* **tgt_vocab_size** (int) --- target vocabulary size, i.e. number of
output features.
* **logits_to_outputs_func** --- function that maps produced logits to
decoder outputs, i.e. actual text sequences.
"""
super(FullyConnectedTimeDecoder, self).__init__(params, model, name, mode)
def _decode(self, input_dict):
"""Creates TensorFlow graph for fully connected time decoder.
Args:
input_dict (dict): input dictionary that has to contain
the following fields::
input_dict = {
'encoder_output': {
"outputs": tensor with shape [batch_size, time length, hidden dim]
"src_length": tensor with shape [batch_size]
}
}
Returns:
dict: dictionary with the following tensors::
{
'logits': logits with the shape=[time length, batch_size, tgt_vocab_size]
'outputs': logits_to_outputs_func(logits, input_dict)
}
"""
inputs = input_dict['encoder_output']['outputs']
regularizer = self.params.get('regularizer', None)
batch_size, _, n_hidden = inputs.get_shape().as_list()
# reshape from [B, T, A] --> [B*T, A].
# Output shape: [n_steps * batch_size, n_hidden]
inputs = tf.reshape(inputs, [-1, n_hidden])
# activation is linear by default
logits = tf.layers.dense(
inputs=inputs,
units=self.params['tgt_vocab_size'],
kernel_regularizer=regularizer,
name='fully_connected',
)
logits = tf.reshape(
logits,
[batch_size, -1, self.params['tgt_vocab_size']],
name="logits",
)
# converting to time_major=True shape
if not(self._mode=='infer' and self.params.get('infer_logits_to_pickle')):
logits = tf.transpose(logits, [1, 0, 2])
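    # Illustrative shape trace (hypothetical sizes): with batch_size=16,
    # 200 time steps and tgt_vocab_size=29, the dense layer maps
    # [16 * 200, n_hidden] -> [16 * 200, 29] and the reshape restores
    # [16, 200, 29]; when the transpose above is applied (i.e. outside the
    # infer-to-pickle case), logits become the time-major [200, 16, 29]
    # layout expected by the CTC loss and decoders.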
if 'logits_to_outputs_func' in self.params:
outputs = self.params['logits_to_outputs_func'](logits, input_dict)
return {
'outputs': outputs,
'logits': logits,
'src_length': input_dict['encoder_output']['src_length'],
}
return {'logits': logits,
'src_length': input_dict['encoder_output']['src_length']}
class FullyConnectedCTCDecoder(FullyConnectedTimeDecoder):
"""Fully connected time decoder that provides a CTC-based text
generation (either with or without language model). If language model is not
used, ``tf.nn.ctc_greedy_decoder`` will be used as text generation method.
"""
@staticmethod
def get_required_params():
return FullyConnectedTimeDecoder.get_required_params()
@staticmethod
def get_optional_params():
return dict(FullyConnectedTimeDecoder.get_optional_params(), **{
'use_language_model': bool,
'decoder_library_path': str,
'beam_width': int,
'alpha': float,
'beta': float,
'trie_weight': float,
'lm_path': str,
'trie_path': str,
'alphabet_config_path': str,
})
def __init__(self, params, model,
name="fully_connected_ctc_decoder", mode='train'):
"""Fully connected CTC decoder constructor.
See parent class for arguments description.
Config parameters:
* **use_language_model** (bool) --- whether to use language model for
output text generation. If False, other config parameters are not used.
* **decoder_library_path** (string) --- path to the ctc decoder with
language model library.
* **lm_path** (string) --- path to the language model file.
* **trie_path** (string) --- path to the prefix trie file.
* **alphabet_config_path** (string) --- path to the alphabet file.
* **beam_width** (int) --- beam width for beam search.
* **alpha** (float) --- weight that is assigned to language model
probabilities.
* **beta** (float) --- weight that is assigned to the
word count.
* **trie_weight** (float) --- weight for prefix tree vocabulary
based character level rescoring.
"""
super(FullyConnectedCTCDecoder, self).__init__(params, model, name, mode)
self.params['use_language_model'] = self.params.get('use_language_model',
False)
if self.params['use_language_model']:
# creating decode_with_lm function if it is compiled
lib_path = self.params['decoder_library_path']
if not os.path.exists(os.path.abspath(lib_path)):
raise IOError('Can\'t find the decoder with language model library. '
'Make sure you have built it and '
'check that you provide the correct '
'path in the --decoder_library_path parameter.')
custom_op_module = tf.load_op_library(lib_path)
def decode_with_lm(logits, decoder_input,
beam_width=self.params['beam_width'],
top_paths=1, merge_repeated=False):
sequence_length = decoder_input['encoder_output']['src_length']
if logits.dtype.base_dtype != tf.float32:
logits = tf.cast(logits, tf.float32)
decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = (
custom_op_module.ctc_beam_search_decoder_with_lm(
logits, sequence_length, beam_width=beam_width,
model_path=self.params['lm_path'], trie_path=self.params['trie_path'],
alphabet_path=self.params['alphabet_config_path'],
alpha=self.params['alpha'],
beta=self.params['beta'],
trie_weight=self.params.get('trie_weight', 0.1),
top_paths=top_paths, merge_repeated=merge_repeated,
)
)
return [tf.SparseTensor(decoded_ixs[0], decoded_vals[0],
decoded_shapes[0])]
self.params['logits_to_outputs_func'] = decode_with_lm
else:
def decode_without_lm(logits, decoder_input, merge_repeated=True):
if logits.dtype.base_dtype != tf.float32:
logits = tf.cast(logits, tf.float32)
decoded, neg_sum_logits = tf.nn.ctc_greedy_decoder(
logits, decoder_input['encoder_output']['src_length'],
merge_repeated,
)
return decoded
self.params['logits_to_outputs_func'] = decode_without_lm
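# A hypothetical config sketch for this CTC decoder; the parameter names come
# from get_required_params()/get_optional_params() above, but the concrete
# values and file paths are placeholders only:
#
#   "decoder": FullyConnectedCTCDecoder,
#   "decoder_params": {
#       "tgt_vocab_size": 29,
#       "use_language_model": False,
#       # with use_language_model=True one would also provide, e.g.,
#       # "decoder_library_path", "lm_path", "trie_path",
#       # "alphabet_config_path", "beam_width", "alpha", "beta".
#   },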
class FullyConnectedSCDecoder(Decoder):
"""Fully connected decoder constructor for speech commands.
"""
@staticmethod
def get_required_params():
return dict(Decoder.get_required_params(), **{
'output_dim': int,
})
def __init__(self, params, model,
name="fully_connected_decoder", mode='train'):
"""Fully connected decoder constructor.
See parent class for arguments description.
Config parameters:
* **output_dim** (int) --- output dimension.
"""
super(FullyConnectedSCDecoder, self).__init__(params, model, name, mode)
def _decode(self, input_dict):
"""This method performs linear transformation of input.
Args:
input_dict (dict): input dictionary that has to contain
the following fields::
input_dict = {
'encoder_output': {
'outputs': output of encoder (shape=[batch_size, num_features])
}
}
Returns:
dict: dictionary with the following tensors::
{
'logits': logits with the shape=[batch_size, output_dim]
'outputs': [logits] (same as logits but wrapped in list)
}
"""
inputs = input_dict['encoder_output']['outputs']
lengths = input_dict['encoder_output']['src_length']
regularizer = self.params.get('regularizer', None)
inputs = tf.layers.flatten(inputs=inputs)
# activation is linear by default
logits = tf.layers.dense(
inputs=inputs,
units=self.params['output_dim'],
kernel_regularizer=regularizer,
name='fully_connected',
)
return {'logits': logits, 'outputs': [logits]}
| OpenSeq2Seq-master | open_seq2seq/decoders/fc_decoders.py |
# This code is heavily based on the code from MLPerf
# https://github.com/mlperf/reference/tree/master/translation/tensorflow/transformer
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from six.moves import range
from open_seq2seq.parts.transformer import utils, attention_layer, \
ffn_layer, beam_search
from open_seq2seq.parts.transformer.common import PrePostProcessingWrapper, \
LayerNormalization, Transformer_BatchNorm
from .decoder import Decoder
class TransformerDecoder(Decoder):
@staticmethod
def get_required_params():
"""Static method with description of required parameters.
Returns:
dict:
Dictionary containing all the parameters that **have to** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return dict(Decoder.get_required_params(), **{
'EOS_ID': int,
'layer_postprocess_dropout': float,
'num_hidden_layers': int,
'hidden_size': int,
'num_heads': int,
'attention_dropout': float,
'relu_dropout': float,
'filter_size': int,
'batch_size': int,
'tgt_vocab_size': int,
'beam_size': int,
'alpha': float,
'extra_decode_length': int,
})
@staticmethod
def get_optional_params():
"""Static method with description of optional parameters.
Returns:
dict:
Dictionary containing all the parameters that **can** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return dict(Decoder.get_optional_params(), **{
'regularizer': None, # any valid TensorFlow regularizer
'regularizer_params': dict,
'initializer': None, # any valid TensorFlow initializer
'initializer_params': dict,
'GO_SYMBOL': int,
'PAD_SYMBOL': int,
'END_SYMBOL': int,
'norm_params': dict,
})
def _cast_types(self, input_dict):
return input_dict
def __init__(self, params, model,
name="transformer_decoder", mode='train'):
super(TransformerDecoder, self).__init__(params, model, name, mode)
self.embedding_softmax_layer = None
self.output_normalization = None
self._mode = mode
self.layers = []
    # In the original Transformer paper embeddings are shared between the
    # encoder and the decoder, and the final projection is transpose(E_weights);
    # we currently only support this behaviour.
self.params['shared_embed'] = True
self.norm_params = self.params.get("norm_params", {"type": "layernorm_L2" })
self.regularizer = self.params.get("regularizer", None)
    if self.regularizer is not None:
      self.regularizer_params = params.get("regularizer_params", {'scale': 0.0})
      self.regularizer = self.regularizer(self.regularizer_params['scale']) \
          if self.regularizer_params['scale'] > 0.0 else None
def _decode(self, input_dict):
if 'target_tensors' in input_dict:
targets = input_dict['target_tensors'][0]
else:
targets = None
encoder_outputs = input_dict['encoder_output']['outputs']
inputs_attention_bias = (
input_dict['encoder_output']['inputs_attention_bias']
)
self.embedding_softmax_layer = (
input_dict['encoder_output']['embedding_softmax_layer']
)
with tf.name_scope("decode"):
training = (self.mode == "train")
# prepare decoder layers
if len(self.layers) == 0:
for _ in range(self.params["num_hidden_layers"]):
self_attention_layer = attention_layer.SelfAttention(
hidden_size=self.params["hidden_size"],
num_heads=self.params["num_heads"],
attention_dropout=self.params["attention_dropout"],
train=training,
regularizer=self.regularizer
)
enc_dec_attention_layer = attention_layer.Attention(
hidden_size=self.params["hidden_size"],
num_heads=self.params["num_heads"],
attention_dropout=self.params["attention_dropout"],
train=training,
regularizer=self.regularizer
)
feed_forward_network = ffn_layer.FeedFowardNetwork(
hidden_size=self.params["hidden_size"],
filter_size=self.params["filter_size"],
relu_dropout=self.params["relu_dropout"],
train=training,
regularizer=self.regularizer
)
self.layers.append([
PrePostProcessingWrapper(self_attention_layer, self.params,
training),
PrePostProcessingWrapper(enc_dec_attention_layer, self.params,
training),
PrePostProcessingWrapper(feed_forward_network, self.params,
training)
])
print("Decoder:", self.norm_params["type"], self.mode)
if self.norm_params["type"] == "batch_norm":
self.output_normalization = Transformer_BatchNorm(
training=training,
params=self.norm_params)
else:
self.output_normalization = LayerNormalization(
hidden_size=self.params["hidden_size"],
params=self.norm_params)
if targets is None:
return self.predict(encoder_outputs, inputs_attention_bias)
else:
logits = self.decode_pass(targets, encoder_outputs,
inputs_attention_bias)
return {"logits": logits,
"outputs": [tf.argmax(logits, axis=-1)],
"final_state": None,
"final_sequence_lengths": None}
def _call(self, decoder_inputs, encoder_outputs, decoder_self_attention_bias,
attention_bias, cache=None):
for n, layer in enumerate(self.layers):
self_attention_layer = layer[0]
enc_dec_attention_layer = layer[1]
feed_forward_network = layer[2]
# Run inputs through the sublayers.
layer_name = "layer_%d" % n
layer_cache = cache[layer_name] if cache is not None else None
with tf.variable_scope(layer_name):
with tf.variable_scope("self_attention"):
# TODO: Figure out why this is needed
# decoder_self_attention_bias = tf.cast(x=decoder_self_attention_bias,
# dtype=decoder_inputs.dtype)
decoder_inputs = self_attention_layer(
decoder_inputs, decoder_self_attention_bias, cache=layer_cache,
)
with tf.variable_scope("encdec_attention"):
decoder_inputs = enc_dec_attention_layer(
decoder_inputs, encoder_outputs, attention_bias,
)
with tf.variable_scope("ffn"):
decoder_inputs = feed_forward_network(decoder_inputs)
return self.output_normalization(decoder_inputs)
def decode_pass(self, targets, encoder_outputs, inputs_attention_bias):
"""Generate logits for each value in the target sequence.
Args:
targets: target values for the output sequence.
int tensor with shape [batch_size, target_length]
encoder_outputs: continuous representation of input sequence.
float tensor with shape [batch_size, input_length, hidden_size]
inputs_attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
Returns:
float32 tensor with shape [batch_size, target_length, vocab_size]
"""
# Prepare inputs to decoder layers by shifting targets, adding positional
# encoding and applying dropout.
decoder_inputs = self.embedding_softmax_layer(targets)
with tf.name_scope("shift_targets"):
# Shift targets to the right, and remove the last element
decoder_inputs = tf.pad(
decoder_inputs, [[0, 0], [1, 0], [0, 0]],
)[:, :-1, :]
with tf.name_scope("add_pos_encoding"):
length = tf.shape(decoder_inputs)[1]
# decoder_inputs += utils.get_position_encoding(
# length, self.params["hidden_size"])
decoder_inputs += tf.cast(
utils.get_position_encoding(length, self.params["hidden_size"]),
dtype=self.params['dtype'],
)
if self.mode == "train":
decoder_inputs = tf.nn.dropout(decoder_inputs,
keep_prob = 1 - self.params["layer_postprocess_dropout"] )
# Run values
decoder_self_attention_bias = utils.get_decoder_self_attention_bias(length,
dtype = tf.float32
# dtype=self._params["dtype"]
)
# do decode
outputs = self._call(
decoder_inputs=decoder_inputs,
encoder_outputs=encoder_outputs,
decoder_self_attention_bias=decoder_self_attention_bias,
attention_bias=inputs_attention_bias,
)
logits = self.embedding_softmax_layer.linear(outputs)
return logits
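  # Illustrative shape trace through decode_pass (hypothetical sizes): with
  # batch_size=8, target_length=20 and hidden_size=512, targets [8, 20] are
  # embedded to [8, 20, 512], shifted right, combined with the positional
  # encoding, run through the decoder stack, and projected back to logits
  # [8, 20, tgt_vocab_size] by the shared embedding's linear() call.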
def _get_symbols_to_logits_fn(self, max_decode_length):
"""Returns a decoding function that calculates logits of the next tokens."""
timing_signal = utils.get_position_encoding(
max_decode_length + 1, self.params["hidden_size"],
)
decoder_self_attention_bias = utils.get_decoder_self_attention_bias(
max_decode_length, dtype = tf.float32
# dtype=self._params["dtype"]
)
def symbols_to_logits_fn(ids, i, cache):
"""Generate logits for next potential IDs.
Args:
ids: Current decoded sequences.
int tensor with shape [batch_size * beam_size, i + 1]
i: Loop index
cache: dictionary of values storing the encoder output, encoder-decoder
attention bias, and previous decoder attention values.
Returns:
Tuple of
(logits with shape [batch_size * beam_size, vocab_size],
updated cache values)
"""
# Set decoder input to the last generated IDs
decoder_input = ids[:, -1:]
# Preprocess decoder input by getting embeddings and adding timing signal.
decoder_input = self.embedding_softmax_layer(decoder_input)
decoder_input += tf.cast(x=timing_signal[i:i + 1],
dtype=decoder_input.dtype)
self_attention_bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1]
decoder_outputs = self._call(
decoder_input, cache.get("encoder_outputs"), self_attention_bias,
cache.get("encoder_decoder_attention_bias"), cache,
)
logits = self.embedding_softmax_layer.linear(decoder_outputs)
logits = tf.squeeze(logits, axis=[1])
return tf.cast(logits, tf.float32), cache
return symbols_to_logits_fn
def predict(self, encoder_outputs, encoder_decoder_attention_bias):
"""Return predicted sequence."""
batch_size = tf.shape(encoder_outputs)[0]
input_length = tf.shape(encoder_outputs)[1]
max_decode_length = input_length + self.params["extra_decode_length"]
symbols_to_logits_fn = self._get_symbols_to_logits_fn(max_decode_length)
# Create initial set of IDs that will be passed into symbols_to_logits_fn.
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
# Create cache storing decoder attention values for each layer.
cache = {
"layer_%d" % layer: {
"k": tf.zeros([batch_size, 0,
self.params["hidden_size"]],
dtype=encoder_outputs.dtype),
"v": tf.zeros([batch_size, 0,
self.params["hidden_size"]],
dtype=encoder_outputs.dtype),
} for layer in range(self.params["num_hidden_layers"])
}
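    # The cached "k"/"v" tensors start with a time dimension of 0; the
    # self-attention layer is expected to concatenate the keys/values of the
    # newly decoded position onto them at every call of symbols_to_logits_fn,
    # so past positions never have to be recomputed (this describes the
    # intended caching behaviour rather than guaranteeing it for every layer
    # implementation).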
# Add encoder output and attention bias to the cache.
cache["encoder_outputs"] = encoder_outputs
cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
# Use beam search to find the top beam_size sequences and scores.
decoded_ids, scores = beam_search.sequence_beam_search(
symbols_to_logits_fn=symbols_to_logits_fn,
initial_ids=initial_ids,
initial_cache=cache,
vocab_size=self.params["tgt_vocab_size"],
beam_size=self.params["beam_size"],
alpha=self.params["alpha"],
max_decode_length=max_decode_length,
eos_id=self.params["EOS_ID"],
)
# Get the top sequence for each batch element
top_decoded_ids = decoded_ids[:, 0, 1:]
# this isn't particularly efficient
logits = self.decode_pass(top_decoded_ids, encoder_outputs,
encoder_decoder_attention_bias)
return {"logits": logits,
"outputs": [top_decoded_ids],
"final_state": None,
"final_sequence_lengths": None}
| OpenSeq2Seq-master | open_seq2seq/decoders/transformer_decoder.py |
# Copyright (c) 2019 NVIDIA Corporation
import tensorflow as tf
from tensorflow.python.ops import math_ops
from open_seq2seq.parts.centaur import AttentionBlock
from open_seq2seq.parts.centaur import ConvBlock
from open_seq2seq.parts.centaur import Prenet
from open_seq2seq.parts.transformer import utils
from open_seq2seq.parts.transformer.common import LayerNormalization
from .decoder import Decoder
class CentaurDecoder(Decoder):
"""
Centaur decoder that consists of attention blocks
followed by convolutional layers.
"""
@staticmethod
def get_required_params():
return dict(Decoder.get_required_params(), **{
"prenet_layers": int,
"prenet_hidden_size": int,
"hidden_size": int,
"conv_layers": list,
"mag_conv_layers": None,
"attention_dropout": float,
"layer_postprocess_dropout": float
})
@staticmethod
def get_optional_params():
return dict(Decoder.get_optional_params(), **{
"prenet_activation_fn": None,
"prenet_dropout": float,
"prenet_use_inference_dropout": bool,
"cnn_dropout_prob": float,
"bn_momentum": float,
"bn_epsilon": float,
"reduction_factor": int,
"attention_layers": int,
"self_attention_conv_params": dict,
"attention_heads": int,
"attention_cnn_dropout_prob": float,
"window_size": int,
"back_step_size": int,
"force_layers": list
})
def __init__(self, params, model, name="centaur_decoder", mode="train"):
"""
Centaur decoder constructor.
See parent class for arguments description.
Config parameters:
* **prenet_layers** (int) --- number of fully-connected layers to use.
* **prenet_hidden_size** (int) --- number of units in each pre-net layer.
* **hidden_size** (int) --- dimensionality of hidden embeddings.
* **conv_layers** (list) --- list with the description of convolutional
layers. For example::
"conv_layers": [
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "VALID", "is_causal": True
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "VALID", "is_causal": True
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "VALID", "is_causal": True
},
{
"kernel_size": [5], "stride": [1],
"num_channels": 512, "padding": "VALID", "is_causal": True
}
]
* **mag_conv_layers** (list) --- list with the description of convolutional
layers to reconstruct magnitude.
* **attention_dropout** (float) --- dropout rate for attention layers.
* **layer_postprocess_dropout** (float) --- dropout rate for
transformer block sublayers.
* **prenet_activation_fn** (callable) --- activation function to use for the
      prenet layers. Defaults to relu.
* **prenet_dropout** (float) --- dropout rate for the pre-net. Defaults to 0.5.
* **prenet_use_inference_dropout** (bool) --- whether to use dropout during the inference.
Defaults to False.
    * **cnn_dropout_prob** (float) --- dropout probability for cnn layers.
Defaults to 0.5.
* **bn_momentum** (float) --- momentum for batch norm. Defaults to 0.95.
* **bn_epsilon** (float) --- epsilon for batch norm. Defaults to 1e-8.
* **reduction_factor** (int) --- number of frames to predict in a time.
Defaults to 1.
* **attention_layers** (int) --- number of attention blocks. Defaults to 4.
* **self_attention_conv_params** (dict) --- description of convolutional
layer inside attention blocks. Defaults to None.
* **attention_heads** (int) --- number of attention heads. Defaults to 1.
* **attention_cnn_dropout_prob** (float) --- dropout rate for convolutional
layers inside attention blocks. Defaults to 0.5.
* **window_size** (int) --- size of attention window for forcing
monotonic attention during the inference. Defaults to None.
* **back_step_size** (int) --- number of steps attention is allowed to
go back during the inference. Defaults to 0.
* **force_layers** (list) --- indices of layers where forcing of
monotonic attention should be enabled. Defaults to all layers.
"""
super(CentaurDecoder, self).__init__(params, model, name, mode)
data_layer_params = model.get_data_layer().params
n_feats = data_layer_params["num_audio_features"]
use_mag = "both" in data_layer_params["output_type"]
self.training = mode == "train"
self.prenet = None
self.linear_projection = None
self.attentions = []
self.output_normalization = None
self.conv_layers = []
self.mag_conv_layers = []
self.stop_token_projection_layer = None
self.mel_projection_layer = None
self.mag_projection_layer = None
self.n_mel = n_feats["mel"] if use_mag else n_feats
self.n_mag = n_feats["magnitude"] if use_mag else None
self.reduction_factor = params.get("reduction_factor", 1)
def _build_layers(self):
regularizer = self._params.get("regularizer", None)
inference_dropout = self._params.get("prenet_use_inference_dropout", False)
self.prenet = Prenet(
n_layers=self._params["prenet_layers"],
hidden_size=self._params["prenet_hidden_size"],
activation_fn=self._params.get("prenet_activation_fn", tf.nn.relu),
dropout=self._params.get("prenet_dropout", 0.5),
regularizer=regularizer,
training=self.training or inference_dropout,
dtype=self._params["dtype"]
)
cnn_dropout_prob = self._params.get("cnn_dropout_prob", 0.5)
bn_momentum = self._params.get("bn_momentum", 0.95)
    bn_epsilon = self._params.get("bn_epsilon", 1e-8)
self.linear_projection = tf.layers.Dense(
name="linear_projection",
units=self._params["hidden_size"],
use_bias=False,
kernel_regularizer=regularizer,
dtype=self._params["dtype"]
)
n_layers = self._params.get("attention_layers", 4)
n_heads = self._params.get("attention_heads", 1)
conv_params = self._params.get("self_attention_conv_params", None)
force_layers = self._params.get("force_layers", range(n_layers))
for index in range(n_layers):
window_size = None
if index in force_layers:
window_size = self._params.get("window_size", None)
attention = AttentionBlock(
name="attention_block_%d" % index,
hidden_size=self._params["hidden_size"],
attention_dropout=self._params["attention_dropout"],
layer_postprocess_dropout=self._params["layer_postprocess_dropout"],
regularizer=regularizer,
training=self.training,
cnn_dropout_prob=self._params.get("attention_cnn_dropout_prob", 0.5),
conv_params=conv_params,
n_heads=n_heads,
window_size=window_size,
back_step_size=self._params.get("back_step_size", None)
)
self.attentions.append(attention)
self.output_normalization = LayerNormalization(self._params["hidden_size"])
for index, params in enumerate(self._params["conv_layers"]):
if params["num_channels"] == -1:
params["num_channels"] = self.n_mel * self.reduction_factor
layer = ConvBlock.create(
index=index,
conv_params=params,
regularizer=regularizer,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
cnn_dropout_prob=cnn_dropout_prob,
training=self.training
)
self.conv_layers.append(layer)
for index, params in enumerate(self._params["mag_conv_layers"]):
if params["num_channels"] == -1:
params["num_channels"] = self.n_mag * self.reduction_factor
layer = ConvBlock.create(
index=index,
conv_params=params,
regularizer=regularizer,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
cnn_dropout_prob=cnn_dropout_prob,
training=self.training
)
self.mag_conv_layers.append(layer)
self.stop_token_projection_layer = tf.layers.Dense(
name="stop_token_projection",
units=1 * self.reduction_factor,
use_bias=True,
kernel_regularizer=regularizer
)
self.mel_projection_layer = tf.layers.Dense(
name="mel_projection",
units=self.n_mel * self.reduction_factor,
use_bias=True,
kernel_regularizer=regularizer
)
self.mag_projection_layer = tf.layers.Dense(
name="mag_projection",
units=self.n_mag * self.reduction_factor,
use_bias=True,
kernel_regularizer=regularizer
)
def _decode(self, input_dict):
self._build_layers()
if "target_tensors" in input_dict:
targets = input_dict["target_tensors"][0]
else:
targets = None
encoder_outputs = input_dict["encoder_output"]["outputs"]
attention_bias = input_dict["encoder_output"]["inputs_attention_bias"]
spec_length = None
if self.mode == "train" or self.mode == "eval":
spec_length = None
if "target_tensors" in input_dict:
spec_length = input_dict["target_tensors"][2]
if self.training:
return self._train(targets, encoder_outputs, attention_bias, spec_length)
return self._infer(encoder_outputs, attention_bias, spec_length)
def _decode_pass(self,
decoder_inputs,
encoder_outputs,
enc_dec_attention_bias,
sequence_lengths=None,
alignment_positions=None):
y = self.prenet(decoder_inputs)
y = self.linear_projection(y)
with tf.variable_scope("decoder_pos_encoding"):
pos_encoding = self._positional_encoding(y, self.params["dtype"])
y += pos_encoding
with tf.variable_scope("encoder_pos_encoding"):
pos_encoding = self._positional_encoding(encoder_outputs, self.params["dtype"])
encoder_outputs += pos_encoding
for i, attention in enumerate(self.attentions):
positions = None
if alignment_positions is not None:
positions = alignment_positions[i, :, :, :]
y = attention(y, encoder_outputs, enc_dec_attention_bias, positions=positions)
y = self.output_normalization(y)
with tf.variable_scope("conv_layers"):
for layer in self.conv_layers:
y = layer(y)
stop_token_logits = self.stop_token_projection_layer(y)
mel_spec = self.mel_projection_layer(y)
with tf.variable_scope("mag_conv_layers"):
for layer in self.mag_conv_layers:
y = layer(y)
mag_spec = self.mag_projection_layer(y)
if sequence_lengths is None:
batch_size = tf.shape(y)[0]
sequence_lengths = tf.zeros([batch_size])
return {
"spec": mel_spec,
"post_net_spec": mel_spec,
"alignments": None,
"stop_token_logits": stop_token_logits,
"lengths": sequence_lengths,
"mag_spec": mag_spec
}
def _train(self, targets, encoder_outputs, enc_dec_attention_bias, sequence_lengths):
# Shift targets to the right, and remove the last element
with tf.name_scope("shift_targets"):
n_features = self.n_mel + self.n_mag
targets = targets[:, :, :n_features]
targets = self._shrink(targets, n_features, self.reduction_factor)
decoder_inputs = tf.pad(targets, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
outputs = self._decode_pass(
decoder_inputs=decoder_inputs,
encoder_outputs=encoder_outputs,
enc_dec_attention_bias=enc_dec_attention_bias,
sequence_lengths=sequence_lengths
)
with tf.variable_scope("alignments"):
weights = []
for index in range(len(self.attentions)):
op = "ForwardPass/centaur_decoder/attention_block_%d/attention/attention/attention_weights" % index
weights_operation = tf.get_default_graph().get_operation_by_name(op)
weight = weights_operation.values()[0]
weights.append(weight)
outputs["alignments"] = [tf.stack(weights)]
return self._convert_outputs(
outputs,
self.reduction_factor,
self._model.params["batch_size_per_gpu"]
)
def _infer(self, encoder_outputs, enc_dec_attention_bias, sequence_lengths):
if sequence_lengths is None:
maximum_iterations = self._model.get_data_layer()._params.get("duration_max", 1000)
else:
maximum_iterations = tf.reduce_max(sequence_lengths)
maximum_iterations //= self.reduction_factor
state, state_shape_invariants = self._inference_initial_state(
encoder_outputs,
enc_dec_attention_bias
)
state = tf.while_loop(
cond=self._inference_cond,
body=self._inference_step,
loop_vars=[state],
shape_invariants=state_shape_invariants,
back_prop=False,
maximum_iterations=maximum_iterations,
parallel_iterations=1
)
return self._convert_outputs(
state["outputs"],
self.reduction_factor,
self._model.params["batch_size_per_gpu"]
)
def _inference_initial_state(self, encoder_outputs, encoder_decoder_attention_bias):
"""Create initial state for inference."""
with tf.variable_scope("inference_initial_state"):
batch_size = tf.shape(encoder_outputs)[0]
      n_layers = self._params.get("attention_layers", 4)
n_heads = self._params.get("attention_heads", 1)
n_features = self.n_mel + self.n_mag
state = {
"iteration": tf.constant(0),
"inputs": tf.zeros([batch_size, 1, n_features * self.reduction_factor]),
"finished": tf.cast(tf.zeros([batch_size]), tf.bool),
"alignment_positions": tf.zeros([n_layers, batch_size, n_heads, 1], dtype=tf.int32),
"outputs": {
"spec": tf.zeros([batch_size, 0, self.n_mel * self.reduction_factor]),
"post_net_spec": tf.zeros([batch_size, 0, self.n_mel * self.reduction_factor]),
"alignments": [
tf.zeros([0, 0, 0, 0, 0])
],
"stop_token_logits": tf.zeros([batch_size, 0, 1 * self.reduction_factor]),
"lengths": tf.zeros([batch_size], dtype=tf.int32),
"mag_spec": tf.zeros([batch_size, 0, self.n_mag * self.reduction_factor])
},
"encoder_outputs": encoder_outputs,
"encoder_decoder_attention_bias": encoder_decoder_attention_bias
}
state_shape_invariants = {
"iteration": tf.TensorShape([]),
"inputs": tf.TensorShape([None, None, n_features * self.reduction_factor]),
"finished": tf.TensorShape([None]),
"alignment_positions": tf.TensorShape([n_layers, None, n_heads, None]),
"outputs": {
"spec": tf.TensorShape([None, None, self.n_mel * self.reduction_factor]),
"post_net_spec": tf.TensorShape([None, None, self.n_mel * self.reduction_factor]),
"alignments": [
tf.TensorShape([None, None, None, None, None]),
],
"stop_token_logits": tf.TensorShape([None, None, 1 * self.reduction_factor]),
"lengths": tf.TensorShape([None]),
"mag_spec": tf.TensorShape([None, None, None])
},
"encoder_outputs": encoder_outputs.shape,
"encoder_decoder_attention_bias": encoder_decoder_attention_bias.shape
}
return state, state_shape_invariants
def _inference_cond(self, state):
"""Check if it's time to stop inference."""
with tf.variable_scope("inference_cond"):
all_finished = math_ops.reduce_all(state["finished"])
return tf.logical_not(all_finished)
def _inference_step(self, state):
"""Make one inference step."""
decoder_inputs = state["inputs"]
encoder_outputs = state["encoder_outputs"]
enc_dec_attention_bias = state["encoder_decoder_attention_bias"]
alignment_positions = state["alignment_positions"]
outputs = self._decode_pass(
decoder_inputs=decoder_inputs,
encoder_outputs=encoder_outputs,
enc_dec_attention_bias=enc_dec_attention_bias,
alignment_positions=alignment_positions
)
with tf.variable_scope("inference_step"):
next_inputs_mel = outputs["post_net_spec"][:, -1:, :]
next_inputs_mel = self._expand(next_inputs_mel, self.reduction_factor)
next_inputs_mag = outputs["mag_spec"][:, -1:, :]
next_inputs_mag = self._expand(next_inputs_mag, self.reduction_factor)
next_inputs = tf.concat([next_inputs_mel, next_inputs_mag], axis=-1)
n_features = self.n_mel + self.n_mag
next_inputs = self._shrink(next_inputs, n_features, self.reduction_factor)
# Set zero if sequence is finished
next_inputs = tf.where(
state["finished"],
tf.zeros_like(next_inputs),
next_inputs
)
next_inputs = tf.concat([decoder_inputs, next_inputs], 1)
# Update lengths
lengths = state["outputs"]["lengths"]
lengths = tf.where(
state["finished"],
lengths,
lengths + 1 * self.reduction_factor
)
outputs["lengths"] = lengths
# Update spec, post_net_spec and mag_spec
for key in ["spec", "post_net_spec", "mag_spec"]:
output = outputs[key][:, -1:, :]
output = tf.where(state["finished"], tf.zeros_like(output), output)
outputs[key] = tf.concat([state["outputs"][key], output], 1)
# Update stop token logits
stop_token_logits = outputs["stop_token_logits"][:, -1:, :]
stop_token_logits = tf.where(
state["finished"],
tf.zeros_like(stop_token_logits) + 1e9,
stop_token_logits
)
stop_prediction = tf.sigmoid(stop_token_logits)
stop_prediction = tf.reduce_max(stop_prediction, axis=-1)
      # Use stop token predictions to mark sequences that have finished
      finished = tf.reshape(tf.cast(tf.round(stop_prediction), tf.bool), [-1])
stop_token_logits = tf.concat(
[state["outputs"]["stop_token_logits"], stop_token_logits],
axis=1
)
outputs["stop_token_logits"] = stop_token_logits
with tf.variable_scope("alignments"):
forward = "ForwardPass" if self.mode == "infer" else "ForwardPass_1"
weights = []
for index in range(len(self.attentions)):
op = forward + "/centaur_decoder/while/attention_block_%d/attention/attention/attention_weights" % index
weights_operation = tf.get_default_graph().get_operation_by_name(op)
weight = weights_operation.values()[0]
weights.append(weight)
weights = tf.stack(weights)
outputs["alignments"] = [weights]
alignment_positions = tf.argmax(
weights,
axis=-1,
output_type=tf.int32
)[:, :, :, -1:]
state["alignment_positions"] = tf.concat(
[state["alignment_positions"], alignment_positions],
axis=-1
)
state["iteration"] = state["iteration"] + 1
state["inputs"] = next_inputs
state["finished"] = finished
state["outputs"] = outputs
return state
@staticmethod
def _shrink(values, last_dim, reduction_factor):
"""Shrink the given input by reduction_factor."""
shape = tf.shape(values)
new_shape = [
shape[0],
shape[1] // reduction_factor,
last_dim * reduction_factor
]
values = tf.reshape(values, new_shape)
return values
@staticmethod
def _expand(values, reduction_factor):
"""Expand the given input by reduction_factor."""
shape = tf.shape(values)
new_shape = [
shape[0],
shape[1] * reduction_factor,
shape[2] // reduction_factor
]
values = tf.reshape(values, new_shape)
return values
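  # Worked example for _shrink/_expand (hypothetical sizes): with
  # reduction_factor=2, _shrink turns a [batch=4, time=100, last_dim=80]
  # spectrogram into [4, 50, 160], so the decoder predicts two frames per
  # step; _expand is the inverse and restores [4, 100, 80]. This is only an
  # illustration of the reshapes above; the actual sizes come from the config.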
@staticmethod
def _positional_encoding(x, dtype):
"""Add positional encoding to the given input."""
length = tf.shape(x)[1]
features_count = tf.shape(x)[2]
features_count += features_count % 2
pos_encoding = utils.get_position_encoding(length, features_count)
position_encoding = tf.cast(pos_encoding, dtype)
position_encoding = position_encoding[:, :features_count]
return position_encoding
@staticmethod
def _convert_outputs(outputs, reduction_factor, batch_size):
"""Convert output of the decoder to appropriate format."""
with tf.variable_scope("output_converter"):
for key in ["spec", "post_net_spec", "stop_token_logits", "mag_spec"]:
outputs[key] = CentaurDecoder._expand(outputs[key], reduction_factor)
alignments = []
for sample in range(batch_size):
alignments.append([outputs["alignments"][0][:, sample, :, :, :]])
return {
"outputs": [
outputs["spec"],
outputs["post_net_spec"],
alignments,
tf.sigmoid(outputs["stop_token_logits"]),
outputs["lengths"],
outputs["mag_spec"]
],
"stop_token_prediction": outputs["stop_token_logits"]
}
| OpenSeq2Seq-master | open_seq2seq/decoders/centaur_decoder.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
RNN-based decoders.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import copy
import tensorflow as tf
from open_seq2seq.parts.rnns.attention_wrapper import BahdanauAttention, \
LuongAttention, \
AttentionWrapper
from open_seq2seq.parts.rnns.gnmt import GNMTAttentionMultiCell, \
gnmt_residual_fn
from open_seq2seq.parts.rnns.rnn_beam_search_decoder import BeamSearchDecoder
from open_seq2seq.parts.rnns.utils import single_cell
from .decoder import Decoder
class RNNDecoderWithAttention(Decoder):
"""Typical RNN decoder with attention mechanism.
"""
@staticmethod
def get_required_params():
return dict(Decoder.get_required_params(), **{
'GO_SYMBOL': int, # symbol id
'END_SYMBOL': int, # symbol id
'tgt_vocab_size': int,
'tgt_emb_size': int,
'attention_layer_size': int,
'attention_type': ['bahdanau', 'luong', 'gnmt', 'gnmt_v2'],
'core_cell': None,
'decoder_layers': int,
'decoder_use_skip_connections': bool,
'batch_size': int,
})
@staticmethod
def get_optional_params():
return dict(Decoder.get_optional_params(), **{
'core_cell_params': dict,
'bahdanau_normalize': bool,
'luong_scale': bool,
'decoder_dp_input_keep_prob': float,
'decoder_dp_output_keep_prob': float,
'time_major': bool,
'use_swap_memory': bool,
'proj_size': int,
'num_groups': int,
'PAD_SYMBOL': int, # symbol id
'weight_tied': bool,
})
def __init__(self, params, model,
name='rnn_decoder_with_attention', mode='train'):
"""Initializes RNN decoder with embedding.
See parent class for arguments description.
Config parameters:
* **batch_size** (int) --- batch size.
* **GO_SYMBOL** (int) --- GO symbol id, must be the same as used in
data layer.
* **END_SYMBOL** (int) --- END symbol id, must be the same as used in
data layer.
* **tgt_emb_size** (int) --- embedding size to use.
* **core_cell_params** (dict) - parameters for RNN class
* **core_cell** (string) - RNN class.
* **decoder_dp_input_keep_prob** (float) - dropout input keep probability.
* **decoder_dp_output_keep_prob** (float) - dropout output keep probability.
* **decoder_use_skip_connections** (bool) - use residual connections or not.
* **attention_type** (string) - bahdanau, luong, gnmt or gnmt_v2.
* **bahdanau_normalize** (bool, optional) - whether to use normalization in
bahdanau attention.
* **luong_scale** (bool, optional) - whether to use scale in luong attention
* ... add any cell-specific parameters here as well.
"""
super(RNNDecoderWithAttention, self).__init__(params, model, name, mode)
self._batch_size = self.params['batch_size']
self.GO_SYMBOL = self.params['GO_SYMBOL']
self.END_SYMBOL = self.params['END_SYMBOL']
self._tgt_vocab_size = self.params['tgt_vocab_size']
self._tgt_emb_size = self.params['tgt_emb_size']
self._weight_tied = self.params.get('weight_tied', False)
def _build_attention(self,
encoder_outputs,
encoder_sequence_length):
"""Builds Attention part of the graph.
Currently supports "bahdanau" and "luong".
"""
with tf.variable_scope("AttentionMechanism"):
attention_depth = self.params['attention_layer_size']
if self.params['attention_type'] == 'bahdanau':
if 'bahdanau_normalize' in self.params:
bah_normalize = self.params['bahdanau_normalize']
else:
bah_normalize = False
attention_mechanism = BahdanauAttention(
num_units=attention_depth,
memory=encoder_outputs,
normalize=bah_normalize,
memory_sequence_length=encoder_sequence_length,
probability_fn=tf.nn.softmax,
dtype=tf.get_variable_scope().dtype
)
elif self.params['attention_type'] == 'luong':
if 'luong_scale' in self.params:
luong_scale = self.params['luong_scale']
else:
luong_scale = False
attention_mechanism = LuongAttention(
num_units=attention_depth,
memory=encoder_outputs,
scale=luong_scale,
memory_sequence_length=encoder_sequence_length,
probability_fn=tf.nn.softmax,
dtype=tf.get_variable_scope().dtype
)
elif self.params['attention_type'] == 'gnmt' or \
self.params['attention_type'] == 'gnmt_v2':
attention_mechanism = BahdanauAttention(
num_units=attention_depth,
memory=encoder_outputs,
normalize=True,
memory_sequence_length=encoder_sequence_length,
probability_fn=tf.nn.softmax,
dtype=tf.get_variable_scope().dtype
)
else:
raise ValueError('Unknown Attention Type')
return attention_mechanism
@staticmethod
def _add_residual_wrapper(cells, start_ind=1):
for idx, cell in enumerate(cells):
if idx >= start_ind:
cells[idx] = tf.contrib.rnn.ResidualWrapper( # pylint: disable=no-member
cell,
residual_fn=gnmt_residual_fn,
)
return cells
def _decode(self, input_dict):
"""Decodes representation into data.
Args:
input_dict (dict): Python dictionary with inputs to decoder.
Config parameters:
* **src_inputs** --- Decoder input Tensor of shape [batch_size, time, dim]
or [time, batch_size, dim]
* **src_lengths** --- Decoder input lengths Tensor of shape [batch_size]
* **tgt_inputs** --- Only during training. labels Tensor of the
shape [batch_size, time] or [time, batch_size].
* **tgt_lengths** --- Only during training. labels lengths
Tensor of the shape [batch_size].
Returns:
dict: Python dictionary with:
* final_outputs - tensor of shape [batch_size, time, dim]
or [time, batch_size, dim]
* final_state - tensor with decoder final state
* final_sequence_lengths - tensor of shape [batch_size, time]
or [time, batch_size]
"""
encoder_outputs = input_dict['encoder_output']['outputs']
enc_src_lengths = input_dict['encoder_output']['src_lengths']
tgt_inputs = input_dict['target_tensors'][0] if 'target_tensors' in \
input_dict else None
tgt_lengths = input_dict['target_tensors'][1] if 'target_tensors' in \
input_dict else None
self._output_projection_layer = tf.layers.Dense(
self._tgt_vocab_size, use_bias=False,
)
if not self._weight_tied:
self._dec_emb_w = tf.get_variable(
name='DecoderEmbeddingMatrix',
shape=[self._tgt_vocab_size, self._tgt_emb_size],
dtype=tf.float32
)
else:
fake_input = tf.zeros(shape=(1, self._tgt_emb_size))
fake_output = self._output_projection_layer.apply(fake_input)
with tf.variable_scope("dense", reuse=True):
dense_weights = tf.get_variable("kernel")
self._dec_emb_w = tf.transpose(dense_weights)
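      # Weight tying: the dummy forward pass above forces the projection layer
      # to build its [tgt_emb_size, tgt_vocab_size] kernel, which is then
      # reused (transposed to [tgt_vocab_size, tgt_emb_size]) as the decoder
      # embedding matrix, so the input embedding and the output projection
      # share parameters.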
if self._mode == "train":
dp_input_keep_prob = self.params['decoder_dp_input_keep_prob']
dp_output_keep_prob = self.params['decoder_dp_output_keep_prob']
else:
dp_input_keep_prob = 1.0
dp_output_keep_prob = 1.0
residual_connections = self.params['decoder_use_skip_connections']
# list of cells
cell_params = self.params.get('core_cell_params', {})
self._decoder_cells = [
single_cell(
cell_class=self.params['core_cell'],
        cell_params=cell_params,
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
# residual connections are added a little differently for GNMT
residual_connections=False if self.params['attention_type'].startswith('gnmt')
else residual_connections,
) for _ in range(self.params['decoder_layers'] - 1)
]
last_cell_params = copy.deepcopy(cell_params)
if self._weight_tied:
last_cell_params['num_units'] = self._tgt_emb_size
last_cell = single_cell(
cell_class=self.params['core_cell'],
cell_params=last_cell_params,
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
# residual connections are added a little differently for GNMT
residual_connections=False if self.params['attention_type'].startswith('gnmt')
else residual_connections,
)
self._decoder_cells.append(last_cell)
attention_mechanism = self._build_attention(
encoder_outputs,
enc_src_lengths,
)
if self.params['attention_type'].startswith('gnmt'):
attention_cell = self._decoder_cells.pop(0)
attention_cell = AttentionWrapper(
attention_cell,
attention_mechanism=attention_mechanism,
attention_layer_size=None,
output_attention=False,
name="gnmt_attention",
)
attentive_decoder_cell = GNMTAttentionMultiCell(
attention_cell,
self._add_residual_wrapper(self._decoder_cells) if residual_connections else self._decoder_cells,
use_new_attention=(self.params['attention_type'] == 'gnmt_v2'),
)
else:
attentive_decoder_cell = AttentionWrapper(
# pylint: disable=no-member
cell=tf.contrib.rnn.MultiRNNCell(self._decoder_cells),
attention_mechanism=attention_mechanism,
)
if self._mode == "train":
input_vectors = tf.cast(
tf.nn.embedding_lookup(self._dec_emb_w, tgt_inputs),
dtype=self.params['dtype'],
)
helper = tf.contrib.seq2seq.TrainingHelper( # pylint: disable=no-member
inputs=input_vectors,
sequence_length=tgt_lengths,
)
decoder = tf.contrib.seq2seq.BasicDecoder( # pylint: disable=no-member
cell=attentive_decoder_cell,
helper=helper,
output_layer=self._output_projection_layer,
initial_state=attentive_decoder_cell.zero_state(
self._batch_size, dtype=encoder_outputs.dtype,
),
)
elif self._mode == "infer" or self._mode == "eval":
embedding_fn = lambda ids: tf.cast(
tf.nn.embedding_lookup(self._dec_emb_w, ids),
dtype=self.params['dtype'],
)
# pylint: disable=no-member
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding=embedding_fn,
start_tokens=tf.fill([self._batch_size], self.GO_SYMBOL),
end_token=self.END_SYMBOL,
)
decoder = tf.contrib.seq2seq.BasicDecoder( # pylint: disable=no-member
cell=attentive_decoder_cell,
helper=helper,
initial_state=attentive_decoder_cell.zero_state(
batch_size=self._batch_size, dtype=encoder_outputs.dtype,
),
output_layer=self._output_projection_layer,
)
else:
raise ValueError(
"Unknown mode for decoder: {}".format(self._mode)
)
time_major = self.params.get("time_major", False)
use_swap_memory = self.params.get("use_swap_memory", False)
if self._mode == 'train':
maximum_iterations = tf.reduce_max(tgt_lengths)
else:
maximum_iterations = tf.reduce_max(enc_src_lengths) * 2
# pylint: disable=no-member
final_outputs, final_state, final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(
decoder=decoder,
impute_finished=True,
maximum_iterations=maximum_iterations,
swap_memory=use_swap_memory,
output_time_major=time_major,
)
return {'logits': final_outputs.rnn_output if not time_major else
tf.transpose(final_outputs.rnn_output, perm=[1, 0, 2]),
'outputs': [tf.argmax(final_outputs.rnn_output, axis=-1)],
'final_state': final_state,
'final_sequence_lengths': final_sequence_lengths}
class BeamSearchRNNDecoderWithAttention(RNNDecoderWithAttention):
"""
Beam search version of RNN-based decoder with attention.
Can be used only during Inference (mode=infer)
"""
@staticmethod
def get_optional_params():
return dict(RNNDecoderWithAttention.get_optional_params(), **{
'length_penalty': float,
'beam_width': int,
})
def __init__(self, params, model,
name="rnn_decoder_with_attention", mode='train'):
"""Initializes beam search decoder.
Args:
params(dict): dictionary with decoder parameters
Config parameters:
* **batch_size** --- batch size
* **GO_SYMBOL** --- GO symbol id, must be the same as used in data layer
* **END_SYMBOL** --- END symbol id, must be the same as used in data layer
* **tgt_vocab_size** --- vocabulary size of target
* **tgt_emb_size** --- embedding to use
    * **core_cell** --- RNN cell class to use
    * **core_cell_params** --- dict with parameters for the RNN cell
      (e.g. number of units)
* **decoder_dp_input_keep_prob** ---
* **decoder_dp_output_keep_prob** ---
* **decoder_use_skip_connections** --- use residual connections or not
* **attention_type** --- bahdanau, luong, gnmt, gnmt_v2
* **bahdanau_normalize** --- (optional)
* **luong_scale** --- (optional)
* **mode** --- train or infer
... add any cell-specific parameters here as well
"""
super(BeamSearchRNNDecoderWithAttention, self).__init__(
params, model, name, mode,
)
if self._mode != 'infer':
raise ValueError(
'BeamSearch decoder only supports infer mode, but got {}'.format(
self._mode,
)
)
if "length_penalty" not in self.params:
self._length_penalty_weight = 0.0
else:
self._length_penalty_weight = self.params["length_penalty"]
# beam_width of 1 should be same as argmax decoder
if "beam_width" not in self.params:
self._beam_width = 1
else:
self._beam_width = self.params["beam_width"]
def _decode(self, input_dict):
"""Decodes representation into data.
Args:
input_dict (dict): Python dictionary with inputs to decoder
Must define:
* src_inputs - decoder input Tensor of shape [batch_size, time, dim]
or [time, batch_size, dim]
* src_lengths - decoder input lengths Tensor of shape [batch_size]
Does not need tgt_inputs and tgt_lengths
Returns:
dict: a Python dictionary with:
* final_outputs - tensor of shape [batch_size, time, dim] or
[time, batch_size, dim]
* final_state - tensor with decoder final state
* final_sequence_lengths - tensor of shape [batch_size, time] or
[time, batch_size]
"""
encoder_outputs = input_dict['encoder_output']['outputs']
enc_src_lengths = input_dict['encoder_output']['src_lengths']
self._output_projection_layer = tf.layers.Dense(
self._tgt_vocab_size, use_bias=False,
)
if not self._weight_tied:
self._dec_emb_w = tf.get_variable(
name='DecoderEmbeddingMatrix',
shape=[self._tgt_vocab_size, self._tgt_emb_size],
dtype=tf.float32
)
else:
fake_input = tf.zeros(shape=(1, self._tgt_emb_size))
fake_output = self._output_projection_layer.apply(fake_input)
with tf.variable_scope("dense", reuse=True):
dense_weights = tf.get_variable("kernel")
self._dec_emb_w = tf.transpose(dense_weights)
if self._mode == "train":
dp_input_keep_prob = self.params['decoder_dp_input_keep_prob']
dp_output_keep_prob = self.params['decoder_dp_output_keep_prob']
else:
dp_input_keep_prob = 1.0
dp_output_keep_prob = 1.0
residual_connections = self.params['decoder_use_skip_connections']
# list of cells
cell_params = self.params.get('core_cell_params', {})
self._decoder_cells = [
single_cell(
cell_class=self.params['core_cell'],
cell_params=cell_params,
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
# residual connections are added a little differently for GNMT
residual_connections=False if self.params['attention_type'].startswith('gnmt')
else residual_connections,
) for _ in range(self.params['decoder_layers'] - 1)
]
last_cell_params = copy.deepcopy(cell_params)
if self._weight_tied:
last_cell_params['num_units'] = self._tgt_emb_size
last_cell = single_cell(
cell_class=self.params['core_cell'],
cell_params=last_cell_params,
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
# residual connections are added a little differently for GNMT
residual_connections=False if self.params['attention_type'].startswith('gnmt')
else residual_connections,
)
self._decoder_cells.append(last_cell)
# pylint: disable=no-member
tiled_enc_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs,
multiplier=self._beam_width,
)
# pylint: disable=no-member
tiled_enc_src_lengths = tf.contrib.seq2seq.tile_batch(
enc_src_lengths,
multiplier=self._beam_width,
)
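    # tile_batch repeats every batch element beam_width times so that the
    # attention memory lines up with the beam-expanded decoder state, e.g.
    # (hypothetical sizes) encoder outputs [8, 50, 512] with beam_width=4
    # become [32, 50, 512] and the lengths [8] become [32].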
attention_mechanism = self._build_attention(
tiled_enc_outputs,
tiled_enc_src_lengths,
)
if self.params['attention_type'].startswith('gnmt'):
attention_cell = self._decoder_cells.pop(0)
attention_cell = AttentionWrapper(
attention_cell,
attention_mechanism=attention_mechanism,
attention_layer_size=None, # don't use attention layer.
output_attention=False,
name="gnmt_attention",
)
attentive_decoder_cell = GNMTAttentionMultiCell(
attention_cell,
self._add_residual_wrapper(self._decoder_cells) if residual_connections else self._decoder_cells,
use_new_attention=(self.params['attention_type'] == 'gnmt_v2')
)
else: # non-GNMT
attentive_decoder_cell = AttentionWrapper(
# pylint: disable=no-member
cell=tf.contrib.rnn.MultiRNNCell(self._decoder_cells),
attention_mechanism=attention_mechanism,
)
batch_size_tensor = tf.constant(self._batch_size)
embedding_fn = lambda ids: tf.cast(
tf.nn.embedding_lookup(self._dec_emb_w, ids),
dtype=self.params['dtype'],
)
decoder = BeamSearchDecoder(
cell=attentive_decoder_cell,
embedding=embedding_fn,
start_tokens=tf.tile([self.GO_SYMBOL], [self._batch_size]),
end_token=self.END_SYMBOL,
initial_state=attentive_decoder_cell.zero_state(
dtype=encoder_outputs.dtype,
batch_size=batch_size_tensor * self._beam_width,
),
beam_width=self._beam_width,
output_layer=self._output_projection_layer,
length_penalty_weight=self._length_penalty_weight
)
time_major = self.params.get("time_major", False)
use_swap_memory = self.params.get("use_swap_memory", False)
final_outputs, final_state, final_sequence_lengths = \
tf.contrib.seq2seq.dynamic_decode( # pylint: disable=no-member
decoder=decoder,
maximum_iterations=tf.reduce_max(enc_src_lengths) * 2,
swap_memory=use_swap_memory,
output_time_major=time_major,
)
    return {'logits': final_outputs.predicted_ids[:, :, 0] if not time_major else
            tf.transpose(final_outputs.predicted_ids[:, :, 0], perm=[1, 0]),
'outputs': [final_outputs.predicted_ids[:, :, 0]],
'final_state': final_state,
'final_sequence_lengths': final_sequence_lengths}
| OpenSeq2Seq-master | open_seq2seq/decoders/rnn_decoders.py |
# Copyright (c) 2018 NVIDIA Corporation
"""This module defines various fully-connected decoders (consisting of one
fully connected layer).
These classes are usually used for models that are not really
sequence-to-sequence and thus should be artificially split into encoder and
decoder by cutting, for example, on the last fully-connected layer.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from six.moves import range
from .decoder import Decoder
class FakeDecoder(Decoder):
"""Fake decoder for LM
"""
def __init__(self, params, model,
name="fake_decoder", mode='train'):
super(FakeDecoder, self).__init__(params, model, name, mode)
def _decode(self, input_dict):
"""This method performs linear transformation of input.
Args:
input_dict (dict): input dictionary that has to contain
the following fields::
input_dict = {
'encoder_output': {
'outputs': output of encoder (shape=[batch_size, num_features])
}
}
Returns:
dict: dictionary with the following tensors::
{
'logits': logits with the shape=[batch_size, output_dim]
'outputs': [logits] (same as logits but wrapped in list)
}
"""
# return {'logits': input_dict['encoder_output']['logits'],
# 'outputs': [input_dict['encoder_output']['outputs']]}
# if 'logits' in input_dict['encoder_output']:
# return {'logits': input_dict['encoder_output']['logits'],
# 'outputs': [input_dict['encoder_output']['outputs']]}
# else:
# return {}
return input_dict['encoder_output']
| OpenSeq2Seq-master | open_seq2seq/decoders/lm_decoders.py |
# Copyright (c) 2017 NVIDIA Corporation
| OpenSeq2Seq-master | open_seq2seq/parts/__init__.py |
# Copyright (c) 2019 NVIDIA Corporation
import tensorflow as tf
from open_seq2seq.parts.centaur import ConvBlock
from open_seq2seq.parts.transformer import attention_layer
from open_seq2seq.parts.transformer.common import PrePostProcessingWrapper
from open_seq2seq.parts.transformer.ffn_layer import FeedFowardNetwork
class AttentionBlock:
"""
Attention block for Centaur model.
"""
def __init__(self,
hidden_size,
attention_dropout,
layer_postprocess_dropout,
training,
cnn_dropout_prob,
regularizer=None,
conv_params=None,
n_heads=1,
window_size=None,
back_step_size=None,
name="attention_block"):
"""
Attention block constructor.
Args:
hidden_size: dimensionality of hidden embeddings.
attention_dropout: dropout rate for attention layer.
layer_postprocess_dropout: dropout rate for sublayer.
training: whether it is training mode.
      cnn_dropout_prob: dropout probability for cnn layers.
regularizer: regularizer for the convolution kernel.
conv_params: description of convolutional layer.
n_heads: number of attention heads. Defaults to 1.
window_size: size of attention window for forcing
monotonic attention during the inference. Defaults to None.
back_step_size: number of steps attention is allowed to
go back during the inference. Defaults to 0.
name: name of the block.
"""
self.name = name
self.conv = None
if conv_params:
self.conv = ConvBlock.create(
index=0,
conv_params=conv_params,
regularizer=regularizer,
bn_momentum=0.95,
bn_epsilon=1e-8,
cnn_dropout_prob=cnn_dropout_prob,
training=training
)
self.conv.name = "conv"
attention = attention_layer.Attention(
hidden_size=hidden_size,
num_heads=n_heads,
attention_dropout=attention_dropout,
regularizer=regularizer,
train=training,
window_size=window_size,
back_step_size=back_step_size,
)
feed_forward = tf.layers.Dense(
units=hidden_size,
use_bias=True,
kernel_regularizer=regularizer
)
wrapper_params = {
"hidden_size": hidden_size,
"layer_postprocess_dropout": layer_postprocess_dropout
}
self.attention = PrePostProcessingWrapper(
layer=attention,
params=wrapper_params,
training=training
)
self.feed_forward = PrePostProcessingWrapper(
layer=feed_forward,
params=wrapper_params,
training=training
)
def __call__(self,
decoder_inputs,
encoder_outputs,
attention_bias,
positions=None):
with tf.variable_scope(self.name):
y = decoder_inputs
if self.conv:
y = self.conv(y)
with tf.variable_scope("attention"):
y = self.attention(
y,
encoder_outputs,
attention_bias,
positions=positions
)
with tf.variable_scope("feed_forward"):
y = self.feed_forward(y)
return y
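# Usage sketch (added, not part of the original file): shapes and hyper-parameter
# values below are illustrative assumptions only; the block is normally built
# inside the Centaur decoder.
def _attention_block_usage_sketch():
  block = AttentionBlock(
      hidden_size=256,
      attention_dropout=0.1,
      layer_postprocess_dropout=0.1,
      training=True,
      cnn_dropout_prob=0.1,
      n_heads=4,
  )
  decoder_inputs = tf.zeros([2, 10, 256])   # [batch, decoder_time, hidden]
  encoder_outputs = tf.zeros([2, 20, 256])  # [batch, encoder_time, hidden]
  attention_bias = tf.zeros([2, 1, 1, 20])  # additive bias over encoder positions
  return block(decoder_inputs, encoder_outputs, attention_bias)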
| OpenSeq2Seq-master | open_seq2seq/parts/centaur/attention.py |
# Copyright (c) 2019 NVIDIA Corporation
from .conv_block import ConvBlock
from .attention import AttentionBlock
from .batch_norm import BatchNorm1D
from .prenet import Prenet
| OpenSeq2Seq-master | open_seq2seq/parts/centaur/__init__.py |
# Copyright (c) 2019 NVIDIA Corporation
import tensorflow as tf
from .batch_norm import BatchNorm1D
class ConvBlock:
"""
Convolutional block for Centaur model.
"""
def __init__(self,
name,
conv,
norm,
activation_fn,
dropout,
training,
is_residual,
is_causal):
"""
Convolutional block constructor.
Args:
name: name of the block.
conv: convolutional layer.
norm: normalization layer to use after the convolutional layer.
activation_fn: activation function to use after the normalization.
dropout: dropout rate.
training: whether it is training mode.
is_residual: whether the block should contain a residual connection.
is_causal: whether the convolutional layer should be causal.
"""
self.name = name
self.conv = conv
self.norm = norm
self.activation_fn = activation_fn
self.dropout = dropout
self.training = training
self.is_residual = is_residual
self.is_causal = is_causal
def __call__(self, x):
with tf.variable_scope(self.name):
if self.is_causal:
# Add padding from the left side to avoid looking to the future
pad_size = self.conv.kernel_size[0] - 1
y = tf.pad(x, [[0, 0], [pad_size, 0], [0, 0]])
else:
y = x
y = self.conv(y)
if self.norm is not None:
y = self.norm(y, training=self.training)
if self.activation_fn is not None:
y = self.activation_fn(y)
if self.dropout is not None:
y = self.dropout(y, training=self.training)
return x + y if self.is_residual else y
@staticmethod
def create(index,
conv_params,
regularizer,
bn_momentum,
bn_epsilon,
cnn_dropout_prob,
training,
is_residual=True,
is_causal=False):
activation_fn = conv_params.get("activation_fn", tf.nn.relu)
conv = tf.layers.Conv1D(
name="conv_%d" % index,
filters=conv_params["num_channels"],
kernel_size=conv_params["kernel_size"],
strides=conv_params["stride"],
padding=conv_params["padding"],
kernel_regularizer=regularizer
)
norm = BatchNorm1D(
name="bn_%d" % index,
gamma_regularizer=regularizer,
momentum=bn_momentum,
epsilon=bn_epsilon
)
dropout = tf.layers.Dropout(
name="dropout_%d" % index,
rate=cnn_dropout_prob
)
if "is_causal" in conv_params:
is_causal = conv_params["is_causal"]
if "is_residual" in conv_params:
is_residual = conv_params["is_residual"]
return ConvBlock(
name="layer_%d" % index,
conv=conv,
norm=norm,
activation_fn=activation_fn,
dropout=dropout,
training=training,
is_residual=is_residual,
is_causal=is_causal
)
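# Usage sketch (added, not part of the original file): an illustrative
# conv_params dictionary and call to ConvBlock.create; every value below is an
# assumption chosen only to show the expected keys and shapes. With
# is_causal=True the block left-pads kernel_size - 1 = 2 frames, so a VALID
# convolution keeps the time dimension unchanged and the residual add works.
def _conv_block_usage_sketch():
  conv_params = {
      "kernel_size": [3],
      "stride": [1],
      "num_channels": 256,
      "padding": "VALID",
      "is_causal": True,
  }
  block = ConvBlock.create(
      index=0,
      conv_params=conv_params,
      regularizer=None,
      bn_momentum=0.95,
      bn_epsilon=1e-8,
      cnn_dropout_prob=0.1,
      training=True,
  )
  x = tf.zeros([2, 10, 256])  # [batch, time, channels]
  return block(x)             # same shape, with the residual connection added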
| OpenSeq2Seq-master | open_seq2seq/parts/centaur/conv_block.py |
# Copyright (c) 2019 NVIDIA Corporation
import tensorflow as tf
class BatchNorm1D:
"""
1D batch normalization layer.
"""
def __init__(self, *args, **kwargs):
super(BatchNorm1D, self).__init__()
self.norm = tf.layers.BatchNormalization(*args, **kwargs)
def __call__(self, x, training):
with tf.variable_scope("batch_norm_1d"):
y = tf.expand_dims(x, axis=1)
y = self.norm(y, training=training)
y = tf.squeeze(y, axis=1)
return y
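# Shape sketch (added, not part of the original file): a [batch, time, channels]
# input is expanded to [batch, 1, time, channels] so the regular 4D batch-norm
# layer can be applied, then squeezed back; values below are illustrative.
def _batch_norm_1d_usage_sketch():
  bn = BatchNorm1D(name="bn_example", momentum=0.95, epsilon=1e-8)
  x = tf.zeros([2, 10, 256])   # [batch, time, channels]
  return bn(x, training=True)  # [batch, time, channels]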
| OpenSeq2Seq-master | open_seq2seq/parts/centaur/batch_norm.py |
# Copyright (c) 2019 NVIDIA Corporation
import tensorflow as tf
class Prenet:
"""
Centaur decoder pre-net.
"""
def __init__(self,
n_layers,
hidden_size,
activation_fn,
dropout=0.5,
regularizer=None,
training=True,
dtype=None,
name="prenet"):
"""
Pre-net constructor.
Args:
n_layers: number of fully-connected layers to use.
hidden_size: number of units in each pre-net layer.
activation_fn: activation function to use.
dropout: dropout rate. Defaults to 0.5.
regularizer: regularizer for the fully-connected layer kernels.
Defaults to None.
training: whether it is training mode. Defaults to True.
dtype: dtype of the layer's weights. Defaults to None.
name: name of the block.
"""
self.name = name
self.layers = []
self.dropout = dropout
self.training = training
for i in range(n_layers):
layer = tf.layers.Dense(
name="layer_%d" % i,
units=hidden_size,
use_bias=True,
activation=activation_fn,
kernel_regularizer=regularizer,
dtype=dtype
)
self.layers.append(layer)
def __call__(self, x):
with tf.variable_scope(self.name):
for layer in self.layers:
x = tf.layers.dropout(
layer(x),
rate=self.dropout,
training=self.training
)
return x
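# Usage sketch (added, not part of the original file): shapes are illustrative
# assumptions. Dropout is applied after every dense layer and is controlled by
# the `training` flag given to the constructor.
def _prenet_usage_sketch():
  prenet = Prenet(
      n_layers=2,
      hidden_size=256,
      activation_fn=tf.nn.relu,
      dropout=0.5,
      training=True,
  )
  x = tf.zeros([2, 80])  # e.g. [batch, n_mel_features]
  return prenet(x)       # [batch, 256]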
| OpenSeq2Seq-master | open_seq2seq/parts/centaur/prenet.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
class TemporalConvolutionalLayer(tf.layers.Conv1D):
"""Temporal Convolutional layer
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
dilation_rate=1,
activation=None,
data_format='channels_last',
name="temporal_convolutional",
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
padding='valid',
**kwargs
):
super(TemporalConvolutionalLayer, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
data_format=data_format,
name=name,
padding='valid',
**kwargs
)
def call(self, inputs):
pads = (self.kernel_size[0] - 1) * self.dilation_rate[0]
padding = tf.fill([tf.shape(inputs)[0], pads, tf.shape(
inputs)[2]], tf.constant(0, dtype=inputs.dtype))
inputs = tf.concat([padding, inputs], 1)
return super(TemporalConvolutionalLayer, self).call(inputs)
def tcn(inputs,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for temporal convolution layer.
"""
layer = TemporalConvolutionalLayer(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
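# Worked sketch (added, not part of the original file): with kernel_size=3 and
# dilation_rate=2 the layer left-pads (3 - 1) * 2 = 4 zero frames, so the
# 'valid' convolution keeps the original time length and never sees future
# frames. Shapes below are illustrative assumptions.
def _tcn_usage_sketch():
  inputs = tf.zeros([2, 50, 64])  # [batch, time, channels]
  outputs = tcn(
      inputs,
      filters=128,
      kernel_size=3,
      dilation_rate=2,
      activation=tf.nn.relu,
      name="tcn_example",
  )
  return outputs                  # [batch, 50, 128]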
| OpenSeq2Seq-master | open_seq2seq/parts/cnns/tcn.py |
# Copyright (c) 2018 NVIDIA Corporation
| OpenSeq2Seq-master | open_seq2seq/parts/cnns/__init__.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import range
import tensorflow as tf
from .tcn import tcn
layers_dict = {
"conv1d": tf.layers.conv1d,
"sep_conv1d": tf.layers.separable_conv1d,
"conv2d": tf.layers.conv2d,
"tcn": tcn,
}
def conv_actv(layer_type, name, inputs, filters, kernel_size, activation_fn,
strides, padding, regularizer, training, data_format, dilation=1):
"""Helper function that applies convolution and activation.
Args:
layer_type: the following types are supported
'conv1d', 'sep_conv1d', 'conv2d', 'tcn'
"""
layer = layers_dict[layer_type]
if layer_type == 'sep_conv1d':
conv = layer(
name="{}".format(name),
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation,
depthwise_regularizer=regularizer,
pointwise_regularizer=regularizer,
use_bias=False,
data_format=data_format,
)
else:
conv = layer(
name="{}".format(name),
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation,
kernel_regularizer=regularizer,
use_bias=False,
data_format=data_format,
)
output = conv
if activation_fn is not None:
output = activation_fn(output)
return output
def conv_bn_res_bn_actv(layer_type, name, inputs, res_inputs, filters,
kernel_size, activation_fn, strides, padding,
regularizer, training, data_format, bn_momentum,
bn_epsilon, dilation=1,
drop_block_prob=0.0, drop_block=False):
layer = layers_dict[layer_type]
if not isinstance(res_inputs, list):
res_inputs = [res_inputs]
# For backwards compatibility with earlier models
res_name = "{}/res"
res_bn_name = "{}/res_bn"
else:
res_name = "{}/res_{}"
res_bn_name = "{}/res_bn_{}"
res_aggregation = 0
for i, res in enumerate(res_inputs):
res = layer(
res,
filters,
1,
name=res_name.format(name, i),
use_bias=False,
)
squeeze = False
if "conv1d" in layer_type:
axis = 1 if data_format == 'channels_last' else 2
res = tf.expand_dims(res, axis=axis) # NWC --> NHWC
squeeze = True
res = tf.layers.batch_normalization(
name=res_bn_name.format(name, i),
inputs=res,
gamma_regularizer=regularizer,
training=training,
axis=-1 if data_format == 'channels_last' else 1,
momentum=bn_momentum,
epsilon=bn_epsilon,
)
if squeeze:
res = tf.squeeze(res, axis=axis)
res_aggregation += res
if layer_type == "sep_conv1d":
conv = layer(
name="{}".format(name),
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation,
depthwise_regularizer=regularizer,
pointwise_regularizer=regularizer,
use_bias=False,
data_format=data_format,
)
else:
conv = layer(
name="{}".format(name),
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation,
kernel_regularizer=regularizer,
use_bias=False,
data_format=data_format,
)
# trick to make batchnorm work for mixed precision training.
# To-Do check if batchnorm works smoothly for >4 dimensional tensors
squeeze = False
if "conv1d" in layer_type:
axis = 1 if data_format == 'channels_last' else 2
conv = tf.expand_dims(conv, axis=axis) # NWC --> NHWC
squeeze = True
bn = tf.layers.batch_normalization(
name="{}/bn".format(name),
inputs=conv,
gamma_regularizer=regularizer,
training=training,
axis=-1 if data_format == 'channels_last' else 1,
momentum=bn_momentum,
epsilon=bn_epsilon,
)
if squeeze:
bn = tf.squeeze(bn, axis=axis)
output = bn + res_aggregation
if drop_block_prob > 0:
if training:
output = tf.cond(
tf.random_uniform(shape=[]) < drop_block_prob,
lambda: res_aggregation,
lambda: bn + res_aggregation
)
elif drop_block:
output = res_aggregation
if activation_fn is not None:
output = activation_fn(output)
return output
def conv_bn_actv(layer_type, name, inputs, filters, kernel_size, activation_fn,
strides, padding, regularizer, training, data_format,
bn_momentum, bn_epsilon, dilation=1):
"""Helper function that applies convolution, batch norm and activation.
Args:
layer_type: the following types are supported
'conv1d', 'sep_conv1d', 'conv2d', 'tcn'
"""
layer = layers_dict[layer_type]
if layer_type == 'sep_conv1d':
conv = layer(
name="{}".format(name),
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation,
depthwise_regularizer=regularizer,
pointwise_regularizer=regularizer,
use_bias=False,
data_format=data_format,
)
else:
conv = layer(
name="{}".format(name),
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation,
kernel_regularizer=regularizer,
use_bias=False,
data_format=data_format,
)
# trick to make batchnorm work for mixed precision training.
# To-Do check if batchnorm works smoothly for >4 dimensional tensors
squeeze = False
if "conv1d" in layer_type:
axis = 1 if data_format == 'channels_last' else 2
conv = tf.expand_dims(conv, axis=axis) # NWC --> NHWC
squeeze = True
bn = tf.layers.batch_normalization(
name="{}/bn".format(name),
inputs=conv,
gamma_regularizer=regularizer,
training=training,
axis=-1 if data_format == 'channels_last' else 1,
momentum=bn_momentum,
epsilon=bn_epsilon,
)
if squeeze:
bn = tf.squeeze(bn, axis=axis)
output = bn
if activation_fn is not None:
output = activation_fn(output)
return output
def conv_ln_actv(layer_type, name, inputs, filters, kernel_size, activation_fn,
strides, padding, regularizer, training, data_format,
dilation=1):
"""Helper function that applies convolution, layer norm and activation.
Args:
layer_type: the following types are supported
'conv1d', 'conv2d'
"""
layer = layers_dict[layer_type]
conv = layer(
name="{}".format(name),
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation,
kernel_regularizer=regularizer,
use_bias=False,
data_format=data_format,
)
if data_format == 'channels_first':
if layer_type == "conv1d":
conv = tf.transpose(conv, [0, 2, 1])
elif layer_type == "conv2d":
conv = tf.transpose(conv, [0, 2, 3, 1])
ln = tf.contrib.layers.layer_norm(
inputs=conv,
)
if data_format == 'channels_first':
if layer_type == "conv1d":
ln = tf.transpose(ln, [0, 2, 1])
elif layer_type == "conv2d":
ln = tf.transpose(ln, [0, 3, 1, 2])
output = ln
if activation_fn is not None:
output = activation_fn(output)
return output
def conv_in_actv(layer_type, name, inputs, filters, kernel_size, activation_fn,
strides, padding, regularizer, training, data_format,
dilation=1):
"""Helper function that applies convolution, instance norm and activation.
Args:
layer_type: the following types are supported
'conv1d', 'conv2d'
"""
layer = layers_dict[layer_type]
conv = layer(
name="{}".format(name),
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation,
kernel_regularizer=regularizer,
use_bias=False,
data_format=data_format,
)
sn = tf.contrib.layers.instance_norm(
inputs=conv,
data_format="NHWC" if data_format == 'channels_last' else "NCHW"
)
output = sn
if activation_fn is not None:
output = activation_fn(output)
return output
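# Usage sketch (added, not part of the original file): a single 1D convolution +
# batch norm + ReLU block built with the helper above; all names and shapes are
# illustrative assumptions.
def _conv_bn_actv_usage_sketch(training=True):
  inputs = tf.zeros([2, 100, 64])  # [batch, time, channels]
  return conv_bn_actv(
      layer_type="conv1d",
      name="conv_example",
      inputs=inputs,
      filters=128,
      kernel_size=11,
      activation_fn=tf.nn.relu,
      strides=1,
      padding="SAME",
      regularizer=None,
      training=training,
      data_format="channels_last",
      bn_momentum=0.9,
      bn_epsilon=1e-3,
  )                                # [batch, 100, 128]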
| OpenSeq2Seq-master | open_seq2seq/parts/cnns/conv_blocks.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Modified by blisc to enable support for tacotron models, specifically enables
the prenet
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import collections
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import dtypes
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.util import nest
class BasicDecoderOutput(
collections.namedtuple(
"BasicDecoderOutput", ("rnn_output", "stop_token_output")
)
):
pass
class TacotronDecoder(decoder.Decoder):
"""Basic sampling decoder."""
def __init__(
self,
decoder_cell,
helper,
initial_decoder_state,
attention_type,
spec_layer,
stop_token_layer,
prenet=None,
dtype=dtypes.float32,
train=True
):
"""Initialize TacotronDecoder.
Args:
decoder_cell: An `RNNCell` instance.
helper: A `Helper` instance.
initial_decoder_state: A (possibly nested tuple of...) tensors and
TensorArrays. The initial state of the RNNCell.
attention_type: The type of attention used
stop_token_layer: An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Stop token layer to apply to the RNN output to
predict when to stop the decoder
spec_layer: An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Output layer to apply to the RNN output to map
the result to a spectrogram
prenet: The prenet to apply to inputs
Raises:
TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.
"""
rnn_cell_impl.assert_like_rnncell("cell", decoder_cell)
if not isinstance(helper, helper_py.Helper):
raise TypeError("helper must be a Helper, received: %s" % type(helper))
if (
spec_layer is not None and
not isinstance(spec_layer, layers_base.Layer)
):
raise TypeError(
"spec_layer must be a Layer, received: %s" % type(spec_layer)
)
self._decoder_cell = decoder_cell
self._helper = helper
self._decoder_initial_state = initial_decoder_state
self._spec_layer = spec_layer
self._stop_token_layer = stop_token_layer
self._attention_type = attention_type
self._dtype = dtype
self._prenet = prenet
if train:
self._spec_layer = None
self._stop_token_layer = None
@property
def batch_size(self):
return self._helper.batch_size
def _rnn_output_size(self):
size = self._decoder_cell.output_size
if self._spec_layer is None:
return size
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s), size
)
layer_output_shape = self._spec_layer.compute_output_shape(
output_shape_with_unknown_batch
)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
def _stop_token_output_size(self):
size = self._decoder_cell.output_size
if self._stop_token_layer is None:
return size
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s), size
)
layer_output_shape = self._stop_token_layer.compute_output_shape(
output_shape_with_unknown_batch
)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def output_size(self):
return BasicDecoderOutput(
rnn_output=self._rnn_output_size(),
stop_token_output=self._stop_token_output_size(),
)
@property
def output_dtype(self):
# dtype = nest.flatten(self._decoder_initial_state)[0].dtype
return BasicDecoderOutput(
nest.map_structure(lambda _: self._dtype, self._rnn_output_size()),
nest.map_structure(lambda _: self._dtype, self._stop_token_output_size()),
)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
"""
state = (self._decoder_initial_state, )
return self._helper.initialize() + state
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
with ops.name_scope(name, "BasicDecoderStep", (time, inputs, state)):
if self._prenet is not None:
inputs = self._prenet(inputs)
cell_outputs, cell_state = self._decoder_cell(inputs, state)
# If we are training and not using scheduled sampling, we can move
# all projection layers outside decoder,
# else we must project inside decoder
if self._spec_layer is not None:
spec_outputs = self._spec_layer(cell_outputs)
else:
spec_outputs = cell_outputs
if self._stop_token_layer is not None:
stop_token_output = self._stop_token_layer(spec_outputs)
else:
stop_token_output = cell_outputs
(finished, next_inputs, next_state) = self._helper.next_inputs(
time=time,
outputs=spec_outputs,
state=cell_state,
stop_token_predictions=stop_token_output
)
outputs = BasicDecoderOutput(spec_outputs, stop_token_output)
return (outputs, next_state, next_inputs, finished)
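# Wiring sketch (comments only, added, not part of the original file): this
# decoder is typically combined with a TacotronTrainingHelper (or TacotronHelper
# at inference), an attention-wrapped RNN cell and projection layers, then
# unrolled with tf.contrib.seq2seq.dynamic_decode, roughly:
#   decoder = TacotronDecoder(decoder_cell=attention_cell, helper=helper,
#                             initial_decoder_state=initial_state,
#                             attention_type="location", spec_layer=spec_proj,
#                             stop_token_layer=stop_proj, prenet=prenet,
#                             train=True)
#   outputs, final_state, lengths = tf.contrib.seq2seq.dynamic_decode(decoder)
# The cell, helper and projection layers are built elsewhere in the repository;
# the names used above are assumptions for illustration only.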
| OpenSeq2Seq-master | open_seq2seq/parts/tacotron/tacotron_decoder.py |
OpenSeq2Seq-master | open_seq2seq/parts/tacotron/__init__.py |
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Modified by blisc to enable support for tacotron models
Custom Helper class that implements the tacotron decoder pre and post nets
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops.helper import Helper
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
_transpose_batch_time = decoder._transpose_batch_time
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]
).unstack(inp)
class TacotronTrainingHelper(Helper):
"""Helper funciton for training. Can be used for teacher forcing or scheduled
sampling"""
def __init__(
self,
inputs,
sequence_length,
prenet=None,
time_major=False,
sample_ids_shape=None,
sample_ids_dtype=None,
model_dtype=tf.float32,
mask_decoder_sequence=None
):
"""Initializer.
Args:
inputs (Tensor): inputs of shape [batch, time, n_feats]
sequence_length (Tensor): length of each input. shape [batch]
prenet: prenet to use, currently disabled and used in tacotron decoder
instead.
time_major (bool): see tacotron 2 decoder
mask_decoder_sequence (bool): whether to mark sequences as finished once the
decoder has passed the sequence_length input, or to always report unfinished
to dynamic_decode
"""
self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = sequence_length
self._batch_size = array_ops.size(sequence_length)
self._seed = None
self._mask_decoder_sequence = mask_decoder_sequence
self._prenet = prenet
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs
)
self._start_inputs = self._zero_inputs
if prenet is not None:
self._start_inputs = self._prenet(self._zero_inputs)
self._last_dim = self._start_inputs.get_shape()[-1]
self._dtype = model_dtype
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_ids_shape
@property
def sample_ids_dtype(self):
return self._sample_ids_dtype
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
# Fully deterministic, output should already be projected
pass
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
# Applies the fully connected pre-net to the decoder
# Also decides whether the decoder is finished
next_time = time + 1
if self._mask_decoder_sequence:
finished = (next_time >= self._sequence_length)
else:
finished = array_ops.tile([False], [self._batch_size])
all_finished = math_ops.reduce_all(finished)
def get_next_input(inp, out):
next_input = inp.read(time)
if self._prenet is not None:
next_input = self._prenet(next_input)
out = self._prenet(out)
return next_input
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._start_inputs,
lambda: get_next_input(self._input_tas, outputs)
)
return (finished, next_inputs, state)
class TacotronHelper(Helper):
"""Helper for use during eval and infer. Does not use teacher forcing"""
def __init__(
self,
inputs,
prenet=None,
time_major=False,
sample_ids_shape=None,
sample_ids_dtype=None,
mask_decoder_sequence=None
):
"""Initializer.
Args:
inputs (Tensor): inputs of shape [batch, time, n_feats]
prenet: prenet to use, currently disabled and used in tacotron decoder
instead.
time_major (bool): see tacotron 2 decoder
mask_decoder_sequence (bool): whether to mark sequences as finished based on
the stop token predictions, or to always report unfinished to dynamic_decode
"""
self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
self._batch_size = inputs.get_shape()[0]
self._mask_decoder_sequence = mask_decoder_sequence
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
inputs = inputs[0, :, :]
self._prenet = prenet
if prenet is None:
self._start_inputs = inputs
else:
self._start_inputs = self._prenet(inputs)
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_ids_shape
@property
def sample_ids_dtype(self):
return self._sample_ids_dtype
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
# Fully deterministic, output should already be projected
pass
def next_inputs(
self,
time,
outputs,
state,
stop_token_predictions,
name=None,
**unused_kwargs
):
# Applies the fully connected pre-net to the decoder
# Also decides whether the decoder is finished
next_time = time + 1
if self._mask_decoder_sequence:
stop_token_predictions = tf.sigmoid(stop_token_predictions)
finished = tf.cast(tf.round(stop_token_predictions), tf.bool)
finished = tf.squeeze(finished)
else:
finished = array_ops.tile([False], [self._batch_size])
all_finished = math_ops.reduce_all(finished)
def get_next_input(out):
if self._prenet is not None:
out = self._prenet(out)
return out
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._start_inputs,
lambda: get_next_input(outputs)
)
return (finished, next_inputs, state)
| OpenSeq2Seq-master | open_seq2seq/parts/tacotron/tacotron_helper.py |
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of fully connected network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class FeedFowardNetwork(tf.layers.Layer):
"""Fully connected feedforward network."""
def __init__(self, hidden_size, filter_size, relu_dropout, train, regularizer=None):
super(FeedFowardNetwork, self).__init__()
self.hidden_size = hidden_size
self.filter_size = filter_size
self.relu_dropout = relu_dropout
self.train = train
# regularizer = tf.contrib.layers.l2_regularizer(0.0005)
self.filter_dense_layer = tf.layers.Dense(
filter_size,
use_bias=True,
activation=tf.nn.relu,
name="filter_layer",
kernel_regularizer=regularizer,
bias_regularizer=regularizer
)
self.output_dense_layer = tf.layers.Dense(
hidden_size,
use_bias=True,
name="output_layer",
kernel_regularizer=regularizer,
bias_regularizer=regularizer )
def call(self, x, padding=None):
# Retrieve dynamically known shapes
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
if padding is not None:
with tf.name_scope("remove_padding"):
# Flatten padding to [batch_size*length]
pad_mask = tf.reshape(padding, [-1])
nonpad_ids = tf.cast(tf.where(pad_mask < 1e-9), dtype=tf.int32)
# Reshape x to [batch_size*length, hidden_size] to remove padding
x = tf.reshape(x, [-1, self.hidden_size])
x = tf.gather_nd(x, indices=nonpad_ids)
# Reshape x from 2 dimensions to 3 dimensions.
x.set_shape([None, self.hidden_size])
x = tf.expand_dims(x, axis=0)
output = self.filter_dense_layer(x)
if self.train:
output = tf.nn.dropout(output, keep_prob = 1 - self.relu_dropout)
output = self.output_dense_layer(output)
if padding is not None:
with tf.name_scope("re_add_padding"):
output = tf.squeeze(output, axis=0)
output = tf.scatter_nd(
indices=nonpad_ids,
updates=output,
shape=[batch_size * length, self.hidden_size]
)
output = tf.reshape(output, [batch_size, length, self.hidden_size])
return output
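# Usage sketch (added, not part of the original file): shapes are illustrative
# assumptions. When `padding` is provided, padded positions are removed before
# the two dense layers and scattered back afterwards to save computation.
def _ffn_usage_sketch():
  ffn = FeedFowardNetwork(
      hidden_size=256, filter_size=1024, relu_dropout=0.1, train=True)
  x = tf.zeros([2, 10, 256])      # [batch, length, hidden_size]
  padding = tf.zeros([2, 10])     # 1.0 marks padded positions, 0.0 real tokens
  return ffn(x, padding=padding)  # [batch, length, hidden_size]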
| OpenSeq2Seq-master | open_seq2seq/parts/transformer/ffn_layer.py |
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of embedding layer with shared weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from . import utils as model_utils
class EmbeddingSharedWeights(tf.layers.Layer):
"""Calculates input embeddings and pre-softmax linear with shared weights."""
def __init__(self, vocab_size, hidden_size, pad_vocab_to_eight=False, init_var=None,
embed_scale=True, pad_sym=0, mask_paddings=True, regularizer=None):
super(EmbeddingSharedWeights, self).__init__()
self.hidden_size = hidden_size
self.embed_scale = embed_scale
self.pad_sym = pad_sym
self.mask_paddings = mask_paddings
self.regularizer = regularizer
padf = lambda x: x if x % 8 == 0 else x + 8 - x % 8
if pad_vocab_to_eight:
self.vocab_size = padf(vocab_size)
else:
self.vocab_size = vocab_size
if init_var is None:
self.init_var = hidden_size ** -0.5
else:
self.init_var = init_var
def build(self, _):
with tf.variable_scope("embedding_and_softmax", reuse=tf.AUTO_REUSE):
# Create and initialize weights. The random normal initializer was chosen
# randomly, and works well.
self.shared_weights = tf.get_variable("weights", [self.vocab_size, self.hidden_size],
initializer=tf.random_normal_initializer(0., self.init_var), \
regularizer=self.regularizer)
self.built = True
def call(self, x):
"""Get token embeddings of x.
Args:
x: An int64 tensor with shape [batch_size, length]
Returns:
embeddings: float32 tensor with shape [batch_size, length, embedding_size]
padding: float32 tensor with shape [batch_size, length] indicating the
locations of the padding tokens in x.
"""
with tf.name_scope("embedding"):
# fills out of bound values with padding symbol
out_bound_mask = tf.cast(x > (self.vocab_size - 1), dtype=tf.int32)
x *= 1 - out_bound_mask
x += out_bound_mask * tf.cast(self.pad_sym, dtype=tf.int32)
embeddings = tf.gather(self.shared_weights, x)
if self.embed_scale:
# Scale embedding by the sqrt of the hidden size
embeddings *= self.hidden_size ** 0.5
if self.mask_paddings:
# Create binary array of size [batch_size, length]
# where 1 = padding, 0 = not padding
padding = model_utils.get_padding(x, padding_value=self.pad_sym)
# Set all padding embedding values to 0
#embeddings *= tf.expand_dims(1 - padding, -1)
embeddings *= tf.cast(tf.expand_dims(1.0 - padding, -1), dtype=embeddings.dtype)
return embeddings
def linear(self, x):
"""Computes logits by running x through a linear layer.
Args:
x: A float32 tensor with shape [batch_size, length, hidden_size]
Returns:
float32 tensor with shape [batch_size, length, vocab_size].
"""
with tf.name_scope("presoftmax_linear"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
x = tf.reshape(x, [-1, self.hidden_size])
logits = tf.matmul(x, self.shared_weights, transpose_b=True)
return tf.reshape(logits, [batch_size, length, self.vocab_size])
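# Usage sketch (added, not part of the original file): the vocabulary size and
# shapes are illustrative assumptions. The same weight matrix embeds token ids
# and serves as the pre-softmax projection in `linear`.
def _embedding_shared_weights_usage_sketch():
  embedding = EmbeddingSharedWeights(vocab_size=1000, hidden_size=256)
  ids = tf.ones([2, 7], dtype=tf.int32)  # [batch, length] token ids
  embedded = embedding(ids)              # [batch, length, hidden_size]
  logits = embedding.linear(embedded)    # [batch, length, vocab_size]
  return logits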
| OpenSeq2Seq-master | open_seq2seq/parts/transformer/embedding_layer.py |
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test Transformer model helper methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import open_seq2seq.parts.transformer.utils as model_utils
from open_seq2seq.parts.transformer.utils import _NEG_INF as NEG_INF
class ModelUtilsTest(tf.test.TestCase):
def test_get_padding(self):
x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]])
padding = model_utils.get_padding(x, padding_value=0)
with self.test_session() as sess:
padding = sess.run(padding)
self.assertAllEqual([[0, 1, 1, 1, 0], [0, 0, 1, 1, 1], [1, 0, 0, 1, 0]],
padding)
def test_get_padding_bias(self):
x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]])
bias = model_utils.get_padding_bias(x)
bias_shape = tf.shape(bias)
flattened_bias = tf.reshape(bias, [3, 5])
with self.test_session() as sess:
flattened_bias, bias_shape = sess.run((flattened_bias, bias_shape))
self.assertAllEqual([[0, NEG_INF, NEG_INF, NEG_INF, 0],
[0, 0, NEG_INF, NEG_INF, NEG_INF],
[NEG_INF, 0, 0, NEG_INF, 0]],
flattened_bias)
self.assertAllEqual([3, 1, 1, 5], bias_shape)
def test_get_decoder_self_attention_bias(self):
length = 5
bias = model_utils.get_decoder_self_attention_bias(length)
with self.test_session() as sess:
bias = sess.run(bias)
self.assertAllEqual([[[[0, NEG_INF, NEG_INF, NEG_INF, NEG_INF],
[0, 0, NEG_INF, NEG_INF, NEG_INF],
[0, 0, 0, NEG_INF, NEG_INF],
[0, 0, 0, 0, NEG_INF],
[0, 0, 0, 0, 0]]]],
bias)
if __name__ == "__main__":
tf.test.main()
| OpenSeq2Seq-master | open_seq2seq/parts/transformer/utils_test.py |
OpenSeq2Seq-master | open_seq2seq/parts/transformer/__init__.py |
|
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Beam search to find the translated sequence with the highest probability.
Source implementation from Tensor2Tensor:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/beam_search.py
"""
import tensorflow as tf
from tensorflow.python.util import nest
# Default value for INF
#INF = 1. * 1e7
INF = 32768.0
class _StateKeys(object):
"""Keys to dictionary storing the state of the beam search loop."""
# Variable storing the loop index.
CUR_INDEX = "CUR_INDEX"
# Top sequences that are alive for each batch item. Alive sequences are ones
# that have not generated an EOS token. Sequences that reach EOS are marked as
# finished and moved to the FINISHED_SEQ tensor.
# Has shape [batch_size, beam_size, CUR_INDEX + 1]
ALIVE_SEQ = "ALIVE_SEQ"
# Log probabilities of each alive sequence. Shape [batch_size, beam_size]
ALIVE_LOG_PROBS = "ALIVE_LOG_PROBS"
# Dictionary of cached values for each alive sequence. The cache stores
# the encoder output, attention bias, and the decoder attention output from
# the previous iteration.
ALIVE_CACHE = "ALIVE_CACHE"
# Top finished sequences for each batch item.
# Has shape [batch_size, beam_size, CUR_INDEX + 1]. Sequences that are
# shorter than CUR_INDEX + 1 are padded with 0s.
FINISHED_SEQ = "FINISHED_SEQ"
# Scores for each finished sequence. Score = log probability / length norm
# Shape [batch_size, beam_size]
FINISHED_SCORES = "FINISHED_SCORES"
# Flags indicating which sequences in the finished sequences are finished.
# At the beginning, all of the sequences in FINISHED_SEQ are filler values.
# True -> finished sequence, False -> filler. Shape [batch_size, beam_size]
FINISHED_FLAGS = "FINISHED_FLAGS"
class SequenceBeamSearch(object):
"""Implementation of beam search loop."""
def __init__(self, symbols_to_logits_fn, vocab_size, batch_size,
beam_size, alpha, max_decode_length, eos_id):
self.symbols_to_logits_fn = symbols_to_logits_fn
self.vocab_size = vocab_size
self.batch_size = batch_size
self.beam_size = beam_size
self.alpha = alpha
self.max_decode_length = max_decode_length
self.eos_id = eos_id
def search(self, initial_ids, initial_cache):
"""Beam search for sequences with highest scores."""
state, state_shapes = self._create_initial_state(initial_ids, initial_cache)
finished_state = tf.while_loop(
self._continue_search, self._search_step, loop_vars=[state],
shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)
finished_state = finished_state[0]
alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]
# Account for corner case where there are no finished sequences for a
# particular batch item. In that case, return alive sequences for that batch
# item.
finished_seq = tf.where(
tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
finished_scores = tf.where(
tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
return finished_seq, finished_scores
def _create_initial_state(self, initial_ids, initial_cache):
"""Return initial state dictionary and its shape invariants.
Args:
initial_ids: initial ids to pass into the symbols_to_logits_fn.
int tensor with shape [batch_size, 1]
initial_cache: dictionary storing values to be passed into the
symbols_to_logits_fn.
Returns:
state and shape invariant dictionaries with keys from _StateKeys
"""
# Current loop index (starts at 0)
cur_index = tf.constant(0)
# Create alive sequence with shape [batch_size, beam_size, 1]
alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)
alive_seq = tf.expand_dims(alive_seq, axis=2)
# Create tensor for storing initial log probabilities.
# Assume initial_ids are prob 1.0
initial_log_probs = tf.constant(
[[0.] + [-float("inf")] * (self.beam_size - 1)])
alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])
# Expand all values stored in the dictionary to the beam size, so that each
# beam has a separate cache.
alive_cache = nest.map_structure(
lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache)
# Initialize tensor storing finished sequences with filler values.
finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)
# Set scores of the initial finished seqs to negative infinity.
finished_scores = tf.ones([self.batch_size, self.beam_size]) * -INF
# Initialize finished flags with all False values.
finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)
# Create state dictionary
state = {
_StateKeys.CUR_INDEX: cur_index,
_StateKeys.ALIVE_SEQ: alive_seq,
_StateKeys.ALIVE_LOG_PROBS: alive_log_probs,
_StateKeys.ALIVE_CACHE: alive_cache,
_StateKeys.FINISHED_SEQ: finished_seq,
_StateKeys.FINISHED_SCORES: finished_scores,
_StateKeys.FINISHED_FLAGS: finished_flags
}
# Create state invariants for each value in the state dictionary. Each
# dimension must be a constant or None. A None dimension means either:
# 1) the dimension's value is a tensor that remains the same but may
# depend on the input sequence to the model (e.g. batch size).
# 2) the dimension may have different values on different iterations.
state_shape_invariants = {
_StateKeys.CUR_INDEX: tf.TensorShape([]),
_StateKeys.ALIVE_SEQ: tf.TensorShape([None, self.beam_size, None]),
_StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([None, self.beam_size]),
_StateKeys.ALIVE_CACHE: nest.map_structure(
_get_shape_keep_last_dim, alive_cache),
_StateKeys.FINISHED_SEQ: tf.TensorShape([None, self.beam_size, None]),
_StateKeys.FINISHED_SCORES: tf.TensorShape([None, self.beam_size]),
_StateKeys.FINISHED_FLAGS: tf.TensorShape([None, self.beam_size])
}
return state, state_shape_invariants
def _continue_search(self, state):
"""Return whether to continue the search loop.
The loops should terminate when
1) when decode length has been reached, or
2) when the worst score in the finished sequences is better than the best
score in the alive sequences (i.e. the finished sequences are provably
unchanging)
Args:
state: A dictionary with the current loop state.
Returns:
Bool tensor with value True if loop should continue, False if loop should
terminate.
"""
i = state[_StateKeys.CUR_INDEX]
alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
finished_scores = state[_StateKeys.FINISHED_SCORES]
finished_flags = state[_StateKeys.FINISHED_FLAGS]
not_at_max_decode_length = tf.less(i, self.max_decode_length)
# Calculate the largest length penalty (the larger the penalty, the better the score).
max_length_norm = _length_normalization(self.alpha, self.max_decode_length)
# Get the best possible scores from alive sequences.
best_alive_scores = alive_log_probs[:, 0] / max_length_norm
# Compute worst score in finished sequences for each batch element
finished_scores *= tf.to_float(finished_flags) # set filler scores to zero
lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)
# If there are no finished sequences in a batch element, then set the lowest
# finished score to -INF for that element.
finished_batches = tf.reduce_any(finished_flags, 1)
lowest_finished_scores += (1. - tf.to_float(finished_batches)) * -INF
worst_finished_score_better_than_best_alive_score = tf.reduce_all(
tf.greater(lowest_finished_scores, best_alive_scores)
)
return tf.logical_and(
not_at_max_decode_length,
tf.logical_not(worst_finished_score_better_than_best_alive_score)
)
def _search_step(self, state):
"""Beam search loop body.
Grow alive sequences by a single ID. Sequences that have reached the EOS
token are marked as finished. The alive and finished sequences with the
highest log probabilities and scores are returned.
A sequence's finished score is calculated by dividing the log probability
by the length normalization factor. Without length normalization, the
search is more likely to return shorter sequences.
Args:
state: A dictionary with the current loop state.
Returns:
new state dictionary.
"""
# Grow alive sequences by one token.
new_seq, new_log_probs, new_cache = self._grow_alive_seq(state)
# Collect top beam_size alive sequences
alive_state = self._get_new_alive_state(new_seq, new_log_probs, new_cache)
# Combine newly finished sequences with existing finished sequences, and
# collect the top k scoring sequences.
finished_state = self._get_new_finished_state(state, new_seq, new_log_probs)
# Increment loop index and create new state dictionary
new_state = {_StateKeys.CUR_INDEX: state[_StateKeys.CUR_INDEX] + 1}
new_state.update(alive_state)
new_state.update(finished_state)
return [new_state]
def _grow_alive_seq(self, state):
"""Grow alive sequences by one token, and collect top 2*beam_size sequences.
2*beam_size sequences are collected because some sequences may have reached
the EOS token. 2*beam_size ensures that at least beam_size sequences are
still alive.
Args:
state: A dictionary with the current loop state.
Returns:
Tuple of
(Top 2*beam_size sequences [batch_size, 2 * beam_size, cur_index + 1],
Scores of returned sequences [batch_size, 2 * beam_size],
New alive cache, for each of the 2 * beam_size sequences)
"""
i = state[_StateKeys.CUR_INDEX]
alive_seq = state[_StateKeys.ALIVE_SEQ]
alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
alive_cache = state[_StateKeys.ALIVE_CACHE]
beams_to_keep = 2 * self.beam_size
# Get logits for the next candidate IDs for the alive sequences. Get the new
# cache values at the same time.
flat_ids = _flatten_beam_dim(alive_seq) # [batch_size * beam_size]
flat_cache = nest.map_structure(_flatten_beam_dim, alive_cache)
flat_logits, flat_cache = self.symbols_to_logits_fn(flat_ids, i, flat_cache)
# Unflatten logits to shape [batch_size, beam_size, vocab_size]
logits = _unflatten_beam_dim(flat_logits, self.batch_size, self.beam_size)
new_cache = nest.map_structure(
lambda t: _unflatten_beam_dim(t, self.batch_size, self.beam_size),
flat_cache)
# Convert logits to normalized log probs
candidate_log_probs = _log_prob_from_logits(logits)
# Calculate new log probabilities if each of the alive sequences were
# extended by the candidate IDs.
# Shape [batch_size, beam_size, vocab_size]
log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
# Each batch item has beam_size * vocab_size candidate sequences. For each
# batch item, get the k candidates with the highest log probabilities.
flat_log_probs = tf.reshape(log_probs,
[-1, self.beam_size * self.vocab_size])
topk_log_probs, topk_indices = tf.nn.top_k(flat_log_probs, k=beams_to_keep)
# Extract the alive sequences that generate the highest log probabilities
# after being extended.
topk_beam_indices = topk_indices // self.vocab_size
topk_seq, new_cache = _gather_beams(
[alive_seq, new_cache], topk_beam_indices, self.batch_size,
beams_to_keep)
# Append the most probable IDs to the topk sequences
topk_ids = topk_indices % self.vocab_size
topk_ids = tf.expand_dims(topk_ids, axis=2)
topk_seq = tf.concat([topk_seq, topk_ids], axis=2)
return topk_seq, topk_log_probs, new_cache
def _get_new_alive_state(self, new_seq, new_log_probs, new_cache):
"""Gather the top k sequences that are still alive.
Args:
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1]
new_log_probs: Log probabilities of new sequences
float32 tensor with shape [batch_size, beam_size]
new_cache: Dict of cached values for each sequence.
Returns:
Dictionary with alive keys from _StateKeys:
{Top beam_size sequences that are still alive (don't end with eos_id)
Log probabilities of top alive sequences
Dict cache storing decoder states for top alive sequences}
"""
# To prevent finished sequences from being considered, set log probs to -INF
new_finished_flags = tf.equal(new_seq[:, :, -1], self.eos_id)
new_log_probs += tf.to_float(new_finished_flags) * -INF
top_alive_seq, top_alive_log_probs, top_alive_cache = _gather_topk_beams(
[new_seq, new_log_probs, new_cache], new_log_probs, self.batch_size,
self.beam_size)
return {
_StateKeys.ALIVE_SEQ: top_alive_seq,
_StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs,
_StateKeys.ALIVE_CACHE: top_alive_cache
}
def _get_new_finished_state(self, state, new_seq, new_log_probs):
"""Combine new and old finished sequences, and gather the top k sequences.
Args:
state: A dictionary with the current loop state.
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, beam_size, i + 1]
new_log_probs: Log probabilities of new sequences
float32 tensor with shape [batch_size, beam_size]
Returns:
Dictionary with finished keys from _StateKeys:
{Top beam_size finished sequences based on score,
Scores of finished sequences,
Finished flags of finished sequences}
"""
i = state[_StateKeys.CUR_INDEX]
finished_seq = state[_StateKeys.FINISHED_SEQ]
finished_scores = state[_StateKeys.FINISHED_SCORES]
finished_flags = state[_StateKeys.FINISHED_FLAGS]
# First append a column of 0-ids to finished_seq to increment the length.
# New shape of finished_seq: [batch_size, beam_size, i + 1]
finished_seq = tf.concat(
[finished_seq,
tf.zeros([self.batch_size, self.beam_size, 1], tf.int32)], axis=2)
# Calculate new seq scores from log probabilities.
length_norm = _length_normalization(self.alpha, i + 1)
new_scores = new_log_probs / length_norm
# Set the scores of the still-alive seq in new_seq to large negative values.
new_finished_flags = tf.equal(new_seq[:, :, -1], self.eos_id)
new_scores += (1. - tf.to_float(new_finished_flags)) * -INF
# Combine sequences, scores, and flags.
finished_seq = tf.concat([finished_seq, new_seq], axis=1)
finished_scores = tf.concat([finished_scores, new_scores], axis=1)
finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1)
# Return the finished sequences with the best scores.
top_finished_seq, top_finished_scores, top_finished_flags = (
_gather_topk_beams([finished_seq, finished_scores, finished_flags],
finished_scores, self.batch_size, self.beam_size))
return {
_StateKeys.FINISHED_SEQ: top_finished_seq,
_StateKeys.FINISHED_SCORES: top_finished_scores,
_StateKeys.FINISHED_FLAGS: top_finished_flags
}
def sequence_beam_search(
symbols_to_logits_fn, initial_ids, initial_cache, vocab_size, beam_size,
alpha, max_decode_length, eos_id):
"""Search for sequence of subtoken ids with the largest probability.
Args:
symbols_to_logits_fn: A function that takes in ids, index, and cache as
arguments. The passed in arguments will have shape:
ids -> [batch_size * beam_size, index]
index -> [] (scalar)
cache -> nested dictionary of tensors [batch_size * beam_size, ...]
The function must return logits and new cache.
logits -> [batch * beam_size, vocab_size]
new cache -> same shape/structure as inputted cache
initial_ids: Starting ids for each batch item.
int32 tensor with shape [batch_size]
initial_cache: dict containing starting decoder variables information
vocab_size: int size of tokens
beam_size: int number of beams
alpha: float defining the strength of length normalization
max_decode_length: maximum length to decoded sequence
eos_id: int id of eos token, used to determine when a sequence has finished
Returns:
Top decoded sequences [batch_size, beam_size, max_decode_length]
sequence scores [batch_size, beam_size]
"""
batch_size = tf.shape(initial_ids)[0]
sbs = SequenceBeamSearch(symbols_to_logits_fn, vocab_size, batch_size,
beam_size, alpha, max_decode_length, eos_id)
return sbs.search(initial_ids, initial_cache)
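# Illustrative sketch (added, not part of the original file) of the contract
# expected from `symbols_to_logits_fn`: it receives flattened ids, the current
# index and the cache, and returns per-beam logits plus the (possibly updated)
# cache. The uniform logits below are an assumption used only to show shapes.
def _dummy_symbols_to_logits_fn(vocab_size=10):
  def symbols_to_logits_fn(ids, index, cache):
    batch_times_beam = tf.shape(ids)[0]
    logits = tf.zeros([batch_times_beam, vocab_size])  # uniform distribution
    return logits, cache
  return symbols_to_logits_fn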
def _log_prob_from_logits(logits):
return logits - tf.reduce_logsumexp(logits, axis=2, keep_dims=True)
def _length_normalization(alpha, length):
"""Return length normalization factor."""
return tf.pow(((5. + tf.to_float(length)) / 6.), alpha)
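# Worked example (added comment): with alpha = 0.6 and length = 7 the factor is
# ((5 + 7) / 6) ** 0.6 = 2 ** 0.6 ≈ 1.52; dividing the (negative) log
# probability by this larger factor gives longer finished sequences a better
# (less negative) score.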
def _expand_to_beam_size(tensor, beam_size):
"""Tiles a given tensor by beam_size.
Args:
tensor: tensor to tile [batch_size, ...]
beam_size: How much to tile the tensor by.
Returns:
Tiled tensor [batch_size, beam_size, ...]
"""
tensor = tf.expand_dims(tensor, axis=1)
tile_dims = [1] * tensor.shape.ndims
tile_dims[1] = beam_size
return tf.tile(tensor, tile_dims)
def _shape_list(tensor):
"""Return a list of the tensor's shape, and ensure no None values in list."""
# Get statically known shape (may contain None's for unknown dimensions)
shape = tensor.get_shape().as_list()
# Ensure that the shape values are not None
dynamic_shape = tf.shape(tensor)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = dynamic_shape[i]
return shape
def _get_shape_keep_last_dim(tensor):
shape_list = _shape_list(tensor)
# Only the last dimension's static value is kept; all other dimensions are set to None.
for i in range(len(shape_list) - 1):
shape_list[i] = None
if isinstance(shape_list[-1], tf.Tensor):
shape_list[-1] = None
return tf.TensorShape(shape_list)
def _flatten_beam_dim(tensor):
"""Reshapes first two dimensions in to single dimension.
Args:
tensor: Tensor to reshape of shape [A, B, ...]
Returns:
Reshaped tensor of shape [A*B, ...]
"""
shape = _shape_list(tensor)
shape[0] *= shape[1]
shape.pop(1) # Remove beam dim
return tf.reshape(tensor, shape)
def _unflatten_beam_dim(tensor, batch_size, beam_size):
"""Reshapes first dimension back to [batch_size, beam_size].
Args:
tensor: Tensor to reshape of shape [batch_size*beam_size, ...]
batch_size: Tensor, original batch size.
beam_size: int, original beam size.
Returns:
Reshaped tensor of shape [batch_size, beam_size, ...]
"""
shape = _shape_list(tensor)
new_shape = [batch_size, beam_size] + shape[1:]
return tf.reshape(tensor, new_shape)
def _gather_beams(nested, beam_indices, batch_size, new_beam_size):
"""Gather beams from nested structure of tensors.
Each tensor in nested represents a batch of beams, where beam refers to a
single search state (beam search involves searching through multiple states
in parallel).
This function is used to gather the top beams, specified by
beam_indices, from the nested tensors.
Args:
nested: Nested structure (tensor, list, tuple or dict) containing tensors
with shape [batch_size, beam_size, ...].
beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each
value in beam_indices must be between [0, beam_size), and are not
necessarily unique.
batch_size: int size of batch
new_beam_size: int number of beams to be pulled from the nested tensors.
Returns:
Nested structure containing tensors with shape
[batch_size, new_beam_size, ...]
"""
# Computes the i'th coordinate that contains the batch index for gather_nd.
# Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..].
batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size
batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])
# Create coordinates to be passed to tf.gather_nd. Stacking creates a tensor
# with shape [batch_size, beam_size, 2], where the last dimension contains
# the (i, j) gathering coordinates.
coordinates = tf.stack([batch_pos, beam_indices], axis=2)
return nest.map_structure(
lambda state: tf.gather_nd(state, coordinates), nested)
def _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size):
"""Gather top beams from nested structure."""
_, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size)
return _gather_beams(nested, topk_indexes, batch_size, beam_size) | OpenSeq2Seq-master | open_seq2seq/parts/transformer/beam_search.py |
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test beam search helper methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from . import beam_search
#import beam_search
class BeamSearchHelperTests(tf.test.TestCase):
def test_expand_to_beam_size(self):
x = tf.ones([7, 4, 2, 5])
x = beam_search._expand_to_beam_size(x, 3)
with self.test_session() as sess:
shape = sess.run(tf.shape(x))
self.assertAllEqual([7, 3, 4, 2, 5], shape)
def test_shape_list(self):
y = tf.constant(4.0)
x = tf.ones([7, tf.cast(tf.sqrt(y),tf.int32), 2, 5])
shape = beam_search._shape_list(x)
self.assertIsInstance(shape[0], int)
#self.assertIsInstance(shape[1], tf.Tensor)
self.assertIsNotNone(shape[1])
self.assertIsInstance(shape[2], int)
self.assertIsInstance(shape[3], int)
def test_get_shape_keep_last_dim(self):
y = tf.constant(4.0)
x = tf.ones([7, tf.cast(tf.sqrt(y),tf.int32), 2, 5])
shape = beam_search._get_shape_keep_last_dim(x)
self.assertAllEqual([None, None, None, 5],
shape.as_list())
def test_flatten_beam_dim(self):
x = tf.ones([7, 4, 2, 5])
x = beam_search._flatten_beam_dim(x)
with self.test_session() as sess:
shape = sess.run(tf.shape(x))
self.assertAllEqual([28, 2, 5], shape)
def test_unflatten_beam_dim(self):
x = tf.ones([28, 2, 5])
x = beam_search._unflatten_beam_dim(x, 7, 4)
with self.test_session() as sess:
shape = sess.run(tf.shape(x))
self.assertAllEqual([7, 4, 2, 5], shape)
def test_gather_beams(self):
x = tf.reshape(tf.range(24), [2, 3, 4])
# x looks like: [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
y = beam_search._gather_beams(x, [[1, 2], [0, 2]], 2, 2)
with self.test_session() as sess:
y = sess.run(y)
self.assertAllEqual([[[4, 5, 6, 7],
[8, 9, 10, 11]],
[[12, 13, 14, 15],
[20, 21, 22, 23]]],
y)
def test_gather_topk_beams(self):
x = tf.reshape(tf.range(24), [2, 3, 4])
x_scores = [[0, 1, 1], [1, 0, 1]]
y = beam_search._gather_topk_beams(x, x_scores, 2, 2)
with self.test_session() as sess:
y = sess.run(y)
self.assertAllEqual([[[4, 5, 6, 7],
[8, 9, 10, 11]],
[[12, 13, 14, 15],
[20, 21, 22, 23]]],
y)
if __name__ == "__main__":
tf.test.main()
| OpenSeq2Seq-master | open_seq2seq/parts/transformer/beam_search_test.py |
# This code is heavily based on the code from MLPerf
# https://github.com/mlperf/reference/tree/master/translation/tensorflow/transformer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Transformer_BatchNorm(tf.layers.Layer):
"""Transformer batch norn: supports [BTC](default) and [BCT] formats. """
def __init__(self, training, params={}):
super(Transformer_BatchNorm, self).__init__()
self.training = training
self.data_format=params.get('data_format','channels_last')
self.momentum = params.get('momentum',0.95)
self.epsilon = params.get('epsilon',0.0001)
self.center_scale = params.get('center_scale', True)
self.regularizer = params.get('regularizer', None) if self.center_scale else None
    if self.regularizer is not None:
self.regularizer_params = params.get("regularizer_params", {'scale': 0.0})
self.regularizer=self.regularizer(self.regularizer_params['scale']) \
if self.regularizer_params['scale'] > 0.0 else None
#print("Batch norm, training=", training, params)
def call(self, x):
x = tf.expand_dims(x, axis=2)
axis = -1 if (self.data_format=='channels_last') else 1
y = tf.layers.batch_normalization(inputs=x, axis=axis,
momentum=self.momentum, epsilon=self.epsilon,
center=self.center_scale, scale=self.center_scale,
beta_regularizer=self.regularizer, gamma_regularizer=self.regularizer,
training=self.training,
)
y = tf.squeeze(y, axis=[2])
return y
class LayerNormalization(tf.layers.Layer):
"""Layer normalization for BTC format: supports L2(default) and L1 modes"""
def __init__(self, hidden_size, params={}):
super(LayerNormalization, self).__init__()
self.hidden_size = hidden_size
self.norm_type = params.get("type", "layernorm_L2")
self.epsilon = params.get("epsilon", 1e-6)
def build(self, _):
self.scale = tf.get_variable("layer_norm_scale", [self.hidden_size],
initializer= tf.keras.initializers.Ones(),
dtype=tf.float32)
self.bias = tf.get_variable("layer_norm_bias", [self.hidden_size],
initializer=tf.keras.initializers.Zeros(),
dtype=tf.float32)
self.built = True
def call(self, x):
if self.norm_type=="layernorm_L2":
epsilon = self.epsilon
dtype = x.dtype
x = tf.cast(x=x, dtype=tf.float32)
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
result = norm_x * self.scale + self.bias
return tf.cast(x=result, dtype=dtype)
else:
dtype = x.dtype
if dtype==tf.float16:
x = tf.cast(x, dtype=tf.float32)
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
x = x - mean
variance = tf.reduce_mean(tf.abs(x), axis=[-1], keepdims=True)
norm_x = tf.div(x , variance + self.epsilon)
y = norm_x * self.scale + self.bias
if dtype == tf.float16:
y = tf.saturate_cast(y, dtype)
return y
class PrePostProcessingWrapper(object):
"""Wrapper around layer, that applies pre-processing and post-processing."""
def __init__(self, layer, params, training):
self.layer = layer
self.postprocess_dropout = params["layer_postprocess_dropout"]
self.training = training
self.norm_params = params.get("norm_params", {"type": "layernorm_L2"})
# Create normalization layer
if self.norm_params["type"]=="batch_norm":
self.norm = Transformer_BatchNorm(training=training,
params=self.norm_params)
else:
self.norm = LayerNormalization(hidden_size=params["hidden_size"],
params=self.norm_params)
def __call__(self, x, *args, **kwargs):
# Preprocessing: normalization
y = self.norm(x)
y = self.layer(y, *args, **kwargs)
# Postprocessing: dropout and residual connection
if self.training:
y = tf.nn.dropout(y, keep_prob=1 - self.postprocess_dropout)
return x + y
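# Illustrative sketch (not part of the original module): how a sublayer might be
# wrapped so that it gets the pre-norm -> sublayer -> dropout -> residual-add
# treatment. `ffn_layer` and `params` are hypothetical placeholders for whatever
# layer/config the encoder or decoder actually builds.
#
#   wrapped_ffn = PrePostProcessingWrapper(ffn_layer, params, training=True)
#   outputs = wrapped_ffn(inputs)  # inputs: [batch, length, hidden_size]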
| OpenSeq2Seq-master | open_seq2seq/parts/transformer/common.py |
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer model helper methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
_NEG_INF = -1e9
#_NEG_INF_FP16 = -1e4
def get_position_encoding(
length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
"""Return positional encoding.
Calculates the position encoding as a mix of sine and cosine functions with
geometrically increasing wavelengths.
Defined and formulized in Attention is All You Need, section 3.5.
Args:
length: Sequence length.
    hidden_size: Size of the hidden (model embedding) dimension.
min_timescale: Minimum scale that will be applied at each position
max_timescale: Maximum scale that will be applied at each position
Returns:
Tensor with shape [length, hidden_size]
"""
position = tf.cast(tf.range(length),dtype=tf.float32)
num_timescales = hidden_size // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.cast((num_timescales) - 1, dtype=tf.float32)))
inv_timescales = min_timescale * tf.exp(
tf.cast(tf.range(num_timescales),dtype=tf.float32 ) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
return signal
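# Illustrative sketch (not part of the original module): the encoding is usually
# added to the token embeddings before the first Transformer layer. `embedded`
# is a hypothetical [batch, length, hidden_size] tensor.
#
#   pos_enc = get_position_encoding(tf.shape(embedded)[1], hidden_size=512)
#   embedded = embedded + tf.cast(pos_enc, embedded.dtype)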
def get_decoder_self_attention_bias(length, dtype=tf.float32):
"""Calculate bias for decoder that maintains model's autoregressive property.
Creates a tensor that masks out locations that correspond to illegal
connections, so prediction at position i cannot draw information from future
positions.
Args:
length: int length of sequences in batch.
Returns:
float tensor of shape [1, 1, length, length]
"""
#print("get_decoder_self_attention_bias", dtype)
with tf.name_scope("decoder_self_attention_bias"):
#valid_locs = tf.matrix_band_part(tf.ones([length, length], dtype=dtype), -1, 0)
valid_locs = tf.matrix_band_part(tf.ones([length, length], dtype=tf.float32), -1, 0)
valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
neg_inf=_NEG_INF #if (dtype==tf.float32) else _NEG_INF_FP16
bias = neg_inf * (1.0 - valid_locs)
#bias=tf.saturate_cast(bias, dtype=dtype)
return bias
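# Worked example (comment only): for length=3 the returned bias has shape
# [1, 1, 3, 3] and looks like
#   [[[[   0., -1e9, -1e9],
#      [   0.,    0., -1e9],
#      [   0.,    0.,    0.]]]]
# so, after being added to the logits, position i can only attend to j <= i.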
def get_padding(x, padding_value=0, dtype=tf.float32):
"""Return float tensor representing the padding values in x.
Args:
x: int tensor with any shape
    padding_value: int value that is used to denote padding
dtype: type of the output
Returns:
float tensor with same shape as x containing values 0 or 1.
0 -> non-padding, 1 -> padding
"""
#print("get_padding", dtype)
with tf.name_scope("padding"):
return tf.cast(tf.equal(x, padding_value), dtype=dtype)
def get_padding_bias(x, res_rank=4, pad_sym=0, dtype=tf.float32):
"""Calculate bias tensor from padding values in tensor.
Bias tensor that is added to the pre-softmax multi-headed attention logits,
which has shape [batch_size, num_heads, length, length]. The tensor is zero at
non-padding locations, and -1e9 (negative infinity) at padding locations.
Args:
x: int tensor with shape [batch_size, length]
res_rank: int indicates the rank of attention_bias.
dtype: type of the output attention_bias
pad_sym: int the symbol used for padding
Returns:
Attention bias tensor of shape
[batch_size, 1, 1, length] if res_rank = 4 - for Transformer
or [batch_size, 1, length] if res_rank = 3 - for ConvS2S
"""
#print("get_padding_bias", dtype)
with tf.name_scope("attention_bias"):
padding = get_padding(x, padding_value=pad_sym, dtype=tf.float32)
# padding = get_padding(x, padding_value=pad_sym, dtype=dtype)
neg_inf=_NEG_INF #if dtype==tf.float32 else _NEG_INF_FP16
attention_bias = padding * neg_inf
if res_rank == 4:
attention_bias = tf.expand_dims(tf.expand_dims(attention_bias, axis=1), axis=1)
elif res_rank == 3:
attention_bias = tf.expand_dims(attention_bias, axis=1)
else:
raise ValueError("res_rank should be 3 or 4 but got {}".format(res_rank))
return attention_bias
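# Illustrative sketch (not part of the original module): the bias is broadcast-
# added to the pre-softmax attention logits so that padded source positions get
# (almost) zero attention weight. `source_ids` and `logits` are hypothetical.
#
#   attention_bias = get_padding_bias(source_ids, res_rank=4, pad_sym=0)
#   logits = logits + attention_bias  # logits: [batch, heads, length, length]
#   weights = tf.nn.softmax(logits)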
| OpenSeq2Seq-master | open_seq2seq/parts/transformer/utils.py |
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of multiheaded attention and self-attention layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Attention(tf.layers.Layer):
"""Multi-headed attention layer."""
def __init__(
self,
hidden_size,
num_heads,
attention_dropout,
train,
mode="loung",
regularizer=None,
window_size=None,
back_step_size=None
):
if hidden_size % num_heads != 0:
raise ValueError("Hidden size must be evenly divisible by the number of "
"heads.")
super(Attention, self).__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.attention_dropout = attention_dropout
self.train = train
self.mode = mode
# Parameters for monotonic attention forcing during inference
self.window_size = window_size
self.back_step_size = back_step_size
# Layers for linearly projecting the queries, keys, and values.
self.q_dense_layer = tf.layers.Dense(hidden_size, use_bias=False, name="q",
kernel_regularizer=regularizer)
self.k_dense_layer = tf.layers.Dense(hidden_size, use_bias=False, name="k",
kernel_regularizer=regularizer)
self.v_dense_layer = tf.layers.Dense(hidden_size, use_bias=False, name="v",
kernel_regularizer=regularizer)
self.output_dense_layer = tf.layers.Dense(hidden_size, use_bias=False,
name="output_transform",
kernel_regularizer=regularizer)
def split_heads(self, x):
"""Split x into different heads, and transpose the resulting value.
    The tensor is transposed to ensure the inner dimensions hold the correct
values during the matrix multiplication.
Args:
x: A tensor with shape [batch_size, length, hidden_size]
Returns:
A tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]
"""
with tf.name_scope("split_heads"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
# Calculate depth of last dimension after it has been split.
depth = (self.hidden_size // self.num_heads)
# Split the last dimension
x = tf.reshape(x, [batch_size, length, self.num_heads, depth])
# Transpose the result
return tf.transpose(x, [0, 2, 1, 3])
def combine_heads(self, x):
"""Combine tensor that has been split.
Args:
x: A tensor [batch_size, num_heads, length, hidden_size/num_heads]
Returns:
A tensor with shape [batch_size, length, hidden_size]
"""
with tf.name_scope("combine_heads"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[2]
x = tf.transpose(x, [0, 2, 1, 3]) # --> [batch, length, num_heads, depth]
return tf.reshape(x, [batch_size, length, self.hidden_size])
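  # Shape round trip (comment only): with hidden_size=512 and num_heads=8,
  # split_heads maps [batch, length, 512] -> [batch, 8, length, 64] and
  # combine_heads maps that back to [batch, length, 512].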
def call(self, x, y, bias, cache=None, positions=None):
"""Apply attention mechanism to x and y.
Args:
x: a tensor with shape [batch_size, length_x, hidden_size]
y: a tensor with shape [batch_size, length_y, hidden_size]
bias: attention bias that will be added to the result of the dot product.
cache: (Used during prediction) dictionary with tensors containing results
of previous attentions. The dictionary must have the items:
{"k": tensor with shape [batch_size, i, key_channels],
"v": tensor with shape [batch_size, i, value_channels]}
where i is the current decoded length.
positions: decoder-encoder alignment for previous steps [batch_size, n_heads, length_x]
Returns:
Attention layer output with shape [batch_size, length_x, hidden_size]
"""
# Linearly project the query (q), key (k) and value (v) using different
# learned projections. This is in preparation of splitting them into
# multiple heads. Multi-head attention uses multiple queries, keys, and
# values rather than regular attention (which uses a single q, k, v).
q = self.q_dense_layer(x)
k = self.k_dense_layer(y)
v = self.v_dense_layer(y)
if cache is not None:
# Combine cached keys and values with new keys and values.
k = tf.concat([cache["k"], k], axis=1)
v = tf.concat([cache["v"], v], axis=1)
# Update cache
cache["k"] = k
cache["v"] = v
# Split q, k, v into heads.
q = self.split_heads(q)
k = self.split_heads(k)
v = self.split_heads(v)
if self.mode == "loung":
# Scale q to prevent the dot product between q and k from growing too large.
depth = (self.hidden_size // self.num_heads)
q *= depth ** -0.5
# Calculate dot product attention
# logits = tf.matmul(q, k, transpose_b=True)
# logits += bias
# weights = tf.nn.softmax(logits, name="attention_weights")
logits = tf.matmul(q, k, transpose_b=True)
dtype = logits.dtype
if dtype != tf.float32:
# upcast softmax inputs
logits = tf.cast(x=logits, dtype=tf.float32)
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
# downcast softmax output
weights = tf.cast(weights, dtype=dtype)
else:
# Logits shape: [batch, head, decoder, encoder]
# Bias shape: [batch, 1, 1, encoder]
# Force monotonic attention during inference
if positions is not None and self.window_size is not None:
assert self.back_step_size is not None
max_length = tf.shape(logits)[-1]
# Allow to make back_step_size steps back
window_pos = tf.maximum(positions - self.back_step_size, tf.zeros_like(positions))
# Create attention mask
mask_large = tf.sequence_mask(window_pos + self.window_size, maxlen=max_length)
mask_large = tf.cast(mask_large, tf.float32)
mask_small = tf.sequence_mask(window_pos, maxlen=max_length)
mask_small = tf.cast(mask_small, tf.float32)
mask = mask_large - mask_small
mask = -1e9 * (1 - mask)
bias = mask + bias
# Clipping
bias = tf.maximum(bias, -1e9)
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
elif self.mode == "bahdanau":
att_v = tf.get_variable(
"attention_v", [self.hidden_size // self.num_heads], dtype=q.dtype
)
# Compute the attention score
if bias is not None:
weights = tf.reduce_sum(
tf.nn.tanh(att_v * tf.nn.tanh(k + q + bias)), 3
)
else:
weights = tf.reduce_sum(
tf.nn.tanh(att_v * tf.nn.tanh(k + q)), 3
)
weights = tf.nn.softmax(weights)
weights = tf.expand_dims(weights, 2)
else:
      raise ValueError(
          "Mode for multi-head attention must be either loung for dot-product "
          "attention, or bahdanau for content-based/additive/mlp-based attention"
      )
if self.train:
weights = tf.nn.dropout(weights, keep_prob=1 - self.attention_dropout)
attention_output = tf.matmul(weights, v)
# Recombine heads --> [batch_size, length, hidden_size]
attention_output = self.combine_heads(attention_output)
# Run the combined outputs through another linear projection layer.
attention_output = self.output_dense_layer(attention_output)
return attention_output
class SelfAttention(Attention):
"""Multiheaded self-attention layer."""
def call(self, x, bias, cache=None):
return super(SelfAttention, self).call(x, x, bias, cache)
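# Illustrative sketch (not part of the original module): building and calling
# the self-attention layer. `inputs` ([batch, length, 512]) and `attention_bias`
# ([batch, 1, 1, length]) are hypothetical tensors.
#
#   self_attn = SelfAttention(hidden_size=512, num_heads=8,
#                             attention_dropout=0.1, train=True)
#   outputs = self_attn(inputs, attention_bias)  # [batch, length, 512]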
| OpenSeq2Seq-master | open_seq2seq/parts/transformer/attention_layer.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# THIS CODE WAS TAKEN FROM:
# https://raw.githubusercontent.com/tensorflow/nmt/master/nmt/gnmt_model.py
"""GNMT attention sequence-to-sequence model with dynamic RNN support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import range
import tensorflow as tf
from tensorflow.python.util import nest
# TODO: must implement all abstract methods
class GNMTAttentionMultiCell(tf.nn.rnn_cell.MultiRNNCell):
"""A MultiCell with GNMT attention style."""
def __init__(self, attention_cell, cells, use_new_attention=False):
"""Creates a GNMTAttentionMultiCell.
Args:
attention_cell: An instance of AttentionWrapper.
cells: A list of RNNCell wrapped with AttentionInputWrapper.
use_new_attention: Whether to use the attention generated from current
step bottom layer's output. Default is False.
"""
cells = [attention_cell] + cells
self.use_new_attention = use_new_attention
super(GNMTAttentionMultiCell, self).__init__(cells, state_is_tuple=True)
# TODO: does not match signature of the base method
def __call__(self, inputs, state, scope=None):
"""Run the cell with bottom layer's attention copied to all upper layers."""
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s"
% (len(self.state_size), state))
with tf.variable_scope(scope or "multi_rnn_cell"):
new_states = []
with tf.variable_scope("cell_0_attention"):
attention_cell = self._cells[0]
attention_state = state[0]
cur_inp, new_attention_state = attention_cell(inputs, attention_state)
new_states.append(new_attention_state)
for i in range(1, len(self._cells)):
with tf.variable_scope("cell_%d" % i):
cell = self._cells[i]
cur_state = state[i]
if self.use_new_attention:
cur_inp = tf.concat([cur_inp, new_attention_state.attention], -1)
else:
cur_inp = tf.concat([cur_inp, attention_state.attention], -1)
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
return cur_inp, tuple(new_states)
def gnmt_residual_fn(inputs, outputs):
"""Residual function that handles different inputs and outputs inner dims.
Args:
inputs: cell inputs, this is actual inputs concatenated with the attention
vector.
outputs: cell outputs
Returns:
outputs + actual inputs
"""
def split_input(inp, out):
out_dim = out.get_shape().as_list()[-1]
inp_dim = inp.get_shape().as_list()[-1]
return tf.split(inp, [out_dim, inp_dim - out_dim], axis=-1)
actual_inputs, _ = nest.map_structure(split_input, inputs, outputs)
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
nest.assert_same_structure(actual_inputs, outputs)
nest.map_structure(assert_shape_match, actual_inputs, outputs)
return nest.map_structure(lambda inp, out: inp + out, actual_inputs, outputs)
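# Worked example (comment only): if the cell input is the previous layer output
# concatenated with an attention vector (say last dim 1536) and the cell output
# has last dim 1024, gnmt_residual_fn adds the output to only the first 1024
# input features and drops the attention part from the residual path.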
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/gnmt.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A decoder that performs beam search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import range
import collections
import numpy as np
import tensorflow as tf
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"BeamSearchDecoder",
"FinalBeamSearchDecoderOutput",
"tile_batch",
]
class BeamSearchDecoderState(
collections.namedtuple("BeamSearchDecoderState",
("cell_state", "log_probs", "finished", "lengths"))):
pass
class BeamSearchDecoderOutput(
collections.namedtuple("BeamSearchDecoderOutput",
("scores", "predicted_ids", "parent_ids"))):
pass
class FinalBeamSearchDecoderOutput(
collections.namedtuple("FinalBeamDecoderOutput",
["predicted_ids", "beam_search_decoder_output"])):
"""Final outputs returned by the beam search after all decoding is finished.
Args:
predicted_ids: The final prediction. A tensor of shape
`[batch_size, T, beam_width]` (or `[T, batch_size, beam_width]` if
`output_time_major` is True). Beams are ordered from best to worst.
beam_search_decoder_output: An instance of `BeamSearchDecoderOutput` that
describes the state of the beam search.
"""
pass
def _tile_batch(t, multiplier):
"""Core single-tensor implementation of tile_batch."""
t = ops.convert_to_tensor(t, name="t")
shape_t = array_ops.shape(t)
if t.shape.ndims is None or t.shape.ndims < 1:
raise ValueError("t must have statically known rank")
tiling = [1] * (t.shape.ndims + 1)
tiling[1] = multiplier
tiled_static_batch_size = (
t.shape[0].value * multiplier if t.shape[0].value is not None else None)
tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
tiled = array_ops.reshape(tiled,
array_ops.concat(
([shape_t[0] * multiplier], shape_t[1:]), 0))
tiled.set_shape(
tensor_shape.TensorShape([tiled_static_batch_size]).concatenate(
t.shape[1:]))
return tiled
def tile_batch(t, multiplier, name=None):
"""Tile the batch dimension of a (possibly nested structure of) tensor(s) t.
For each tensor t in a (possibly nested structure) of tensors,
this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of
minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
`[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries
`t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
`multiplier` times.
Args:
t: `Tensor` shaped `[batch_size, ...]`.
multiplier: Python int.
name: Name scope for any created operations.
Returns:
A (possibly nested structure of) `Tensor` shaped
`[batch_size * multiplier, ...]`.
Raises:
ValueError: if tensor(s) `t` do not have a statically known rank or
the rank is < 1.
"""
flat_t = nest.flatten(t)
with ops.name_scope(name, "tile_batch", flat_t + [multiplier]):
return nest.map_structure(lambda t_: _tile_batch(t_, multiplier), t)
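# Illustrative sketch (not part of the original module): tiling encoder results
# before building the attention mechanism used with this decoder. The variable
# names below are hypothetical.
#
#   tiled_memory = tile_batch(encoder_outputs, multiplier=beam_width)
#   tiled_lengths = tile_batch(encoder_sequence_length, multiplier=beam_width)
#   tiled_final_state = tile_batch(encoder_final_state, multiplier=beam_width)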
def _check_maybe(t):
if isinstance(t, tensor_array_ops.TensorArray):
raise TypeError(
"TensorArray state is not supported by BeamSearchDecoder: %s" % t.name)
if t.shape.ndims is None:
raise ValueError(
"Expected tensor (%s) to have known rank, but ndims == None." % t)
class BeamSearchDecoder(decoder.Decoder):
"""BeamSearch sampling decoder.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
  tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
      memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
"""
def __init__(self,
cell,
embedding,
start_tokens,
end_token,
initial_state,
beam_width,
output_layer=None,
length_penalty_weight=0.0,
positional_embedding=None):
"""Initialize the BeamSearchDecoder.
Args:
cell: An `RNNCell` instance.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
beam_width: Python integer, the number of beams.
output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
positional_embedding: A callable to use decoder positional embedding.
Default is None in which case positional embedding is disabled
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.layers.Layer`.
ValueError: If `start_tokens` is not a vector or
`end_token` is not a scalar.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
if (output_layer is not None and
not isinstance(output_layer, layers_base.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._output_layer = output_layer
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._use_pos_embedding = False
if positional_embedding is not None:
if callable(positional_embedding):
self._pos_embedding_fn = positional_embedding
else:
self._pos_embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(positional_embedding, ids))
self._use_pos_embedding = True
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._batch_size = array_ops.size(start_tokens)
self._beam_width = beam_width
self._length_penalty_weight = length_penalty_weight
self._initial_cell_state = nest.map_structure(
self._maybe_split_batch_beams, initial_state, self._cell.state_size)
self._start_tokens = array_ops.tile(
array_ops.expand_dims(self._start_tokens, 1), [1, self._beam_width])
self._start_inputs = self._embedding_fn(self._start_tokens)
if self._use_pos_embedding:
self._start_inputs += self._pos_embedding_fn(ops.convert_to_tensor(0))
self._finished = array_ops.one_hot(
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=False,
off_value=True,
dtype=dtypes.bool)
@property
def batch_size(self):
return self._batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
layer_output_shape = self._output_layer.compute_output_shape(
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def tracks_own_finished(self):
"""The BeamSearchDecoder shuffles its beams and their finished state.
For this reason, it conflicts with the `dynamic_decode` function's
tracking of finished states. Setting this property to true avoids
early stopping of decoding due to mismanagement of the finished state
in `dynamic_decode`.
Returns:
`True`.
"""
return True
@property
def output_size(self):
# Return the cell output and the id
return BeamSearchDecoderOutput(
scores=tensor_shape.TensorShape([self._beam_width]),
predicted_ids=tensor_shape.TensorShape([self._beam_width]),
parent_ids=tensor_shape.TensorShape([self._beam_width]))
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and int32 (the id)
dtype = nest.flatten(self._initial_cell_state)[0].dtype
return BeamSearchDecoderOutput(
scores=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
predicted_ids=dtypes.int32,
parent_ids=dtypes.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, start_inputs, initial_state)`.
"""
finished, start_inputs = self._finished, self._start_inputs
dtype = nest.flatten(self._initial_cell_state)[0].dtype
log_probs = array_ops.one_hot( # shape(batch_sz, beam_sz)
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=math_ops.cast(0.0, dtype),
off_value=-np.float16('inf') if dtype == dtypes.float16 else -np.Inf,
dtype=dtype)
initial_state = BeamSearchDecoderState(
cell_state=self._initial_cell_state,
log_probs=log_probs,
finished=finished,
lengths=array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.int64))
return (finished, start_inputs, initial_state)
def finalize(self, outputs, final_state, sequence_lengths):
"""Finalize and return the predicted_ids.
Args:
outputs: An instance of BeamSearchDecoderOutput.
final_state: An instance of BeamSearchDecoderState. Passed through to the
output.
sequence_lengths: An `int64` tensor shaped `[batch_size, beam_width]`.
The sequence lengths determined for each beam during decode.
**NOTE** These are ignored; the updated sequence lengths are stored in
`final_state.lengths`.
Returns:
outputs: An instance of `FinalBeamSearchDecoderOutput` where the
predicted_ids are the result of calling _gather_tree.
final_state: The same input instance of `BeamSearchDecoderState`.
"""
del sequence_lengths
# Get max_sequence_length across all beams for each batch.
max_sequence_lengths = tf.cast(
math_ops.reduce_max(final_state.lengths, axis=1),tf.int32)
predicted_ids = beam_search_ops.gather_tree(
outputs.predicted_ids,
outputs.parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=self._end_token)
outputs = FinalBeamSearchDecoderOutput(
beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
return outputs, final_state
def _merge_batch_beams(self, t, s=None):
"""Merges the tensor from a batch of beams into a batch by beams.
More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
reshape this into [batch_size*beam_width, s]
Args:
t: Tensor of dimension [batch_size, beam_width, s]
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size * beam_width, s].
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.as_shape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
static_batch_size = tensor_util.constant_value(self._batch_size)
batch_size_beam_width = (
None
if static_batch_size is None else static_batch_size * self._beam_width)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size * self._beam_width], t_shape[2:]),
0))
reshaped_t.set_shape(
(tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
return reshaped_t
def _split_batch_beams(self, t, s=None):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s].
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
ValueError: If, after reshaping, the new tensor is not shaped
`[batch_size, beam_width, s]` (assuming batch_size and beam_width
are known statically).
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.TensorShape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size, self._beam_width], t_shape[1:]),
0))
static_batch_size = tensor_util.constant_value(self._batch_size)
expected_reshaped_shape = tensor_shape.TensorShape(
[static_batch_size, self._beam_width]).concatenate(s)
if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
raise ValueError("Unexpected behavior when reshaping between beam width "
"and batch size. The reshaped tensor has shape: %s. "
"We expected it to have shape "
"(batch_size, beam_width, depth) == %s. Perhaps you "
"forgot to create a zero_state with "
"batch_size=encoder_batch_size * beam_width?" %
(reshaped_t.shape, expected_reshaped_shape))
reshaped_t.set_shape(expected_reshaped_shape)
return reshaped_t
def _maybe_split_batch_beams(self, t, s):
"""Maybe splits the tensor from a batch by beams into a batch of beams.
We do this so that we can use nest and not run into problems with shapes.
Args:
t: `Tensor`, either scalar or shaped `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
If `t` is a matrix or higher order tensor, then the return value is
`t` reshaped to `[batch_size, beam_width] + s`. Otherwise `t` is
returned unchanged.
Raises:
TypeError: If `t` is an instance of `TensorArray`.
ValueError: If the rank of `t` is not statically known.
"""
_check_maybe(t)
if t.shape.ndims >= 1:
return self._split_batch_beams(t, s)
else:
return t
def _maybe_merge_batch_beams(self, t, s):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, `t` is a tensor of dimension `[batch_size * beam_width] + s`,
then we reshape it to `[batch_size, beam_width] + s`.
Args:
t: `Tensor` of dimension `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
A reshaped version of t with shape `[batch_size, beam_width] + s`.
Raises:
TypeError: If `t` is an instance of `TensorArray`.
ValueError: If the rank of `t` is not statically known.
"""
_check_maybe(t)
if t.shape.ndims >= 2:
return self._merge_batch_beams(t, s)
else:
return t
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
batch_size = self._batch_size
beam_width = self._beam_width
end_token = self._end_token
length_penalty_weight = self._length_penalty_weight
with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
cell_state = state.cell_state
inputs = nest.map_structure(
lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
cell_state = nest.map_structure(self._maybe_merge_batch_beams, cell_state,
self._cell.state_size)
cell_outputs, next_cell_state = self._cell(inputs, cell_state)
cell_outputs = nest.map_structure(
lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
next_cell_state = nest.map_structure(
self._maybe_split_batch_beams, next_cell_state, self._cell.state_size)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
beam_search_output, beam_search_state = _beam_search_step(
time=time,
logits=cell_outputs,
next_cell_state=next_cell_state,
beam_state=state,
batch_size=batch_size,
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight)
finished = beam_search_state.finished
sample_ids = beam_search_output.predicted_ids
next_inputs = control_flow_ops.cond(
math_ops.reduce_all(finished), lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
if self._use_pos_embedding:
next_inputs += self._pos_embedding_fn(ops.convert_to_tensor(time))
return (beam_search_output, beam_search_state, next_inputs, finished)
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
beam_width, end_token, length_penalty_weight):
"""Performs a single step of Beam Search Decoding.
Args:
time: Beam search time step, should start at 0. At time 0 we assume
that all beams are equal and consider only the first beam for
continuations.
logits: Logits at the current time step. A tensor of shape
`[batch_size, beam_width, vocab_size]`
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
beam_state: Current state of the beam search.
An instance of `BeamSearchDecoderState`.
batch_size: The batch size for this input.
beam_width: Python int. The size of the beams.
end_token: The int32 end token.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
A new beam state.
"""
static_batch_size = tensor_util.constant_value(batch_size)
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
# Calculate the total log probs for the new hypotheses
# Final Shape: [batch_size, beam_width, vocab_size]
step_log_probs = nn_ops.log_softmax(logits)
step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
total_probs = array_ops.expand_dims(beam_state.log_probs, 2) + step_log_probs
# Calculate the continuation lengths by adding to all continuing beams.
vocab_size = logits.shape[-1].value or array_ops.shape(logits)[-1]
lengths_to_add = array_ops.one_hot(
indices=array_ops.fill([batch_size, beam_width], end_token),
depth=vocab_size,
on_value=np.int64(0),
off_value=np.int64(1),
dtype=dtypes.int64)
add_mask = tf.cast(math_ops.logical_not(previously_finished), tf.int64)
lengths_to_add *= array_ops.expand_dims(add_mask, 2)
new_prediction_lengths = (
lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
# Calculate the scores for each beam
scores = _get_scores(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
length_penalty_weight=length_penalty_weight,
dtype=logits.dtype)
time = ops.convert_to_tensor(time, name="time")
# During the first time step we only consider the initial beam
scores_shape = array_ops.shape(scores)
scores_flat = array_ops.reshape(scores, [batch_size, -1])
# Pick the next beams according to the specified successors function
next_beam_size = ops.convert_to_tensor(
beam_width, dtype=dtypes.int32, name="beam_width")
next_beam_scores, word_indices = nn_ops.top_k(scores_flat, k=next_beam_size)
next_beam_scores.set_shape([static_batch_size, beam_width])
word_indices.set_shape([static_batch_size, beam_width])
# Pick out the probs, beam_ids, and states according to the chosen
# predictions
next_beam_probs = _tensor_gather_helper(
gather_indices=word_indices,
gather_from=total_probs,
batch_size=batch_size,
range_size=beam_width * vocab_size,
gather_shape=[-1],
name="next_beam_probs")
# Note: just doing the following
# math_ops.to_int32(word_indices % vocab_size,
# name="next_beam_word_ids")
# would be a lot cleaner but for reasons unclear, that hides the results of
# the op which prevents capturing it with tfdbg debug ops.
raw_next_word_ids = math_ops.mod(
word_indices, vocab_size, name="next_beam_word_ids")
next_word_ids = tf.cast(raw_next_word_ids, tf.int32)
next_beam_ids = tf.cast(word_indices / vocab_size,
name="next_beam_parent_ids", dtype=tf.int32)
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=previously_finished,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_finished = math_ops.logical_or(
previously_finished,
math_ops.equal(next_word_ids, end_token),
name="next_beam_finished")
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged.
# 2. Beams that are now finished (EOS predicted) have their length
# increased by 1.
# 3. Beams that are not yet finished have their length increased by 1.
lengths_to_add = tf.cast(math_ops.logical_not(previously_finished), tf.int64)
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_prediction_len += lengths_to_add
# Pick out the cell_states according to the next_beam_ids. We use a
# different gather_shape here because the cell_state tensors, i.e.
# the tensors that would be gathered from, all have dimension
# greater than two and we need to preserve those dimensions.
# pylint: disable=g-long-lambda
next_cell_state = nest.map_structure(
lambda gather_from: _maybe_tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=gather_from,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1]),
next_cell_state)
# pylint: enable=g-long-lambda
next_state = BeamSearchDecoderState(
cell_state=next_cell_state,
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished)
output = BeamSearchDecoderOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
parent_ids=next_beam_ids)
return output, next_state
def _get_scores(log_probs, sequence_lengths, length_penalty_weight,
dtype=dtypes.float32):
"""Calculates scores for beam search hypotheses.
Args:
log_probs: The log probabilities with shape
`[batch_size, beam_width, vocab_size]`.
sequence_lengths: The array of sequence lengths.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
The scores normalized by the length_penalty.
"""
  length_penalty_ = _length_penalty(
      sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
  return log_probs / math_ops.cast(length_penalty_, dtype)
def _length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty. See https://arxiv.org/abs/1609.08144.
Returns the length penalty tensor:
```
[(5+sequence_lengths)/6]**penalty_factor
```
where all operations are performed element-wise.
Args:
sequence_lengths: `Tensor`, the sequence lengths of each hypotheses.
penalty_factor: A scalar that weights the length penalty.
Returns:
If the penalty is `0`, returns the scalar `1.0`. Otherwise returns
the length penalty factor, a tensor with the same shape as
`sequence_lengths`.
"""
penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
penalty_factor.set_shape(()) # penalty should be a scalar.
static_penalty = tensor_util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return math_ops.div((5. + math_ops.to_float(sequence_lengths))
** penalty_factor, (5. + 1.)**penalty_factor)
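# Worked example (comment only): with penalty_factor=0.6 and sequence length 7,
# the penalty is ((5 + 7) / 6) ** 0.6 = 2 ** 0.6 ~= 1.516, so longer hypotheses
# have their log-probabilities divided by a larger value in _get_scores.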
def _mask_probs(probs, eos_token, finished):
"""Masks log probabilities.
The result is that finished beams allocate all probability mass to eos and
unfinished beams remain unchanged.
Args:
    probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
Returns:
A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
beams stay unchanged and finished beams are replaced with a tensor with all
probability on the EOS token.
"""
vocab_size = array_ops.shape(probs)[2]
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = array_ops.one_hot(
eos_token,
vocab_size,
dtype=probs.dtype,
on_value=ops.convert_to_tensor(0., dtype=probs.dtype),
off_value=probs.dtype.min)
finished_probs = array_ops.tile(
array_ops.reshape(finished_row, [1, 1, -1]),
array_ops.concat([array_ops.shape(finished), [1]], 0))
finished_mask = array_ops.tile(
array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
return array_ops.where(finished_mask, finished_probs, probs)
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
This applies _tensor_gather_helper when the gather_from dims is at least as
big as the length of gather_shape. This is used in conjunction with nest so
that we don't apply _tensor_gather_helper to inapplicable values like scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
_check_maybe(gather_from)
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
def _tensor_gather_helper(gather_indices,
gather_from,
batch_size,
range_size,
gather_shape,
name=None):
"""Helper for gathering the right indices from the tensor.
This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
gathering from that according to the gather_indices, which are offset by
the right amounts in order to preserve the batch order.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The input batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
name: The tensor name for set of operations. By default this is
'tensor_gather_helper'. The final output is named 'output'.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
"""
with ops.name_scope(name, "tensor_gather_helper"):
range_ = array_ops.expand_dims(math_ops.range(batch_size) * range_size, 1)
gather_indices = array_ops.reshape(gather_indices + range_, [-1])
output = array_ops.gather(
array_ops.reshape(gather_from, gather_shape), gather_indices)
final_shape = array_ops.shape(gather_from)[:1 + len(gather_shape)]
static_batch_size = tensor_util.constant_value(batch_size)
final_static_shape = (
tensor_shape.TensorShape([static_batch_size]).concatenate(
gather_from.shape[1:1 + len(gather_shape)]))
output = array_ops.reshape(output, final_shape, name="output")
output.set_shape(final_static_shape)
return output
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/rnn_beam_search_decoder.py |
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import range
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
# pylint: disable=protected-access
_Linear = core_rnn_cell._Linear # pylint: disable=invalid-name
# pylint: enable=protected-access
# TODO: must implement all abstract methods
class GLSTMCell(rnn_cell_impl.RNNCell):
"""Group LSTM cell (G-LSTM).
The implementation is based on:
https://arxiv.org/abs/1703.10722
O. Kuchaiev and B. Ginsburg
"Factorization Tricks for LSTM Networks", ICLR 2017 workshop.
"""
def __init__(self, num_units, initializer=None, num_proj=None,
number_of_groups=1, forget_bias=1.0, activation=math_ops.tanh,
reuse=None):
"""Initialize the parameters of G-LSTM cell.
Args:
num_units: int, The number of units in the G-LSTM cell
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
number_of_groups: (optional) int, number of groups to use.
If `number_of_groups` is 1, then it should be equivalent to LSTM cell
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already
has the given variables, an error is raised.
Raises:
ValueError: If `num_units` or `num_proj` is not divisible by
`number_of_groups`.
"""
super(GLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._num_proj = num_proj
self._forget_bias = forget_bias
self._activation = activation
self._number_of_groups = number_of_groups
if self._num_units % self._number_of_groups != 0:
raise ValueError("num_units must be divisible by number_of_groups")
if self._num_proj:
if self._num_proj % self._number_of_groups != 0:
raise ValueError("num_proj must be divisible by number_of_groups")
self._group_shape = [int(self._num_proj / self._number_of_groups),
int(self._num_units / self._number_of_groups)]
else:
self._group_shape = [int(self._num_units / self._number_of_groups),
int(self._num_units / self._number_of_groups)]
if num_proj:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
self._linear1 = [None] * self._number_of_groups
self._linear2 = None
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _get_input_for_group(self, inputs, group_id, group_size):
"""Slices inputs into groups to prepare for processing by cell's groups
Args:
      inputs: cell input or its previous state,
a Tensor, 2D, [batch x num_units]
group_id: group id, a Scalar, for which to prepare input
group_size: size of the group
Returns:
subset of inputs corresponding to group "group_id",
a Tensor, 2D, [batch x num_units/number_of_groups]
"""
return array_ops.slice(input_=inputs,
begin=[0, group_id * group_size],
size=[self._batch_size, group_size],
name=("GLSTM_group%d_input_generation" % group_id))
# TODO: does not match signature of the base method
def call(self, inputs, state):
"""Run one step of G-LSTM.
Args:
inputs: input Tensor, 2D, [batch x num_units].
state: this must be a tuple of state Tensors, both `2-D`,
with column sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
G-LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- LSTMStateTuple representing the new state of G-LSTM cell
after reading `inputs` when the previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
(c_prev, m_prev) = state
self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
input_size = inputs.shape[-1].value or array_ops.shape(inputs)[-1]
dtype = inputs.dtype
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer):
i_parts = []
j_parts = []
f_parts = []
o_parts = []
for group_id in range(self._number_of_groups):
with vs.variable_scope("group%d" % group_id):
x_g_id = array_ops.concat(
[self._get_input_for_group(inputs, group_id,
int(input_size / self._number_of_groups)),
# self._group_shape[0]), # this is only correct if inputs dim = num_units!!!
self._get_input_for_group(m_prev, group_id,
int(self._output_size / self._number_of_groups))], axis=1)
# self._group_shape[0])], axis=1)
if self._linear1[group_id] is None:
self._linear1[group_id] = _Linear(
x_g_id, 4 * self._group_shape[1],
False,
)
R_k = self._linear1[group_id](x_g_id) # pylint: disable=invalid-name
i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1)
i_parts.append(i_k)
j_parts.append(j_k)
f_parts.append(f_k)
o_parts.append(o_k)
bi = vs.get_variable(
name="bias_i",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype),
)
bj = vs.get_variable(
name="bias_j",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype),
)
bf = vs.get_variable(
name="bias_f",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype),
)
bo = vs.get_variable(
name="bias_o",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype),
)
i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi)
j = nn_ops.bias_add(array_ops.concat(j_parts, axis=1), bj)
f = nn_ops.bias_add(array_ops.concat(f_parts, axis=1), bf)
o = nn_ops.bias_add(array_ops.concat(o_parts, axis=1), bo)
c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
math_ops.sigmoid(i) * math_ops.tanh(j))
m = math_ops.sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
if self._linear2 is None:
self._linear2 = _Linear(m, self._num_proj, False)
m = self._linear2(m)
new_state = rnn_cell_impl.LSTMStateTuple(c, m)
return m, new_state
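# Illustrative sketch (not part of the original module): building a G-LSTM cell
# and unrolling it with dynamic_rnn. `inputs` is a hypothetical
# [batch, time, features] tensor whose last dimension is divisible by
# number_of_groups.
#
#   cell = GLSTMCell(num_units=1024, num_proj=512, number_of_groups=4)
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
#   # outputs: [batch, time, 512] because num_proj projects the cell output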
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/glstm.py |
"""Implement https://arxiv.org/abs/1709.02755
Copy from LSTM, and make it functionally correct with minimum code change
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import range
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import rnn_cell
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
_BIAS_VARIABLE_NAME = "biases" if tf.__version__ < "1.2.0" else "bias"
_WEIGHTS_VARIABLE_NAME = "weights" if tf.__version__ < "1.2.0" else "kernel"
# TODO: must implement all abstract methods
class BasicSLSTMCell(rnn_cell.RNNCell):
"""Basic SLSTM recurrent network cell.
The implementation is based on: https://arxiv.org/abs/1709.02755.
"""
def __init__(self, num_units, forget_bias=1.0,
state_is_tuple=True, activation=None, reuse=None):
"""Initialize the basic SLSTM cell.
Args:
num_units: int, The number of units in the SLSTM cell.
forget_bias: float, The bias added to forget gates (see above).
Must set to `0.0` manually when restoring from CudnnLSTM-trained
checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(BasicSLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
@property
def state_size(self):
return (rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
# TODO: does not match signature of the base method
def call(self, inputs, state):
"""Long short-term memory cell (LSTM).
Args:
inputs: `2-D` tensor with shape `[batch_size x input_size]`.
state: An `LSTMStateTuple` of state tensors, each shaped
`[batch_size x self.state_size]`, if `state_is_tuple` has been set to
`True`. Otherwise, a `Tensor` shaped
`[batch_size x 2 * self.state_size]`.
Returns:
A pair containing the new hidden state, and the new state (either a
`LSTMStateTuple` or a concatenated state, depending on
`state_is_tuple`).
"""
sigmoid = math_ops.sigmoid
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
# concat = _linear([inputs, h], 4 * self._num_units, True)
concat = _linear(inputs, 4 * self._num_units, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
new_c = (
c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
def _linear(args,
output_size,
bias,
bias_initializer=None,
kernel_initializer=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
return nn_ops.bias_add(res, biases)
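# Usage sketch (illustrative only): the cell plugs into the usual TF 1.x RNN
# machinery; shapes and sizes below are arbitrary.
#
#   import tensorflow as tf
#   cell = BasicSLSTMCell(num_units=128)
#   inputs = tf.placeholder(tf.float32, [16, 50, 64])  # [batch, time, input_dim]
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)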
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/slstm.py |
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/__init__.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import math
from six.moves import range
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import ResidualWrapper, DropoutWrapper
from open_seq2seq.parts.rnns.weight_drop import WeightDropLayerNormBasicLSTMCell
from open_seq2seq.parts.rnns.slstm import BasicSLSTMCell
from open_seq2seq.parts.rnns.glstm import GLSTMCell
from open_seq2seq.parts.rnns.zoneout import ZoneoutWrapper
def single_cell(
cell_class,
cell_params,
dp_input_keep_prob=1.0,
dp_output_keep_prob=1.0,
recurrent_keep_prob=1.0,
input_weight_keep_prob=1.0,
recurrent_weight_keep_prob=1.0,
weight_variational=False,
dropout_seed=None,
zoneout_prob=0.,
training=True,
residual_connections=False,
awd_initializer=False,
variational_recurrent=False, # in case they want to use DropoutWrapper
dtype=None,
):
"""Creates an instance of the rnn cell.
Such a cell describes one step of one layer and can include a residual
connection and/or dropout
Args:
cell_class: Tensorflow RNN cell class
cell_params (dict): cell parameters
dp_input_keep_prob (float): (default: 1.0) input dropout keep
probability.
dp_output_keep_prob (float): (default: 1.0) output dropout keep
probability.
zoneout_prob (float): zoneout probability. Applying both zoneout and
dropout is currently not supported
residual_connections (bool): whether to add residual connection
Returns:
TF RNN instance
"""
if awd_initializer:
val = 1.0/math.sqrt(cell_params['num_units'])
cell_params['initializer'] = tf.random_uniform_initializer(minval=-val, maxval=val)
if 'WeightDropLayerNormBasicLSTMCell' in str(cell_class):
if recurrent_keep_prob < 1.0:
cell_params['recurrent_keep_prob'] = recurrent_keep_prob
if input_weight_keep_prob < 1.0:
cell_params['input_weight_keep_prob'] = input_weight_keep_prob
if recurrent_weight_keep_prob < 1.0:
cell_params['recurrent_weight_keep_prob'] = recurrent_weight_keep_prob
if weight_variational:
cell_params['weight_variational'] = weight_variational # which is basically True
if dropout_seed:
cell_params['dropout_seed'] = dropout_seed
cell = cell_class(**cell_params)
if residual_connections:
cell = ResidualWrapper(cell)
if zoneout_prob > 0. and (
dp_input_keep_prob < 1.0 or dp_output_keep_prob < 1.0
):
raise ValueError(
"Applying both dropout and zoneout on the same cell "
"is currently not supported."
)
# apply dropout only during training
if (dp_input_keep_prob != 1.0 or dp_output_keep_prob != 1.0) and training:
cell = DropoutWrapper(
cell,
input_keep_prob=dp_input_keep_prob,
output_keep_prob=dp_output_keep_prob,
variational_recurrent=variational_recurrent,
dtype=dtype,
seed=dropout_seed
)
if zoneout_prob > 0.:
cell = ZoneoutWrapper(cell, zoneout_prob, is_training=training)
return cell
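# Usage sketch (illustrative only): building a dropout-wrapped LSTM cell for
# training; the cell class and parameter values are arbitrary examples.
#
#   import tensorflow as tf
#   cell = single_cell(
#       cell_class=tf.nn.rnn_cell.LSTMCell,
#       cell_params={"num_units": 512},
#       dp_input_keep_prob=0.8,
#       dp_output_keep_prob=0.8,
#       training=True,
#   )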
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/utils.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import range
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops.nn_ops import dropout
class ZoneoutWrapper(rnn_cell_impl.RNNCell):
"""Operator adding zoneout to all states (states+cells) of the given cell.
Code taken from https://github.com/teganmaharaj/zoneout
applying zoneout as described in https://arxiv.org/pdf/1606.01305.pdf"""
def __init__(self, cell, zoneout_prob, is_training=True, seed=None):
if not isinstance(cell, rnn_cell_impl.RNNCell):
raise TypeError("The parameter cell is not an RNNCell.")
if (
isinstance(zoneout_prob, float) and
not (zoneout_prob >= 0.0 and zoneout_prob <= 1.0)
):
raise ValueError(
"Parameter zoneout_prob must be between 0 and 1: %f" % zoneout_prob
)
self._cell = cell
self._zoneout_prob = (zoneout_prob, zoneout_prob)
self._seed = seed
self._is_training = is_training
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
if isinstance(self.state_size,
tuple) != isinstance(self._zoneout_prob, tuple):
raise TypeError("Subdivided states need subdivided zoneouts.")
if isinstance(self.state_size,
tuple) and len(tuple(self.state_size)
) != len(tuple(self._zoneout_prob)):
raise ValueError("State and zoneout need equally many parts.")
output, new_state = self._cell(inputs, state, scope)
if isinstance(self.state_size, tuple):
if self._is_training:
new_state = tuple(
(1 - state_part_zoneout_prob) * dropout(
new_state_part - state_part, (1 - state_part_zoneout_prob),
seed=self._seed
) + state_part
for new_state_part, state_part, state_part_zoneout_prob in
zip(new_state, state, self._zoneout_prob)
)
else:
new_state = tuple(
state_part_zoneout_prob * state_part +
(1 - state_part_zoneout_prob) * new_state_part
for new_state_part, state_part, state_part_zoneout_prob in
zip(new_state, state, self._zoneout_prob)
)
new_state = rnn_cell_impl.LSTMStateTuple(new_state[0], new_state[1])
else:
raise ValueError("Only states that are tuples are supported")
return output, new_state
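# Usage sketch (illustrative only): zoneout requires a wrapped cell whose state
# is an LSTMStateTuple (see the tuple check above); values below are arbitrary.
#
#   import tensorflow as tf
#   base_cell = tf.nn.rnn_cell.LSTMCell(256)
#   cell = ZoneoutWrapper(base_cell, zoneout_prob=0.1, is_training=True)
#   inputs = tf.placeholder(tf.float32, [8, 30, 80])  # [batch, time, depth]
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)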
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/zoneout.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.util import nest
from tensorflow.contrib.seq2seq.python.ops.helper import Helper
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"SampleEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"InferenceHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
sample_ids_shape=None, sample_ids_dtype=None):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
`int32`, the shape of each value in the `sample_ids` batch. Defaults to
a scalar.
sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_ids_shape
@property
def sample_ids_dtype(self):
return self._sample_ids_dtype
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
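# Usage sketch (illustrative only): the three callables follow the signatures
# documented above; `batch_size`, `start_inputs`, `embedding` and `eos_id`
# stand in for the caller's own tensors and values.
#
#   import tensorflow as tf
#   def _initialize():
#     return tf.tile([False], [batch_size]), start_inputs
#   def _sample(time, outputs, state):
#     return tf.argmax(outputs, axis=-1, output_type=tf.int32)
#   def _next_inputs(time, outputs, state, sample_ids):
#     finished = tf.equal(sample_ids, eos_id)
#     return finished, tf.nn.embedding_lookup(embedding, sample_ids), state
#   helper = CustomHelper(_initialize, _sample, _next_inputs)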
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
self._inputs = inputs
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def inputs(self):
return self._inputs
@property
def sequence_length(self):
return self._sequence_length
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
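# Usage sketch (illustrative only): a TrainingHelper plugged into the standard
# tf.contrib.seq2seq decoding loop; `decoder_cell`, `decoder_inputs`,
# `decoder_lengths` and `initial_state` stand in for the caller's tensors.
#
#   import tensorflow as tf
#   helper = TrainingHelper(inputs=decoder_inputs,            # [batch, time, depth]
#                           sequence_length=decoder_lengths)  # [batch]
#   decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell, helper, initial_state)
#   outputs, final_state, lengths = tf.contrib.seq2seq.dynamic_decode(decoder)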
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sampler = bernoulli.Bernoulli(
probs=self._sampling_probability, dtype=dtypes.bool)
select_sample = select_sampler.sample(
sample_shape=self.batch_size, seed=self._scheduling_seed)
sample_id_sampler = categorical.Categorical(logits=outputs)
return array_ops.where(
select_sample,
sample_id_sampler.sample(seed=self._seed),
gen_array_ops.fill([self.batch_size], -1))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
inputs_not_sampling = array_ops.gather_nd(
base_next_inputs, where_not_sampling)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
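# Usage sketch (illustrative only): scheduled sampling with a fixed 25% chance
# of feeding back sampled ids; `decoder_inputs`, `decoder_lengths` and
# `embedding_matrix` stand in for the caller's tensors.
#
#   helper = ScheduledEmbeddingTrainingHelper(
#       inputs=decoder_inputs,
#       sequence_length=decoder_lengths,
#       embedding=embedding_matrix,
#       sampling_probability=0.25)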
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_inputs_fn=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
inputs: A (structure) of input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
the next input when sampling. If `None` (default), the RNN outputs will
be used as the next inputs.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
self._next_inputs_fn = next_inputs_fn
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
return sampler.sample(sample_shape=self.batch_size, seed=self._seed)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
sample_ids = math_ops.cast(sample_ids, dtypes.bool)
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_inputs_fn is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_inputs_fn(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
next_inputs = control_flow_ops.cond(
math_ops.logical_or(all_finished, no_samples),
lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token, positional_embedding=None):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._use_pos_embedding = False
if positional_embedding is not None:
if callable(positional_embedding):
self._pos_embedding_fn = positional_embedding
else:
self._pos_embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(positional_embedding, ids))
self._use_pos_embedding = True
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
if self._use_pos_embedding:
# change dtype for mixed precision
self._start_inputs += self._pos_embedding_fn(ops.convert_to_tensor(0))
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
if self._use_pos_embedding:
next_inputs += self._pos_embedding_fn(ops.convert_to_tensor(time))
return (finished, next_inputs, state)
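# Usage sketch (illustrative only): greedy decoding at inference time;
# `embedding_matrix`, `batch_size`, `GO_ID` and `EOS_ID` stand in for the
# caller's values, and `positional_embedding` is optional.
#
#   import tensorflow as tf
#   helper = GreedyEmbeddingHelper(
#       embedding=embedding_matrix,                  # [vocab, emb_dim] or a callable
#       start_tokens=tf.fill([batch_size], GO_ID),
#       end_token=EOS_ID)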
class SampleEmbeddingHelper(GreedyEmbeddingHelper):
"""A helper for use during inference.
Uses sampling (from a distribution) instead of argmax and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token,
softmax_temperature=None, seed=None):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
softmax_temperature: (Optional) `float32` scalar, value to divide the
logits by before computing the softmax. Larger values (above 1.0) result
in more random samples, while smaller values push the sampling
distribution towards the argmax. Must be strictly greater than 0.
Defaults to 1.0.
seed: (Optional) The sampling seed.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
super(SampleEmbeddingHelper, self).__init__(
embedding, start_tokens, end_token)
self._softmax_temperature = softmax_temperature
self._seed = seed
def sample(self, time, outputs, state, name=None):
"""sample for SampleEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, we sample instead of argmax (greedy).
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
if self._softmax_temperature is None:
logits = outputs
else:
logits = outputs / self._softmax_temperature
sample_id_sampler = categorical.Categorical(logits=logits)
sample_ids = sample_id_sampler.sample(seed=self._seed)
return sample_ids
class InferenceHelper(Helper):
"""A helper to use during inference with a custom sampling function."""
def __init__(self, sample_fn, sample_shape, sample_dtype,
start_inputs, end_fn, next_inputs_fn=None):
"""Initializer.
Args:
sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.
sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,
the shape of the each sample in the batch returned by `sample_fn`.
sample_dtype: the dtype of the sample returned by `sample_fn`.
start_inputs: The initial batch of inputs.
end_fn: A callable that takes `sample_ids` and emits a `bool` vector
shaped `[batch_size]` indicating whether each sample is an end token.
next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns
the next batch of inputs. If not provided, `sample_ids` is used as the
next batch of inputs.
"""
self._sample_fn = sample_fn
self._end_fn = end_fn
self._sample_shape = tensor_shape.TensorShape(sample_shape)
self._sample_dtype = sample_dtype
self._next_inputs_fn = next_inputs_fn
self._batch_size = array_ops.shape(start_inputs)[0]
self._start_inputs = ops.convert_to_tensor(
start_inputs, name="start_inputs")
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_shape
@property
def sample_ids_dtype(self):
return self._sample_dtype
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
del time, state # unused by sample
return self._sample_fn(outputs)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
del time, outputs # unused by next_inputs
if self._next_inputs_fn is None:
next_inputs = sample_ids
else:
next_inputs = self._next_inputs_fn(sample_ids)
finished = self._end_fn(sample_ids)
return (finished, next_inputs, state)
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/helper.py |
# pylint: skip-file
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object.
Modified by blisc to add support for LocationSensitiveAttention and changed
the AttentionWrapper class to output both the cell_output and attention context
concatenated together.
New classes:
LocationSensitiveAttention
LocationLayer
New functions:
_bahdanau_score_with_location
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import range
import collections
import functools
import math
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.layers.convolutional import Conv1D
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism", "AttentionWrapper", "AttentionWrapperState",
"LuongAttention", "BahdanauAttention", "hardmax", "safe_cumprod",
"monotonic_attention", "BahdanauMonotonicAttention",
"LuongMonotonicAttention", "LocationSensitiveAttention"
]
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
class AttentionMechanism(object):
@property
def alignments_size(self):
raise NotImplementedError
@property
def state_size(self):
raise NotImplementedError
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = nest.map_structure(
lambda m: ops.convert_to_tensor(m, name="memory"), memory
)
if memory_sequence_length is not None:
memory_sequence_length = ops.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length"
)
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError(
"Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape())
)
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None:
seq_len_mask = None
else:
seq_len_mask = array_ops.sequence_mask(
memory_sequence_length,
maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype
)
seq_len_batch_size = (
memory_sequence_length.shape[0].value or
array_ops.shape(memory_sequence_length)[0]
)
def _maybe_mask(m, seq_len_mask):
rank = m.get_shape().ndims
rank = rank if rank is not None else array_ops.rank(m)
extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
m_batch_size = m.shape[0].value or array_ops.shape(m)[0]
if memory_sequence_length is not None:
message = (
"memory_sequence_length and memory tensor batch sizes do not "
"match."
)
with ops.control_dependencies(
[
check_ops.assert_equal(
seq_len_batch_size, m_batch_size, message=message
)
]
):
seq_len_mask = array_ops.reshape(
seq_len_mask,
array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0)
)
return m * seq_len_mask
else:
return m
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
if memory_sequence_length is None:
return score
message = ("All values in memory_sequence_length must be greater than zero.")
with ops.control_dependencies(
[check_ops.assert_positive(memory_sequence_length, message=message)]
):
score_mask = array_ops.sequence_mask(
memory_sequence_length, maxlen=array_ops.shape(score)[1]
)
score_mask_values = score_mask_value * array_ops.ones_like(score)
return array_ops.where(score_mask, score, score_mask_values)
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(
self,
query_layer,
memory,
probability_fn,
memory_sequence_length=None,
memory_layer=None,
check_inner_dims_defined=True,
score_mask_value=None,
name=None
):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be:
`probabilities = probability_fn(score, state)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
"""
if (
query_layer is not None and
not isinstance(query_layer, layers_base.Layer)
):
raise TypeError(
"query_layer is not a Layer: %s" % type(query_layer).__name__
)
if (
memory_layer is not None and
not isinstance(memory_layer, layers_base.Layer)
):
raise TypeError(
"memory_layer is not a Layer: %s" % type(memory_layer).__name__
)
self._query_layer = query_layer
self._memory_layer = memory_layer
self.dtype = memory_layer.dtype
if not callable(probability_fn):
raise TypeError(
"probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__
)
if score_mask_value is None:
score_mask_value = dtypes.as_dtype(self._memory_layer.dtype
).as_numpy_dtype(-np.inf)
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
probability_fn(
_maybe_mask_score(score, memory_sequence_length, score_mask_value),
prev))
with ops.name_scope(
name, "BaseAttentionMechanismInit", nest.flatten(memory)
):
self._values = _prepare_memory(
memory,
memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined
)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values
)
self._batch_size = (
self._keys.shape[0].value or array_ops.shape(self._keys)[0]
)
self._alignments_size = (
self._keys.shape[1].value or array_ops.shape(self._keys)[1]
)
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
@property
def alignments_size(self):
return self._alignments_size
@property
def state_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return the same output as initial_alignments.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by `state_size`.
"""
return self.initial_alignments(batch_size, dtype)
def _luong_score(query, keys, scale):
"""Implements Luong-style (multiplicative) scoring function.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, call this function with `scale=True`.
Args:
query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
scale: Whether to apply a scale to the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the keys' dimension (%s)?" %
(query, depth, keys, key_units, key_units)
)
dtype = query.dtype
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = array_ops.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
if scale:
# Scalar used in weight scaling
g = variable_scope.get_variable("attention_g", dtype=dtype, initializer=1.)
score = g * score
return score
class LuongAttention(_BaseAttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
def __init__(
self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="LuongAttention"
):
"""Construct the AttentionMechanism mechanism.
Args:
num_units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the memory layer of the attention mechanism.
name: Name to use when creating ops.
"""
# For LuongAttention, we only transform the memory layer; thus
# num_units **must** match the expected query depth.
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(LuongAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype
),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name
)
self._num_units = num_units
self._scale = scale
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_attention", [query]):
score = _luong_score(query, self._keys, self._scale)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
def _bahdanau_score(processed_query, keys, normalize):
"""Implements Bahdanau-style (additive) scoring function.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, set `normalize=True`.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
normalize: Whether to normalize the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
dtype = processed_query.dtype
# Get the number of hidden units from the trailing dimension of keys
num_units = keys.shape[2].value or array_ops.shape(keys)[2]
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
v = variable_scope.get_variable("attention_v", [num_units], dtype=dtype)
if normalize:
# Scalar used in weight normalization
g = variable_scope.get_variable(
"attention_g",
dtype=dtype,
shape=[1],
# initializer=math.sqrt((1. / num_units)))
initializer=init_ops.constant_initializer(
math.sqrt(1. / num_units), dtype=dtype
)
)
# Bias added prior to the nonlinearity
b = variable_scope.get_variable(
"attention_b", [num_units],
dtype=dtype,
initializer=init_ops.zeros_initializer()
)
# normed_v = g * v / ||v||
normed_v = g * v * math_ops.rsqrt(math_ops.reduce_sum(math_ops.square(v)))
return math_ops.reduce_sum(
normed_v * math_ops.tanh(keys + processed_query + b), [2]
)
else:
return math_ops.reduce_sum(v * math_ops.tanh(keys + processed_query), [2])
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(
self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="BahdanauAttention"
):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(BahdanauAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype
),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype
),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name
)
self._num_units = num_units
self._normalize = normalize
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
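# Usage sketch (illustrative only): attaching Bahdanau (or, analogously, Luong)
# attention to a decoder cell. It assumes the AttentionWrapper defined later in
# this file keeps TensorFlow's `(cell, attention_mechanism, ...)` constructor;
# `encoder_outputs` and `encoder_lengths` stand in for the caller's tensors.
#
#   import tensorflow as tf
#   attention = BahdanauAttention(
#       num_units=256,
#       memory=encoder_outputs,                      # [batch, max_time, enc_dim]
#       memory_sequence_length=encoder_lengths)
#   decoder_cell = AttentionWrapper(tf.nn.rnn_cell.LSTMCell(256), attention)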
def _bahdanau_score_with_location(processed_query, keys, location, use_bias):
"""Implements Bahdanau-style (additive) scoring function with location
information.
The implementation is described in
Jan Chorowski, Dzmitry Bahdanau, Dmitriy Serdyuk, KyungHyun Cho, Yoshua Bengio
"Attention-Based Models for Speech Recognition"
https://arxiv.org/abs/1506.07503
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
location: Tensor, shape `[batch_size, max_time, num_units]`
use_bias (bool): Whether to use a bias when computing alignments
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
dtype = processed_query.dtype
# Get the number of hidden units from the trailing dimension of keys
num_units = keys.shape[2].value or array_ops.shape(keys)[2]
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
v = variable_scope.get_variable("attention_v", [num_units], dtype=dtype)
if use_bias:
b = variable_scope.get_variable("attention_bias", [num_units], dtype=dtype)
return math_ops.reduce_sum(
v * math_ops.tanh(keys + processed_query + location + b), [2]
)
return math_ops.reduce_sum(
v * math_ops.tanh(keys + processed_query + location), [2]
)
class ChorowskiLocationLayer(layers_base.Layer):
"""
The layer that processes the location information
"""
def __init__(
self,
filters,
kernel_size,
attention_units,
strides=1,
data_format="channels_last",
name="location",
dtype=None,
**kwargs
):
super(ChorowskiLocationLayer, self).__init__(name=name, **kwargs)
self.conv_layer = Conv1D(
name="{}_conv".format(name),
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding="SAME",
use_bias=True,
data_format=data_format,
)
self.location_dense = Conv1D(
name="{}_dense".format(name),
filters=attention_units,
kernel_size=1,
strides=strides,
padding="SAME",
use_bias=False,
data_format=data_format,
)
def __call__(self, prev_attention, query=None):
location_attention = self.conv_layer(prev_attention)
location_attention = self.location_dense(location_attention)
return location_attention
class ZhaopengLocationLayer(layers_base.Layer):
"""
The layer that processes the location information.
Similar to https://arxiv.org/abs/1805.03294 and https://arxiv.org/abs/1601.04811.
"""
def __init__(
self,
attention_units,
query_dim,
name="location",
dtype=None,
**kwargs
):
super(ZhaopengLocationLayer, self).__init__(name=name, **kwargs)
self.vbeta = variable_scope.get_variable(
"location_attention_vbeta", [query_dim], dtype=dtypes.float32)
self.location_dense = layers_core.Dense(
name="{}_dense".format(name), units=attention_units, use_bias=False
)
def __call__(self, prev_attention, query):
# To-Do add mixed precision support.
#query = math_ops.cast(query, dtypes.float32)
fertility = math_ops.sigmoid(math_ops.reduce_sum(
math_ops.multiply(self.vbeta, query)))
location_attention = fertility * prev_attention
location_attention = self.location_dense(location_attention)
return location_attention
class LocationSensitiveAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) scoring function with cumulative
location information.
The implementation is described in:
Jan Chorowski, Dzmitry Bahdanau, Dmitriy Serdyuk, KyungHyun Cho, Yoshua Bengio
"Attention-Based Models for Speech Recognition"
https://arxiv.org/abs/1506.07503
Jonathan Shen, Ruoming Pang, Ron J. Weiss, Mike Schuster, Navdeep Jaitly,
Zongheng Yang, Zhifeng Chen, Yu Zhang, Yuxuan Wang, RJ Skerry-Ryan,
Rif A. Saurous, Yannis Agiomyrgiannakis, Yonghui Wu
"Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions"
https://arxiv.org/abs/1712.05884
"""
def __init__(
self,
num_units,
memory,
query_dim=None,
memory_sequence_length=None,
probability_fn=None,
score_mask_value=None,
dtype=None,
use_bias=False,
use_coverage=True,
location_attn_type="chorowski",
location_attention_params=None,
name="LocationSensitiveAttention",
):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
      query_dim (optional): int, dimensionality of the decoder query; required
        when `location_attn_type` is "zhaopeng".
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the query and memory layers of the attention
mechanism.
      use_bias (bool): Whether to use a bias when computing alignments.
      use_coverage (bool): Whether to accumulate alignments across decoding
        steps (cumulative, location-sensitive attention). Always enabled when
        `location_attn_type` is "zhaopeng".
location_attn_type (String): Accepts ["chorowski", "zhaopeng"].
location_attention_params (dict): Params required for location attention.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(LocationSensitiveAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype
),
        memory_layer=Conv1D(
            name="memory_layer",
filters=num_units,
kernel_size=1,
strides=1,
padding="SAME",
use_bias=False,
data_format="channels_last",
dtype=dtype
),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name
)
self._num_units = num_units
self._name = name
self.use_bias = use_bias
self._use_coverage = use_coverage
if location_attn_type == "chorowski":
kernel_size = 32
filters = 32
if location_attention_params is not None:
kernel_size = location_attention_params["kernel_size"]
filters = location_attention_params["filters"]
self.location_layer = ChorowskiLocationLayer(
filters, kernel_size, num_units)
    elif location_attn_type == "zhaopeng":
      self.location_layer = ZhaopengLocationLayer(num_units, query_dim)
      self._use_coverage = True
    else:
      raise ValueError(
          "location_attn_type must be 'chorowski' or 'zhaopeng', got: "
          "{}".format(location_attn_type)
      )
def __call__(self, query, state):
"""Score the query based on the keys, values, and location.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "location_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
location = array_ops.expand_dims(state, axis=-1)
processed_location = self.location_layer(location, query)
score = _bahdanau_score_with_location(
processed_query, self._keys, processed_location, self.use_bias
)
alignments = self._probability_fn(score, state)
if self._use_coverage:
next_state = alignments + state
else:
next_state = alignments
return alignments, next_state
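# A minimal construction sketch; the encoder tensors below are placeholders
# standing in for a real encoder's outputs and sequence lengths:
#
#   encoder_outputs = tf.placeholder(tf.float32, [None, None, 512])
#   src_lengths = tf.placeholder(tf.int32, [None])
#   attention_mechanism = LocationSensitiveAttention(
#       num_units=128,
#       memory=encoder_outputs,
#       memory_sequence_length=src_lengths,
#       use_coverage=True,
#       location_attention_params={"kernel_size": 32, "filters": 32},
#   )
#   # The mechanism can then be passed to the AttentionWrapper defined below.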
def safe_cumprod(x, *args, **kwargs):
"""Computes cumprod of x in logspace using cumsum to avoid underflow.
The cumprod function and its gradient can result in numerical instabilities
when its argument has very small and/or zero values. As long as the argument
is all positive, we can instead compute the cumulative product as
exp(cumsum(log(x))). This function can be called identically to tf.cumprod.
Args:
x: Tensor to take the cumulative product of.
*args: Passed on to cumsum; these are identical to those in cumprod.
**kwargs: Passed on to cumsum; these are identical to those in cumprod.
Returns:
Cumulative product of x.
"""
with ops.name_scope(None, "SafeCumprod", [x]):
x = ops.convert_to_tensor(x, name="x")
tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
return math_ops.exp(
math_ops.cumsum(
math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs
)
)
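# Numeric sketch of the identity used above: for x = [0.5, 0.4, 0.1],
#   cumprod(x)          = [0.5, 0.2, 0.02]
#   exp(cumsum(log(x))) = [0.5, 0.2, 0.02]
# but the log-space form keeps the gradient finite when entries of x approach
# zero (entries are first clipped to the smallest positive representable float).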
def monotonic_attention(p_choose_i, previous_attention, mode):
"""Compute monotonic attention distribution from choosing probabilities.
Monotonic attention implies that the input sequence is processed in an
explicitly left-to-right manner when generating the output sequence. In
addition, once an input sequence element is attended to at a given output
timestep, elements occurring before it cannot be attended to at subsequent
output timesteps. This function generates attention distributions according
to these assumptions. For more information, see ``Online and Linear-Time
Attention by Enforcing Monotonic Alignments''.
Args:
p_choose_i: Probability of choosing input sequence/memory element i. Should
be of shape (batch_size, input_sequence_length), and should all be in the
range [0, 1].
previous_attention: The attention distribution from the previous output
timestep. Should be of shape (batch_size, input_sequence_length). For
      the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
0] for all n in [0, ... batch_size - 1].
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'.
* 'recursive' uses tf.scan to recursively compute the distribution.
This is slowest but is exact, general, and does not suffer from
numerical instabilities.
* 'parallel' uses parallelized cumulative-sum and cumulative-product
operations to compute a closed-form solution to the recurrence
relation defining the attention distribution. This makes it more
efficient than 'recursive', but it requires numerical checks which
make the distribution non-exact. This can be a problem in particular
when input_sequence_length is long and/or p_choose_i has entries very
close to 0 or 1.
* 'hard' requires that the probabilities in p_choose_i are all either 0
or 1, and subsequently uses a more efficient and exact solution.
Returns:
A tensor of shape (batch_size, input_sequence_length) representing the
attention distributions for each sequence in the batch.
Raises:
ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
"""
# Force things to be tensors
p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i")
previous_attention = ops.convert_to_tensor(
previous_attention, name="previous_attention"
)
if mode == "recursive":
# Use .shape[0].value when it's not None, or fall back on symbolic shape
batch_size = p_choose_i.shape[0].value or array_ops.shape(p_choose_i)[0]
# Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
shifted_1mp_choose_i = array_ops.concat(
[array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1
)
# Compute attention distribution recursively as
# q[i] = (1 - p_choose_i[i])*q[i - 1] + previous_attention[i]
# attention[i] = p_choose_i[i]*q[i]
attention = p_choose_i * array_ops.transpose(
functional_ops.scan(
# Need to use reshape to remind TF of the shape between loop
# iterations
lambda x, yz: array_ops.reshape(yz[0] * x + yz[1], (batch_size,)),
# Loop variables yz[0] and yz[1]
[
array_ops.transpose(shifted_1mp_choose_i),
array_ops.transpose(previous_attention)
],
# Initial value of x is just zeros
array_ops.zeros((batch_size,))
)
)
elif mode == "parallel":
# safe_cumprod computes cumprod in logspace with numeric checks
cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
# Compute recurrence relation solution
attention = p_choose_i * cumprod_1mp_choose_i * math_ops.cumsum(
previous_attention /
# Clip cumprod_1mp to avoid divide-by-zero
clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.),
axis=1
)
elif mode == "hard":
# Remove any probabilities before the index chosen last time step
p_choose_i *= math_ops.cumsum(previous_attention, axis=1)
# Now, use exclusive cumprod to remove probabilities after the first
# chosen index, like so:
# p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
# cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
# Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
attention = p_choose_i * math_ops.cumprod(
1 - p_choose_i, axis=1, exclusive=True
)
else:
raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
return attention
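# Worked example for mode="hard" with batch size 1:
#   previous_attention                  = [[0, 1, 0, 0]]
#   p_choose_i                          = [[1, 0, 1, 1]]
#   cumsum(previous_attention)          = [[0, 1, 1, 1]]
#   p_choose_i after masking            = [[0, 0, 1, 1]]
#   cumprod(1 - p_choose_i, exclusive)  = [[1, 1, 1, 0]]
#   attention (product of the last two) = [[0, 0, 1, 0]]
# i.e. attention moves monotonically from index 1 to index 2 and cannot go back.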
def _monotonic_probability_fn(
score, previous_alignments, sigmoid_noise, mode, seed=None
):
"""Attention probability function for monotonic attention.
Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
the model to make discrete attention decisions, passes them through a sigmoid
to obtain "choosing" probabilities, and then calls monotonic_attention to
obtain the attention distribution. For more information, see
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
Args:
score: Unnormalized attention scores, shape `[batch_size, alignments_size]`
previous_alignments: Previous attention distribution, shape
`[batch_size, alignments_size]`
sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this larger
than 0 will encourage the model to produce large attention scores,
effectively making the choosing probabilities discrete and the resulting
attention distribution one-hot. It should be set to 0 at test-time, and
when hard attention is not desired.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
seed: (optional) Random seed for pre-sigmoid noise.
Returns:
A `[batch_size, alignments_size]`-shape tensor corresponding to the
resulting attention distribution.
"""
# Optionally add pre-sigmoid noise to the scores
if sigmoid_noise > 0:
noise = random_ops.random_normal(
array_ops.shape(score), dtype=score.dtype, seed=seed
)
score += sigmoid_noise * noise
# Compute "choosing" probabilities from the attention scores
if mode == "hard":
# When mode is hard, use a hard sigmoid
p_choose_i = math_ops.cast(score > 0, score.dtype)
else:
p_choose_i = math_ops.sigmoid(score)
# Convert from choosing probabilities to attention distribution
return monotonic_attention(p_choose_i, previous_alignments, mode)
class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
  Simply overrides the initial_alignments function to provide a Dirac
  distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
    Initializes to Dirac distributions, i.e. [1, 0, 0, ...memory length..., 0]
for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32),
max_time,
dtype=dtype
)
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
This type of attention encorces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Since the attention scores are passed
through a sigmoid, a learnable scalar bias parameter is applied after the
score function and before the sigmoid. Otherwise, it is equivalent to
BahdanauAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(
self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="BahdanauMonotonicAttention"
):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed
)
super(BahdanauMonotonicAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype
),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype
),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name
)
self._num_units = num_units
self._normalize = normalize
self._name = name
self._score_bias_init = score_bias_init
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(
None, "bahdanau_monotonic_attention", [query]
):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
score_bias = variable_scope.get_variable(
"attention_score_bias",
dtype=processed_query.dtype,
initializer=self._score_bias_init
)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
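# Construction sketch with placeholder tensors (per the docstring above,
# sigmoid_noise is typically > 0 during training and 0 at test time):
#
#   encoder_outputs = tf.placeholder(tf.float32, [None, None, 512])
#   src_lengths = tf.placeholder(tf.int32, [None])
#   monotonic_attention_mechanism = BahdanauMonotonicAttention(
#       num_units=128,
#       memory=encoder_outputs,
#       memory_sequence_length=src_lengths,
#       sigmoid_noise=1.0,
#       mode="parallel",
#   )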
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
  This type of attention enforces a monotonic constraint on the attention
  distributions; that is, once the model attends to a given point in the memory
  it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Otherwise, it is equivalent to
LuongAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(
self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="LuongMonotonicAttention"
):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed
)
super(LuongMonotonicAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype
),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name
)
self._num_units = num_units
self._scale = scale
self._score_bias_init = score_bias_init
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(
None, "luong_monotonic_attention", [query]
):
score = _luong_score(query, self._keys, self._scale)
score_bias = variable_scope.get_variable(
"attention_score_bias",
dtype=query.dtype,
initializer=self._score_bias_init
)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class AttentionWrapperState(
collections.namedtuple(
"AttentionWrapperState", (
"cell_state", "attention", "time", "alignments",
"alignment_history", "attention_state"
)
)
):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell` at the previous time
step.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignments`: A single or tuple of `Tensor`(s) containing the alignments
emitted at the previous time step for each attention mechanism.
- `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
containing alignment matrices from all time steps for each attention
mechanism. Call `stack()` on each to convert to a `Tensor`.
- `attention_state`: A single or tuple of nested objects
containing attention mechanism state for each attention mechanism.
The objects may contain Tensors or TensorArrays.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
The new state fields' shape must match original state fields' shape. This
will be validated, and original fields' shape will be propagated to new
fields.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
def with_same_shape(old, new):
"""Check and set new tensor's shape."""
if isinstance(old, ops.Tensor) and isinstance(new, ops.Tensor):
return tensor_util.with_same_shape(old, new)
return new
return nest.map_structure(
with_same_shape, self,
super(AttentionWrapperState, self)._replace(**kwargs)
)
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if logits.get_shape()[-1].value is not None:
depth = logits.get_shape()[-1].value
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype
)
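# Example: hardmax([[1.0, 3.0, 2.0]]) evaluates to [[0., 1., 0.]] -- a one-hot
# vector at the argmax position.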
def _compute_attention(
attention_mechanism, cell_output, attention_state, attention_layer
):
"""Computes the attention and alignments for a given attention_mechanism."""
alignments, next_attention_state = attention_mechanism(
cell_output, state=attention_state
)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
context = math_ops.matmul(expanded_alignments, attention_mechanism.values)
context = array_ops.squeeze(context, [1])
if attention_layer is not None:
attention = attention_layer(array_ops.concat([cell_output, context], 1))
else:
attention = context
return attention, alignments, next_attention_state
class AttentionWrapper(rnn_cell_impl.RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(
self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None
):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
    tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
        memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
output_attention: bool or "both". If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
        the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. If "both", the attention value and cell output
are concatenated together and set as the output. In all cases, the
`attention` tensor is propagated to the next time step via the state and
is used there. This flag only controls whether the attention mechanism
is propagated up to the next cell in an RNN stack or to the top RNN
output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`.
"""
super(AttentionWrapper, self).__init__(name=name)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s" %
type(attention_mechanism).__name__
)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s" %
type(attention_mechanism).__name__
)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1)
)
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s" %
type(cell_input_fn).__name__
)
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
          if isinstance(attention_layer_size, (list, tuple))
          else (attention_layer_size,)
)
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d" %
(len(attention_layer_sizes), len(attention_mechanisms))
)
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype
) for i, attention_layer_size in enumerate(attention_layer_sizes)
)
self._attention_layer_size = sum(attention_layer_sizes)
else:
self._attention_layers = None
self._attention_layer_size = sum(
attention_mechanism.values.get_shape()[-1].value
for attention_mechanism in attention_mechanisms
)
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value or
array_ops.shape(final_state_tensor)[0]
)
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width."
)
with ops.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)
):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state
)
def _batch_size_checks(self, batch_size, error_message):
return [
check_ops.assert_equal(
batch_size, attention_mechanism.batch_size, message=error_message
) for attention_mechanism in self._attention_mechanisms
]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention == True:
return self._attention_layer_size
elif self._output_attention == False:
return self._cell.output_size
elif self._output_attention == "both":
return self._attention_layer_size + self._cell.output_size
else:
raise ValueError(
"output_attention: %s must be either True, False, or both" %
self._output_attention
)
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms
),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms
),
alignment_history=self._item_or_tuple(
() for _ in self._attention_mechanisms
)
) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width."
)
with ops.control_dependencies(
self._batch_size_checks(batch_size, error_message)
):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state
)
return AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
attention=_zero_state_tensors(
self._attention_layer_size, batch_size, dtype
),
alignments=self._item_or_tuple(
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
),
alignment_history=self._item_or_tuple(
tensor_array_ops.TensorArray(
dtype=dtype, size=0, dynamic_size=True
) if self._alignment_history else ()
for _ in self._attention_mechanisms
)
)
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError(
"Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state)
)
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
cell_output.shape[0].value or array_ops.shape(cell_output)[0]
)
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width."
)
with ops.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)
):
cell_output = array_ops.identity(cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = _compute_attention(
attention_mechanism, cell_output, previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None
)
alignment_history = previous_alignment_history[i].write(
state.time, alignments
) if self._alignment_history else ()
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = array_ops.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories)
)
if self._output_attention == True:
return attention, next_state
elif self._output_attention == False:
return cell_output, next_state
elif self._output_attention == "both":
return array_ops.concat((cell_output, attention), axis=-1), next_state
else:
raise ValueError(
"output_attention: %s must be either True, False, or both" %
self._output_attention
)
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/attention_wrapper.py |
import tensorflow as tf
class WeightDropLayerNormBasicLSTMCell(tf.contrib.rnn.RNNCell):
"""LSTM unit with layer normalization, weight dropout, and recurrent dropout.
  This is based on TensorFlow's standard implementation of LayerNormBasicLSTMCell.
This class adds layer normalization and recurrent dropout to a
basic LSTM unit. Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
  Recurrent dropout is based on:
https://arxiv.org/abs/1603.05118
"Recurrent Dropout without Memory Loss"
Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth.
  Code is based on TensorFlow's LayerNormBasicLSTMCell.
"""
def __init__(self,
num_units,
forget_bias=1.0,
activation=tf.tanh,
layer_norm=True,
norm_gain=1.0,
norm_shift=0.0,
recurrent_keep_prob=1.0,
input_weight_keep_prob=1.0,
recurrent_weight_keep_prob=1.0,
dropout_seed=None,
weight_variational=False,
reuse=None,
dtype=None):
"""Initializes the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
activation: Activation function of the inner states.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
      input_weight_keep_prob: keep probability for dropout of W
(kernel used to multiply with the input tensor)
      recurrent_weight_keep_prob: keep probability for dropout of U
(kernel used to multiply with last hidden state tensor)
recurrent_keep_prob: keep probability for dropout
when applying tanh for the input transform step
weight_variational: whether to keep the same weight dropout mask
at every timestep. This feature is not yet implemented.
      dropout_seed: (optional) integer, the randomness seed.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(WeightDropLayerNormBasicLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation
self._forget_bias = forget_bias
self._recurrent_keep_prob = recurrent_keep_prob
self._input_weight_keep_prob = input_weight_keep_prob
self._recurrent_weight_keep_prob = recurrent_weight_keep_prob
self._dropout_seed = dropout_seed
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
self._reuse = reuse
self._weight_variational = weight_variational
self._dtype = dtype
self._input_weight_noise = None
self._recurrent_weight_noise = None
if self._weight_variational:
if dtype is None:
raise ValueError(
"When weight_variational=True, dtype must be provided")
@property
def state_size(self):
return tf.contrib.rnn.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _norm(self, inp, scope, dtype=tf.float32):
shape = inp.get_shape()[-1:]
gamma_init = tf.constant_initializer(self._norm_gain)
beta_init = tf.constant_initializer(self._norm_shift)
with tf.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
tf.get_variable("gamma", shape=shape, initializer=gamma_init, dtype=dtype)
tf.get_variable("beta", shape=shape, initializer=beta_init, dtype=dtype)
normalized = tf.contrib.layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
def _linear(self, args, inputs_shape, h_shape):
out_size = 4 * self._num_units
proj_size = args.get_shape()[-1]
dtype = args.dtype
weights = tf.get_variable("kernel", [proj_size, out_size], dtype=dtype)
w, u = tf.split(weights, [inputs_shape, h_shape], axis=0)
if self._should_drop(self._input_weight_keep_prob):
w = self._dropout(w, self._input_weight_noise, self._input_weight_keep_prob)
if self._should_drop(self._recurrent_weight_keep_prob):
u = self._dropout(u, self._recurrent_weight_noise, self._recurrent_weight_keep_prob)
weights = tf.concat([w, u], 0)
out = tf.matmul(args, weights)
if not self._layer_norm:
bias = tf.get_variable("bias", [out_size], dtype=dtype)
out = tf.nn.bias_add(out, bias)
return out
def _variational_dropout(self, values, noise, keep_prob):
'''
TODO: Implement variational dropout for weight dropout
'''
return tf.nn.dropout(values, keep_prob, seed=self._dropout_seed)
def _dropout(self, values, dropout_noise, keep_prob):
# when it gets in here, keep_prob < 1.0
if not self._weight_variational:
return tf.nn.dropout(values, keep_prob, seed=self._dropout_seed)
else:
return self._variational_dropout(values, dropout_noise, keep_prob)
def _should_drop(self, p):
return (not isinstance(p, float)) or p < 1
def call(self, inputs, state):
"""LSTM cell with layer normalization and recurrent dropout."""
c, h = state
args = tf.concat([inputs, h], 1)
concat = self._linear(args, inputs.get_shape().as_list()[-1], h.get_shape().as_list()[-1])
dtype = args.dtype
i, j, f, o = tf.split(value=concat, num_or_size_splits=4, axis=1)
if self._layer_norm:
i = self._norm(i, "input", dtype=dtype)
j = self._norm(j, "transform", dtype=dtype)
f = self._norm(f, "forget", dtype=dtype)
o = self._norm(o, "output", dtype=dtype)
g = self._activation(j)
if self._should_drop(self._recurrent_keep_prob):
g = tf.nn.dropout(g, self._recurrent_keep_prob, seed=self._dropout_seed)
new_c = (
c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * g)
if self._layer_norm:
new_c = self._norm(new_c, "state", dtype=dtype)
new_h = self._activation(new_c) * tf.sigmoid(o)
new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)
return new_h, new_state | OpenSeq2Seq-master | open_seq2seq/parts/rnns/weight_drop.py |
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import range
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
# pylint: disable=protected-access
_Linear = core_rnn_cell._Linear # pylint: disable=invalid-name
# pylint: enable=protected-access
# TODO: must implement all abstract methods
class FLSTMCell(rnn_cell_impl.RNNCell):
"""Group LSTM cell (G-LSTM).
The implementation is based on:
https://arxiv.org/abs/1703.10722
O. Kuchaiev and B. Ginsburg
"Factorization Tricks for LSTM Networks", ICLR 2017 workshop.
"""
def __init__(self, num_units, fact_size, initializer=None, num_proj=None,
forget_bias=1.0, activation=math_ops.tanh, reuse=None):
"""Initialize the parameters of G-LSTM cell.
Args:
      num_units: int, The number of units in the F-LSTM cell.
      fact_size: int, the size of the factorization bottleneck (the inner
        dimension shared by the two factorized weight matrices).
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already
has the given variables, an error is raised.
"""
super(FLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._fact_size = fact_size
self._forget_bias = forget_bias
self._activation = activation
self._num_proj = num_proj
if num_proj:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
self._linear1 = None
self._linear2 = None
self._linear3 = None
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
# TODO: does not match signature of the base method
def call(self, inputs, state):
"""
"""
(c_prev, m_prev) = state
self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer):
x = array_ops.concat([inputs, m_prev], axis=1)
with vs.variable_scope("first_gemm"):
if self._linear1 is None:
# no bias for bottleneck
self._linear1 = _Linear(x, self._fact_size, False)
R_fact = self._linear1(x)
with vs.variable_scope("second_gemm"):
if self._linear2 is None:
self._linear2 = _Linear(R_fact, 4*self._num_units, True)
R = self._linear2(R_fact)
i, j, f, o = array_ops.split(R, 4, 1)
c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
math_ops.sigmoid(i) * math_ops.tanh(j))
m = math_ops.sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
if self._linear3 is None:
self._linear3 = _Linear(m, self._num_proj, False)
m = self._linear3(m)
new_state = rnn_cell_impl.LSTMStateTuple(c, m)
return m, new_state
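# Construction sketch (sizes are illustrative; fact_size should be well below
# num_units for the factorization to reduce the parameter count):
#
#   cell = FLSTMCell(num_units=1024, fact_size=256, num_proj=512)
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)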
| OpenSeq2Seq-master | open_seq2seq/parts/rnns/flstm.py |
"""Implementation of a 1d convolutional layer with weight normalization.
Inspired from https://github.com/tobyyouup/conv_seq2seq"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import math
from open_seq2seq.parts.convs2s.utils import gated_linear_units
from open_seq2seq.parts.transformer.common import LayerNormalization
class Conv1DNetworkNormalized(tf.layers.Layer):
"""1D convolutional layer with weight normalization"""
def __init__(self,
in_dim,
out_dim,
kernel_width,
mode,
layer_id,
hidden_dropout,
conv_padding,
decode_padding,
activation=gated_linear_units,
normalization_type="weight_norm",
regularizer=None, # tf.contrib.layers.l2_regularizer(scale=1e-4)
init_var=None,
):
"""initializes the 1D convolution layer.
    It uses weight normalization (Salimans & Kingma, 2016): w = g * v / ||v||_2
Args:
in_dim: int last dimension of the inputs
out_dim: int new dimension for the output
kernel_width: int width of kernel
mode: str the current mode
layer_id: int the id of current convolution layer
      hidden_dropout: float, the keep probability of the dropout applied to
        the input (use 1.0 for no dropout). It is also used to scale the
        initial variance of the convolution weights.
conv_padding: str the type of padding done for convolution
      decode_padding: bool, whether this convolution layer belongs to the
        decoder; in the decoder, padding is done explicitly before convolution
      activation: the activation function applied after the convolution
normalization_type: str specifies the normalization used for the layer.
"weight_norm" for weight normalization or
"batch_norm" for batch normalization or
"layer_norm" for layer normalization
      regularizer: the regularizer applied to the convolution weights
        (only used when weight normalization is disabled)
"""
super(Conv1DNetworkNormalized, self).__init__()
self.mode = mode
self.conv_padding = conv_padding
self.decode_padding = decode_padding
self.hidden_dropout = hidden_dropout
self.kernel_width = kernel_width
self.layer_id = layer_id
self.act_func = activation
self.regularizer = regularizer
if normalization_type == "batch_norm":
self.apply_batch_norm = True
self.bias_enabled = False
self.wn_enabled = False
self.apply_layer_norm = False
elif normalization_type == "weight_norm":
self.apply_batch_norm = False
self.bias_enabled = True
self.wn_enabled = True
self.apply_layer_norm = False
elif normalization_type == "layer_norm":
self.apply_batch_norm = False
self.bias_enabled = False
self.wn_enabled = False
self.apply_layer_norm = True
elif normalization_type is None:
self.apply_batch_norm = False
self.bias_enabled = True
self.wn_enabled = False
self.apply_layer_norm = False
else:
raise ValueError("Wrong normalization type: {}".format(normalization_type))
if activation == gated_linear_units:
conv_out_size = 2 * out_dim
else:
conv_out_size = out_dim
with tf.variable_scope("conv_layer_" + str(layer_id)):
if init_var is None:
V_std = math.sqrt(4.0 * hidden_dropout / (kernel_width * in_dim))
else:
V_std = init_var
if self.wn_enabled:
self.V = tf.get_variable(
'V',
shape=[kernel_width, in_dim, conv_out_size],
initializer=tf.random_normal_initializer(mean=0, stddev=V_std),
trainable=True)
self.V_norm = tf.norm(self.V.initialized_value(), axis=[0, 1])
self.g = tf.get_variable('g', initializer=self.V_norm, trainable=True)
self.W = tf.reshape(self.g, [1, 1, conv_out_size]) * tf.nn.l2_normalize(
self.V, [0, 1])
else:
self.W = tf.get_variable(
'W',
shape=[kernel_width, in_dim, conv_out_size],
initializer=tf.random_normal_initializer(mean=0, stddev=V_std),
trainable=True,
regularizer=self.regularizer)
if self.bias_enabled:
self.b = tf.get_variable(
'b',
shape=[conv_out_size],
initializer=tf.zeros_initializer(),
trainable=True)
else:
self.b = None
if self.apply_layer_norm:
self.layer_norm = LayerNormalization(out_dim)
else:
self.layer_norm = None
def call(self, input):
"""Applies convolution with gated linear units on x.
Args:
x: A float32 tensor with shape [batch_size, length, in_dim]
Returns:
float32 tensor with shape [batch_size, length, out_dim].
"""
output = input
if self.mode == "train":
output = tf.nn.dropout(output, self.hidden_dropout)
if self.decode_padding:
output = tf.pad(
output, [[0, 0], [self.kernel_width - 1, self.kernel_width - 1], [0, 0]],
"CONSTANT")
output = tf.nn.conv1d(
value=output, filters=self.W, stride=1, padding=self.conv_padding)
if self.decode_padding and self.kernel_width > 1:
output = output[:, 0:-self.kernel_width + 1, :]
if self.apply_batch_norm:
# trick to make batchnorm work for mixed precision training.
bn_input = tf.expand_dims(output, axis=1)
bn_output = tf.layers.batch_normalization(
name="batch_norm_" + str(self.layer_id),
inputs=bn_input,
training=self.mode == 'train',
axis=-1,
momentum=0.95,
epsilon=1e-4
)
output = tf.squeeze(bn_output, axis=1)
if self.apply_layer_norm:
output = self.layer_norm(output)
if self.b is not None:
output = tf.nn.bias_add(output, self.b)
if self.act_func is not None:
output = self.act_func(output)
return output
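# Construction sketch with illustrative dimensions (an encoder-side layer;
# a decoder-side layer would pass decode_padding=True):
#
#   conv = Conv1DNetworkNormalized(
#       in_dim=256, out_dim=256, kernel_width=3, mode="train", layer_id=1,
#       hidden_dropout=0.9, conv_padding="SAME", decode_padding=False)
#   y = conv(x)   # x: [batch, length, 256] -> y: [batch, length, 256]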
| OpenSeq2Seq-master | open_seq2seq/parts/convs2s/conv_wn_layer.py |
"""Implementation of fully connected network with weight normalization.
Inspired from https://github.com/tobyyouup/conv_seq2seq"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import math
from open_seq2seq.parts.transformer.common import LayerNormalization
class FeedFowardNetworkNormalized(tf.layers.Layer):
"""Fully connected feedforward network with weight normalization"""
def __init__(self,
in_dim,
out_dim,
dropout,
var_scope_name,
mode,
normalization_type="weight_norm",
regularizer=None,
init_var=None
):
"""initializes the linear layer.
    This layer projects from in_dim-dimensional space to out_dim-dimensional space.
    It uses weight normalization (Salimans & Kingma, 2016): w = g * v / ||v||_2
Args:
in_dim: int last dimension of the inputs
out_dim: int new dimension for the output
dropout: float the keep-dropout value used in the previous layer.
It is used to initialize the weights. Give 1.0 if no dropout.
var_scope_name: str the scope name for the weight variables
mode: str current mode
normalization_type: str specifies the normalization used for this layer.
"weight_norm" for weight normalization or
"batch_norm" for batch normalization
"""
super(FeedFowardNetworkNormalized, self).__init__()
self.out_dim = out_dim
self.in_dim = in_dim
self.normalization_type = normalization_type
self.regularizer = regularizer
self.var_scope_name = var_scope_name
self.mode = mode
if normalization_type == "batch_norm":
self.apply_batch_norm = True
self.bias_enabled = False
self.wn_enabled = False
self.apply_layer_norm = False
elif normalization_type == "weight_norm":
self.apply_batch_norm = False
self.bias_enabled = True
self.wn_enabled = True
self.apply_layer_norm = False
elif normalization_type == "layer_norm":
self.apply_batch_norm = False
self.bias_enabled = False
self.wn_enabled = False
self.apply_layer_norm = True
elif normalization_type is None:
self.apply_batch_norm = False
self.bias_enabled = True
self.wn_enabled = False
self.apply_layer_norm = False
else:
raise ValueError("Wrong normalization type: {}".format(normalization_type))
with tf.variable_scope(var_scope_name):
if init_var is None:
V_std = math.sqrt(dropout * 1.0 / in_dim)
else:
V_std = init_var
if self.wn_enabled:
V_initializer = \
tf.random_normal_initializer(mean=0, stddev=V_std)
self.V = tf.get_variable(
'V',
shape=[in_dim, out_dim],
initializer=V_initializer,
trainable=True)
self.V_norm = tf.norm(self.V.initialized_value(), axis=0)
self.g = tf.get_variable('g', initializer=self.V_norm, trainable=True)
else:
self.V = tf.get_variable(
'W',
shape=[in_dim, out_dim],
initializer=tf.random_normal_initializer(mean=0, stddev=V_std),
trainable=True, regularizer=self.regularizer)
if self.bias_enabled:
self.b = tf.get_variable(
'b',
shape=[out_dim],
initializer=tf.zeros_initializer(),
trainable=True)
else:
self.b = None
if self.apply_layer_norm:
self.layer_norm = LayerNormalization(out_dim)
else:
self.layer_norm = None
def call(self, x):
"""Projects x with its linear transformation.
Args:
x: A float32 tensor with shape [batch_size, length, in_dim]
Returns:
float32 tensor with shape [batch_size, length, out_dim].
"""
batch_size = tf.shape(x)[0]
x = tf.reshape(x, [-1, self.in_dim])
y = tf.matmul(x, self.V)
y = tf.reshape(y, [batch_size, -1, self.out_dim])
if self.wn_enabled:
      # x * (v * (g / ||v||_2))
scaler = tf.div(self.g, tf.norm(self.V, axis=0))
output = tf.reshape(scaler, [1, self.out_dim]) * y
elif self.apply_batch_norm:
bn_input = tf.expand_dims(y, axis=1)
bn_output = tf.layers.batch_normalization(
name=self.var_scope_name + "_batch_norm",
inputs=bn_input,
training=self.mode == 'train',
axis=-1,
momentum=0.95,
epsilon=1e-4
)
output = tf.squeeze(bn_output, axis=1)
elif self.apply_layer_norm:
output = self.layer_norm(y)
else:
output = y
if self.b is not None:
output = output + tf.reshape(self.b, [1, self.out_dim])
return output
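# Construction sketch with illustrative dimensions:
#
#   proj = FeedFowardNetworkNormalized(
#       in_dim=512, out_dim=256, dropout=1.0,
#       var_scope_name="linear_mapping", mode="train")
#   y = proj(x)   # x: [batch, length, 512] -> y: [batch, length, 256]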
| OpenSeq2Seq-master | open_seq2seq/parts/convs2s/ffn_wn_layer.py |
"""Implementation of the attention layer for convs2s.
Inspired from https://github.com/tobyyouup/conv_seq2seq"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import math
from open_seq2seq.parts.convs2s.ffn_wn_layer import FeedFowardNetworkNormalized
class AttentionLayerNormalized(tf.layers.Layer):
"""Attention layer for convs2s with weight normalization"""
def __init__(self, in_dim, embed_size, layer_id, add_res, mode,
scaling_factor=math.sqrt(0.5),
normalization_type="weight_norm",
regularizer=None,
init_var=None,
):
"""initializes the attention layer.
    It uses weight normalization for the linear projections
    (Salimans & Kingma, 2016): w = g * v / ||v||_2
Args:
in_dim: int last dimension of the inputs
embed_size: int target embedding size
layer_id: int the id of current convolution layer
add_res: bool whether residual connection should be added or not
mode: str current mode
"""
super(AttentionLayerNormalized, self).__init__()
self.add_res = add_res
self.scaling_factor = scaling_factor
self.regularizer = regularizer
with tf.variable_scope("attention_layer_" + str(layer_id)):
# linear projection layer to project the attention input to target space
self.tgt_embed_proj = FeedFowardNetworkNormalized(
in_dim,
embed_size,
dropout=1.0,
var_scope_name="att_linear_mapping_tgt_embed",
mode=mode,
normalization_type=normalization_type,
regularizer=self.regularizer,
init_var=init_var
)
# linear projection layer to project back to the input space
self.out_proj = FeedFowardNetworkNormalized(
embed_size,
in_dim,
dropout=1.0,
var_scope_name="att_linear_mapping_out",
mode=mode,
normalization_type=normalization_type,
regularizer=self.regularizer,
init_var=init_var
)
def call(self, input, target_embed, encoder_output_a, encoder_output_b,
input_attention_bias):
"""Calculates the attention vectors.
Args:
input: A float32 tensor with shape [batch_size, length, in_dim]
target_embed: A float32 tensor with shape [batch_size, length, in_dim]
containing the target embeddings
encoder_output_a: A float32 tensor with shape [batch_size, length, out_dim]
                        containing the first encoder outputs, used as the keys
encoder_output_b: A float32 tensor with shape [batch_size, length, src_emb_dim]
                        containing the second encoder outputs, used as the values
input_attention_bias: A float32 tensor with shape [batch_size, length, 1]
containing the bias used to mask the paddings
Returns:
float32 tensor with shape [batch_size, length, out_dim].
"""
h_proj = self.tgt_embed_proj(input)
d_proj = (h_proj + target_embed) * self.scaling_factor
att_score = tf.matmul(d_proj, encoder_output_a, transpose_b=True)
    # Masking needs to be done in float32. Added to support mixed-precision training.
att_score = tf.cast(x=att_score, dtype=tf.float32)
# mask out the paddings
if input_attention_bias is not None:
att_score = att_score + input_attention_bias
att_score = tf.nn.softmax(att_score)
# Cast back to original type
att_score = tf.cast(x=att_score, dtype=encoder_output_b.dtype)
length = tf.cast(tf.shape(encoder_output_b), encoder_output_b.dtype)
output = tf.matmul(att_score, encoder_output_b) * \
length[1] * tf.cast(tf.sqrt(1.0 / length[1]), dtype=encoder_output_b.dtype)
output = self.out_proj(output)
if self.add_res:
output = (output + input) * self.scaling_factor
return output
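# Hedged NumPy sketch of the computation in call() above (names are local to
# this example): combine the projected decoder state with the target embedding,
# scale by sqrt(0.5), take dot-product scores against the encoder keys, softmax
# over source positions, then rescale the weighted sum of encoder values by
# length * sqrt(1 / length), as in the original ConvS2S attention.
if __name__ == '__main__':
  import numpy as np
  rng = np.random.RandomState(0)
  src_len, dim = 5, 8
  h_proj = rng.randn(1, src_len, dim)
  target_embed = rng.randn(1, src_len, dim)
  keys = rng.randn(1, src_len, dim)      # plays the role of encoder_output_a
  values = rng.randn(1, src_len, dim)    # plays the role of encoder_output_b
  d_proj = (h_proj + target_embed) * np.sqrt(0.5)
  scores = np.matmul(d_proj, keys.transpose(0, 2, 1))
  scores = np.exp(scores - scores.max(axis=-1, keepdims=True))
  scores /= scores.sum(axis=-1, keepdims=True)              # softmax over source positions
  context = np.matmul(scores, values) * src_len * np.sqrt(1.0 / src_len)
  print(context.shape)  # (1, 5, 8)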
| OpenSeq2Seq-master | open_seq2seq/parts/convs2s/attention_wn_layer.py |
# Copyright (c) 2018 NVIDIA Corporation
| OpenSeq2Seq-master | open_seq2seq/parts/convs2s/__init__.py |
"""Implementation of a 1d convolutional layer with weight normalization.
Inspired from https://github.com/tobyyouup/conv_seq2seq"""
import tensorflow as tf
def gated_linear_units(inputs):
"""Gated Linear Units (GLU) on x.
Args:
x: A float32 tensor with shape [batch_size, length, 2*out_dim]
Returns:
float32 tensor with shape [batch_size, length, out_dim].
"""
input_shape = inputs.get_shape().as_list()
assert len(input_shape) == 3
input_pass = inputs[:, :, 0:int(input_shape[2] / 2)]
input_gate = inputs[:, :, int(input_shape[2] / 2):]
input_gate = tf.sigmoid(input_gate)
return tf.multiply(input_pass, input_gate)
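# Hedged usage sketch (the tensor values are made up): GLU halves the channel
# dimension by using the second half of the channels as a sigmoid gate over the
# first half, i.e. GLU([a, b]) = a * sigmoid(b).
if __name__ == '__main__':
  with tf.Session() as sess:
    x = tf.constant([[[1.0, 2.0, 0.0, 100.0]]])   # [batch=1, length=1, 2*out_dim=4]
    # first half [1.0, 2.0] is gated by sigmoid([0.0, 100.0]) ~= [0.5, 1.0]
    print(sess.run(gated_linear_units(x)))        # ~[[[0.5, 2.0]]]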
| OpenSeq2Seq-master | open_seq2seq/parts/convs2s/utils.py |
# Copyright (c) 2017 NVIDIA Corporation
from .data_layer import DataLayer
from .speech2text.speech2text import Speech2TextDataLayer
from .speech2text.speech_commands import SpeechCommandsDataLayer
from .image2label.image2label import ImagenetDataLayer
from .lm.lmdata import WKTDataLayer, IMDBDataLayer, SSTDataLayer
from .text2speech.text2speech import Text2SpeechDataLayer
from .text2speech.text2speech_wavenet import WavenetDataLayer
| OpenSeq2Seq-master | open_seq2seq/data/__init__.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import io
from six.moves import range
def pad_vocab_to_eight(vocab):
"""Pads vocabulary so that it is divisible by 8.
Args:
vocab (dict): vocabulary in the form token->id
Returns:
dict: vocab with new tokens added if necessary, such that the total
vocab size is divisible by 8.
"""
v_len = len(vocab)
if v_len % 8 == 0:
return vocab
for id_add in range(0, 8 - v_len % 8):
vocab['<$'+str(id_add)+'$>'] = v_len + id_add
return vocab
def load_pre_existing_vocabulary(path, min_idx=0, read_chars=False):
"""Loads pre-existing vocabulary into memory.
The vocabulary file should contain a token on each line with optional
token count on the same line that will be ignored. Example::
a 1234
b 4321
c 32342
d
e
word 234
Args:
path (str): path to vocabulary.
min_idx (int, optional): minimum id to assign for a token.
read_chars (bool, optional): whether to read only the
first symbol of the line.
Returns:
dict: vocabulary dictionary mapping tokens (chars/words) to int ids.
"""
idx = min_idx
vocab_dict = {}
with io.open(path, newline='', encoding='utf-8') as f:
for line in f:
# ignoring empty lines
if not line or line == '\n':
continue
if read_chars:
token = line[0]
else:
token = line.rstrip().split('\t')[0]
vocab_dict[token] = idx
idx += 1
return vocab_dict
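# Hedged usage sketch (toy tokens and a temporary file keep the example
# self-contained): pad_vocab_to_eight appends '<$i$>' filler tokens until the
# vocabulary size is divisible by 8 (useful for Tensor Core efficiency), and
# load_pre_existing_vocabulary reads one token per line, ignoring an optional
# tab-separated count.
if __name__ == '__main__':
  import tempfile
  print(len(pad_vocab_to_eight({'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4})))  # 8
  with tempfile.NamedTemporaryFile('w', suffix='.vocab', delete=False) as f:
    f.write('hello\t123\nworld\t45\n')
    vocab_path = f.name
  print(load_pre_existing_vocabulary(vocab_path, min_idx=2))  # {'hello': 2, 'world': 3}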
| OpenSeq2Seq-master | open_seq2seq/data/utils.py |
# Copyright (c) 2017 NVIDIA Corporation
"""Data layer classes"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import abc
import copy
import six
import tensorflow as tf
from open_seq2seq.utils.utils import check_params
@six.add_metaclass(abc.ABCMeta)
class DataLayer:
"""Abstract class from which all data layers must inherit."""
@staticmethod
def get_required_params():
"""Static method with description of required parameters.
Returns:
dict:
Dictionary containing all the parameters that **have to** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {
'mode': ['train', 'eval', 'infer'],
}
@staticmethod
def get_optional_params():
"""Static method with description of optional parameters.
Returns:
dict:
Dictionary containing all the parameters that **can** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {
'batch_size': int,
'shuffle': bool,
'repeat': bool,
'dtype': [tf.float32, tf.float16],
'interactive': bool,
'cache_features': bool,
'cache_format': str,
'cache_regenerate': bool
}
@abc.abstractmethod
def __init__(self, params, model, num_workers, worker_id):
"""Data layer constructor.
The TensorFlow graph should not be created here, but rather in the
:meth:`self.build_graph() <build_graph>` method.
Args:
params (dict): parameters describing the data layer.
All supported parameters are listed in :meth:`get_required_params`,
:meth:`get_optional_params` functions.
model (instance of a class derived from :class:`Model<models.model.Model>`):
parent model that created this data layer.
Could be None if no model access is required for the use case.
num_workers (int): number of Horovod processes or number of GPUs
if Horovod is not used.
worker_id (int): Horovod process id or GPU id if Horovod is not used.
Config parameters:
* **shuffle** (bool) --- whether to shuffle dataset after an epoch.
Typically will be True for train and False for inference and evaluation.
* **dtype** --- data dtype. Could be either ``tf.float16`` or ``tf.float32``.
"""
check_params(params, self.get_required_params(), self.get_optional_params())
self._params = copy.deepcopy(params)
self._model = model
if 'dtype' not in self._params:
if self._model:
self._params['dtype'] = self._model.get_tf_dtype()
else:
self._params['dtype'] = tf.float32
if 'shuffle' not in params:
self._params['shuffle'] = (self._params['mode'] == 'train')
if self._params['mode'] != 'train' and self._params['shuffle']:
raise ValueError("Shuffle should not be performed in eval or infer modes")
# should be used for correct evaluation on multiple GPUs
self._num_workers = num_workers
self._worker_id = worker_id
@property
def params(self):
"""Parameters used to construct the data layer (dictionary)."""
return self._params
@abc.abstractmethod
def build_graph(self):
"""Here all TensorFlow graph construction should happen."""
pass
@property
@abc.abstractmethod
def iterator(self):
"""``tf.data.Dataset`` iterator.
Should be created by :meth:`self.build_graph()<build_graph>`.
"""
pass
@property
@abc.abstractmethod
def input_tensors(self):
"""Dictionary containing input tensors.
This dictionary has to define the following keys: `source_tensors`,
which should contain all tensors describing the input object (i.e. tensors
that are passed to the encoder, e.g. input sequence and input length). And
when ``self.params['mode'] != "infer"`` data layer should also define
`target_tensors` which is the list of all tensors related to the
    corresponding target object (i.e. tensors that are passed to the decoder and
loss, e.g. target sequence and target length). Note that all tensors have
to be created inside :meth:`self.build_graph()<build_graph>` method.
"""
pass
def create_interactive_placeholders(self):
"""A function that must be defined for data layers that support interactive
infer. This function is intended to create placeholders that will be passed
to self._input_tensors that will be passed to the model.
"""
pass
def create_feed_dict(self, model_in):
"""A function that must be defined for data layers that support interactive
infer. Given input which is an abstract data element to be defined by the
data layer. The intended use is for the user to build and pass model_in from
the jupyter notebook. Given model_in, the data layer must preprocess the raw
data, and create the feed dict that defines the placeholders defined in
create_interactive_placeholders().
"""
pass
def get_size_in_samples(self):
"""Should return the dataset size in samples.
That is, the number of objects in the dataset. This method is used to
calculate a valid epoch size. If this method is not defined, you will need
to make sure that your dataset for evaluation is created only for
one epoch. You will also not be able to use ``num_epochs`` parameter in the
base config.
Returns:
int: dataset size in samples.
"""
return None
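# Hedged sketch of a minimal concrete data layer (hypothetical, not part of
# OpenSeq2Seq): it illustrates the contract described above -- build_graph()
# creates the tf.data iterator and fills input_tensors with 'source_tensors'
# and, outside of infer mode, 'target_tensors'.
class _ToyDataLayer(DataLayer):
  def __init__(self, params, model, num_workers, worker_id):
    super(_ToyDataLayer, self).__init__(params, model, num_workers, worker_id)
    self._iterator = None
    self._input_tensors = None
  def build_graph(self):
    sources = tf.zeros([8, 4], dtype=self.params['dtype'])
    targets = tf.zeros([8, 2], dtype=self.params['dtype'])
    dataset = tf.data.Dataset.from_tensor_slices((sources, targets))
    if self.params['shuffle']:
      dataset = dataset.shuffle(buffer_size=8)
    dataset = dataset.repeat().batch(self.params['batch_size'])
    self._iterator = dataset.make_initializable_iterator()
    src, tgt = self._iterator.get_next()
    self._input_tensors = {'source_tensors': [src], 'target_tensors': [tgt]}
  @property
  def iterator(self):
    return self._iterator
  @property
  def input_tensors(self):
    return self._input_tensors
  def get_size_in_samples(self):
    return 8
# Hypothetical instantiation:
#   layer = _ToyDataLayer({'mode': 'train', 'batch_size': 2},
#                         model=None, num_workers=1, worker_id=0)
#   layer.build_graph()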
| OpenSeq2Seq-master | open_seq2seq/data/data_layer.py |
 | OpenSeq2Seq-master | open_seq2seq/data/image2label/__init__.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.
Training images are sampled using the provided bounding boxes, and subsequently
cropped to the sampled bounding box. Images are additionally flipped randomly,
then resized to the target output size (without aspect-ratio preservation).
Images used during evaluation are resized (with aspect-ratio preservation) and
centrally cropped.
All images undergo mean color subtraction.
Note that these steps are colloquially referred to as "ResNet preprocessing,"
and they differ from "VGG preprocessing," which does not use bounding boxes
and instead does an aspect-preserving resize followed by random crop during
training. (These both differ from "Inception preprocessing," which introduces
color distortion steps.)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
# The lower bound for the smallest side of the image for aspect-preserving
# resizing. For example, if an image is 500 x 1000, it will be resized to
# _RESIZE_MIN x (_RESIZE_MIN * 2).
_RESIZE_MIN = 256
def _decode_crop_and_flip(image_buffer, bbox, num_channels):
"""Crops the given image to a random part of the image, and randomly flips.
We use the fused decode_and_crop op, which performs better than the two ops
used separately in series, but note that this requires that the image be
passed in as an un-decoded string Tensor.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
num_channels: Integer depth of the image buffer for decoding.
Returns:
3-D tensor with cropped image.
"""
# A large fraction of image datasets contain a human-annotated bounding box
# delineating the region of the image containing the object of interest. We
# choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.image.extract_jpeg_shape(image_buffer),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Reassemble the bounding box in the format the crop op requires.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
# Use the fused decode and crop op here, which is faster than each in series.
cropped = tf.image.decode_and_crop_jpeg(
image_buffer, crop_window, channels=num_channels)
# Flip to add a little more random distortion in.
cropped = tf.image.random_flip_left_right(cropped)
return cropped
def _central_crop(image, crop_height, crop_width):
"""Performs central crops of the given image list.
Args:
image: a 3-D image tensor
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
Returns:
3-D tensor with cropped image.
"""
shape = tf.shape(image)
height, width = shape[0], shape[1]
amount_to_be_cropped_h = (height - crop_height)
crop_top = amount_to_be_cropped_h // 2
amount_to_be_cropped_w = (width - crop_width)
crop_left = amount_to_be_cropped_w // 2
return tf.slice(
image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])
def _mean_image_subtraction_and_normalization(image, means, num_channels):
"""Subtracts the given means from each image channel and divides by 127.5.
For example:
means = [123.68, 116.779, 103.939]
image = _mean_image_subtraction_and_normalization(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
num_channels: number of color channels in the image that will be distorted.
Returns:
    the centered and normalized image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
means = tf.expand_dims(tf.expand_dims(means, 0), 0)
return (image - means) / 127.5
def _smallest_size_at_least(height, width, resize_min):
"""Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
resize_min: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
new_width: an int32 scalar tensor indicating the new width.
"""
resize_min = tf.cast(resize_min, tf.float32)
# Convert to floats to make subsequent calculations go smoothly.
height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
smaller_dim = tf.minimum(height, width)
scale_ratio = resize_min / smaller_dim
# Convert back to ints to make heights and widths that TF ops will accept.
new_height = tf.cast(height * scale_ratio, tf.int32)
new_width = tf.cast(width * scale_ratio, tf.int32)
return new_height, new_width
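# Worked example (plain-Python mirror of the ops above): for a 500 x 1000 image
# and resize_min = 256, the smaller side (500) is scaled to 256, giving a
# 256 x 512 result -- the same case mentioned in the _RESIZE_MIN comment above.
if __name__ == '__main__':
  _h, _w, _min = 500.0, 1000.0, 256.0
  _ratio = _min / min(_h, _w)
  print(int(_h * _ratio), int(_w * _ratio))  # 256 512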
def _aspect_preserving_resize(image, resize_min):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
resize_min: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
shape = tf.shape(image)
height, width = shape[0], shape[1]
new_height, new_width = _smallest_size_at_least(height, width, resize_min)
return _resize_image(image, new_height, new_width)
def _resize_image(image, height, width):
"""Simple wrapper around tf.resize_images.
This is primarily to make sure we use the same `ResizeMethod` and other
details each time.
Args:
image: A 3-D image `Tensor`.
height: The target height for the resized image.
width: The target width for the resized image.
Returns:
resized_image: A 3-D tensor containing the resized image. The first two
dimensions have the shape [height, width].
"""
return tf.image.resize_images(
image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
def preprocess_image(image_buffer, bbox, output_height, output_width,
num_channels, is_training=False):
"""Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
# For training, we want to randomize some of the distortions.
image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
image = _resize_image(image, output_height, output_width)
else:
# For validation, we want to decode, resize, then just crop the middle.
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
image = _aspect_preserving_resize(image, _RESIZE_MIN)
image = _central_crop(image, output_height, output_width)
image.set_shape([output_height, output_width, num_channels])
return _mean_image_subtraction_and_normalization(image, _CHANNEL_MEANS,
num_channels)
def _parse_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields (values are included as examples):
image/height: 462
image/width: 581
image/colorspace: 'RGB'
image/channels: 3
image/class/label: 615
image/class/synset: 'n03623198'
image/class/text: 'knee pad'
image/object/bbox/xmin: 0.1
image/object/bbox/xmax: 0.9
image/object/bbox/ymin: 0.2
image/object/bbox/ymax: 0.6
image/object/bbox/label: 615
image/format: 'JPEG'
image/filename: 'ILSVRC2012_val_00041207.JPEG'
image/encoded: <JPEG encoded string>
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
  # Note that the coordinates are ordered (y, x) to match the
  # [ymin, xmin, ymax, xmax] format expected by sample_distorted_bounding_box.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return features['image/encoded'], label, bbox
def parse_record(raw_record, is_training, image_size=224, num_classes=1000):
"""Parses a record containing a training example of an image.
The input record is parsed into a label and image, and the image is passed
through preprocessing steps (cropping, flipping, and so on).
Args:
raw_record: scalar Tensor tf.string containing a serialized
Example protocol buffer.
is_training: A boolean denoting whether the input is for training.
image_size (int): size that images should be resized to.
num_classes (int): number of output classes.
Returns:
Tuple with processed image tensor and one-hot-encoded label tensor.
"""
image_buffer, label, bbox = _parse_example_proto(raw_record)
image = preprocess_image(
image_buffer=image_buffer,
bbox=bbox,
output_height=image_size,
output_width=image_size,
num_channels=3,
is_training=is_training)
# subtracting 1 to make labels go from 0 to 999
label = tf.one_hot(tf.reshape(label - 1, shape=[]), num_classes)
return image, label
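# Hedged usage sketch (the glob pattern is hypothetical -- point it at real
# ImageNet TFRecords): parse_record is intended to be mapped over a
# TFRecordDataset, which is what ImagenetDataLayer.build_graph() does in
# open_seq2seq/data/image2label/image2label.py.
if __name__ == '__main__':
  files = tf.gfile.Glob('/path/to/imagenet/train-*-of-01024')
  dataset = tf.data.TFRecordDataset(files)
  dataset = dataset.map(lambda rec: parse_record(rec, is_training=True))
  dataset = dataset.batch(32)
  images, labels = dataset.make_one_shot_iterator().get_next()
  print(images.shape, labels.shape)  # (?, 224, 224, 3) (?, 1000)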
| OpenSeq2Seq-master | open_seq2seq/data/image2label/imagenet_preprocessing.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Downloads and extracts the binary version of the CIFAR-10 dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir', type=str, default='data/',
help='Directory to download data and extract the tarball')
def main(_):
"""Download and extract the tarball from Alex's website."""
if not os.path.exists(FLAGS.data_dir):
os.makedirs(FLAGS.data_dir)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(FLAGS.data_dir, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, 100.0 * count * block_size / total_size))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(FLAGS.data_dir)
if __name__ == '__main__':
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(argv=[sys.argv[0]] + unparsed)
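# Example invocation (the directory is illustrative):
#   python cifar10_download_and_extract.py --data_dir=/tmp/cifar10_data
# The tarball typically extracts to a cifar-10-batches-bin/ subdirectory
# containing data_batch_1.bin ... data_batch_5.bin and test_batch.bin, which is
# the directory that CifarDataLayer's 'data_dir' parameter should point to.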
| OpenSeq2Seq-master | open_seq2seq/data/image2label/cifar10_download_and_extract.py |
# This code is heavily based on the code from TensorFlow official models
# https://github.com/tensorflow/models/tree/master/official/resnet
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import os
import numpy as np
import tensorflow as tf
from six.moves import range
from open_seq2seq.data.data_layer import DataLayer
from .imagenet_preprocessing import parse_record
class CifarDataLayer(DataLayer):
_HEIGHT = 28
_WIDTH = 28
_NUM_CHANNELS = 3
_DEFAULT_IMAGE_BYTES = 32 * 32 * 3
# The record is the image plus a one-byte label
_RECORD_BYTES = _DEFAULT_IMAGE_BYTES + 1
_NUM_CLASSES = 10
_NUM_DATA_FILES = 5
_NUM_IMAGES = {
'train': 50000,
'validation': 10000,
}
@staticmethod
def get_required_params():
return dict(DataLayer.get_required_params(), **{
'data_dir': str,
})
@staticmethod
def get_optional_params():
return dict(DataLayer.get_optional_params(), **{
'num_parallel_calls': int,
'shuffle_buffer': int,
'image_size': int,
'num_classes': int,
})
def __init__(self, params, model, num_workers, worker_id):
super(CifarDataLayer, self).__init__(params, model,
num_workers, worker_id)
if self.params['mode'] == 'infer':
raise ValueError('Inference is not supported on CifarDataLayer')
if self.params['mode'] == 'train':
filenames = [
os.path.join(self.params['data_dir'], 'data_batch_{}.bin'.format(i))
for i in range(1, self._NUM_DATA_FILES + 1)
]
else:
filenames = [os.path.join(self.params['data_dir'], 'test_batch.bin')]
self.file_names = filenames
self._train_size = 50000
self._valid_size = 10000
self._iterator = None
self._input_tensors = None
def preprocess_image(self, image, is_training):
"""Preprocess a single image of layout [height, width, depth]."""
if is_training:
      # Pad the 32x32 image to (_HEIGHT + 8) x (_WIDTH + 8) pixels before the random crop below.
image = tf.image.resize_image_with_crop_or_pad(
image, self._HEIGHT + 8, self._WIDTH + 8
)
# Randomly crop a [_HEIGHT, _WIDTH] section of the image.
image = tf.random_crop(image, [self._HEIGHT, self._WIDTH,
self._NUM_CHANNELS])
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
else:
image = tf.image.resize_image_with_crop_or_pad(
image, self._HEIGHT, self._WIDTH
)
# Subtract off the mean and divide by the variance of the pixels.
image = tf.image.per_image_standardization(image)
return image
def parse_record(self, raw_record, is_training, num_classes=10):
"""Parse CIFAR-10 image and label from a raw record."""
# Convert bytes to a vector of uint8 that is record_bytes long.
record_vector = tf.decode_raw(raw_record, tf.uint8)
# The first byte represents the label, which we convert from uint8 to int32
# and then to one-hot.
label = tf.cast(record_vector[0], tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(record_vector[1:self._RECORD_BYTES],
[3, 32, 32])
# Convert from [depth, height, width] to [height, width, depth], and cast as
# float32.
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
image = self.preprocess_image(image, is_training)
label = tf.one_hot(tf.reshape(label, shape=[]), num_classes)
return image, label
def build_graph(self):
dataset = tf.data.FixedLengthRecordDataset(self.file_names,
self._RECORD_BYTES)
dataset = dataset.prefetch(buffer_size=self.params['batch_size'])
if self.params['shuffle']:
# shuffling images
dataset = dataset.shuffle(buffer_size=self.params.get('shuffle_buffer',
1500))
dataset = dataset.repeat()
dataset = dataset.map(
lambda value: self.parse_record(
raw_record=value,
is_training=self.params['mode'] == 'train',
),
num_parallel_calls=self.params.get('num_parallel_calls', 16),
)
dataset = dataset.batch(self.params['batch_size'])
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
self._iterator = dataset.make_initializable_iterator()
inputs, labels = self.iterator.get_next()
if self.params['mode'] == 'train':
tf.summary.image('augmented_images', inputs, max_outputs=1)
self._input_tensors = {
'source_tensors': [inputs],
'target_tensors': [labels],
}
@property
def input_tensors(self):
return self._input_tensors
@property
def iterator(self):
return self._iterator
def get_size_in_samples(self):
if self.params['mode'] == 'train':
return self._train_size
return len(np.arange(self._valid_size)[self._worker_id::self._num_workers])
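# Hedged NumPy sketch of the record layout handled by parse_record() above (the
# bytes are made up): each CIFAR-10 binary record is one label byte followed by
# 3072 image bytes stored channel-major, hence the reshape to [3, 32, 32] and
# the transpose to [32, 32, 3].
if __name__ == '__main__':
  fake_record = np.arange(1 + 32 * 32 * 3, dtype=np.uint8)
  fake_label = int(fake_record[0])
  fake_image = fake_record[1:].reshape(3, 32, 32).transpose(1, 2, 0)
  print(fake_label, fake_image.shape)  # 0 (32, 32, 3)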
class ImagenetDataLayer(DataLayer):
@staticmethod
def get_required_params():
return dict(DataLayer.get_required_params(), **{
'data_dir': str,
})
@staticmethod
def get_optional_params():
return dict(DataLayer.get_optional_params(), **{
'num_parallel_calls': int,
'shuffle_buffer': int,
'image_size': int,
'num_classes': int,
})
def __init__(self, params, model, num_workers, worker_id):
super(ImagenetDataLayer, self).__init__(params, model,
num_workers, worker_id)
if self.params['mode'] == 'infer':
raise ValueError('Inference is not supported on ImagenetDataLayer')
if self.params['mode'] == 'train':
filenames = [
os.path.join(self.params['data_dir'],
'train-{:05d}-of-01024'.format(i))
for i in range(1024) # number of training files
]
else:
filenames = [
os.path.join(self.params['data_dir'],
'validation-{:05d}-of-00128'.format(i))
for i in range(128) # number of validation files
]
self._train_size = 1281167
self._valid_size = 0
self.file_names = self.split_data(filenames)
# TODO: rewrite this somehow?
if self.params['mode'] != 'train':
for file_name in self.file_names:
for _ in tf.python_io.tf_record_iterator(file_name):
self._valid_size += 1
self._iterator = None
self._input_tensors = None
def build_graph(self):
dataset = tf.data.Dataset.from_tensor_slices(self.file_names)
if self.params['shuffle']:
# shuffling input files
dataset = dataset.shuffle(buffer_size=1024)
# convert to individual records
dataset = dataset.flat_map(tf.data.TFRecordDataset)
dataset = dataset.prefetch(buffer_size=self.params['batch_size']*10)
if self.params['mode'] == 'train' and self.params['shuffle']:
print("training with shuffle")
# shuffling images
dataset = dataset.shuffle(buffer_size=self.params.get('shuffle_buffer',
1024))
dataset = dataset.repeat()
dataset = dataset.map(
lambda value: parse_record(
raw_record=value,
is_training=self.params['mode'] == 'train',
image_size=self.params.get('image_size', 224),
num_classes=self.params.get('num_classes', 1000),
),
num_parallel_calls=self.params.get('num_parallel_calls', 16),
)
dataset = dataset.batch(self.params['batch_size'])
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
self._iterator = dataset.make_initializable_iterator()
inputs, labels = self.iterator.get_next()
if self.params['mode'] == 'train':
tf.summary.image('augmented_images', inputs, max_outputs=1)
self._input_tensors = {
'source_tensors': [inputs],
'target_tensors': [labels],
}
def split_data(self, data):
if self.params['mode'] != 'train' and self._num_workers is not None:
size = len(data)
start = size // self._num_workers * self._worker_id
if self._worker_id == self._num_workers - 1:
end = size
else:
end = size // self._num_workers * (self._worker_id + 1)
return data[start:end]
return data
@property
def input_tensors(self):
return self._input_tensors
@property
def iterator(self):
return self._iterator
def get_size_in_samples(self):
if self.params['mode'] == 'train':
return self._train_size
return self._valid_size
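# Hedged worked example of split_data() above (worker counts are made up): with
# 128 validation files and 4 workers, worker 0 gets files [0, 32), worker 1 gets
# [32, 64), and the last worker absorbs any remainder.
if __name__ == '__main__':
  files = ['validation-{:05d}-of-00128'.format(i) for i in range(128)]
  num_workers = 4
  for worker_id in range(num_workers):
    start = len(files) // num_workers * worker_id
    end = len(files) if worker_id == num_workers - 1 \
        else len(files) // num_workers * (worker_id + 1)
    print(worker_id, len(files[start:end]))  # each worker gets 32 files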
| OpenSeq2Seq-master | open_seq2seq/data/image2label/image2label.py |