repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
models | models-master/research/adversarial_text/pretrain.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pretrains a recurrent language model.
Computational time:
2 days to train 100,000 steps with a 1-layer, 1024-hidden-unit LSTM,
256-dimensional embeddings, truncated backprop over 400 steps, and a
minibatch size of 256, on a single GPU (Pascal Titan X, cuDNN v5).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow as tf
import graphs
import train_utils
FLAGS = tf.app.flags.FLAGS
def main(_):
"""Trains Language Model."""
tf.logging.set_verbosity(tf.logging.INFO)
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
model = graphs.get_model()
train_op, loss, global_step = model.language_model_training()
train_utils.run_training(train_op, loss, global_step)
if __name__ == '__main__':
tf.app.run()
| 1,479 | 30.489362 | 80 | py |
models | models-master/research/adversarial_text/adversarial_losses.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adversarial losses for text models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from six.moves import xrange
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# Adversarial and virtual adversarial training parameters.
flags.DEFINE_float('perturb_norm_length', 5.0,
'Norm length of the adversarial perturbation; tune this '
'on a validation set. '
'5.0 works well on IMDB with virtual adversarial training.')
# Virtual adversarial training parameters
flags.DEFINE_integer('num_power_iteration', 1, 'The number of power iterations.')
flags.DEFINE_float('small_constant_for_finite_diff', 1e-1,
'Small constant for finite difference method')
# Parameters for building the graph
flags.DEFINE_string('adv_training_method', None,
'The flag which specifies training method. '
'"" : non-adversarial training (e.g. for running the '
' semi-supervised sequence learning model) '
'"rp" : random perturbation training '
'"at" : adversarial training '
'"vat" : virtual adversarial training '
'"atvat" : at + vat ')
flags.DEFINE_float('adv_reg_coeff', 1.0,
'Regularization coefficient of adversarial loss.')
def random_perturbation_loss(embedded, length, loss_fn):
"""Adds noise to embeddings and recomputes classification loss."""
noise = tf.random_normal(shape=tf.shape(embedded))
perturb = _scale_l2(_mask_by_length(noise, length), FLAGS.perturb_norm_length)
return loss_fn(embedded + perturb)
def adversarial_loss(embedded, loss, loss_fn):
"""Adds gradient to embedding and recomputes classification loss."""
grad, = tf.gradients(
loss,
embedded,
aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
grad = tf.stop_gradient(grad)
perturb = _scale_l2(grad, FLAGS.perturb_norm_length)
return loss_fn(embedded + perturb)
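# Editor's note (not part of the original file): written as an equation, the
# perturbation computed above is r_adv = norm_length * g / ||g||_2 with
# g = d(loss)/d(embedded); the gradient is treated as a constant via
# tf.stop_gradient, and _scale_l2 below performs the norm division in a
# numerically stable way.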
def virtual_adversarial_loss(logits, embedded, inputs,
logits_from_embedding_fn):
"""Virtual adversarial loss.
Computes virtual adversarial perturbation by finite difference method and
power iteration, adds it to the embedding, and computes the KL divergence
between the new logits and the original logits.
Args:
logits: 3-D float Tensor, [batch_size, num_timesteps, m], where m=1 if
num_classes=2, otherwise m=num_classes.
embedded: 3-D float Tensor, [batch_size, num_timesteps, embedding_dim].
inputs: VatxtInput.
logits_from_embedding_fn: callable that takes embeddings and returns
classifier logits.
Returns:
kl: float scalar.
"""
# Stop gradient of logits. See https://arxiv.org/abs/1507.00677 for details.
logits = tf.stop_gradient(logits)
# Only care about the KL divergence on the final timestep.
weights = inputs.eos_weights
assert weights is not None
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
weights = tf.expand_dims(tf.gather_nd(inputs.eos_weights, indices), 1)
# Initialize perturbation with random noise.
# shape(embedded) = (batch_size, num_timesteps, embedding_dim)
d = tf.random_normal(shape=tf.shape(embedded))
# Perform finite difference method and power iteration.
# See Eq.(8) in the paper http://arxiv.org/pdf/1507.00677.pdf,
# Adding small noise to input and taking gradient with respect to the noise
# corresponds to 1 power iteration.
for _ in xrange(FLAGS.num_power_iteration):
d = _scale_l2(
_mask_by_length(d, inputs.length), FLAGS.small_constant_for_finite_diff)
d_logits = logits_from_embedding_fn(embedded + d)
kl = _kl_divergence_with_logits(logits, d_logits, weights)
d, = tf.gradients(
kl,
d,
aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
d = tf.stop_gradient(d)
perturb = _scale_l2(d, FLAGS.perturb_norm_length)
vadv_logits = logits_from_embedding_fn(embedded + perturb)
return _kl_divergence_with_logits(logits, vadv_logits, weights)
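# Editor's note: the helper below is not part of the original file. It is a
# minimal sketch of how this regularizer is typically combined with the
# classification loss, using FLAGS.adv_reg_coeff defined above; the actual
# model wiring lives in graphs.py.
def _virtual_adversarial_training_loss_example(cl_loss, logits, embedded,
                                               inputs,
                                               logits_from_embedding_fn):
  """Returns the classifier loss plus the weighted VAT regularizer."""
  vat_loss = virtual_adversarial_loss(logits, embedded, inputs,
                                      logits_from_embedding_fn)
  return cl_loss + FLAGS.adv_reg_coeff * vat_loss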
def random_perturbation_loss_bidir(embedded, length, loss_fn):
"""Adds noise to embeddings and recomputes classification loss."""
noise = [tf.random_normal(shape=tf.shape(emb)) for emb in embedded]
masked = [_mask_by_length(n, length) for n in noise]
scaled = [_scale_l2(m, FLAGS.perturb_norm_length) for m in masked]
return loss_fn([e + s for (e, s) in zip(embedded, scaled)])
def adversarial_loss_bidir(embedded, loss, loss_fn):
"""Adds gradient to embeddings and recomputes classification loss."""
grads = tf.gradients(
loss,
embedded,
aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
adv_exs = [
emb + _scale_l2(tf.stop_gradient(g), FLAGS.perturb_norm_length)
for emb, g in zip(embedded, grads)
]
return loss_fn(adv_exs)
def virtual_adversarial_loss_bidir(logits, embedded, inputs,
logits_from_embedding_fn):
"""Virtual adversarial loss for bidirectional models."""
logits = tf.stop_gradient(logits)
f_inputs, _ = inputs
weights = f_inputs.eos_weights
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), f_inputs.length - 1], 1)
weights = tf.expand_dims(tf.gather_nd(f_inputs.eos_weights, indices), 1)
assert weights is not None
perturbs = [
_mask_by_length(tf.random_normal(shape=tf.shape(emb)), f_inputs.length)
for emb in embedded
]
for _ in xrange(FLAGS.num_power_iteration):
perturbs = [
_scale_l2(d, FLAGS.small_constant_for_finite_diff) for d in perturbs
]
d_logits = logits_from_embedding_fn(
[emb + d for (emb, d) in zip(embedded, perturbs)])
kl = _kl_divergence_with_logits(logits, d_logits, weights)
perturbs = tf.gradients(
kl,
perturbs,
aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
perturbs = [tf.stop_gradient(d) for d in perturbs]
perturbs = [_scale_l2(d, FLAGS.perturb_norm_length) for d in perturbs]
vadv_logits = logits_from_embedding_fn(
[emb + d for (emb, d) in zip(embedded, perturbs)])
return _kl_divergence_with_logits(logits, vadv_logits, weights)
def _mask_by_length(t, length):
"""Mask t, 3-D [batch, time, dim], by length, 1-D [batch,]."""
maxlen = t.get_shape().as_list()[1]
# Subtract 1 from length to keep the perturbation off the 'eos' token.
mask = tf.sequence_mask(length - 1, maxlen=maxlen)
mask = tf.expand_dims(tf.cast(mask, tf.float32), -1)
# shape(mask) = (batch, num_timesteps, 1)
return t * mask
def _scale_l2(x, norm_length):
# shape(x) = (batch, num_timesteps, d)
# Divide x by max(abs(x)) for a numerically stable L2 norm.
# 2norm(x) = a * 2norm(x/a)
# Scale over the full sequence, dims (1, 2)
alpha = tf.reduce_max(tf.abs(x), (1, 2), keep_dims=True) + 1e-12
l2_norm = alpha * tf.sqrt(
tf.reduce_sum(tf.pow(x / alpha, 2), (1, 2), keep_dims=True) + 1e-6)
x_unit = x / l2_norm
return norm_length * x_unit
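# Editor's note: the helper below is not part of the original file. It is a
# NumPy-only sketch of the same scale-to-fixed-L2-norm trick used by _scale_l2
# above (divide by the max before taking the norm for numerical stability),
# added purely as an illustration.
def _scale_l2_numpy_example(x, norm_length):
  """Rescales a (batch, time, dim) array so each example has norm norm_length."""
  import numpy as np
  alpha = np.max(np.abs(x), axis=(1, 2), keepdims=True) + 1e-12
  l2_norm = alpha * np.sqrt(
      np.sum((x / alpha) ** 2, axis=(1, 2), keepdims=True) + 1e-6)
  return norm_length * x / l2_norm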
def _kl_divergence_with_logits(q_logits, p_logits, weights):
"""Returns weighted KL divergence between distributions q and p.
Args:
q_logits: logits for 1st argument of KL divergence shape
[batch_size, num_timesteps, num_classes] if num_classes > 2, and
[batch_size, num_timesteps] if num_classes == 2.
p_logits: logits for 2nd argument of KL divergence with same shape q_logits.
weights: float tensor with shape [batch_size, num_timesteps].
Elements should be 1.0 only at the end of sequences.
Returns:
KL: float scalar.
"""
# For logistic regression
if FLAGS.num_classes == 2:
q = tf.nn.sigmoid(q_logits)
kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +
tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))
kl = tf.squeeze(kl, 2)
# For softmax regression
else:
q = tf.nn.softmax(q_logits)
kl = tf.reduce_sum(
q * (tf.nn.log_softmax(q_logits) - tf.nn.log_softmax(p_logits)), -1)
num_labels = tf.reduce_sum(weights)
num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)
kl.get_shape().assert_has_rank(2)
weights.get_shape().assert_has_rank(2)
loss = tf.identity(tf.reduce_sum(weights * kl) / num_labels, name='kl')
return loss
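# Editor's note: the helper below is not part of the original file. It is a
# NumPy-only sketch of the per-timestep softmax KL term computed above
# (before the eos weighting), added purely as an illustration.
def _softmax_kl_numpy_example(q_logits, p_logits):
  """Returns KL(q || p) along the last axis for two arrays of logits."""
  import numpy as np
  q_logits = np.asarray(q_logits, dtype=np.float64)
  p_logits = np.asarray(p_logits, dtype=np.float64)
  def log_softmax(z):
    z = z - np.max(z, axis=-1, keepdims=True)
    return z - np.log(np.sum(np.exp(z), axis=-1, keepdims=True))
  q = np.exp(log_softmax(q_logits))
  return np.sum(q * (log_softmax(q_logits) - log_softmax(p_logits)), axis=-1)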
| 9,185 | 37.759494 | 80 | py |
models | models-master/research/adversarial_text/evaluate.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates text classification model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
# Dependency imports
import tensorflow as tf
import graphs
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '',
'BNS name prefix of the Tensorflow eval master, '
'or "local".')
flags.DEFINE_string('eval_dir', '/tmp/text_eval',
'Directory where to write event logs.')
flags.DEFINE_string('eval_data', 'test', 'Specify which dataset is used. '
'("train", "valid", "test") ')
flags.DEFINE_string('checkpoint_dir', '/tmp/text_train',
'Directory where to read model checkpoints.')
flags.DEFINE_integer('eval_interval_secs', 60, 'How often to run the eval.')
flags.DEFINE_integer('num_examples', 32, 'Number of examples to run.')
flags.DEFINE_bool('run_once', False, 'Whether to run eval only once.')
def restore_from_checkpoint(sess, saver):
"""Restore model from checkpoint.
Args:
sess: Session.
saver: Saver for restoring the checkpoint.
Returns:
bool: Whether the checkpoint was found and restored
"""
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if not ckpt or not ckpt.model_checkpoint_path:
tf.logging.info('No checkpoint found at %s', FLAGS.checkpoint_dir)
return False
saver.restore(sess, ckpt.model_checkpoint_path)
return True
def run_eval(eval_ops, summary_writer, saver):
"""Runs evaluation over FLAGS.num_examples examples.
Args:
eval_ops: dict<metric name, tuple(value, update_op)>
summary_writer: Summary writer.
saver: Saver.
Returns:
dict<metric name, value>, with value being the average over all examples.
"""
sv = tf.train.Supervisor(
logdir=FLAGS.eval_dir, saver=None, summary_op=None, summary_writer=None)
with sv.managed_session(
master=FLAGS.master, start_standard_services=False) as sess:
if not restore_from_checkpoint(sess, saver):
return
sv.start_queue_runners(sess)
metric_names, ops = zip(*eval_ops.items())
value_ops, update_ops = zip(*ops)
value_ops_dict = dict(zip(metric_names, value_ops))
# Run update ops
num_batches = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
tf.logging.info('Running %d batches for evaluation.', num_batches)
for i in range(num_batches):
if (i + 1) % 10 == 0:
tf.logging.info('Running batch %d/%d...', i + 1, num_batches)
if (i + 1) % 50 == 0:
_log_values(sess, value_ops_dict)
sess.run(update_ops)
_log_values(sess, value_ops_dict, summary_writer=summary_writer)
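# Editor's note: illustrative only, not part of the original file. An eval_ops
# dict of the shape run_eval expects can be built from tf.metrics, assuming
# `labels` and `predictions` tensors are available:
#   eval_ops = {'accuracy': tf.metrics.accuracy(labels, predictions)}
# where each value is a (metric_value, update_op) tuple.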
def _log_values(sess, value_ops, summary_writer=None):
"""Evaluate, log, and write summaries of the eval metrics in value_ops."""
metric_names, value_ops = zip(*value_ops.items())
values = sess.run(value_ops)
tf.logging.info('Eval metric values:')
summary = tf.summary.Summary()
for name, val in zip(metric_names, values):
summary.value.add(tag=name, simple_value=val)
tf.logging.info('%s = %.3f', name, val)
if summary_writer is not None:
global_step_val = sess.run(tf.train.get_global_step())
tf.logging.info('Finished eval for step ' + str(global_step_val))
summary_writer.add_summary(summary, global_step_val)
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tf.gfile.MakeDirs(FLAGS.eval_dir)
tf.logging.info('Building eval graph...')
output = graphs.get_model().eval_graph(FLAGS.eval_data)
eval_ops, moving_averaged_variables = output
saver = tf.train.Saver(moving_averaged_variables)
summary_writer = tf.summary.FileWriter(
FLAGS.eval_dir, graph=tf.get_default_graph())
while True:
run_eval(eval_ops, summary_writer, saver)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
if __name__ == '__main__':
tf.app.run()
| 4,616 | 31.744681 | 80 | py |
models | models-master/research/adversarial_text/train_classifier.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains LSTM text classification model.
Model trains with adversarial or virtual adversarial training.
Computational time:
1.8 hours to train 10,000 steps without adversarial or virtual adversarial
training, with a 1-layer, 1024-hidden-unit LSTM, 256-dimensional embeddings,
truncated backprop over 400 steps, and a minibatch size of 64 on a single GPU
(Pascal Titan X, cuDNN v5).
4 hours to train 10,000 steps with adversarial or virtual adversarial
training under the same conditions.
To initialize embedding and LSTM cell weights from a pretrained model, set
FLAGS.pretrained_model_dir to the pretrained model's checkpoint directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow as tf
import graphs
import train_utils
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('pretrained_model_dir', None,
'Directory path to pretrained model to restore from')
def main(_):
"""Trains LSTM classification model."""
tf.logging.set_verbosity(tf.logging.INFO)
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
model = graphs.get_model()
train_op, loss, global_step = model.classifier_training()
train_utils.run_training(
train_op,
loss,
global_step,
variables_to_restore=model.pretrained_variables,
pretrained_model_dir=FLAGS.pretrained_model_dir)
if __name__ == '__main__':
tf.app.run()
| 2,146 | 32.546875 | 80 | py |
models | models-master/research/adversarial_text/train_utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for training adversarial text models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
# Dependency imports
import numpy as np
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'Master address.')
flags.DEFINE_integer('task', 0, 'Task id of the replica running the training.')
flags.DEFINE_integer('ps_tasks', 0, 'Number of parameter servers.')
flags.DEFINE_string('train_dir', '/tmp/text_train',
'Directory for logs and checkpoints.')
flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.')
flags.DEFINE_boolean('log_device_placement', False,
'Whether to log device placement.')
def run_training(train_op,
loss,
global_step,
variables_to_restore=None,
pretrained_model_dir=None):
"""Sets up and runs training loop."""
tf.gfile.MakeDirs(FLAGS.train_dir)
# Create pretrain Saver
if pretrained_model_dir:
assert variables_to_restore
tf.logging.info('Will attempt restore from %s: %s', pretrained_model_dir,
variables_to_restore)
saver_for_restore = tf.train.Saver(variables_to_restore)
# Init ops
if FLAGS.sync_replicas:
local_init_op = tf.get_collection('local_init_op')[0]
ready_for_local_init_op = tf.get_collection('ready_for_local_init_op')[0]
else:
local_init_op = tf.train.Supervisor.USE_DEFAULT
ready_for_local_init_op = tf.train.Supervisor.USE_DEFAULT
is_chief = FLAGS.task == 0
sv = tf.train.Supervisor(
logdir=FLAGS.train_dir,
is_chief=is_chief,
save_summaries_secs=30,
save_model_secs=30,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
global_step=global_step)
# Delay starting standard services to allow possible pretrained model restore.
with sv.managed_session(
master=FLAGS.master,
config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement),
start_standard_services=False) as sess:
# Initialization
if is_chief:
if pretrained_model_dir:
maybe_restore_pretrained_model(sess, saver_for_restore,
pretrained_model_dir)
if FLAGS.sync_replicas:
sess.run(tf.get_collection('chief_init_op')[0])
sv.start_standard_services(sess)
sv.start_queue_runners(sess)
# Training loop
global_step_val = 0
while not sv.should_stop() and global_step_val < FLAGS.max_steps:
global_step_val = train_step(sess, train_op, loss, global_step)
# Final checkpoint
if is_chief and global_step_val >= FLAGS.max_steps:
sv.saver.save(sess, sv.save_path, global_step=global_step)
def maybe_restore_pretrained_model(sess, saver_for_restore, model_dir):
"""Restores pretrained model if there is no ckpt model."""
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
checkpoint_exists = ckpt and ckpt.model_checkpoint_path
if checkpoint_exists:
tf.logging.info('Checkpoint exists in FLAGS.train_dir; skipping '
'pretraining restore')
return
pretrain_ckpt = tf.train.get_checkpoint_state(model_dir)
if not (pretrain_ckpt and pretrain_ckpt.model_checkpoint_path):
raise ValueError(
'Asked to restore model from %s but no checkpoint found.' % model_dir)
saver_for_restore.restore(sess, pretrain_ckpt.model_checkpoint_path)
def train_step(sess, train_op, loss, global_step):
"""Runs a single training step."""
start_time = time.time()
_, loss_val, global_step_val = sess.run([train_op, loss, global_step])
duration = time.time() - start_time
# Logging
if global_step_val % 10 == 0:
examples_per_sec = FLAGS.batch_size / duration
sec_per_batch = float(duration)
format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f ' 'sec/batch)')
tf.logging.info(format_str % (global_step_val, loss_val, examples_per_sec,
sec_per_batch))
if np.isnan(loss_val):
raise OverflowError('Loss is nan')
return global_step_val
| 4,858 | 35.261194 | 80 | py |
models | models-master/research/adversarial_text/graphs_test.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import operator
import os
import random
import shutil
import string
import tempfile
# Dependency imports
import tensorflow as tf
import graphs
from data import data_utils
flags = tf.app.flags
FLAGS = flags.FLAGS
data = data_utils
flags.DEFINE_integer('task', 0, 'Task id; needed for SyncReplicas test')
def _build_random_vocabulary(vocab_size=100):
"""Builds and returns a dict<term, id>."""
vocab = set()
while len(vocab) < (vocab_size - 1):
rand_word = ''.join(
random.choice(string.ascii_lowercase)
for _ in range(random.randint(1, 10)))
vocab.add(rand_word)
vocab_ids = dict([(word, i) for i, word in enumerate(vocab)])
vocab_ids[data.EOS_TOKEN] = vocab_size - 1
return vocab_ids
def _build_random_sequence(vocab_ids):
seq_len = random.randint(10, 200)
ids = list(vocab_ids.values())  # list() so random.choice works on Python 3
seq = data.SequenceWrapper()
for token_id in [random.choice(ids) for _ in range(seq_len)]:
seq.add_timestep().set_token(token_id)
return seq
def _build_vocab_frequencies(seqs, vocab_ids):
vocab_freqs = defaultdict(int)
ids_to_words = dict([(i, word) for word, i in vocab_ids.items()])
for seq in seqs:
for timestep in seq:
vocab_freqs[ids_to_words[timestep.token]] += 1
vocab_freqs[data.EOS_TOKEN] = 0
return vocab_freqs
class GraphsTest(tf.test.TestCase):
"""Test graph construction methods."""
@classmethod
def setUpClass(cls):
# Make model small
FLAGS.batch_size = 2
FLAGS.num_timesteps = 3
FLAGS.embedding_dims = 4
FLAGS.rnn_num_layers = 2
FLAGS.rnn_cell_size = 4
FLAGS.cl_num_layers = 2
FLAGS.cl_hidden_size = 4
FLAGS.vocab_size = 10
# Set input/output flags
FLAGS.data_dir = tempfile.mkdtemp()
# Build and write sequence files.
vocab_ids = _build_random_vocabulary(FLAGS.vocab_size)
seqs = [_build_random_sequence(vocab_ids) for _ in range(5)]
seqs_label = [
data.build_labeled_sequence(seq, random.choice([True, False]))
for seq in seqs
]
seqs_lm = [data.build_lm_sequence(seq) for seq in seqs]
seqs_ae = [data.build_seq_ae_sequence(seq) for seq in seqs]
seqs_rev = [data.build_reverse_sequence(seq) for seq in seqs]
seqs_bidir = [
data.build_bidirectional_seq(seq, rev)
for seq, rev in zip(seqs, seqs_rev)
]
seqs_bidir_label = [
data.build_labeled_sequence(bd_seq, random.choice([True, False]))
for bd_seq in seqs_bidir
]
filenames = [
data.TRAIN_CLASS, data.TRAIN_LM, data.TRAIN_SA, data.TEST_CLASS,
data.TRAIN_REV_LM, data.TRAIN_BD_CLASS, data.TEST_BD_CLASS
]
seq_lists = [
seqs_label, seqs_lm, seqs_ae, seqs_label, seqs_rev, seqs_bidir,
seqs_bidir_label
]
for fname, seq_list in zip(filenames, seq_lists):
with tf.python_io.TFRecordWriter(
os.path.join(FLAGS.data_dir, fname)) as writer:
for seq in seq_list:
writer.write(seq.seq.SerializeToString())
# Write vocab.txt and vocab_freq.txt
vocab_freqs = _build_vocab_frequencies(seqs, vocab_ids)
ordered_vocab_freqs = sorted(
vocab_freqs.items(), key=operator.itemgetter(1), reverse=True)
with open(os.path.join(FLAGS.data_dir, 'vocab.txt'), 'w') as vocab_f:
with open(os.path.join(FLAGS.data_dir, 'vocab_freq.txt'), 'w') as freq_f:
for word, freq in ordered_vocab_freqs:
vocab_f.write('{}\n'.format(word))
freq_f.write('{}\n'.format(freq))
@classmethod
def tearDownClass(cls):
shutil.rmtree(FLAGS.data_dir)
def setUp(self):
# Reset FLAGS
FLAGS.rnn_num_layers = 1
FLAGS.sync_replicas = False
FLAGS.adv_training_method = None
FLAGS.num_candidate_samples = -1
FLAGS.num_classes = 2
FLAGS.use_seq2seq_autoencoder = False
# Reset Graph
tf.reset_default_graph()
def testClassifierGraph(self):
FLAGS.rnn_num_layers = 2
model = graphs.VatxtModel()
train_op, _, _ = model.classifier_training()
# Pretrained vars: embedding + LSTM layers
self.assertEqual(
len(model.pretrained_variables), 1 + 2 * FLAGS.rnn_num_layers)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess)
sess.run(train_op)
def testLanguageModelGraph(self):
train_op, _, _ = graphs.VatxtModel().language_model_training()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess)
sess.run(train_op)
def testMulticlass(self):
FLAGS.num_classes = 10
graphs.VatxtModel().classifier_graph()
def testATMethods(self):
at_methods = [None, 'rp', 'at', 'vat', 'atvat']
for method in at_methods:
FLAGS.adv_training_method = method
with tf.Graph().as_default():
graphs.VatxtModel().classifier_graph()
# Ensure variables have been reused
# Embedding + LSTM layers + hidden layers + logits layer
expected_num_vars = 1 + 2 * FLAGS.rnn_num_layers + 2 * (
FLAGS.cl_num_layers) + 2
self.assertEqual(len(tf.trainable_variables()), expected_num_vars)
def testSyncReplicas(self):
FLAGS.sync_replicas = True
graphs.VatxtModel().language_model_training()
def testCandidateSampling(self):
FLAGS.num_candidate_samples = 10
graphs.VatxtModel().language_model_training()
def testSeqAE(self):
FLAGS.use_seq2seq_autoencoder = True
graphs.VatxtModel().language_model_training()
def testBidirLM(self):
graphs.VatxtBidirModel().language_model_graph()
def testBidirClassifier(self):
at_methods = [None, 'rp', 'at', 'vat', 'atvat']
for method in at_methods:
FLAGS.adv_training_method = method
with tf.Graph().as_default():
graphs.VatxtBidirModel().classifier_graph()
# Ensure variables have been reused
# Embedding + 2 LSTM layers + hidden layers + logits layer
expected_num_vars = 1 + 2 * 2 * FLAGS.rnn_num_layers + 2 * (
FLAGS.cl_num_layers) + 2
self.assertEqual(len(tf.trainable_variables()), expected_num_vars)
def testEvalGraph(self):
_, _ = graphs.VatxtModel().eval_graph()
def testBidirEvalGraph(self):
_, _ = graphs.VatxtBidirModel().eval_graph()
if __name__ == '__main__':
tf.test.main()
| 7,160 | 30.685841 | 80 | py |
models | models-master/research/adversarial_text/layers.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for VatxtModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from six.moves import xrange
import tensorflow as tf
K = tf.keras
def cl_logits_subgraph(layer_sizes, input_size, num_classes, keep_prob=1.):
"""Construct multiple ReLU layers with dropout and a linear layer."""
subgraph = K.models.Sequential(name='cl_logits')
for i, layer_size in enumerate(layer_sizes):
if i == 0:
subgraph.add(
K.layers.Dense(layer_size, activation='relu', input_dim=input_size))
else:
subgraph.add(K.layers.Dense(layer_size, activation='relu'))
if keep_prob < 1.:
subgraph.add(K.layers.Dropout(1. - keep_prob))
subgraph.add(K.layers.Dense(1 if num_classes == 2 else num_classes))
return subgraph
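# Editor's note: the helper below is not part of the original file; it is a
# usage sketch only. The layer sizes, input size, and dropout value are
# arbitrary example numbers.
def _cl_logits_subgraph_example():
  """Builds a two-layer ReLU stack with a binary logits output."""
  return cl_logits_subgraph(layer_sizes=[30, 30], input_size=512,
                            num_classes=2, keep_prob=0.8)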
class Embedding(K.layers.Layer):
"""Embedding layer with frequency-based normalization and dropout."""
def __init__(self,
vocab_size,
embedding_dim,
normalize=False,
vocab_freqs=None,
keep_prob=1.,
**kwargs):
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.normalized = normalize
self.keep_prob = keep_prob
if normalize:
assert vocab_freqs is not None
self.vocab_freqs = tf.constant(
vocab_freqs, dtype=tf.float32, shape=(vocab_size, 1))
super(Embedding, self).__init__(**kwargs)
def build(self, input_shape):
with tf.device('/cpu:0'):
self.var = self.add_weight(
shape=(self.vocab_size, self.embedding_dim),
initializer=tf.random_uniform_initializer(-1., 1.),
name='embedding',
dtype=tf.float32)
if self.normalized:
self.var = self._normalize(self.var)
super(Embedding, self).build(input_shape)
def call(self, x):
embedded = tf.nn.embedding_lookup(self.var, x)
if self.keep_prob < 1.:
shape = embedded.get_shape().as_list()
# Use same dropout masks at each timestep with specifying noise_shape.
# This slightly improves performance.
# Please see https://arxiv.org/abs/1512.05287 for the theoretical
# explanation.
embedded = tf.nn.dropout(
embedded, self.keep_prob, noise_shape=(shape[0], 1, shape[2]))
return embedded
def _normalize(self, emb):
weights = self.vocab_freqs / tf.reduce_sum(self.vocab_freqs)
mean = tf.reduce_sum(weights * emb, 0, keep_dims=True)
var = tf.reduce_sum(weights * tf.pow(emb - mean, 2.), 0, keep_dims=True)
stddev = tf.sqrt(1e-6 + var)
return (emb - mean) / stddev
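# Editor's note: the helper below is not part of the original file. It is a
# NumPy-only sketch of the frequency-weighted standardization performed by
# Embedding._normalize above, added purely as an illustration.
def _normalize_embedding_numpy_example(emb, vocab_freqs):
  """Standardizes embeddings using vocabulary frequencies as weights."""
  import numpy as np
  emb = np.asarray(emb, dtype=np.float64)                        # (vocab, dim)
  weights = np.asarray(vocab_freqs, dtype=np.float64).reshape(-1, 1)
  weights = weights / np.sum(weights)
  mean = np.sum(weights * emb, axis=0, keepdims=True)
  var = np.sum(weights * (emb - mean) ** 2, axis=0, keepdims=True)
  return (emb - mean) / np.sqrt(1e-6 + var)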
class LSTM(object):
"""LSTM layer using dynamic_rnn.
Exposes variables in `trainable_weights` property.
"""
def __init__(self, cell_size, num_layers=1, keep_prob=1., name='LSTM'):
self.cell_size = cell_size
self.num_layers = num_layers
self.keep_prob = keep_prob
self.reuse = None
self.trainable_weights = None
self.name = name
def __call__(self, x, initial_state, seq_length):
with tf.variable_scope(self.name, reuse=self.reuse) as vs:
cell = tf.contrib.rnn.MultiRNNCell([
tf.contrib.rnn.BasicLSTMCell(
self.cell_size,
forget_bias=0.0,
reuse=tf.get_variable_scope().reuse)
for _ in xrange(self.num_layers)
])
# shape(x) = (batch_size, num_timesteps, embedding_dim)
lstm_out, next_state = tf.nn.dynamic_rnn(
cell, x, initial_state=initial_state, sequence_length=seq_length)
# shape(lstm_out) = (batch_size, timesteps, cell_size)
if self.keep_prob < 1.:
lstm_out = tf.nn.dropout(lstm_out, self.keep_prob)
if self.reuse is None:
self.trainable_weights = vs.global_variables()
self.reuse = True
return lstm_out, next_state
class SoftmaxLoss(K.layers.Layer):
"""Softmax xentropy loss with candidate sampling."""
def __init__(self,
vocab_size,
num_candidate_samples=-1,
vocab_freqs=None,
**kwargs):
self.vocab_size = vocab_size
self.num_candidate_samples = num_candidate_samples
self.vocab_freqs = vocab_freqs
super(SoftmaxLoss, self).__init__(**kwargs)
self.multiclass_dense_layer = K.layers.Dense(self.vocab_size)
def build(self, input_shape):
input_shape = input_shape[0].as_list()
with tf.device('/cpu:0'):
self.lin_w = self.add_weight(
shape=(input_shape[-1], self.vocab_size),
name='lm_lin_w',
initializer=K.initializers.glorot_uniform())
self.lin_b = self.add_weight(
shape=(self.vocab_size,),
name='lm_lin_b',
initializer=K.initializers.glorot_uniform())
self.multiclass_dense_layer.build(input_shape)
super(SoftmaxLoss, self).build(input_shape)
def call(self, inputs):
x, labels, weights = inputs
if self.num_candidate_samples > -1:
assert self.vocab_freqs is not None
labels_reshaped = tf.reshape(labels, [-1])
labels_reshaped = tf.expand_dims(labels_reshaped, -1)
sampled = tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_reshaped,
num_true=1,
num_sampled=self.num_candidate_samples,
unique=True,
range_max=self.vocab_size,
unigrams=self.vocab_freqs)
inputs_reshaped = tf.reshape(x, [-1, int(x.get_shape()[2])])
lm_loss = tf.nn.sampled_softmax_loss(
weights=tf.transpose(self.lin_w),
biases=self.lin_b,
labels=labels_reshaped,
inputs=inputs_reshaped,
num_sampled=self.num_candidate_samples,
num_classes=self.vocab_size,
sampled_values=sampled)
lm_loss = tf.reshape(
lm_loss,
[int(x.get_shape()[0]), int(x.get_shape()[1])])
else:
logits = self.multiclass_dense_layer(x)
lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
lm_loss = tf.identity(
tf.reduce_sum(lm_loss * weights) / _num_labels(weights),
name='lm_xentropy_loss')
return lm_loss
def classification_loss(logits, labels, weights):
"""Computes cross entropy loss between logits and labels.
Args:
logits: 2-D [timesteps*batch_size, m] float tensor, where m=1 if
num_classes=2, otherwise m=num_classes.
labels: 1-D [timesteps*batch_size] integer tensor.
weights: 1-D [timesteps*batch_size] float tensor.
Returns:
Loss scalar of type float.
"""
inner_dim = logits.get_shape().as_list()[-1]
with tf.name_scope('classifier_loss'):
# Logistic loss
if inner_dim == 1:
loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=tf.squeeze(logits, -1), labels=tf.cast(labels, tf.float32))
# Softmax loss
else:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
num_lab = _num_labels(weights)
tf.summary.scalar('num_labels', num_lab)
return tf.identity(
tf.reduce_sum(weights * loss) / num_lab, name='classification_xentropy')
def accuracy(logits, targets, weights):
"""Computes prediction accuracy.
Args:
logits: 2-D classifier logits [timesteps*batch_size, num_classes]
targets: 1-D [timesteps*batch_size] integer tensor.
weights: 1-D [timesteps*batch_size] float tensor.
Returns:
Accuracy: float scalar.
"""
with tf.name_scope('accuracy'):
eq = tf.cast(tf.equal(predictions(logits), targets), tf.float32)
return tf.identity(
tf.reduce_sum(weights * eq) / _num_labels(weights), name='accuracy')
def predictions(logits):
"""Class prediction from logits."""
inner_dim = logits.get_shape().as_list()[-1]
with tf.name_scope('predictions'):
# For binary classification
if inner_dim == 1:
pred = tf.cast(tf.greater(tf.squeeze(logits, -1), 0.), tf.int64)
# For multi-class classification
else:
pred = tf.argmax(logits, 2)
return pred
def _num_labels(weights):
"""Number of 1's in weights. Returns 1. if 0."""
num_labels = tf.reduce_sum(weights)
num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)
return num_labels
def optimize(loss,
global_step,
max_grad_norm,
lr,
lr_decay,
sync_replicas=False,
replicas_to_aggregate=1,
task_id=0):
"""Builds optimization graph.
* Creates an optimizer, and optionally wraps with SyncReplicasOptimizer
* Computes, clips, and applies gradients
* Maintains moving averages for all trainable variables
* Summarizes variables and gradients
Args:
loss: scalar loss to minimize.
global_step: integer scalar Variable.
max_grad_norm: float scalar. Grads will be clipped to this value.
lr: float scalar, learning rate.
lr_decay: float scalar, learning rate decay rate.
sync_replicas: bool, whether to use SyncReplicasOptimizer.
replicas_to_aggregate: int, number of replicas to aggregate when using
SyncReplicasOptimizer.
task_id: int, id of the current task; used to ensure proper initialization
of SyncReplicasOptimizer.
Returns:
train_op
"""
with tf.name_scope('optimization'):
# Compute gradients.
tvars = tf.trainable_variables()
grads = tf.gradients(
loss,
tvars,
aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
# Clip non-embedding grads
non_embedding_grads_and_vars = [(g, v) for (g, v) in zip(grads, tvars)
if 'embedding' not in v.op.name]
embedding_grads_and_vars = [(g, v) for (g, v) in zip(grads, tvars)
if 'embedding' in v.op.name]
ne_grads, ne_vars = zip(*non_embedding_grads_and_vars)
ne_grads, _ = tf.clip_by_global_norm(ne_grads, max_grad_norm)
non_embedding_grads_and_vars = zip(ne_grads, ne_vars)
grads_and_vars = embedding_grads_and_vars + list(non_embedding_grads_and_vars)
# Summarize
_summarize_vars_and_grads(grads_and_vars)
# Decaying learning rate
lr = tf.train.exponential_decay(
lr, global_step, 1, lr_decay, staircase=True)
tf.summary.scalar('learning_rate', lr)
opt = tf.train.AdamOptimizer(lr)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(0.999, global_step)
# Apply gradients
if sync_replicas:
opt = tf.train.SyncReplicasOptimizer(
opt,
replicas_to_aggregate,
variable_averages=variable_averages,
variables_to_average=tvars,
total_num_replicas=replicas_to_aggregate)
apply_gradient_op = opt.apply_gradients(
grads_and_vars, global_step=global_step)
with tf.control_dependencies([apply_gradient_op]):
train_op = tf.no_op(name='train_op')
# Initialization ops
tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS,
opt.get_chief_queue_runner())
if task_id == 0: # Chief task
local_init_op = opt.chief_init_op
tf.add_to_collection('chief_init_op', opt.get_init_tokens_op())
else:
local_init_op = opt.local_step_init_op
tf.add_to_collection('local_init_op', local_init_op)
tf.add_to_collection('ready_for_local_init_op',
opt.ready_for_local_init_op)
else:
# Non-sync optimizer
apply_gradient_op = opt.apply_gradients(grads_and_vars, global_step)
with tf.control_dependencies([apply_gradient_op]):
train_op = variable_averages.apply(tvars)
return train_op
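# Editor's note: the helper below is not part of the original file; it is a
# usage sketch only, with placeholder hyperparameter values. The real call
# sites live in the model-building code (graphs.py).
def _optimize_usage_example(loss, global_step):
  """Builds a train op that clips gradients and applies Adam with decay."""
  return optimize(loss, global_step, max_grad_norm=1.0, lr=0.001,
                  lr_decay=0.9999)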
def _summarize_vars_and_grads(grads_and_vars):
tf.logging.info('Trainable variables:')
tf.logging.info('-' * 60)
for grad, var in grads_and_vars:
tf.logging.info(var)
def tag(name, v=var):
return v.op.name + '_' + name
# Variable summary
mean = tf.reduce_mean(var)
tf.summary.scalar(tag('mean'), mean)
with tf.name_scope(tag('stddev')):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar(tag('stddev'), stddev)
tf.summary.scalar(tag('max'), tf.reduce_max(var))
tf.summary.scalar(tag('min'), tf.reduce_min(var))
tf.summary.histogram(tag('histogram'), var)
# Gradient summary
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
tf.summary.histogram(tag('gradient'), grad_values)
tf.summary.scalar(tag('gradient_norm'), tf.global_norm([grad_values]))
else:
tf.logging.info('Var %s has no gradient', var.op.name)
| 13,373 | 32.603015 | 82 | py |
models | models-master/research/adversarial_text/gen_data.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create TFRecord files of SequenceExample protos from dataset.
Constructs 3 datasets:
1. Labeled data for the LSTM classification model, optionally with label gain.
"*_classification.tfrecords" (for both unidirectional and bidirectional
models).
2. Data for the unsupervised LM-LSTM model that predicts the next token.
"*_lm.tfrecords" (generates forward and reverse data).
3. Data for the unsupervised SA-LSTM model that uses Seq2Seq.
"*_sa.tfrecords".
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
# Dependency imports
import tensorflow as tf
from data import data_utils
from data import document_generators
data = data_utils
flags = tf.app.flags
FLAGS = flags.FLAGS
# Flags for input data are in document_generators.py
flags.DEFINE_string('vocab_file', '', 'Path to the vocabulary file. Defaults '
'to FLAGS.output_dir/vocab.txt.')
flags.DEFINE_string('output_dir', '', 'Path to save tfrecords.')
# Config
flags.DEFINE_boolean('label_gain', False,
'Enable linear label gain. If True, sentiment label will '
'be included at each timestep with linear weight '
'increase.')
def build_shuffling_tf_record_writer(fname):
return data.ShufflingTFRecordWriter(os.path.join(FLAGS.output_dir, fname))
def build_tf_record_writer(fname):
return tf.python_io.TFRecordWriter(os.path.join(FLAGS.output_dir, fname))
def build_input_sequence(doc, vocab_ids):
"""Builds input sequence from file.
Splits lines on whitespace. Treats punctuation as whitespace. For word-level
sequences, only keeps terms that are in the vocab.
Terms are added as token in the SequenceExample. The EOS_TOKEN is also
appended. Label and weight features are set to 0.
Args:
doc: Document (defined in `document_generators`) from which to build the
sequence.
vocab_ids: dict<term, id>.
Returns:
SequenceExampleWrapper.
"""
seq = data.SequenceWrapper()
for token in document_generators.tokens(doc):
if token in vocab_ids:
seq.add_timestep().set_token(vocab_ids[token])
# Add EOS token to end
seq.add_timestep().set_token(vocab_ids[data.EOS_TOKEN])
return seq
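# Editor's note: illustrative example only, not part of the original file.
# With vocab_ids = {'great': 0, 'movie': 1, data.EOS_TOKEN: 2} and a document
# whose tokens are ['great', 'movie'], the sequence built above holds the
# token ids [0, 1, 2]: both in-vocab terms followed by the <eos> token.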
def make_vocab_ids(vocab_filename):
if FLAGS.output_char:
ret = dict([(char, i) for i, char in enumerate(string.printable)])
ret[data.EOS_TOKEN] = len(string.printable)
return ret
else:
with open(vocab_filename, encoding='utf-8') as vocab_f:
return dict([(line.strip(), i) for i, line in enumerate(vocab_f)])
def generate_training_data(vocab_ids, writer_lm_all, writer_seq_ae_all):
"""Generates training data."""
# Construct training data writers
writer_lm = build_shuffling_tf_record_writer(data.TRAIN_LM)
writer_seq_ae = build_shuffling_tf_record_writer(data.TRAIN_SA)
writer_class = build_shuffling_tf_record_writer(data.TRAIN_CLASS)
writer_valid_class = build_tf_record_writer(data.VALID_CLASS)
writer_rev_lm = build_shuffling_tf_record_writer(data.TRAIN_REV_LM)
writer_bd_class = build_shuffling_tf_record_writer(data.TRAIN_BD_CLASS)
writer_bd_valid_class = build_shuffling_tf_record_writer(data.VALID_BD_CLASS)
for doc in document_generators.documents(
dataset='train', include_unlabeled=True, include_validation=True):
input_seq = build_input_sequence(doc, vocab_ids)
if len(input_seq) < 2:
continue
rev_seq = data.build_reverse_sequence(input_seq)
lm_seq = data.build_lm_sequence(input_seq)
rev_lm_seq = data.build_lm_sequence(rev_seq)
seq_ae_seq = data.build_seq_ae_sequence(input_seq)
if doc.label is not None:
# Used for sentiment classification.
label_seq = data.build_labeled_sequence(
input_seq,
doc.label,
label_gain=(FLAGS.label_gain and not doc.is_validation))
bd_label_seq = data.build_labeled_sequence(
data.build_bidirectional_seq(input_seq, rev_seq),
doc.label,
label_gain=(FLAGS.label_gain and not doc.is_validation))
class_writer = writer_valid_class if doc.is_validation else writer_class
bd_class_writer = (writer_bd_valid_class
if doc.is_validation else writer_bd_class)
class_writer.write(label_seq.seq.SerializeToString())
bd_class_writer.write(bd_label_seq.seq.SerializeToString())
# Write
lm_seq_ser = lm_seq.seq.SerializeToString()
seq_ae_seq_ser = seq_ae_seq.seq.SerializeToString()
writer_lm_all.write(lm_seq_ser)
writer_seq_ae_all.write(seq_ae_seq_ser)
if not doc.is_validation:
writer_lm.write(lm_seq_ser)
writer_rev_lm.write(rev_lm_seq.seq.SerializeToString())
writer_seq_ae.write(seq_ae_seq_ser)
# Close writers
writer_lm.close()
writer_seq_ae.close()
writer_class.close()
writer_valid_class.close()
writer_rev_lm.close()
writer_bd_class.close()
writer_bd_valid_class.close()
def generate_test_data(vocab_ids, writer_lm_all, writer_seq_ae_all):
"""Generates test data."""
# Construct test data writers
writer_lm = build_shuffling_tf_record_writer(data.TEST_LM)
writer_rev_lm = build_shuffling_tf_record_writer(data.TEST_REV_LM)
writer_seq_ae = build_shuffling_tf_record_writer(data.TEST_SA)
writer_class = build_tf_record_writer(data.TEST_CLASS)
writer_bd_class = build_shuffling_tf_record_writer(data.TEST_BD_CLASS)
for doc in document_generators.documents(
dataset='test', include_unlabeled=False, include_validation=True):
input_seq = build_input_sequence(doc, vocab_ids)
if len(input_seq) < 2:
continue
rev_seq = data.build_reverse_sequence(input_seq)
lm_seq = data.build_lm_sequence(input_seq)
rev_lm_seq = data.build_lm_sequence(rev_seq)
seq_ae_seq = data.build_seq_ae_sequence(input_seq)
label_seq = data.build_labeled_sequence(input_seq, doc.label)
bd_label_seq = data.build_labeled_sequence(
data.build_bidirectional_seq(input_seq, rev_seq), doc.label)
# Write
writer_class.write(label_seq.seq.SerializeToString())
writer_bd_class.write(bd_label_seq.seq.SerializeToString())
lm_seq_ser = lm_seq.seq.SerializeToString()
seq_ae_seq_ser = seq_ae_seq.seq.SerializeToString()
writer_lm.write(lm_seq_ser)
writer_rev_lm.write(rev_lm_seq.seq.SerializeToString())
writer_seq_ae.write(seq_ae_seq_ser)
writer_lm_all.write(lm_seq_ser)
writer_seq_ae_all.write(seq_ae_seq_ser)
# Close test writers
writer_lm.close()
writer_rev_lm.close()
writer_seq_ae.close()
writer_class.close()
writer_bd_class.close()
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info('Assigning vocabulary ids...')
vocab_ids = make_vocab_ids(
FLAGS.vocab_file or os.path.join(FLAGS.output_dir, 'vocab.txt'))
with build_shuffling_tf_record_writer(data.ALL_LM) as writer_lm_all:
with build_shuffling_tf_record_writer(data.ALL_SA) as writer_seq_ae_all:
tf.logging.info('Generating training data...')
generate_training_data(vocab_ids, writer_lm_all, writer_seq_ae_all)
tf.logging.info('Generating test data...')
generate_test_data(vocab_ids, writer_lm_all, writer_seq_ae_all)
if __name__ == '__main__':
tf.app.run()
| 7,967 | 35.550459 | 80 | py |
models | models-master/research/adversarial_text/inputs.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input utils for virtual adversarial text classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
import tensorflow as tf
from data import data_utils
class VatxtInput(object):
"""Wrapper around NextQueuedSequenceBatch."""
def __init__(self,
batch,
state_name=None,
tokens=None,
num_states=0,
eos_id=None):
"""Construct VatxtInput.
Args:
batch: NextQueuedSequenceBatch.
state_name: str, name of state to fetch and save.
tokens: int Tensor, tokens. Defaults to batch's F_TOKEN_ID sequence.
num_states: int The number of states to store.
eos_id: int, id of the end-of-sequence token.
"""
self._batch = batch
self._state_name = state_name
self._tokens = (tokens if tokens is not None else
batch.sequences[data_utils.SequenceWrapper.F_TOKEN_ID])
self._num_states = num_states
w = batch.sequences[data_utils.SequenceWrapper.F_WEIGHT]
self._weights = w
l = batch.sequences[data_utils.SequenceWrapper.F_LABEL]
self._labels = l
# eos weights
self._eos_weights = None
if eos_id:
ew = tf.cast(tf.equal(self._tokens, eos_id), tf.float32)
self._eos_weights = ew
@property
def tokens(self):
return self._tokens
@property
def weights(self):
return self._weights
@property
def eos_weights(self):
return self._eos_weights
@property
def labels(self):
return self._labels
@property
def length(self):
return self._batch.length
@property
def state_name(self):
return self._state_name
@property
def state(self):
# LSTM tuple states
state_names = _get_tuple_state_names(self._num_states, self._state_name)
return tuple([
tf.contrib.rnn.LSTMStateTuple(
self._batch.state(c_name), self._batch.state(h_name))
for c_name, h_name in state_names
])
def save_state(self, value):
# LSTM tuple states
state_names = _get_tuple_state_names(self._num_states, self._state_name)
save_ops = []
for (c_state, h_state), (c_name, h_name) in zip(value, state_names):
save_ops.append(self._batch.save_state(c_name, c_state))
save_ops.append(self._batch.save_state(h_name, h_state))
return tf.group(*save_ops)
def _get_tuple_state_names(num_states, base_name):
"""Returns state names for use with LSTM tuple state."""
state_names = [('{}_{}_c'.format(i, base_name), '{}_{}_h'.format(
i, base_name)) for i in range(num_states)]
return state_names
def _split_bidir_tokens(batch):
tokens = batch.sequences[data_utils.SequenceWrapper.F_TOKEN_ID]
# Tokens have shape [batch, time, 2]
# forward and reverse have shape [batch, time].
forward, reverse = [
tf.squeeze(t, axis=[2]) for t in tf.split(tokens, 2, axis=2)
]
return forward, reverse
def _filenames_for_data_spec(phase, bidir, pretrain, use_seq2seq):
"""Returns input filenames for configuration.
Args:
phase: str, 'train', 'test', or 'valid'.
bidir: bool, bidirectional model.
pretrain: bool, pretraining or classification.
use_seq2seq: bool, seq2seq data, only valid if pretrain=True.
Returns:
Tuple of filenames.
Raises:
ValueError: if an invalid combination of arguments is provided that does not
map to any data files (e.g. pretrain=False, use_seq2seq=True).
"""
data_spec = (phase, bidir, pretrain, use_seq2seq)
data_specs = {
('train', True, True, False): (data_utils.TRAIN_LM,
data_utils.TRAIN_REV_LM),
('train', True, False, False): (data_utils.TRAIN_BD_CLASS,),
('train', False, True, False): (data_utils.TRAIN_LM,),
('train', False, True, True): (data_utils.TRAIN_SA,),
('train', False, False, False): (data_utils.TRAIN_CLASS,),
('test', True, True, False): (data_utils.TEST_LM,
data_utils.TRAIN_REV_LM),
('test', True, False, False): (data_utils.TEST_BD_CLASS,),
('test', False, True, False): (data_utils.TEST_LM,),
('test', False, True, True): (data_utils.TEST_SA,),
('test', False, False, False): (data_utils.TEST_CLASS,),
('valid', True, False, False): (data_utils.VALID_BD_CLASS,),
('valid', False, False, False): (data_utils.VALID_CLASS,),
}
if data_spec not in data_specs:
raise ValueError(
'Data specification (phase, bidir, pretrain, use_seq2seq) %s not '
'supported' % str(data_spec))
return data_specs[data_spec]
def _read_single_sequence_example(file_list, tokens_shape=None):
"""Reads and parses SequenceExamples from TFRecord-encoded file_list."""
tf.logging.info('Constructing TFRecordReader from files: %s', file_list)
file_queue = tf.train.string_input_producer(file_list)
reader = tf.TFRecordReader()
seq_key, serialized_record = reader.read(file_queue)
ctx, sequence = tf.parse_single_sequence_example(
serialized_record,
sequence_features={
data_utils.SequenceWrapper.F_TOKEN_ID:
tf.FixedLenSequenceFeature(tokens_shape or [], dtype=tf.int64),
data_utils.SequenceWrapper.F_LABEL:
tf.FixedLenSequenceFeature([], dtype=tf.int64),
data_utils.SequenceWrapper.F_WEIGHT:
tf.FixedLenSequenceFeature([], dtype=tf.float32),
})
return seq_key, ctx, sequence
def _read_and_batch(data_dir,
fname,
state_name,
state_size,
num_layers,
unroll_steps,
batch_size,
bidir_input=False):
"""Inputs for text model.
Args:
data_dir: str, directory containing TFRecord files of SequenceExample.
fname: str, input file name.
state_name: string, key for saved state of LSTM.
state_size: int, size of LSTM state.
num_layers: int, the number of layers in the LSTM.
unroll_steps: int, number of timesteps to unroll for TBTT.
batch_size: int, batch size.
bidir_input: bool, whether the input is bidirectional. If True, creates 2
states, state_name and state_name + '_reverse'.
Returns:
Instance of NextQueuedSequenceBatch
Raises:
ValueError: if file for input specification is not found.
"""
data_path = os.path.join(data_dir, fname)
if not tf.gfile.Exists(data_path):
raise ValueError('Failed to find file: %s' % data_path)
tokens_shape = [2] if bidir_input else []
seq_key, ctx, sequence = _read_single_sequence_example(
[data_path], tokens_shape=tokens_shape)
# Set up stateful queue reader.
state_names = _get_tuple_state_names(num_layers, state_name)
initial_states = {}
for c_state, h_state in state_names:
initial_states[c_state] = tf.zeros(state_size)
initial_states[h_state] = tf.zeros(state_size)
if bidir_input:
rev_state_names = _get_tuple_state_names(num_layers,
'{}_reverse'.format(state_name))
for rev_c_state, rev_h_state in rev_state_names:
initial_states[rev_c_state] = tf.zeros(state_size)
initial_states[rev_h_state] = tf.zeros(state_size)
batch = tf.contrib.training.batch_sequences_with_states(
input_key=seq_key,
input_sequences=sequence,
input_context=ctx,
input_length=tf.shape(sequence['token_id'])[0],
initial_states=initial_states,
num_unroll=unroll_steps,
batch_size=batch_size,
allow_small_batch=False,
num_threads=4,
capacity=batch_size * 10,
make_keys_unique=True,
make_keys_unique_seed=29392)
return batch
def inputs(data_dir=None,
phase='train',
bidir=False,
pretrain=False,
use_seq2seq=False,
state_name='lstm',
state_size=None,
num_layers=0,
batch_size=32,
unroll_steps=100,
eos_id=None):
"""Inputs for text model.
Args:
data_dir: str, directory containing TFRecord files of SequenceExample.
phase: str, dataset for evaluation {'train', 'valid', 'test'}.
bidir: bool, bidirectional LSTM.
pretrain: bool, whether to read pretraining data or classification data.
use_seq2seq: bool, whether to read seq2seq data or the language model data.
state_name: string, key for saved state of LSTM.
state_size: int, size of LSTM state.
num_layers: int, the number of LSTM layers.
batch_size: int, batch size.
unroll_steps: int, number of timesteps to unroll for TBTT.
eos_id: int, id of the end-of-sequence token; used for the KL weights in VAT.
Returns:
Instance of VatxtInput (x2 if bidir=True and pretrain=True, i.e. forward and
reverse).
"""
with tf.name_scope('inputs'):
filenames = _filenames_for_data_spec(phase, bidir, pretrain, use_seq2seq)
if bidir and pretrain:
# Bidirectional pretraining
# Requires separate forward and reverse language model data.
forward_fname, reverse_fname = filenames
forward_batch = _read_and_batch(data_dir, forward_fname, state_name,
state_size, num_layers, unroll_steps,
batch_size)
state_name_rev = state_name + '_reverse'
reverse_batch = _read_and_batch(data_dir, reverse_fname, state_name_rev,
state_size, num_layers, unroll_steps,
batch_size)
forward_input = VatxtInput(
forward_batch,
state_name=state_name,
num_states=num_layers,
eos_id=eos_id)
reverse_input = VatxtInput(
reverse_batch,
state_name=state_name_rev,
num_states=num_layers,
eos_id=eos_id)
return forward_input, reverse_input
elif bidir:
# Classifier bidirectional LSTM
# Shared data source, but separate token/state streams
fname, = filenames
batch = _read_and_batch(
data_dir,
fname,
state_name,
state_size,
num_layers,
unroll_steps,
batch_size,
bidir_input=True)
forward_tokens, reverse_tokens = _split_bidir_tokens(batch)
forward_input = VatxtInput(
batch,
state_name=state_name,
tokens=forward_tokens,
num_states=num_layers)
reverse_input = VatxtInput(
batch,
state_name=state_name + '_reverse',
tokens=reverse_tokens,
num_states=num_layers)
return forward_input, reverse_input
else:
# Unidirectional LM or classifier
fname, = filenames
batch = _read_and_batch(
data_dir,
fname,
state_name,
state_size,
num_layers,
unroll_steps,
batch_size,
bidir_input=False)
return VatxtInput(
batch, state_name=state_name, num_states=num_layers, eos_id=eos_id)
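# Editor's note: the helper below is not part of the original file; it is a
# usage sketch only. The directory and size arguments are placeholder values
# matching flag defaults used elsewhere in this package.
def _classifier_inputs_example():
  """Builds a unidirectional classifier input pipeline."""
  return inputs(data_dir='/tmp/IMDB', phase='train', bidir=False,
                pretrain=False, state_name='lstm', state_size=512,
                num_layers=1, batch_size=64, unroll_steps=100)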
| 11,740 | 33.230321 | 80 | py |
models | models-master/research/adversarial_text/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/adversarial_text/graphs.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Virtual adversarial text models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
# Dependency imports
import tensorflow as tf
import adversarial_losses as adv_lib
import inputs as inputs_lib
import layers as layers_lib
flags = tf.app.flags
FLAGS = flags.FLAGS
# Flags governing adversarial training are defined in adversarial_losses.py.
# Classifier
flags.DEFINE_integer('num_classes', 2, 'Number of classes for classification')
# Data path
flags.DEFINE_string('data_dir', '/tmp/IMDB',
'Directory path to preprocessed text dataset.')
flags.DEFINE_string('vocab_freq_path', None,
'Path to pre-calculated vocab frequency data. If '
'None, use FLAGS.data_dir/vocab_freq.txt.')
flags.DEFINE_integer('batch_size', 64, 'Size of the batch.')
flags.DEFINE_integer('num_timesteps', 100, 'Number of timesteps for BPTT')
# Model architecture
flags.DEFINE_bool('bidir_lstm', False, 'Whether to build a bidirectional LSTM.')
flags.DEFINE_bool('single_label', True, 'Whether the sequence has a single '
'label, for optimization.')
flags.DEFINE_integer('rnn_num_layers', 1, 'Number of LSTM layers.')
flags.DEFINE_integer('rnn_cell_size', 512,
'Number of hidden units in the LSTM.')
flags.DEFINE_integer('cl_num_layers', 1,
'Number of hidden layers of classification model.')
flags.DEFINE_integer('cl_hidden_size', 30,
'Number of hidden units in classification layer.')
flags.DEFINE_integer('num_candidate_samples', -1,
'Num samples used in the sampled output layer.')
flags.DEFINE_bool('use_seq2seq_autoencoder', False,
'If True, seq2seq auto-encoder is used to pretrain. '
'If False, standard language model is used.')
# Vocabulary and embeddings
flags.DEFINE_integer('embedding_dims', 256, 'Dimensions of embedded vector.')
flags.DEFINE_integer('vocab_size', 86934,
                     'The size of the vocabulary. This value must exactly '
                     'match the vocabulary size used to preprocess the '
                     'dataset. The preprocessing code always places <eos> '
                     'as the last indexed token, so its id is '
                     'vocab_size - 1.')
flags.DEFINE_bool('normalize_embeddings', True,
'Normalize word embeddings by vocab frequency')
# Optimization
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate while fine-tuning.')
flags.DEFINE_float('learning_rate_decay_factor', 1.0,
'Learning rate decay factor')
flags.DEFINE_boolean('sync_replicas', False, 'sync_replica or not')
flags.DEFINE_integer('replicas_to_aggregate', 1,
'The number of replicas to aggregate')
# Regularization
flags.DEFINE_float('max_grad_norm', 1.0,
'Clip the global gradient norm to this value.')
flags.DEFINE_float('keep_prob_emb', 1.0, 'keep probability on embedding layer. '
'0.5 is optimal on IMDB with virtual adversarial training.')
flags.DEFINE_float('keep_prob_lstm_out', 1.0,
'keep probability on lstm output.')
flags.DEFINE_float('keep_prob_cl_hidden', 1.0,
'keep probability on classification hidden layer')
def get_model():
if FLAGS.bidir_lstm:
return VatxtBidirModel()
else:
return VatxtModel()
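# Minimal usage sketch (mirrors the trainer entry points): the returned model
# builds either the classifier or the language-model training graph.
#   model = get_model()
#   train_op, loss, global_step = model.classifier_training()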
class VatxtModel(object):
"""Constructs training and evaluation graphs.
Main methods: `classifier_training()`, `language_model_training()`,
and `eval_graph()`.
Variable reuse is a critical part of the model, both for sharing variables
between the language model and the classifier, and for reusing variables for
the adversarial loss calculation. To ensure correct variable reuse, all
variables are created in Keras-style layers, wherein stateful layers (i.e.
layers with variables) are represented as callable instances of the Layer
  class. Each time the Layer instance is called, it reuses the same variables.
All Layers are constructed in the __init__ method and reused in the various
graph-building functions.
"""
def __init__(self, cl_logits_input_dim=None):
self.global_step = tf.train.get_or_create_global_step()
self.vocab_freqs = _get_vocab_freqs()
# Cache VatxtInput objects
self.cl_inputs = None
self.lm_inputs = None
# Cache intermediate Tensors that are reused
self.tensors = {}
# Construct layers which are reused in constructing the LM and
# Classification graphs. Instantiating them all once here ensures that
# variable reuse works correctly.
self.layers = {}
self.layers['embedding'] = layers_lib.Embedding(
FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
self.vocab_freqs, FLAGS.keep_prob_emb)
self.layers['lstm'] = layers_lib.LSTM(
FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
FLAGS.vocab_size,
FLAGS.num_candidate_samples,
self.vocab_freqs,
name='LM_loss')
cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
[FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
FLAGS.num_classes, FLAGS.keep_prob_cl_hidden)
@property
def pretrained_variables(self):
return (self.layers['embedding'].trainable_weights +
self.layers['lstm'].trainable_weights)
def classifier_training(self):
loss = self.classifier_graph()
train_op = optimize(loss, self.global_step)
return train_op, loss, self.global_step
def language_model_training(self):
loss = self.language_model_graph()
train_op = optimize(loss, self.global_step)
return train_op, loss, self.global_step
def classifier_graph(self):
"""Constructs classifier graph from inputs to classifier loss.
* Caches the VatxtInput object in `self.cl_inputs`
* Caches tensors: `cl_embedded`, `cl_logits`, `cl_loss`
Returns:
loss: scalar float.
"""
inputs = _inputs('train', pretrain=False)
self.cl_inputs = inputs
embedded = self.layers['embedding'](inputs.tokens)
self.tensors['cl_embedded'] = embedded
_, next_state, logits, loss = self.cl_loss_from_embedding(
embedded, return_intermediates=True)
tf.summary.scalar('classification_loss', loss)
self.tensors['cl_logits'] = logits
self.tensors['cl_loss'] = loss
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
else:
labels = inputs.labels
weights = inputs.weights
acc = layers_lib.accuracy(logits, labels, weights)
tf.summary.scalar('accuracy', acc)
adv_loss = (self.adversarial_loss() * tf.constant(
FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
tf.summary.scalar('adversarial_loss', adv_loss)
total_loss = loss + adv_loss
with tf.control_dependencies([inputs.save_state(next_state)]):
total_loss = tf.identity(total_loss)
tf.summary.scalar('total_classification_loss', total_loss)
return total_loss
def language_model_graph(self, compute_loss=True):
"""Constructs LM graph from inputs to LM loss.
* Caches the VatxtInput object in `self.lm_inputs`
* Caches tensors: `lm_embedded`
Args:
compute_loss: bool, whether to compute and return the loss or stop after
the LSTM computation.
Returns:
loss: scalar float.
"""
inputs = _inputs('train', pretrain=True)
self.lm_inputs = inputs
return self._lm_loss(inputs, compute_loss=compute_loss)
def _lm_loss(self,
inputs,
emb_key='lm_embedded',
lstm_layer='lstm',
lm_loss_layer='lm_loss',
loss_name='lm_loss',
compute_loss=True):
embedded = self.layers['embedding'](inputs.tokens)
self.tensors[emb_key] = embedded
lstm_out, next_state = self.layers[lstm_layer](embedded, inputs.state,
inputs.length)
if compute_loss:
loss = self.layers[lm_loss_layer](
[lstm_out, inputs.labels, inputs.weights])
with tf.control_dependencies([inputs.save_state(next_state)]):
loss = tf.identity(loss)
tf.summary.scalar(loss_name, loss)
return loss
def eval_graph(self, dataset='test'):
"""Constructs classifier evaluation graph.
Args:
dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.
Returns:
eval_ops: dict<metric name, tuple(value, update_op)>
var_restore_dict: dict mapping variable restoration names to variables.
Trainable variables will be mapped to their moving average names.
"""
inputs = _inputs(dataset, pretrain=False)
embedded = self.layers['embedding'](inputs.tokens)
_, next_state, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=inputs, return_intermediates=True)
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
else:
labels = inputs.labels
weights = inputs.weights
eval_ops = {
'accuracy':
tf.contrib.metrics.streaming_accuracy(
layers_lib.predictions(logits), labels, weights)
}
with tf.control_dependencies([inputs.save_state(next_state)]):
acc, acc_update = eval_ops['accuracy']
acc_update = tf.identity(acc_update)
eval_ops['accuracy'] = (acc, acc_update)
var_restore_dict = make_restore_average_vars_dict()
return eval_ops, var_restore_dict
def cl_loss_from_embedding(self,
embedded,
inputs=None,
return_intermediates=False):
"""Compute classification loss from embedding.
Args:
embedded: 3-D float Tensor [batch_size, num_timesteps, embedding_dim]
inputs: VatxtInput, defaults to self.cl_inputs.
return_intermediates: bool, whether to return intermediate tensors or only
the final loss.
Returns:
If return_intermediates is True:
lstm_out, next_state, logits, loss
Else:
loss
"""
if inputs is None:
inputs = self.cl_inputs
lstm_out, next_state = self.layers['lstm'](embedded, inputs.state,
inputs.length)
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
lstm_out = tf.expand_dims(tf.gather_nd(lstm_out, indices), 1)
labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
else:
labels = inputs.labels
weights = inputs.weights
logits = self.layers['cl_logits'](lstm_out)
loss = layers_lib.classification_loss(logits, labels, weights)
if return_intermediates:
return lstm_out, next_state, logits, loss
else:
return loss
def adversarial_loss(self):
"""Compute adversarial loss based on FLAGS.adv_training_method."""
def random_perturbation_loss():
return adv_lib.random_perturbation_loss(self.tensors['cl_embedded'],
self.cl_inputs.length,
self.cl_loss_from_embedding)
def adversarial_loss():
return adv_lib.adversarial_loss(self.tensors['cl_embedded'],
self.tensors['cl_loss'],
self.cl_loss_from_embedding)
def virtual_adversarial_loss():
"""Computes virtual adversarial loss.
Uses lm_inputs and constructs the language model graph if it hasn't yet
been constructed.
Also ensures that the LM input states are saved for LSTM state-saving
BPTT.
Returns:
loss: float scalar.
"""
if self.lm_inputs is None:
self.language_model_graph(compute_loss=False)
def logits_from_embedding(embedded, return_next_state=False):
_, next_state, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=self.lm_inputs, return_intermediates=True)
if return_next_state:
return next_state, logits
else:
return logits
next_state, lm_cl_logits = logits_from_embedding(
self.tensors['lm_embedded'], return_next_state=True)
va_loss = adv_lib.virtual_adversarial_loss(
lm_cl_logits, self.tensors['lm_embedded'], self.lm_inputs,
logits_from_embedding)
with tf.control_dependencies([self.lm_inputs.save_state(next_state)]):
va_loss = tf.identity(va_loss)
return va_loss
def combo_loss():
return adversarial_loss() + virtual_adversarial_loss()
adv_training_methods = {
# Random perturbation
'rp': random_perturbation_loss,
# Adversarial training
'at': adversarial_loss,
# Virtual adversarial training
'vat': virtual_adversarial_loss,
# Both at and vat
'atvat': combo_loss,
'': lambda: tf.constant(0.),
None: lambda: tf.constant(0.),
}
with tf.name_scope('adversarial_loss'):
return adv_training_methods[FLAGS.adv_training_method]()
class VatxtBidirModel(VatxtModel):
"""Extension of VatxtModel that supports bidirectional input."""
def __init__(self):
super(VatxtBidirModel,
self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)
# Reverse LSTM and LM loss for bidirectional models
self.layers['lstm_reverse'] = layers_lib.LSTM(
FLAGS.rnn_cell_size,
FLAGS.rnn_num_layers,
FLAGS.keep_prob_lstm_out,
name='LSTM_Reverse')
self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
FLAGS.vocab_size,
FLAGS.num_candidate_samples,
self.vocab_freqs,
name='LM_loss_reverse')
@property
def pretrained_variables(self):
variables = super(VatxtBidirModel, self).pretrained_variables
variables.extend(self.layers['lstm_reverse'].trainable_weights)
return variables
def classifier_graph(self):
"""Constructs classifier graph from inputs to classifier loss.
* Caches the VatxtInput objects in `self.cl_inputs`
* Caches tensors: `cl_embedded` (tuple of forward and reverse), `cl_logits`,
`cl_loss`
Returns:
loss: scalar float.
"""
inputs = _inputs('train', pretrain=False, bidir=True)
self.cl_inputs = inputs
f_inputs, _ = inputs
# Embed both forward and reverse with a shared embedding
embedded = [self.layers['embedding'](inp.tokens) for inp in inputs]
self.tensors['cl_embedded'] = embedded
_, next_states, logits, loss = self.cl_loss_from_embedding(
embedded, return_intermediates=True)
tf.summary.scalar('classification_loss', loss)
self.tensors['cl_logits'] = logits
self.tensors['cl_loss'] = loss
acc = layers_lib.accuracy(logits, f_inputs.labels, f_inputs.weights)
tf.summary.scalar('accuracy', acc)
adv_loss = (self.adversarial_loss() * tf.constant(
FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
tf.summary.scalar('adversarial_loss', adv_loss)
total_loss = loss + adv_loss
saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)]
with tf.control_dependencies(saves):
total_loss = tf.identity(total_loss)
tf.summary.scalar('total_classification_loss', total_loss)
return total_loss
def language_model_graph(self, compute_loss=True):
"""Constructs forward and reverse LM graphs from inputs to LM losses.
* Caches the VatxtInput objects in `self.lm_inputs`
* Caches tensors: `lm_embedded`, `lm_embedded_reverse`
Args:
compute_loss: bool, whether to compute and return the loss or stop after
the LSTM computation.
Returns:
loss: scalar float, sum of forward and reverse losses.
"""
inputs = _inputs('train', pretrain=True, bidir=True)
self.lm_inputs = inputs
f_inputs, r_inputs = inputs
f_loss = self._lm_loss(f_inputs, compute_loss=compute_loss)
r_loss = self._lm_loss(
r_inputs,
emb_key='lm_embedded_reverse',
lstm_layer='lstm_reverse',
lm_loss_layer='lm_loss_reverse',
loss_name='lm_loss_reverse',
compute_loss=compute_loss)
if compute_loss:
return f_loss + r_loss
def eval_graph(self, dataset='test'):
"""Constructs classifier evaluation graph.
Args:
dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.
Returns:
eval_ops: dict<metric name, tuple(value, update_op)>
var_restore_dict: dict mapping variable restoration names to variables.
Trainable variables will be mapped to their moving average names.
"""
inputs = _inputs(dataset, pretrain=False, bidir=True)
embedded = [self.layers['embedding'](inp.tokens) for inp in inputs]
_, next_states, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=inputs, return_intermediates=True)
f_inputs, _ = inputs
eval_ops = {
'accuracy':
tf.contrib.metrics.streaming_accuracy(
layers_lib.predictions(logits), f_inputs.labels,
f_inputs.weights)
}
# Save states on accuracy update
saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)]
with tf.control_dependencies(saves):
acc, acc_update = eval_ops['accuracy']
acc_update = tf.identity(acc_update)
eval_ops['accuracy'] = (acc, acc_update)
var_restore_dict = make_restore_average_vars_dict()
return eval_ops, var_restore_dict
def cl_loss_from_embedding(self,
embedded,
inputs=None,
return_intermediates=False):
"""Compute classification loss from embedding.
Args:
embedded: Length 2 tuple of 3-D float Tensor
[batch_size, num_timesteps, embedding_dim].
inputs: Length 2 tuple of VatxtInput, defaults to self.cl_inputs.
return_intermediates: bool, whether to return intermediate tensors or only
the final loss.
Returns:
If return_intermediates is True:
lstm_out, next_states, logits, loss
Else:
loss
"""
if inputs is None:
inputs = self.cl_inputs
out = []
for (layer_name, emb, inp) in zip(['lstm', 'lstm_reverse'], embedded,
inputs):
out.append(self.layers[layer_name](emb, inp.state, inp.length))
lstm_outs, next_states = zip(*out)
# Concatenate output of forward and reverse LSTMs
lstm_out = tf.concat(lstm_outs, 1)
logits = self.layers['cl_logits'](lstm_out)
f_inputs, _ = inputs # pylint: disable=unpacking-non-sequence
loss = layers_lib.classification_loss(logits, f_inputs.labels,
f_inputs.weights)
if return_intermediates:
return lstm_out, next_states, logits, loss
else:
return loss
def adversarial_loss(self):
"""Compute adversarial loss based on FLAGS.adv_training_method."""
def random_perturbation_loss():
return adv_lib.random_perturbation_loss_bidir(self.tensors['cl_embedded'],
self.cl_inputs[0].length,
self.cl_loss_from_embedding)
def adversarial_loss():
return adv_lib.adversarial_loss_bidir(self.tensors['cl_embedded'],
self.tensors['cl_loss'],
self.cl_loss_from_embedding)
def virtual_adversarial_loss():
"""Computes virtual adversarial loss.
Uses lm_inputs and constructs the language model graph if it hasn't yet
been constructed.
Also ensures that the LM input states are saved for LSTM state-saving
BPTT.
Returns:
loss: float scalar.
"""
if self.lm_inputs is None:
self.language_model_graph(compute_loss=False)
def logits_from_embedding(embedded, return_next_state=False):
_, next_states, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=self.lm_inputs, return_intermediates=True)
if return_next_state:
return next_states, logits
else:
return logits
lm_embedded = (self.tensors['lm_embedded'],
self.tensors['lm_embedded_reverse'])
next_states, lm_cl_logits = logits_from_embedding(
lm_embedded, return_next_state=True)
va_loss = adv_lib.virtual_adversarial_loss_bidir(
lm_cl_logits, lm_embedded, self.lm_inputs, logits_from_embedding)
saves = [
inp.save_state(state)
for (inp, state) in zip(self.lm_inputs, next_states)
]
with tf.control_dependencies(saves):
va_loss = tf.identity(va_loss)
return va_loss
def combo_loss():
return adversarial_loss() + virtual_adversarial_loss()
adv_training_methods = {
# Random perturbation
'rp': random_perturbation_loss,
# Adversarial training
'at': adversarial_loss,
# Virtual adversarial training
'vat': virtual_adversarial_loss,
# Both at and vat
'atvat': combo_loss,
'': lambda: tf.constant(0.),
None: lambda: tf.constant(0.),
}
with tf.name_scope('adversarial_loss'):
return adv_training_methods[FLAGS.adv_training_method]()
def _inputs(dataset='train', pretrain=False, bidir=False):
return inputs_lib.inputs(
data_dir=FLAGS.data_dir,
phase=dataset,
bidir=bidir,
pretrain=pretrain,
use_seq2seq=pretrain and FLAGS.use_seq2seq_autoencoder,
state_size=FLAGS.rnn_cell_size,
num_layers=FLAGS.rnn_num_layers,
batch_size=FLAGS.batch_size,
unroll_steps=FLAGS.num_timesteps,
eos_id=FLAGS.vocab_size - 1)
def _get_vocab_freqs():
"""Returns vocab frequencies.
Returns:
List of integers, length=FLAGS.vocab_size.
Raises:
ValueError: if the length of the frequency file is not equal to the vocab
size, or if the file is not found.
"""
path = FLAGS.vocab_freq_path or os.path.join(FLAGS.data_dir, 'vocab_freq.txt')
if tf.gfile.Exists(path):
with tf.gfile.Open(path) as f:
# Get pre-calculated frequencies of words.
reader = csv.reader(f, quoting=csv.QUOTE_NONE)
freqs = [int(row[-1]) for row in reader]
if len(freqs) != FLAGS.vocab_size:
raise ValueError('Frequency file length %d != vocab size %d' %
(len(freqs), FLAGS.vocab_size))
else:
if FLAGS.vocab_freq_path:
raise ValueError('vocab_freq_path not found')
freqs = [1] * FLAGS.vocab_size
return freqs
def make_restore_average_vars_dict():
"""Returns dict mapping moving average names to variables."""
var_restore_dict = {}
variable_averages = tf.train.ExponentialMovingAverage(0.999)
for v in tf.global_variables():
if v in tf.trainable_variables():
name = variable_averages.average_name(v)
else:
name = v.op.name
var_restore_dict[name] = v
return var_restore_dict
def optimize(loss, global_step):
return layers_lib.optimize(
loss, global_step, FLAGS.max_grad_norm, FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor, FLAGS.sync_replicas,
FLAGS.replicas_to_aggregate, FLAGS.task)
| 24,710 | 34.917151 | 80 | py |
models | models-master/research/adversarial_text/gen_vocab.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates vocabulary and term frequency files for datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import iteritems
from collections import defaultdict
# Dependency imports
import tensorflow as tf
from data import data_utils
from data import document_generators
flags = tf.app.flags
FLAGS = flags.FLAGS
# Flags controlling input are in document_generators.py
flags.DEFINE_string('output_dir', '',
'Path to save vocab.txt and vocab_freq.txt.')
flags.DEFINE_boolean('use_unlabeled', True, 'Whether to use the '
'unlabeled sentiment dataset in the vocabulary.')
flags.DEFINE_boolean('include_validation', False, 'Whether to include the '
'validation set in the vocabulary.')
flags.DEFINE_integer('doc_count_threshold', 1, 'The minimum number of '
'documents a word or bigram should occur in to keep '
'it in the vocabulary.')
MAX_VOCAB_SIZE = 100 * 1000
def fill_vocab_from_doc(doc, vocab_freqs, doc_counts):
"""Fills vocabulary and doc counts with tokens from doc.
Args:
doc: Document to read tokens from.
vocab_freqs: dict<token, frequency count>
doc_counts: dict<token, document count>
Returns:
None
"""
doc_seen = set()
for token in document_generators.tokens(doc):
if doc.add_tokens or token in vocab_freqs:
vocab_freqs[token] += 1
if token not in doc_seen:
doc_counts[token] += 1
doc_seen.add(token)
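# Illustration (hypothetical tokens): for a document yielding ['a', 'a', 'b']
# with add_tokens=True, vocab_freqs['a'] grows by 2 while doc_counts['a']
# grows by 1, since doc_counts tracks per-document occurrence only.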
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
vocab_freqs = defaultdict(int)
doc_counts = defaultdict(int)
# Fill vocabulary frequencies map and document counts map
for doc in document_generators.documents(
dataset='train',
include_unlabeled=FLAGS.use_unlabeled,
include_validation=FLAGS.include_validation):
fill_vocab_from_doc(doc, vocab_freqs, doc_counts)
# Filter out low-occurring terms
vocab_freqs = dict((term, freq) for term, freq in iteritems(vocab_freqs)
if doc_counts[term] > FLAGS.doc_count_threshold)
# Sort by frequency
ordered_vocab_freqs = data_utils.sort_vocab_by_frequency(vocab_freqs)
# Limit vocab size
ordered_vocab_freqs = ordered_vocab_freqs[:MAX_VOCAB_SIZE]
# Add EOS token
ordered_vocab_freqs.append((data_utils.EOS_TOKEN, 1))
# Write
tf.gfile.MakeDirs(FLAGS.output_dir)
data_utils.write_vocab_and_frequency(ordered_vocab_freqs, FLAGS.output_dir)
if __name__ == '__main__':
tf.app.run()
| 3,245 | 30.823529 | 80 | py |
models | models-master/research/adversarial_text/data/data_utils_test.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow as tf
from data import data_utils
data = data_utils
class SequenceWrapperTest(tf.test.TestCase):
def testDefaultTimesteps(self):
seq = data.SequenceWrapper()
t1 = seq.add_timestep()
_ = seq.add_timestep()
self.assertEqual(len(seq), 2)
self.assertEqual(t1.weight, 0.0)
self.assertEqual(t1.label, 0)
self.assertEqual(t1.token, 0)
def testSettersAndGetters(self):
ts = data.SequenceWrapper().add_timestep()
ts.set_token(3)
ts.set_label(4)
ts.set_weight(2.0)
self.assertEqual(ts.token, 3)
self.assertEqual(ts.label, 4)
self.assertEqual(ts.weight, 2.0)
def testTimestepIteration(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(0)
seq.add_timestep().set_token(1)
seq.add_timestep().set_token(2)
for i, ts in enumerate(seq):
self.assertEqual(ts.token, i)
def testFillsSequenceExampleCorrectly(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(1).set_label(2).set_weight(3.0)
seq.add_timestep().set_token(10).set_label(20).set_weight(30.0)
seq_ex = seq.seq
fl = seq_ex.feature_lists.feature_list
fl_token = fl[data.SequenceWrapper.F_TOKEN_ID].feature
fl_label = fl[data.SequenceWrapper.F_LABEL].feature
fl_weight = fl[data.SequenceWrapper.F_WEIGHT].feature
_ = [self.assertEqual(len(f), 2) for f in [fl_token, fl_label, fl_weight]]
self.assertAllEqual([f.int64_list.value[0] for f in fl_token], [1, 10])
self.assertAllEqual([f.int64_list.value[0] for f in fl_label], [2, 20])
self.assertAllEqual([f.float_list.value[0] for f in fl_weight], [3.0, 30.0])
class DataUtilsTest(tf.test.TestCase):
def testSplitByPunct(self):
output = data.split_by_punct(
'hello! world, i\'ve been\nwaiting\tfor\ryou for.a long time')
expected = [
'hello', 'world', 'i', 've', 'been', 'waiting', 'for', 'you', 'for',
'a', 'long', 'time'
]
self.assertListEqual(output, expected)
def _buildDummySequence(self):
seq = data.SequenceWrapper()
for i in range(10):
seq.add_timestep().set_token(i)
return seq
def testBuildLMSeq(self):
seq = self._buildDummySequence()
lm_seq = data.build_lm_sequence(seq)
for i, ts in enumerate(lm_seq):
# For end of sequence, the token and label should be same, and weight
# should be 0.0.
if i == len(lm_seq) - 1:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i)
self.assertEqual(ts.weight, 0.0)
else:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i + 1)
self.assertEqual(ts.weight, 1.0)
def testBuildSAESeq(self):
seq = self._buildDummySequence()
sa_seq = data.build_seq_ae_sequence(seq)
self.assertEqual(len(sa_seq), len(seq) * 2 - 1)
# Tokens should be sequence twice, minus the EOS token at the end
for i, ts in enumerate(sa_seq):
self.assertEqual(ts.token, seq[i % 10].token)
# Weights should be len-1 0.0's and len 1.0's.
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].weight, 0.0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].weight, 1.0)
# Labels should be len-1 0's, and then the sequence
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].label, 0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].label, seq[i - (len(seq) - 1)].token)
def testBuildLabelSeq(self):
seq = self._buildDummySequence()
eos_id = len(seq) - 1
label_seq = data.build_labeled_sequence(seq, True)
for i, ts in enumerate(label_seq[:-1]):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testBuildBidirLabelSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
label_seq = data.build_labeled_sequence(bidir_seq, True)
for (i, ts), j in zip(
enumerate(label_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testReverseSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
for i, ts in enumerate(reversed(reverse_seq[:-1])):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = reverse_seq[-1]
eos_id = len(seq) - 1
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testBidirSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
for (i, ts), j in zip(
enumerate(bidir_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = bidir_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testLabelGain(self):
seq = self._buildDummySequence()
label_seq = data.build_labeled_sequence(seq, True, label_gain=True)
for i, ts in enumerate(label_seq):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 1)
self.assertNear(ts.weight, float(i) / (len(seq) - 1), 1e-3)
if __name__ == '__main__':
tf.test.main()
| 6,937 | 33.517413 | 80 | py |
models | models-master/research/adversarial_text/data/data_utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for generating/preprocessing data for adversarial text models."""
import operator
import os
import random
import re
# Dependency imports
import tensorflow as tf
EOS_TOKEN = '</s>'
# Data filenames
# Sequence Autoencoder
ALL_SA = 'all_sa.tfrecords'
TRAIN_SA = 'train_sa.tfrecords'
TEST_SA = 'test_sa.tfrecords'
# Language Model
ALL_LM = 'all_lm.tfrecords'
TRAIN_LM = 'train_lm.tfrecords'
TEST_LM = 'test_lm.tfrecords'
# Classification
TRAIN_CLASS = 'train_classification.tfrecords'
TEST_CLASS = 'test_classification.tfrecords'
VALID_CLASS = 'validate_classification.tfrecords'
# LM with bidirectional LSTM
TRAIN_REV_LM = 'train_reverse_lm.tfrecords'
TEST_REV_LM = 'test_reverse_lm.tfrecords'
# Classification with bidirectional LSTM
TRAIN_BD_CLASS = 'train_bidir_classification.tfrecords'
TEST_BD_CLASS = 'test_bidir_classification.tfrecords'
VALID_BD_CLASS = 'validate_bidir_classification.tfrecords'
class ShufflingTFRecordWriter(object):
"""Thin wrapper around TFRecordWriter that shuffles records."""
def __init__(self, path):
self._path = path
self._records = []
self._closed = False
def write(self, record):
assert not self._closed
self._records.append(record)
def close(self):
assert not self._closed
random.shuffle(self._records)
with tf.python_io.TFRecordWriter(self._path) as f:
for record in self._records:
f.write(record)
self._closed = True
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.close()
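# Minimal usage sketch (path and record are illustrative): records written
# inside the `with` block are buffered in memory, shuffled once, and flushed
# to disk when the writer closes.
#   with ShufflingTFRecordWriter('/tmp/train_classification.tfrecords') as w:
#     w.write(sequence_example.SerializeToString())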
class Timestep(object):
"""Represents a single timestep in a SequenceWrapper."""
def __init__(self, token, label, weight, multivalent_tokens=False):
"""Constructs Timestep from empty Features."""
self._token = token
self._label = label
self._weight = weight
self._multivalent_tokens = multivalent_tokens
self._fill_with_defaults()
@property
def token(self):
if self._multivalent_tokens:
raise TypeError('Timestep may contain multiple values; use `tokens`')
return self._token.int64_list.value[0]
@property
def tokens(self):
return self._token.int64_list.value
@property
def label(self):
return self._label.int64_list.value[0]
@property
def weight(self):
return self._weight.float_list.value[0]
def set_token(self, token):
if self._multivalent_tokens:
raise TypeError('Timestep may contain multiple values; use `add_token`')
self._token.int64_list.value[0] = token
return self
def add_token(self, token):
self._token.int64_list.value.append(token)
return self
def set_label(self, label):
self._label.int64_list.value[0] = label
return self
def set_weight(self, weight):
self._weight.float_list.value[0] = weight
return self
def copy_from(self, timestep):
self.set_token(timestep.token).set_label(timestep.label).set_weight(
timestep.weight)
return self
def _fill_with_defaults(self):
if not self._multivalent_tokens:
self._token.int64_list.value.append(0)
self._label.int64_list.value.append(0)
self._weight.float_list.value.append(0.0)
class SequenceWrapper(object):
"""Wrapper around tf.SequenceExample."""
F_TOKEN_ID = 'token_id'
F_LABEL = 'label'
F_WEIGHT = 'weight'
def __init__(self, multivalent_tokens=False):
self._seq = tf.train.SequenceExample()
self._flist = self._seq.feature_lists.feature_list
self._timesteps = []
self._multivalent_tokens = multivalent_tokens
@property
def seq(self):
return self._seq
@property
def multivalent_tokens(self):
return self._multivalent_tokens
@property
def _tokens(self):
return self._flist[SequenceWrapper.F_TOKEN_ID].feature
@property
def _labels(self):
return self._flist[SequenceWrapper.F_LABEL].feature
@property
def _weights(self):
return self._flist[SequenceWrapper.F_WEIGHT].feature
def add_timestep(self):
timestep = Timestep(
self._tokens.add(),
self._labels.add(),
self._weights.add(),
multivalent_tokens=self._multivalent_tokens)
self._timesteps.append(timestep)
return timestep
def __iter__(self):
for timestep in self._timesteps:
yield timestep
def __len__(self):
return len(self._timesteps)
def __getitem__(self, idx):
return self._timesteps[idx]
def build_reverse_sequence(seq):
"""Builds a sequence that is the reverse of the input sequence."""
reverse_seq = SequenceWrapper()
# Copy all but last timestep
for timestep in reversed(seq[:-1]):
reverse_seq.add_timestep().copy_from(timestep)
# Copy final timestep
reverse_seq.add_timestep().copy_from(seq[-1])
return reverse_seq
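# Example (hypothetical token ids, final token is <eos>): an input sequence
# with tokens [3, 1, 4, 9] becomes [4, 1, 3, 9]; only the non-<eos> prefix is
# reversed and the final timestep stays in place.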
def build_bidirectional_seq(seq, rev_seq):
bidir_seq = SequenceWrapper(multivalent_tokens=True)
for forward_ts, reverse_ts in zip(seq, rev_seq):
bidir_seq.add_timestep().add_token(forward_ts.token).add_token(
reverse_ts.token)
return bidir_seq
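# Example (continuing the sketch above): pairing [3, 1, 4, 9] with its reverse
# [4, 1, 3, 9] yields per-timestep token pairs [3,4], [1,1], [4,3], [9,9].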
def build_lm_sequence(seq):
"""Builds language model sequence from input sequence.
Args:
seq: SequenceWrapper.
Returns:
SequenceWrapper with `seq` tokens copied over to output sequence tokens and
labels (offset by 1, i.e. predict next token) with weights set to 1.0,
except for <eos> token.
"""
lm_seq = SequenceWrapper()
for i, timestep in enumerate(seq):
if i == len(seq) - 1:
lm_seq.add_timestep().set_token(timestep.token).set_label(
seq[i].token).set_weight(0.0)
else:
lm_seq.add_timestep().set_token(timestep.token).set_label(
seq[i + 1].token).set_weight(1.0)
return lm_seq
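# Example (hypothetical token ids): input tokens [5, 7, 9] produce
#   tokens:  [5, 7, 9]
#   labels:  [7, 9, 9]
#   weights: [1.0, 1.0, 0.0]  # the final (<eos>) step carries no LM loss.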
def build_seq_ae_sequence(seq):
"""Builds seq_ae sequence from input sequence.
Args:
seq: SequenceWrapper.
Returns:
SequenceWrapper with `seq` inputs copied and concatenated, and with labels
copied in on the right-hand (i.e. decoder) side with weights set to 1.0.
The new sequence will have length `len(seq) * 2 - 1`, as the last timestep
of the encoder section and the first step of the decoder section will
overlap.
"""
seq_ae_seq = SequenceWrapper()
for i in range(len(seq) * 2 - 1):
ts = seq_ae_seq.add_timestep()
if i < len(seq) - 1:
# Encoder
ts.set_token(seq[i].token)
elif i == len(seq) - 1:
# Transition step
ts.set_token(seq[i].token)
ts.set_label(seq[0].token)
ts.set_weight(1.0)
else:
# Decoder
ts.set_token(seq[i % len(seq)].token)
ts.set_label(seq[(i + 1) % len(seq)].token)
ts.set_weight(1.0)
return seq_ae_seq
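# Example (hypothetical token ids): input tokens [5, 7, 9] produce a sequence
# of length 2 * 3 - 1 = 5:
#   tokens:  [5, 7, 9, 5, 7]
#   labels:  [0, 0, 5, 7, 9]
#   weights: [0.0, 0.0, 1.0, 1.0, 1.0]  # loss only on the decoder half.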
def build_labeled_sequence(seq, class_label, label_gain=False):
"""Builds labeled sequence from input sequence.
Args:
seq: SequenceWrapper.
class_label: integer, starting from 0.
label_gain: bool. If True, class_label will be put on every timestep and
weight will increase linearly from 0 to 1.
Returns:
SequenceWrapper with `seq` copied in and `class_label` added as label to
final timestep.
"""
label_seq = SequenceWrapper(multivalent_tokens=seq.multivalent_tokens)
# Copy sequence without labels
seq_len = len(seq)
final_timestep = None
for i, timestep in enumerate(seq):
label_timestep = label_seq.add_timestep()
if seq.multivalent_tokens:
for token in timestep.tokens:
label_timestep.add_token(token)
else:
label_timestep.set_token(timestep.token)
if label_gain:
label_timestep.set_label(int(class_label))
weight = 1.0 if seq_len < 2 else float(i) / (seq_len - 1)
label_timestep.set_weight(weight)
if i == (seq_len - 1):
final_timestep = label_timestep
# Edit final timestep to have class label and weight = 1.
final_timestep.set_label(int(class_label)).set_weight(1.0)
return label_seq
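# Example (hypothetical token ids, class_label=1, label_gain=False): input
# tokens [5, 7, 9] keep their tokens and only the final timestep is labeled:
#   labels:  [0, 0, 1]
#   weights: [0.0, 0.0, 1.0]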
def split_by_punct(segment):
"""Splits str segment by punctuation, filters our empties and spaces."""
return [s for s in re.split(r'\W+', segment) if s and not s.isspace()]
def sort_vocab_by_frequency(vocab_freq_map):
"""Sorts vocab_freq_map by count.
Args:
vocab_freq_map: dict<str term, int count>, vocabulary terms with counts.
Returns:
list<tuple<str term, int count>> sorted by count, descending.
"""
return sorted(
vocab_freq_map.items(), key=operator.itemgetter(1), reverse=True)
def write_vocab_and_frequency(ordered_vocab_freqs, output_dir):
"""Writes ordered_vocab_freqs into vocab.txt and vocab_freq.txt."""
tf.gfile.MakeDirs(output_dir)
with open(os.path.join(output_dir, 'vocab.txt'), 'w', encoding='utf-8') as vocab_f:
with open(os.path.join(output_dir, 'vocab_freq.txt'), 'w', encoding='utf-8') as freq_f:
for word, freq in ordered_vocab_freqs:
vocab_f.write('{}\n'.format(word))
freq_f.write('{}\n'.format(freq))
| 9,493 | 27.510511 | 91 | py |
models | models-master/research/adversarial_text/data/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/adversarial_text/data/document_generators.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input readers and document/token generators for datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import csv
import os
import random
# Dependency imports
import tensorflow as tf
from data import data_utils
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', '', 'Which dataset to generate data for')
# Preprocessing config
flags.DEFINE_boolean('output_unigrams', True, 'Whether to output unigrams.')
flags.DEFINE_boolean('output_bigrams', False, 'Whether to output bigrams.')
flags.DEFINE_boolean('output_char', False, 'Whether to output characters.')
flags.DEFINE_boolean('lowercase', True, 'Whether to lowercase document terms.')
# IMDB
flags.DEFINE_string('imdb_input_dir', '', 'The input directory containing the '
'IMDB sentiment dataset.')
flags.DEFINE_integer('imdb_validation_pos_start_id', 10621, 'File id of the '
'first file in the pos sentiment validation set.')
flags.DEFINE_integer('imdb_validation_neg_start_id', 10625, 'File id of the '
'first file in the neg sentiment validation set.')
# DBpedia
flags.DEFINE_string('dbpedia_input_dir', '',
'Path to DBpedia directory containing train.csv and '
'test.csv.')
# Reuters Corpus (rcv1)
flags.DEFINE_string('rcv1_input_dir', '',
'Path to rcv1 directory containing train.csv, unlab.csv, '
'and test.csv.')
# Rotten Tomatoes
flags.DEFINE_string('rt_input_dir', '',
'The Rotten Tomatoes dataset input directory.')
# The amazon reviews input file to use in either the RT or IMDB datasets.
flags.DEFINE_string('amazon_unlabeled_input_file', '',
'The unlabeled Amazon Reviews dataset input file. If set, '
'the input file is used to augment RT and IMDB vocab.')
Document = namedtuple('Document',
'content is_validation is_test label add_tokens')
def documents(dataset='train',
include_unlabeled=False,
include_validation=False):
"""Generates Documents based on FLAGS.dataset.
Args:
dataset: str, identifies folder within IMDB data directory, test or train.
include_unlabeled: bool, whether to include the unsup directory. Only valid
when dataset=train.
include_validation: bool, whether to include validation data.
Yields:
Document
Raises:
ValueError: if include_unlabeled is true but dataset is not 'train'
"""
if include_unlabeled and dataset != 'train':
raise ValueError('If include_unlabeled=True, must use train dataset')
# Set the random seed so that we have the same validation set when running
# gen_data and gen_vocab.
random.seed(302)
ds = FLAGS.dataset
if ds == 'imdb':
docs_gen = imdb_documents
elif ds == 'dbpedia':
docs_gen = dbpedia_documents
elif ds == 'rcv1':
docs_gen = rcv1_documents
elif ds == 'rt':
docs_gen = rt_documents
else:
raise ValueError('Unrecognized dataset %s' % FLAGS.dataset)
for doc in docs_gen(dataset, include_unlabeled, include_validation):
yield doc
def tokens(doc):
"""Given a Document, produces character or word tokens.
Tokens can be either characters, or word-level tokens (unigrams and/or
bigrams).
Args:
doc: Document to produce tokens from.
Yields:
token
Raises:
ValueError: if all FLAGS.{output_unigrams, output_bigrams, output_char}
are False.
"""
if not (FLAGS.output_unigrams or FLAGS.output_bigrams or FLAGS.output_char):
raise ValueError(
'At least one of {FLAGS.output_unigrams, FLAGS.output_bigrams, '
'FLAGS.output_char} must be true')
content = doc.content.strip()
if FLAGS.lowercase:
content = content.lower()
if FLAGS.output_char:
for char in content:
yield char
else:
tokens_ = data_utils.split_by_punct(content)
for i, token in enumerate(tokens_):
if FLAGS.output_unigrams:
yield token
if FLAGS.output_bigrams:
previous_token = (tokens_[i - 1] if i > 0 else data_utils.EOS_TOKEN)
bigram = '_'.join([previous_token, token])
yield bigram
if (i + 1) == len(tokens_):
bigram = '_'.join([token, data_utils.EOS_TOKEN])
yield bigram
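# Example (with lowercase, output_unigrams and output_bigrams enabled): a
# document whose content is "Good movie" yields the tokens
# 'good', '</s>_good', 'movie', 'good_movie', 'movie_</s>'.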
def imdb_documents(dataset='train',
include_unlabeled=False,
include_validation=False):
"""Generates Documents for IMDB dataset.
Data from http://ai.stanford.edu/~amaas/data/sentiment/
Args:
dataset: str, identifies folder within IMDB data directory, test or train.
include_unlabeled: bool, whether to include the unsup directory. Only valid
when dataset=train.
include_validation: bool, whether to include validation data.
Yields:
Document
Raises:
ValueError: if FLAGS.imdb_input_dir is empty.
"""
if not FLAGS.imdb_input_dir:
raise ValueError('Must provide FLAGS.imdb_input_dir')
tf.logging.info('Generating IMDB documents...')
def check_is_validation(filename, class_label):
if class_label is None:
return False
file_idx = int(filename.split('_')[0])
is_pos_valid = (class_label and
file_idx >= FLAGS.imdb_validation_pos_start_id)
is_neg_valid = (not class_label and
file_idx >= FLAGS.imdb_validation_neg_start_id)
return is_pos_valid or is_neg_valid
dirs = [(dataset + '/pos', True), (dataset + '/neg', False)]
if include_unlabeled:
dirs.append(('train/unsup', None))
for d, class_label in dirs:
for filename in os.listdir(os.path.join(FLAGS.imdb_input_dir, d)):
is_validation = check_is_validation(filename, class_label)
if is_validation and not include_validation:
continue
with open(os.path.join(FLAGS.imdb_input_dir, d, filename), encoding='utf-8') as imdb_f:
content = imdb_f.read()
yield Document(
content=content,
is_validation=is_validation,
is_test=False,
label=class_label,
add_tokens=True)
if FLAGS.amazon_unlabeled_input_file and include_unlabeled:
with open(FLAGS.amazon_unlabeled_input_file, encoding='utf-8') as rt_f:
for content in rt_f:
yield Document(
content=content,
is_validation=False,
is_test=False,
label=None,
add_tokens=False)
def dbpedia_documents(dataset='train',
include_unlabeled=False,
include_validation=False):
"""Generates Documents for DBpedia dataset.
Dataset linked to at https://github.com/zhangxiangxiao/Crepe.
Args:
dataset: str, identifies the csv file within the DBpedia data directory,
test or train.
include_unlabeled: bool, unused.
include_validation: bool, whether to include validation data, which is a
randomly selected 10% of the data.
Yields:
Document
Raises:
ValueError: if FLAGS.dbpedia_input_dir is empty.
"""
del include_unlabeled
if not FLAGS.dbpedia_input_dir:
raise ValueError('Must provide FLAGS.dbpedia_input_dir')
tf.logging.info('Generating DBpedia documents...')
with open(os.path.join(FLAGS.dbpedia_input_dir, dataset + '.csv')) as db_f:
reader = csv.reader(db_f)
for row in reader:
# 10% of the data is randomly held out
is_validation = random.randint(1, 10) == 1
if is_validation and not include_validation:
continue
content = row[1] + ' ' + row[2]
yield Document(
content=content,
is_validation=is_validation,
is_test=False,
label=int(row[0]) - 1, # Labels should start from 0
add_tokens=True)
def rcv1_documents(dataset='train',
include_unlabeled=True,
include_validation=False):
# pylint:disable=line-too-long
"""Generates Documents for Reuters Corpus (rcv1) dataset.
Dataset described at
http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/lyrl2004_rcv1v2_README.htm
Args:
dataset: str, identifies the csv file within the rcv1 data directory.
include_unlabeled: bool, whether to include the unlab file. Only valid
when dataset=train.
include_validation: bool, whether to include validation data, which is a
randomly selected 10% of the data.
Yields:
Document
Raises:
ValueError: if FLAGS.rcv1_input_dir is empty.
"""
# pylint:enable=line-too-long
if not FLAGS.rcv1_input_dir:
raise ValueError('Must provide FLAGS.rcv1_input_dir')
tf.logging.info('Generating rcv1 documents...')
datasets = [dataset]
if include_unlabeled:
if dataset == 'train':
datasets.append('unlab')
for dset in datasets:
with open(os.path.join(FLAGS.rcv1_input_dir, dset + '.csv')) as db_f:
reader = csv.reader(db_f)
for row in reader:
# 10% of the data is randomly held out
is_validation = random.randint(1, 10) == 1
if is_validation and not include_validation:
continue
content = row[1]
yield Document(
content=content,
is_validation=is_validation,
is_test=False,
label=int(row[0]),
add_tokens=True)
def rt_documents(dataset='train',
include_unlabeled=True,
include_validation=False):
# pylint:disable=line-too-long
"""Generates Documents for the Rotten Tomatoes dataset.
Dataset available at http://www.cs.cornell.edu/people/pabo/movie-review-data/
In this dataset, amazon reviews are used for the unlabeled data.
Args:
dataset: str, identifies the data subdirectory.
include_unlabeled: bool, whether to include the unlabeled data. Only valid
when dataset=train.
include_validation: bool, whether to include validation data, which is a
randomly selected 10% of the data.
Yields:
Document
Raises:
ValueError: if FLAGS.rt_input_dir is empty.
"""
# pylint:enable=line-too-long
if not FLAGS.rt_input_dir:
raise ValueError('Must provide FLAGS.rt_input_dir')
tf.logging.info('Generating rt documents...')
data_files = []
input_filenames = os.listdir(FLAGS.rt_input_dir)
for inp_fname in input_filenames:
if inp_fname.endswith('.pos'):
data_files.append((os.path.join(FLAGS.rt_input_dir, inp_fname), True))
elif inp_fname.endswith('.neg'):
data_files.append((os.path.join(FLAGS.rt_input_dir, inp_fname), False))
if include_unlabeled and FLAGS.amazon_unlabeled_input_file:
data_files.append((FLAGS.amazon_unlabeled_input_file, None))
for filename, class_label in data_files:
with open(filename) as rt_f:
for content in rt_f:
if class_label is None:
# Process Amazon Review data for unlabeled dataset
if content.startswith('review/text'):
yield Document(
content=content,
is_validation=False,
is_test=False,
label=None,
add_tokens=False)
else:
# 10% of the data is randomly held out for the validation set and
# another 10% of it is randomly held out for the test set
random_int = random.randint(1, 10)
is_validation = random_int == 1
is_test = random_int == 2
if (is_test and dataset != 'test') or (is_validation and
not include_validation):
continue
yield Document(
content=content,
is_validation=is_validation,
is_test=is_test,
label=class_label,
add_tokens=True)
| 12,467 | 31.46875 | 93 | py |
models | models-master/research/marco/Automated_Marco.py | #!/usr/bin/python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import csv
import os
import argparse
"""
usage:
Processes all .jpg, .png, .bmp and .gif files found in the specified directory and its subdirectories.
--PATH: path to a directory of images (searched recursively, including subdirectories), e.g. Path/To/Directory/
--MODEL_PATH: path to the TensorFlow SavedModel directory
"""
parser = argparse.ArgumentParser(description='Crystal Detection Program')
parser.add_argument('--PATH', type=str, help='path to image directory. Recursively finds all image files in directory and sub directories') # path to image directory or containing sub directories.
parser.add_argument('--MODEL_PATH', type=str, default='./savedmodel', help='the file path to the tensorflow model')
args = vars(parser.parse_args())
PATH = args['PATH']
model_path = args['MODEL_PATH']
crystal_images = [os.path.join(dp, f) for dp, dn, filenames in os.walk(PATH) for f in filenames if os.path.splitext(f)[1] in ['.jpg', '.png', '.bmp', '.gif']]
size = len(crystal_images)
def load_images(file_list):
    for i in file_list:
        # Open in binary mode and close each file handle as soon as it is read.
        with open(i, 'rb') as image_file:
            yield {"image_bytes": [image_file.read()]}, i
iterator = load_images(crystal_images)
with open(PATH +'results.csv', 'w') as csvfile:
Writer = csv.writer(csvfile, delimiter=' ',quotechar=' ', quoting=csv.QUOTE_MINIMAL)
predicter= tf.contrib.predictor.from_saved_model(model_path)
dic = {}
k = 0
for _ in range(size):
data,name = next(iterator)
results = predicter(data)
vals =results['scores'][0]
classes = results['classes'][0]
dictionary = dict(zip(classes,vals))
print('Image path: '+ name+' Crystal: '+str(dictionary[b'Crystals'])+' Other: '+ str(dictionary[b'Other'])+' Precipitate: '+ str(dictionary[b'Precipitate'])+' Clear: '+ str(dictionary[b'Clear']))
Writer.writerow(['Image path: '+ name,'Crystal: '+str(dictionary[b'Crystals']),'Other: '+ str(dictionary[b'Other']),'Precipitate: '+ str(dictionary[b'Precipitate']),'Clear: '+ str(dictionary[b'Clear'])])
| 2,831 | 37.794521 | 219 | py |
models | models-master/research/marco/jpeg2json.py | #!/usr/bin/python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""jpeg2json.py: Converts a JPEG image into a json request to CloudML.
Usage:
python jpeg2json.py 002s_C6_ImagerDefaults_9.jpg > request.json
See:
https://cloud.google.com/ml-engine/docs/concepts/prediction-overview#online_prediction_input_data
"""
import base64
import sys
def to_json(data):
  # base64 yields bytes on Python 3; decode so the JSON payload is a string.
  return '{"image_bytes":{"b64": "%s"}}' % base64.b64encode(data).decode('ascii')
if __name__ == '__main__':
  # Read the image in binary mode so the bytes survive on both Python 2 and 3.
  file = open(sys.argv[1], 'rb') if len(sys.argv) > 1 else getattr(
      sys.stdin, 'buffer', sys.stdin)
  print(to_json(file.read()))
| 1,193 | 32.166667 | 97 | py |
models | models-master/research/audioset/vggish/vggish_postprocess.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Post-process embeddings from VGGish."""
import numpy as np
import vggish_params
class Postprocessor(object):
"""Post-processes VGGish embeddings.
The initial release of AudioSet included 128-D VGGish embeddings for each
segment of AudioSet. These released embeddings were produced by applying
a PCA transformation (technically, a whitening transform is included as well)
and 8-bit quantization to the raw embedding output from VGGish, in order to
stay compatible with the YouTube-8M project which provides visual embeddings
in the same format for a large set of YouTube videos. This class implements
the same PCA (with whitening) and quantization transformations.
"""
def __init__(self, pca_params_npz_path):
"""Constructs a postprocessor.
Args:
pca_params_npz_path: Path to a NumPy-format .npz file that
contains the PCA parameters used in postprocessing.
"""
params = np.load(pca_params_npz_path)
self._pca_matrix = params[vggish_params.PCA_EIGEN_VECTORS_NAME]
# Load means into a column vector for easier broadcasting later.
self._pca_means = params[vggish_params.PCA_MEANS_NAME].reshape(-1, 1)
assert self._pca_matrix.shape == (
vggish_params.EMBEDDING_SIZE, vggish_params.EMBEDDING_SIZE), (
'Bad PCA matrix shape: %r' % (self._pca_matrix.shape,))
assert self._pca_means.shape == (vggish_params.EMBEDDING_SIZE, 1), (
'Bad PCA means shape: %r' % (self._pca_means.shape,))
def postprocess(self, embeddings_batch):
"""Applies postprocessing to a batch of embeddings.
Args:
embeddings_batch: An nparray of shape [batch_size, embedding_size]
containing output from the embedding layer of VGGish.
Returns:
An nparray of the same shape as the input but of type uint8,
containing the PCA-transformed and quantized version of the input.
"""
assert len(embeddings_batch.shape) == 2, (
'Expected 2-d batch, got %r' % (embeddings_batch.shape,))
assert embeddings_batch.shape[1] == vggish_params.EMBEDDING_SIZE, (
'Bad batch shape: %r' % (embeddings_batch.shape,))
# Apply PCA.
# - Embeddings come in as [batch_size, embedding_size].
# - Transpose to [embedding_size, batch_size].
# - Subtract pca_means column vector from each column.
# - Premultiply by PCA matrix of shape [output_dims, input_dims]
# where both are are equal to embedding_size in our case.
# - Transpose result back to [batch_size, embedding_size].
pca_applied = np.dot(self._pca_matrix,
(embeddings_batch.T - self._pca_means)).T
# Quantize by:
# - clipping to [min, max] range
clipped_embeddings = np.clip(
pca_applied, vggish_params.QUANTIZE_MIN_VAL,
vggish_params.QUANTIZE_MAX_VAL)
# - convert to 8-bit in range [0.0, 255.0]
quantized_embeddings = (
(clipped_embeddings - vggish_params.QUANTIZE_MIN_VAL) *
(255.0 /
(vggish_params.QUANTIZE_MAX_VAL - vggish_params.QUANTIZE_MIN_VAL)))
# - cast 8-bit float to uint8
quantized_embeddings = quantized_embeddings.astype(np.uint8)
return quantized_embeddings
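# --- Illustrative sketch, not part of the original class above ---
# The same whiten-then-quantize math on synthetic data, assuming an identity
# PCA matrix and zero means purely for illustration; the real parameters come
# from the released vggish_pca_params.npz file loaded by Postprocessor.
def _demo_postprocess_math():
  rng = np.random.RandomState(0)
  batch = rng.randn(4, vggish_params.EMBEDDING_SIZE).astype(np.float32)
  pca_matrix = np.eye(vggish_params.EMBEDDING_SIZE)        # Assumed identity.
  pca_means = np.zeros((vggish_params.EMBEDDING_SIZE, 1))  # Assumed zero.
  pca_applied = np.dot(pca_matrix, (batch.T - pca_means)).T
  clipped = np.clip(pca_applied, vggish_params.QUANTIZE_MIN_VAL,
                    vggish_params.QUANTIZE_MAX_VAL)
  scale = 255.0 / (
      vggish_params.QUANTIZE_MAX_VAL - vggish_params.QUANTIZE_MIN_VAL)
  quantized = ((clipped - vggish_params.QUANTIZE_MIN_VAL) * scale).astype(
      np.uint8)
  return quantized  # Shape (4, 128), dtype uint8.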
| 3,868 | 41.054348 | 80 | py |
models | models-master/research/audioset/vggish/vggish_params.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global parameters for the VGGish model.
See vggish_slim.py for more information.
"""
# Architectural constants.
NUM_FRAMES = 96 # Frames in input mel-spectrogram patch.
NUM_BANDS = 64 # Frequency bands in input mel-spectrogram patch.
EMBEDDING_SIZE = 128 # Size of embedding layer.
# Hyperparameters used in feature and example generation.
SAMPLE_RATE = 16000
STFT_WINDOW_LENGTH_SECONDS = 0.025
STFT_HOP_LENGTH_SECONDS = 0.010
NUM_MEL_BINS = NUM_BANDS
MEL_MIN_HZ = 125
MEL_MAX_HZ = 7500
LOG_OFFSET = 0.01 # Offset used for stabilized log of input mel-spectrogram.
EXAMPLE_WINDOW_SECONDS = 0.96 # Each example contains 96 10ms frames
EXAMPLE_HOP_SECONDS = 0.96 # with zero overlap.
# Parameters used for embedding postprocessing.
PCA_EIGEN_VECTORS_NAME = 'pca_eigen_vectors'
PCA_MEANS_NAME = 'pca_means'
QUANTIZE_MIN_VAL = -2.0
QUANTIZE_MAX_VAL = +2.0
# Hyperparameters used in training.
INIT_STDDEV = 0.01 # Standard deviation used to initialize weights.
LEARNING_RATE = 1e-4 # Learning rate for the Adam optimizer.
ADAM_EPSILON = 1e-8 # Epsilon for the Adam optimizer.
# Names of ops, tensors, and features.
INPUT_OP_NAME = 'vggish/input_features'
INPUT_TENSOR_NAME = INPUT_OP_NAME + ':0'
OUTPUT_OP_NAME = 'vggish/embedding'
OUTPUT_TENSOR_NAME = OUTPUT_OP_NAME + ':0'
AUDIO_EMBEDDING_FEATURE_NAME = 'audio_embedding'
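# --- Illustrative sanity check, not part of the original file ---
# How the architectural constants relate to the feature hyperparameters:
# a 0.96 s example window stepped at the 10 ms STFT hop yields the 96 frames
# that NUM_FRAMES expects, with NUM_BANDS mel bins per frame.
def _check_derived_sizes():
  frames_per_example = int(
      round(EXAMPLE_WINDOW_SECONDS / STFT_HOP_LENGTH_SECONDS))
  assert frames_per_example == NUM_FRAMES, frames_per_example
  assert NUM_MEL_BINS == NUM_BANDS
  return frames_per_example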
| 2,028 | 36.574074 | 80 | py |
models | models-master/research/audioset/vggish/vggish_slim.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the 'VGGish' model used to generate AudioSet embedding features.
The public AudioSet release (https://research.google.com/audioset/download.html)
includes 128-D features extracted from the embedding layer of a VGG-like model
that was trained on a large Google-internal YouTube dataset. Here we provide
a TF-Slim definition of the same model, without any dependencies on libraries
internal to Google. We call it 'VGGish'.
Note that we only define the model up to the embedding layer, which is the
penultimate layer before the final classifier layer. We also provide various
hyperparameter values (in vggish_params.py) that were used to train this model
internally.
For comparison, here is TF-Slim's VGG definition:
https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py
"""
import tensorflow.compat.v1 as tf
import tf_slim as slim
import vggish_params as params
def define_vggish_slim(features_tensor=None, training=False):
"""Defines the VGGish TensorFlow model.
All ops are created in the current default graph, under the scope 'vggish/'.
The input is either a tensor passed in via the optional 'features_tensor'
argument or a placeholder created below named 'vggish/input_features'. The
input is expected to have dtype float32 and shape [batch_size, num_frames,
num_bands] where batch_size is variable and num_frames and num_bands are
constants, and [num_frames, num_bands] represents a log-mel-scale spectrogram
patch covering num_bands frequency bands and num_frames time frames (where
each frame step is usually 10ms). This is produced by computing the stabilized
log(mel-spectrogram + params.LOG_OFFSET). The output is a tensor named
'vggish/embedding' which produces the pre-activation values of a 128-D
embedding layer, which is usually the penultimate layer when used as part of a
full model with a final classifier layer.
Args:
features_tensor: If not None, the tensor containing the input features.
If None, a placeholder input is created.
training: If true, all parameters are marked trainable.
Returns:
    The op 'vggish/embedding'.
"""
# Defaults:
# - All weights are initialized to N(0, INIT_STDDEV).
# - All biases are initialized to 0.
# - All activations are ReLU.
# - All convolutions are 3x3 with stride 1 and SAME padding.
# - All max-pools are 2x2 with stride 2 and SAME padding.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(
stddev=params.INIT_STDDEV),
biases_initializer=tf.zeros_initializer(),
activation_fn=tf.nn.relu,
trainable=training), \
slim.arg_scope([slim.conv2d],
kernel_size=[3, 3], stride=1, padding='SAME'), \
slim.arg_scope([slim.max_pool2d],
kernel_size=[2, 2], stride=2, padding='SAME'), \
tf.variable_scope('vggish'):
# Input: a batch of 2-D log-mel-spectrogram patches.
if features_tensor is None:
features_tensor = tf.placeholder(
tf.float32, shape=(None, params.NUM_FRAMES, params.NUM_BANDS),
name='input_features')
# Reshape to 4-D so that we can convolve a batch with conv2d().
net = tf.reshape(features_tensor,
[-1, params.NUM_FRAMES, params.NUM_BANDS, 1])
# The VGG stack of alternating convolutions and max-pools.
net = slim.conv2d(net, 64, scope='conv1')
net = slim.max_pool2d(net, scope='pool1')
net = slim.conv2d(net, 128, scope='conv2')
net = slim.max_pool2d(net, scope='pool2')
net = slim.repeat(net, 2, slim.conv2d, 256, scope='conv3')
net = slim.max_pool2d(net, scope='pool3')
net = slim.repeat(net, 2, slim.conv2d, 512, scope='conv4')
net = slim.max_pool2d(net, scope='pool4')
# Flatten before entering fully-connected layers
net = slim.flatten(net)
net = slim.repeat(net, 2, slim.fully_connected, 4096, scope='fc1')
# The embedding layer.
net = slim.fully_connected(net, params.EMBEDDING_SIZE, scope='fc2',
activation_fn=None)
return tf.identity(net, name='embedding')
def load_vggish_slim_checkpoint(session, checkpoint_path):
"""Loads a pre-trained VGGish-compatible checkpoint.
This function can be used as an initialization function (referred to as
init_fn in TensorFlow documentation) which is called in a Session after
  initializing all variables. When used as an init_fn, this will load
a pre-trained checkpoint that is compatible with the VGGish model
definition. Only variables defined by VGGish will be loaded.
Args:
session: an active TensorFlow session.
checkpoint_path: path to a file containing a checkpoint that is
compatible with the VGGish model definition.
"""
# Get the list of names of all VGGish variables that exist in
# the checkpoint (i.e., all inference-mode VGGish variables).
with tf.Graph().as_default():
define_vggish_slim(training=False)
vggish_var_names = [v.name for v in tf.global_variables()]
# Get the list of all currently existing variables that match
# the list of variable names we just computed.
vggish_vars = [v for v in tf.global_variables() if v.name in vggish_var_names]
# Use a Saver to restore just the variables selected above.
saver = tf.train.Saver(vggish_vars, name='vggish_load_pretrained',
write_version=1)
saver.restore(session, checkpoint_path)
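# --- Illustrative usage sketch, not part of the original file ---
# A minimal way to wire the two functions above together, assuming a
# 'vggish_model.ckpt' checkpoint in the working directory; see
# vggish_inference_demo.py and vggish_train_demo.py for fuller examples.
def _demo_embeddings(checkpoint_path='vggish_model.ckpt'):
  import numpy as np
  with tf.Graph().as_default(), tf.Session() as sess:
    embeddings = define_vggish_slim(training=False)
    load_vggish_slim_checkpoint(sess, checkpoint_path)
    silence = np.zeros((1, params.NUM_FRAMES, params.NUM_BANDS), np.float32)
    return sess.run(embeddings,
                    feed_dict={params.INPUT_TENSOR_NAME: silence})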
| 6,237 | 44.532847 | 80 | py |
models | models-master/research/audioset/vggish/vggish_smoke_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A smoke test for VGGish.
This is a simple smoke test of a local install of VGGish and its associated
downloaded files. We create a synthetic sound, extract log mel spectrogram
features, run them through VGGish, post-process the embedding outputs, and
check some simple statistics of the results, allowing for variations that
might occur due to platform/version differences in the libraries we use.
Usage:
- Download the VGGish checkpoint and PCA parameters into the same directory as
the VGGish source code. If you keep them elsewhere, update the checkpoint_path
and pca_params_path variables below.
- Run:
$ python vggish_smoke_test.py
"""
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import vggish_input
import vggish_params
import vggish_postprocess
import vggish_slim
print('\nTesting your install of VGGish\n')
# Paths to downloaded VGGish files.
checkpoint_path = 'vggish_model.ckpt'
pca_params_path = 'vggish_pca_params.npz'
# Relative tolerance of errors in mean and standard deviation of embeddings.
rel_error = 0.1 # Up to 10%
# Generate a 1 kHz sine wave at 44.1 kHz (we use a high sampling rate
# to test resampling to 16 kHz during feature extraction).
num_secs = 3
freq = 1000
sr = 44100
t = np.arange(0, num_secs, 1 / sr)
x = np.sin(2 * np.pi * freq * t)
# Produce a batch of log mel spectrogram examples.
input_batch = vggish_input.waveform_to_examples(x, sr)
print('Log Mel Spectrogram example: ', input_batch[0])
np.testing.assert_equal(
input_batch.shape,
[num_secs, vggish_params.NUM_FRAMES, vggish_params.NUM_BANDS])
# Define VGGish, load the checkpoint, and run the batch through the model to
# produce embeddings.
with tf.Graph().as_default(), tf.Session() as sess:
vggish_slim.define_vggish_slim()
vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)
features_tensor = sess.graph.get_tensor_by_name(
vggish_params.INPUT_TENSOR_NAME)
embedding_tensor = sess.graph.get_tensor_by_name(
vggish_params.OUTPUT_TENSOR_NAME)
[embedding_batch] = sess.run([embedding_tensor],
feed_dict={features_tensor: input_batch})
print('VGGish embedding: ', embedding_batch[0])
expected_embedding_mean = -0.0333
expected_embedding_std = 0.380
np.testing.assert_allclose(
[np.mean(embedding_batch), np.std(embedding_batch)],
[expected_embedding_mean, expected_embedding_std],
rtol=rel_error)
# Postprocess the results to produce whitened quantized embeddings.
pproc = vggish_postprocess.Postprocessor(pca_params_path)
postprocessed_batch = pproc.postprocess(embedding_batch)
print('Postprocessed VGGish embedding: ', postprocessed_batch[0])
expected_postprocessed_mean = 122.0
expected_postprocessed_std = 93.5
np.testing.assert_allclose(
[np.mean(postprocessed_batch), np.std(postprocessed_batch)],
[expected_postprocessed_mean, expected_postprocessed_std],
rtol=rel_error)
print('\nLooks Good To Me!\n')
| 3,674 | 36.5 | 80 | py |
models | models-master/research/audioset/vggish/vggish_input.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute input examples for VGGish from audio waveform."""
import numpy as np
import resampy
import mel_features
import vggish_params
try:
import soundfile as sf
def wav_read(wav_file):
wav_data, sr = sf.read(wav_file, dtype='int16')
return wav_data, sr
except ImportError:
def wav_read(wav_file):
raise NotImplementedError('WAV file reading requires soundfile package.')
def waveform_to_examples(data, sample_rate):
"""Converts audio waveform into an array of examples for VGGish.
Args:
data: np.array of either one dimension (mono) or two dimensions
(multi-channel, with the outer dimension representing channels).
Each sample is generally expected to lie in the range [-1.0, +1.0],
although this is not required.
sample_rate: Sample rate of data.
Returns:
3-D np.array of shape [num_examples, num_frames, num_bands] which represents
a sequence of examples, each of which contains a patch of log mel
spectrogram, covering num_frames frames of audio and num_bands mel frequency
bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
"""
# Convert to mono.
if len(data.shape) > 1:
data = np.mean(data, axis=1)
# Resample to the rate assumed by VGGish.
if sample_rate != vggish_params.SAMPLE_RATE:
data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)
# Compute log mel spectrogram features.
log_mel = mel_features.log_mel_spectrogram(
data,
audio_sample_rate=vggish_params.SAMPLE_RATE,
log_offset=vggish_params.LOG_OFFSET,
window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,
num_mel_bins=vggish_params.NUM_MEL_BINS,
lower_edge_hertz=vggish_params.MEL_MIN_HZ,
upper_edge_hertz=vggish_params.MEL_MAX_HZ)
# Frame features into examples.
features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS
example_window_length = int(round(
vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))
example_hop_length = int(round(
vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))
log_mel_examples = mel_features.frame(
log_mel,
window_length=example_window_length,
hop_length=example_hop_length)
return log_mel_examples
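# --- Illustrative example, not part of the original file ---
# Quick shape check: one second of silence already at the 16 kHz rate assumed
# by VGGish yields a single [96, 64] log mel example.
def _demo_waveform_to_examples():
  silence = np.zeros(vggish_params.SAMPLE_RATE, dtype=np.float32)
  examples = waveform_to_examples(silence, vggish_params.SAMPLE_RATE)
  return examples  # Shape (1, 96, 64).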
def wavfile_to_examples(wav_file):
"""Convenience wrapper around waveform_to_examples() for a common WAV format.
Args:
wav_file: String path to a file, or a file-like object. The file
is assumed to contain WAV audio data with signed 16-bit PCM samples.
Returns:
See waveform_to_examples.
"""
wav_data, sr = wav_read(wav_file)
assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
samples = wav_data / 32768.0 # Convert to [-1.0, +1.0]
return waveform_to_examples(samples, sr)
| 3,536 | 35.091837 | 80 | py |
models | models-master/research/audioset/vggish/mel_features.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines routines to compute mel spectrogram features from audio waveform."""
import numpy as np
def frame(data, window_length, hop_length):
"""Convert array into a sequence of successive possibly overlapping frames.
An n-dimensional array of shape (num_samples, ...) is converted into an
(n+1)-D array of shape (num_frames, window_length, ...), where each frame
starts hop_length points after the preceding one.
This is accomplished using stride_tricks, so the original data is not
copied. However, there is no zero-padding, so any incomplete frames at the
end are not included.
Args:
data: np.array of dimension N >= 1.
window_length: Number of samples in each frame.
hop_length: Advance (in samples) between each window.
Returns:
(N+1)-D np.array with as many rows as there are complete frames that can be
extracted.
"""
num_samples = data.shape[0]
num_frames = 1 + int(np.floor((num_samples - window_length) / hop_length))
shape = (num_frames, window_length) + data.shape[1:]
strides = (data.strides[0] * hop_length,) + data.strides
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
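# --- Illustrative example, not part of the original file ---
# Framing a 10-sample ramp with a window of 4 and a hop of 2 yields four
# overlapping frames; an incomplete final frame would be dropped.
def _demo_frame():
  frames = frame(np.arange(10), window_length=4, hop_length=2)
  # frames == [[0 1 2 3]
  #            [2 3 4 5]
  #            [4 5 6 7]
  #            [6 7 8 9]]
  return frames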
def periodic_hann(window_length):
"""Calculate a "periodic" Hann window.
The classic Hann window is defined as a raised cosine that starts and
ends on zero, and where every value appears twice, except the middle
point for an odd-length window. Matlab calls this a "symmetric" window
and np.hanning() returns it. However, for Fourier analysis, this
actually represents just over one cycle of a period N-1 cosine, and
thus is not compactly expressed on a length-N Fourier basis. Instead,
it's better to use a raised cosine that ends just before the final
zero value - i.e. a complete cycle of a period-N cosine. Matlab
calls this a "periodic" window. This routine calculates it.
Args:
window_length: The number of points in the returned window.
Returns:
A 1D np.array containing the periodic hann window.
"""
return 0.5 - (0.5 * np.cos(2 * np.pi / window_length *
np.arange(window_length)))
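# --- Illustrative example, not part of the original file ---
# The periodic window equals the symmetric np.hanning window of length N + 1
# with its final zero sample dropped.
def _demo_periodic_hann(n=8):
  assert np.allclose(periodic_hann(n), np.hanning(n + 1)[:-1])
  return periodic_hann(n)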
def stft_magnitude(signal, fft_length,
hop_length=None,
window_length=None):
"""Calculate the short-time Fourier transform magnitude.
Args:
signal: 1D np.array of the input time-domain signal.
fft_length: Size of the FFT to apply.
hop_length: Advance (in samples) between each frame passed to FFT.
window_length: Length of each block of samples to pass to FFT.
Returns:
2D np.array where each row contains the magnitudes of the fft_length/2+1
unique values of the FFT for the corresponding frame of input samples.
"""
frames = frame(signal, window_length, hop_length)
# Apply frame window to each frame. We use a periodic Hann (cosine of period
# window_length) instead of the symmetric Hann of np.hanning (period
# window_length-1).
window = periodic_hann(window_length)
windowed_frames = frames * window
return np.abs(np.fft.rfft(windowed_frames, int(fft_length)))
# Mel spectrum constants and functions.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def hertz_to_mel(frequencies_hertz):
"""Convert frequencies to mel scale using HTK formula.
Args:
frequencies_hertz: Scalar or np.array of frequencies in hertz.
Returns:
Object of same size as frequencies_hertz containing corresponding values
on the mel scale.
"""
return _MEL_HIGH_FREQUENCY_Q * np.log(
1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ))
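# --- Illustrative example, not part of the original file ---
# 0 Hz maps to 0 mel, and the 700 Hz break frequency lands at roughly
# 1127 * ln(2), i.e. about 781 mel.
def _demo_hertz_to_mel():
  return hertz_to_mel(np.array([0.0, 700.0, 3800.0]))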
def spectrogram_to_mel_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
audio_sample_rate=8000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0):
"""Return a matrix that can post-multiply spectrogram rows to make mel.
Returns a np.array matrix A that can be used to post-multiply a matrix S of
spectrogram values (STFT magnitudes) arranged as frames x bins to generate a
"mel spectrogram" M of frames x num_mel_bins. M = S A.
The classic HTK algorithm exploits the complementarity of adjacent mel bands
to multiply each FFT bin by only one mel weight, then add it, with positive
and negative signs, to the two adjacent mel bands to which that bin
contributes. Here, by expressing this operation as a matrix multiply, we go
from num_fft multiplies per frame (plus around 2*num_fft adds) to around
num_fft * num_mel multiplies and adds. However, because these are all presumably
accomplished in a single call to np.dot(), it's not clear which approach is
faster in Python. The matrix multiplication has the attraction of being more
general and flexible, and much easier to read.
Args:
num_mel_bins: How many bands in the resulting mel spectrum. This is
the number of columns in the output matrix.
num_spectrogram_bins: How many bins there are in the source spectrogram
data, which is understood to be fft_size/2 + 1, i.e. the spectrogram
only contains the nonredundant FFT bins.
audio_sample_rate: Samples per second of the audio at the input to the
spectrogram. We need this to figure out the actual frequencies for
each spectrogram bin, which dictates how they are mapped into mel.
lower_edge_hertz: Lower bound on the frequencies to be included in the mel
spectrum. This corresponds to the lower edge of the lowest triangular
band.
upper_edge_hertz: The desired top edge of the highest frequency band.
Returns:
An np.array with shape (num_spectrogram_bins, num_mel_bins).
Raises:
ValueError: if frequency edges are incorrectly ordered or out of range.
"""
nyquist_hertz = audio_sample_rate / 2.
if lower_edge_hertz < 0.0:
raise ValueError("lower_edge_hertz %.1f must be >= 0" % lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError("lower_edge_hertz %.1f >= upper_edge_hertz %.1f" %
(lower_edge_hertz, upper_edge_hertz))
if upper_edge_hertz > nyquist_hertz:
raise ValueError("upper_edge_hertz %.1f is greater than Nyquist %.1f" %
(upper_edge_hertz, nyquist_hertz))
spectrogram_bins_hertz = np.linspace(0.0, nyquist_hertz, num_spectrogram_bins)
spectrogram_bins_mel = hertz_to_mel(spectrogram_bins_hertz)
# The i'th mel band (starting from i=1) has center frequency
# band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge
# band_edges_mel[i+1]. Thus, we need num_mel_bins + 2 values in
# the band_edges_mel arrays.
band_edges_mel = np.linspace(hertz_to_mel(lower_edge_hertz),
hertz_to_mel(upper_edge_hertz), num_mel_bins + 2)
# Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins
# of spectrogram values.
mel_weights_matrix = np.empty((num_spectrogram_bins, num_mel_bins))
for i in range(num_mel_bins):
lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i:i + 3]
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the *mel* domain, not hertz.
lower_slope = ((spectrogram_bins_mel - lower_edge_mel) /
(center_mel - lower_edge_mel))
upper_slope = ((upper_edge_mel - spectrogram_bins_mel) /
(upper_edge_mel - center_mel))
# .. then intersect them with each other and zero.
mel_weights_matrix[:, i] = np.maximum(0.0, np.minimum(lower_slope,
upper_slope))
# HTK excludes the spectrogram DC bin; make sure it always gets a zero
# coefficient.
mel_weights_matrix[0, :] = 0.0
return mel_weights_matrix
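# --- Illustrative example, not part of the original file ---
# With the default arguments the returned matrix has one row per FFT bin and
# one column per mel band, and every mel band collects some spectrogram energy.
def _demo_spectrogram_to_mel_matrix():
  mel_matrix = spectrogram_to_mel_matrix()  # Defaults: 129 FFT bins, 20 bands.
  assert mel_matrix.shape == (129, 20)
  assert (mel_matrix.sum(axis=0) > 0).all()
  return mel_matrix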
def log_mel_spectrogram(data,
audio_sample_rate=8000,
log_offset=0.0,
window_length_secs=0.025,
hop_length_secs=0.010,
**kwargs):
"""Convert waveform to a log magnitude mel-frequency spectrogram.
Args:
data: 1D np.array of waveform data.
audio_sample_rate: The sampling rate of data.
log_offset: Add this to values when taking log to avoid -Infs.
window_length_secs: Duration of each window to analyze.
hop_length_secs: Advance between successive analysis windows.
**kwargs: Additional arguments to pass to spectrogram_to_mel_matrix.
Returns:
2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank
magnitudes for successive frames.
"""
window_length_samples = int(round(audio_sample_rate * window_length_secs))
hop_length_samples = int(round(audio_sample_rate * hop_length_secs))
fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))
spectrogram = stft_magnitude(
data,
fft_length=fft_length,
hop_length=hop_length_samples,
window_length=window_length_samples)
mel_spectrogram = np.dot(spectrogram, spectrogram_to_mel_matrix(
num_spectrogram_bins=spectrogram.shape[1],
audio_sample_rate=audio_sample_rate, **kwargs))
return np.log(mel_spectrogram + log_offset)
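# --- Illustrative example, not part of the original file ---
# End-to-end shape check on a synthetic tone, using a small log offset to keep
# the log finite in bands the tone does not excite.
def _demo_log_mel_spectrogram():
  sr = 8000
  t = np.arange(0, 1.0, 1.0 / sr)
  tone = np.sin(2 * np.pi * 440 * t)
  log_mel = log_mel_spectrogram(tone, audio_sample_rate=sr, log_offset=0.01)
  return log_mel  # Shape (98, 20): 98 frames of 20 mel bands.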
| 9,874 | 43.084821 | 80 | py |
models | models-master/research/audioset/vggish/vggish_inference_demo.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A simple demonstration of running VGGish in inference mode.
This is intended as a toy example that demonstrates how the various building
blocks (feature extraction, model definition and loading, postprocessing) work
together in an inference context.
A WAV file (assumed to contain signed 16-bit PCM samples) is read in, converted
into log mel spectrogram examples, fed into VGGish, the raw embedding output is
whitened and quantized, and the postprocessed embeddings are optionally written
in a SequenceExample to a TFRecord file (using the same format as the embedding
features released in AudioSet).
Usage:
# Run a WAV file through the model and print the embeddings. The model
# checkpoint is loaded from vggish_model.ckpt and the PCA parameters are
# loaded from vggish_pca_params.npz in the current directory.
$ python vggish_inference_demo.py --wav_file /path/to/a/wav/file
# Run a WAV file through the model and also write the embeddings to
# a TFRecord file. The model checkpoint and PCA parameters are explicitly
# passed in as well.
$ python vggish_inference_demo.py --wav_file /path/to/a/wav/file \
--tfrecord_file /path/to/tfrecord/file \
--checkpoint /path/to/model/checkpoint \
--pca_params /path/to/pca/params
# Run a built-in input (a sine wav) through the model and print the
# embeddings. Associated model files are read from the current directory.
$ python vggish_inference_demo.py
"""
from __future__ import print_function
import numpy as np
import six
import soundfile
import tensorflow.compat.v1 as tf
import vggish_input
import vggish_params
import vggish_postprocess
import vggish_slim
flags = tf.app.flags
flags.DEFINE_string(
'wav_file', None,
'Path to a wav file. Should contain signed 16-bit PCM samples. '
'If none is provided, a synthetic sound is used.')
flags.DEFINE_string(
'checkpoint', 'vggish_model.ckpt',
'Path to the VGGish checkpoint file.')
flags.DEFINE_string(
'pca_params', 'vggish_pca_params.npz',
'Path to the VGGish PCA parameters file.')
flags.DEFINE_string(
'tfrecord_file', None,
'Path to a TFRecord file where embeddings will be written.')
FLAGS = flags.FLAGS
def main(_):
# In this simple example, we run the examples from a single audio file through
# the model. If none is provided, we generate a synthetic input.
if FLAGS.wav_file:
wav_file = FLAGS.wav_file
else:
# Write a WAV of a sine wav into an in-memory file object.
num_secs = 5
freq = 1000
sr = 44100
t = np.arange(0, num_secs, 1 / sr)
x = np.sin(2 * np.pi * freq * t)
# Convert to signed 16-bit samples.
samples = np.clip(x * 32768, -32768, 32767).astype(np.int16)
wav_file = six.BytesIO()
soundfile.write(wav_file, samples, sr, format='WAV', subtype='PCM_16')
wav_file.seek(0)
examples_batch = vggish_input.wavfile_to_examples(wav_file)
print(examples_batch)
# Prepare a postprocessor to munge the model embeddings.
pproc = vggish_postprocess.Postprocessor(FLAGS.pca_params)
# If needed, prepare a record writer to store the postprocessed embeddings.
writer = tf.python_io.TFRecordWriter(
FLAGS.tfrecord_file) if FLAGS.tfrecord_file else None
with tf.Graph().as_default(), tf.Session() as sess:
# Define the model in inference mode, load the checkpoint, and
# locate input and output tensors.
vggish_slim.define_vggish_slim(training=False)
vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint)
features_tensor = sess.graph.get_tensor_by_name(
vggish_params.INPUT_TENSOR_NAME)
embedding_tensor = sess.graph.get_tensor_by_name(
vggish_params.OUTPUT_TENSOR_NAME)
# Run inference and postprocessing.
[embedding_batch] = sess.run([embedding_tensor],
feed_dict={features_tensor: examples_batch})
print(embedding_batch)
postprocessed_batch = pproc.postprocess(embedding_batch)
print(postprocessed_batch)
# Write the postprocessed embeddings as a SequenceExample, in a similar
# format as the features released in AudioSet. Each row of the batch of
# embeddings corresponds to roughly a second of audio (96 10ms frames), and
# the rows are written as a sequence of bytes-valued features, where each
# feature value contains the 128 bytes of the whitened quantized embedding.
seq_example = tf.train.SequenceExample(
feature_lists=tf.train.FeatureLists(
feature_list={
vggish_params.AUDIO_EMBEDDING_FEATURE_NAME:
tf.train.FeatureList(
feature=[
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[embedding.tobytes()]))
for embedding in postprocessed_batch
]
)
}
)
)
print(seq_example)
if writer:
writer.write(seq_example.SerializeToString())
if writer:
writer.close()
if __name__ == '__main__':
tf.app.run()
| 5,895 | 37.285714 | 80 | py |
models | models-master/research/audioset/vggish/vggish_train_demo.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A simple demonstration of running VGGish in training mode.
This is intended as a toy example that demonstrates how to use the VGGish model
definition within a larger model that adds more layers on top, and then train
the larger model. If you let VGGish train as well, then this allows you to
fine-tune the VGGish model parameters for your application. If you don't let
VGGish train, then you use VGGish as a feature extractor for the layers above
it.
For this toy task, we are training a classifier to distinguish between three
classes: sine waves, constant signals, and white noise. We generate synthetic
waveforms from each of these classes, convert into shuffled batches of log mel
spectrogram examples with associated labels, and feed the batches into a model
that includes VGGish at the bottom and a couple of additional layers on top. We
also plumb in labels that are associated with the examples, which feed a label
loss used for training.
Usage:
# Run training for 100 steps using a model checkpoint in the default
# location (vggish_model.ckpt in the current directory). Allow VGGish
# to get fine-tuned.
$ python vggish_train_demo.py --num_batches 100
# Same as before but run for fewer steps and don't change VGGish parameters
# and use a checkpoint in a different location
$ python vggish_train_demo.py --num_batches 50 \
--train_vggish=False \
--checkpoint /path/to/model/checkpoint
"""
from __future__ import print_function
from random import shuffle
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
import vggish_input
import vggish_params
import vggish_slim
flags = tf.app.flags
flags.DEFINE_integer(
'num_batches', 30,
'Number of batches of examples to feed into the model. Each batch is of '
'variable size and contains shuffled examples of each class of audio.')
flags.DEFINE_boolean(
'train_vggish', True,
'If True, allow VGGish parameters to change during training, thus '
'fine-tuning VGGish. If False, VGGish parameters are fixed, thus using '
'VGGish as a fixed feature extractor.')
flags.DEFINE_string(
'checkpoint', 'vggish_model.ckpt',
'Path to the VGGish checkpoint file.')
FLAGS = flags.FLAGS
_NUM_CLASSES = 3
def _get_examples_batch():
"""Returns a shuffled batch of examples of all audio classes.
Note that this is just a toy function because this is a simple demo intended
to illustrate how the training code might work.
Returns:
a tuple (features, labels) where features is a NumPy array of shape
[batch_size, num_frames, num_bands] where the batch_size is variable and
each row is a log mel spectrogram patch of shape [num_frames, num_bands]
suitable for feeding VGGish, while labels is a NumPy array of shape
[batch_size, num_classes] where each row is a multi-hot label vector that
provides the labels for corresponding rows in features.
"""
# Make a waveform for each class.
num_seconds = 5
sr = 44100 # Sampling rate.
t = np.arange(0, num_seconds, 1 / sr) # Time axis
# Random sine wave.
freq = np.random.uniform(100, 1000)
sine = np.sin(2 * np.pi * freq * t)
# Random constant signal.
magnitude = np.random.uniform(-1, 1)
  const = magnitude * np.ones_like(t)
# White noise.
noise = np.random.normal(-1, 1, size=t.shape)
# Make examples of each signal and corresponding labels.
# Sine is class index 0, Const class index 1, Noise class index 2.
sine_examples = vggish_input.waveform_to_examples(sine, sr)
sine_labels = np.array([[1, 0, 0]] * sine_examples.shape[0])
const_examples = vggish_input.waveform_to_examples(const, sr)
const_labels = np.array([[0, 1, 0]] * const_examples.shape[0])
noise_examples = vggish_input.waveform_to_examples(noise, sr)
noise_labels = np.array([[0, 0, 1]] * noise_examples.shape[0])
# Shuffle (example, label) pairs across all classes.
all_examples = np.concatenate((sine_examples, const_examples, noise_examples))
all_labels = np.concatenate((sine_labels, const_labels, noise_labels))
labeled_examples = list(zip(all_examples, all_labels))
shuffle(labeled_examples)
# Separate and return the features and labels.
features = [example for (example, _) in labeled_examples]
labels = [label for (_, label) in labeled_examples]
return (features, labels)
def main(_):
with tf.Graph().as_default(), tf.Session() as sess:
# Define VGGish.
embeddings = vggish_slim.define_vggish_slim(training=FLAGS.train_vggish)
# Define a shallow classification model and associated training ops on top
# of VGGish.
with tf.variable_scope('mymodel'):
# Add a fully connected layer with 100 units. Add an activation function
# to the embeddings since they are pre-activation.
num_units = 100
fc = slim.fully_connected(tf.nn.relu(embeddings), num_units)
# Add a classifier layer at the end, consisting of parallel logistic
# classifiers, one per class. This allows for multi-class tasks.
logits = slim.fully_connected(
fc, _NUM_CLASSES, activation_fn=None, scope='logits')
tf.sigmoid(logits, name='prediction')
# Add training ops.
with tf.variable_scope('train'):
global_step = tf.train.create_global_step()
# Labels are assumed to be fed as a batch multi-hot vectors, with
# a 1 in the position of each positive class label, and 0 elsewhere.
labels_input = tf.placeholder(
tf.float32, shape=(None, _NUM_CLASSES), name='labels')
# Cross-entropy label loss.
xent = tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits, labels=labels_input, name='xent')
loss = tf.reduce_mean(xent, name='loss_op')
tf.summary.scalar('loss', loss)
# We use the same optimizer and hyperparameters as used to train VGGish.
optimizer = tf.train.AdamOptimizer(
learning_rate=vggish_params.LEARNING_RATE,
epsilon=vggish_params.ADAM_EPSILON)
train_op = optimizer.minimize(loss, global_step=global_step)
# Initialize all variables in the model, and then load the pre-trained
# VGGish checkpoint.
sess.run(tf.global_variables_initializer())
vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint)
# The training loop.
features_input = sess.graph.get_tensor_by_name(
vggish_params.INPUT_TENSOR_NAME)
for _ in range(FLAGS.num_batches):
(features, labels) = _get_examples_batch()
[num_steps, loss_value, _] = sess.run(
[global_step, loss, train_op],
feed_dict={features_input: features, labels_input: labels})
print('Step %d: loss %g' % (num_steps, loss_value))
if __name__ == '__main__':
tf.app.run()
| 7,476 | 39.416216 | 80 | py |
models | models-master/research/audioset/vggish/vggish_export_tfhub.py | """Exports VGGish as a SavedModel for publication to TF Hub.
The exported SavedModel accepts a 1-d float32 Tensor of arbitrary shape
containing an audio waveform (assumed to be mono 16 kHz samples in the [-1, +1]
range) and returns a 2-d float32 batch of 128-d VGGish embeddings, one per
0.96s example generated from the waveform.
Requires pip-installing tensorflow_hub.
Usage:
vggish_export_tfhub.py <path/to/VGGish/checkpoint> <path/to/tfhub/export>
"""
import sys
sys.path.append('..') # Lets us import yamnet modules from sibling directory.
import numpy as np
import resampy
import tensorflow as tf
assert tf.version.VERSION >= '2.0.0', (
'Need at least TF 2.0, you have TF v{}'.format(tf.version.VERSION))
import tensorflow_hub as tfhub
import vggish_input
import vggish_params
import vggish_slim
from yamnet import features as yamnet_features
from yamnet import params as yamnet_params
def vggish_definer(variables, checkpoint_path):
"""Defines VGGish with variables tracked and initialized from a checkpoint."""
reader = tf.compat.v1.train.NewCheckpointReader(checkpoint_path)
def var_tracker(next_creator, **kwargs):
"""Variable creation hook that assigns initial values from a checkpoint."""
var_name = kwargs['name']
var_value = reader.get_tensor(var_name)
kwargs.update({'initial_value': var_value})
var = next_creator(**kwargs)
variables.append(var)
return var
def waveform_to_features(waveform):
"""Creates VGGish features using the YAMNet feature extractor."""
params = yamnet_params.Params(
sample_rate=vggish_params.SAMPLE_RATE,
stft_window_seconds=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
stft_hop_seconds=vggish_params.STFT_HOP_LENGTH_SECONDS,
mel_bands=vggish_params.NUM_MEL_BINS,
mel_min_hz=vggish_params.MEL_MIN_HZ,
mel_max_hz=vggish_params.MEL_MAX_HZ,
log_offset=vggish_params.LOG_OFFSET,
patch_window_seconds=vggish_params.EXAMPLE_WINDOW_SECONDS,
patch_hop_seconds=vggish_params.EXAMPLE_HOP_SECONDS)
log_mel_spectrogram, features = yamnet_features.waveform_to_log_mel_spectrogram_patches(
waveform, params)
return features
def define_vggish(waveform):
with tf.variable_creator_scope(var_tracker):
features = waveform_to_features(waveform)
return vggish_slim.define_vggish_slim(features, training=False)
return define_vggish
class VGGish(tf.Module):
"""A TF2 Module wrapper around VGGish."""
def __init__(self, checkpoint_path):
super().__init__()
self._variables = []
self._vggish_fn = tf.compat.v1.wrap_function(
vggish_definer(self._variables, checkpoint_path),
signature=(tf.TensorSpec(shape=[None], dtype=tf.float32),))
@tf.function(input_signature=(tf.TensorSpec(shape=[None], dtype=tf.float32),))
def __call__(self, waveform):
return self._vggish_fn(waveform)
def check_model(model_fn):
"""Applies vggish_smoke_test's sanity check to an instance of VGGish."""
num_secs = 3
freq = 1000
sr = 44100
t = np.arange(0, num_secs, 1 / sr)
waveform = np.sin(2 * np.pi * freq * t)
waveform = resampy.resample(waveform, sr, vggish_params.SAMPLE_RATE)
embeddings = model_fn(waveform)
expected_embedding_mean = -0.0333
expected_embedding_std = 0.380
rel_error = 0.1
np.testing.assert_allclose(
[np.mean(embeddings), np.std(embeddings)],
[expected_embedding_mean, expected_embedding_std],
rtol=rel_error)
def main(args):
# Create a TF2 wrapper around VGGish.
vggish_checkpoint_path = args[0]
vggish = VGGish(vggish_checkpoint_path)
check_model(vggish)
# Make TF-Hub export.
vggish_tfhub_export_path = args[1]
tf.saved_model.save(vggish, vggish_tfhub_export_path)
# Check export in TF2.
model = tfhub.load(vggish_tfhub_export_path)
check_model(model)
# Check export in TF1.
with tf.compat.v1.Graph().as_default(), tf.compat.v1.Session() as sess:
model = tfhub.load(vggish_tfhub_export_path)
sess.run(tf.compat.v1.global_variables_initializer())
def run_model(waveform):
embeddings = model(waveform)
return sess.run(embeddings)
check_model(run_model)
if __name__ == '__main__':
main(sys.argv[1:])
| 4,231 | 32.322835 | 92 | py |
models | models-master/research/audioset/yamnet/inference.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inference demo for YAMNet."""
from __future__ import division, print_function
import sys
import numpy as np
import resampy
import soundfile as sf
import tensorflow as tf
import params as yamnet_params
import yamnet as yamnet_model
def main(argv):
assert argv, 'Usage: inference.py <wav file> <wav file> ...'
params = yamnet_params.Params()
yamnet = yamnet_model.yamnet_frames_model(params)
yamnet.load_weights('yamnet.h5')
yamnet_classes = yamnet_model.class_names('yamnet_class_map.csv')
for file_name in argv:
# Decode the WAV file.
wav_data, sr = sf.read(file_name, dtype=np.int16)
assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
waveform = wav_data / 32768.0 # Convert to [-1.0, +1.0]
waveform = waveform.astype('float32')
# Convert to mono and the sample rate expected by YAMNet.
if len(waveform.shape) > 1:
waveform = np.mean(waveform, axis=1)
if sr != params.sample_rate:
waveform = resampy.resample(waveform, sr, params.sample_rate)
# Predict YAMNet classes.
scores, embeddings, spectrogram = yamnet(waveform)
# Scores is a matrix of (time_frames, num_classes) classifier scores.
# Average them along time to get an overall classifier output for the clip.
prediction = np.mean(scores, axis=0)
# Report the highest-scoring classes and their scores.
top5_i = np.argsort(prediction)[::-1][:5]
print(file_name, ':\n' +
'\n'.join(' {:12s}: {:.3f}'.format(yamnet_classes[i], prediction[i])
for i in top5_i))
if __name__ == '__main__':
main(sys.argv[1:])
| 2,307 | 34.507692 | 80 | py |
models | models-master/research/audioset/yamnet/export.py | """Exports YAMNet as: TF2 SavedModel, TF-Lite model, TF-JS model.
The exported models all accept as input:
- 1-d float32 Tensor of arbitrary shape containing an audio waveform
(assumed to be mono 16 kHz samples in the [-1, +1] range)
and return as output:
- a 2-d float32 Tensor of shape [num_frames, num_classes] containing
predicted class scores for each frame of audio extracted from the input.
- a 2-d float32 Tensor of shape [num_frames, embedding_size] containing
embeddings of each frame of audio.
- a 2-d float32 Tensor of shape [num_spectrogram_frames, num_mel_bins]
containing the log mel spectrogram of the entire waveform.
The SavedModels will also contain (as an asset) a class map CSV file that maps
class indices to AudioSet class names and Freebase MIDs. The path to the class
map is available as the 'class_map_path()' method of the restored model.
Requires pip-installing tensorflow_hub and tensorflowjs.
Usage:
export.py <path/to/YAMNet/weights-hdf-file> <path/to/output/directory>
and the various exports will be created in subdirectories of the output directory.
Assumes that it will be run in the yamnet source directory from where it loads
the class map. Skips an export if the corresponding directory already exists.
"""
import os
import sys
import tempfile
import time
import numpy as np
import tensorflow as tf
assert tf.version.VERSION >= '2.0.0', (
'Need at least TF 2.0, you have TF v{}'.format(tf.version.VERSION))
import tensorflow_hub as tfhub
from tensorflowjs.converters import tf_saved_model_conversion_v2 as tfjs_saved_model_converter
import params as yamnet_params
import yamnet
def log(msg):
print('\n=====\n{} | {}\n=====\n'.format(time.asctime(), msg), flush=True)
class YAMNet(tf.Module):
"""A TF2 Module wrapper around YAMNet."""
def __init__(self, weights_path, params):
super().__init__()
self._yamnet = yamnet.yamnet_frames_model(params)
self._yamnet.load_weights(weights_path)
self._class_map_asset = tf.saved_model.Asset('yamnet_class_map.csv')
@tf.function(input_signature=[])
def class_map_path(self):
return self._class_map_asset.asset_path
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32)])
def __call__(self, waveform):
predictions, embeddings, log_mel_spectrogram = self._yamnet(waveform)
return {'predictions': predictions,
'embeddings': embeddings,
'log_mel_spectrogram': log_mel_spectrogram}
def check_model(model_fn, class_map_path, params):
  """Applies yamnet_test's sanity checks to an instance of YAMNet."""
  yamnet_classes = yamnet.class_names(class_map_path)
def clip_test(waveform, expected_class_name, top_n=10):
results = model_fn(waveform=waveform)
predictions = results['predictions']
embeddings = results['embeddings']
log_mel_spectrogram = results['log_mel_spectrogram']
clip_predictions = np.mean(predictions, axis=0)
top_n_indices = np.argsort(clip_predictions)[-top_n:]
top_n_scores = clip_predictions[top_n_indices]
top_n_class_names = yamnet_classes[top_n_indices]
top_n_predictions = list(zip(top_n_class_names, top_n_scores))
assert expected_class_name in top_n_class_names, (
'Did not find expected class {} in top {} predictions: {}'.format(
expected_class_name, top_n, top_n_predictions))
clip_test(
waveform=np.zeros((int(3 * params.sample_rate),), dtype=np.float32),
expected_class_name='Silence')
np.random.seed(51773) # Ensure repeatability.
clip_test(
waveform=np.random.uniform(-1.0, +1.0,
(int(3 * params.sample_rate),)).astype(np.float32),
expected_class_name='White noise')
clip_test(
waveform=np.sin(2 * np.pi * 440 *
np.arange(0, 3, 1 / params.sample_rate), dtype=np.float32),
expected_class_name='Sine wave')
def make_tf2_export(weights_path, export_dir):
if os.path.exists(export_dir):
log('TF2 export already exists in {}, skipping TF2 export'.format(
export_dir))
return
# Create a TF2 Module wrapper around YAMNet.
log('Building and checking TF2 Module ...')
params = yamnet_params.Params()
yamnet = YAMNet(weights_path, params)
check_model(yamnet, yamnet.class_map_path(), params)
log('Done')
# Make TF2 SavedModel export.
log('Making TF2 SavedModel export ...')
tf.saved_model.save(
yamnet, export_dir,
signatures={'serving_default': yamnet.__call__.get_concrete_function()})
log('Done')
# Check export with TF-Hub in TF2.
log('Checking TF2 SavedModel export in TF2 ...')
model = tfhub.load(export_dir)
check_model(model, model.class_map_path(), params)
log('Done')
# Check export with TF-Hub in TF1.
log('Checking TF2 SavedModel export in TF1 ...')
with tf.compat.v1.Graph().as_default(), tf.compat.v1.Session() as sess:
model = tfhub.load(export_dir)
sess.run(tf.compat.v1.global_variables_initializer())
def run_model(waveform):
return sess.run(model(waveform))
check_model(run_model, model.class_map_path().eval(), params)
log('Done')
def make_tflite_export(weights_path, export_dir):
if os.path.exists(export_dir):
log('TF-Lite export already exists in {}, skipping TF-Lite export'.format(
export_dir))
return
# Create a TF-Lite compatible Module wrapper around YAMNet.
log('Building and checking TF-Lite Module ...')
params = yamnet_params.Params(tflite_compatible=True)
yamnet = YAMNet(weights_path, params)
check_model(yamnet, yamnet.class_map_path(), params)
log('Done')
# Make TF-Lite SavedModel export.
log('Making TF-Lite SavedModel export ...')
saved_model_dir = os.path.join(export_dir, 'saved_model')
os.makedirs(saved_model_dir)
tf.saved_model.save(
yamnet, saved_model_dir,
signatures={'serving_default': yamnet.__call__.get_concrete_function()})
log('Done')
# Check that the export can be loaded and works.
log('Checking TF-Lite SavedModel export in TF2 ...')
model = tf.saved_model.load(saved_model_dir)
check_model(model, model.class_map_path(), params)
log('Done')
# Make a TF-Lite model from the SavedModel.
log('Making TF-Lite model ...')
tflite_converter = tf.lite.TFLiteConverter.from_saved_model(
saved_model_dir, signature_keys=['serving_default'])
tflite_model = tflite_converter.convert()
tflite_model_path = os.path.join(export_dir, 'yamnet.tflite')
with open(tflite_model_path, 'wb') as f:
f.write(tflite_model)
log('Done')
# Check the TF-Lite export.
log('Checking TF-Lite model ...')
interpreter = tf.lite.Interpreter(tflite_model_path)
runner = interpreter.get_signature_runner('serving_default')
check_model(runner, 'yamnet_class_map.csv', params)
log('Done')
return saved_model_dir
def make_tfjs_export(tflite_saved_model_dir, export_dir):
if os.path.exists(export_dir):
log('TF-JS export already exists in {}, skipping TF-JS export'.format(
export_dir))
return
# Make a TF-JS model from the TF-Lite SavedModel export.
log('Making TF-JS model ...')
os.makedirs(export_dir)
tfjs_saved_model_converter.convert_tf_saved_model(
tflite_saved_model_dir, export_dir)
log('Done')
def main(args):
weights_path = args[0]
output_dir = args[1]
tf2_export_dir = os.path.join(output_dir, 'tf2')
make_tf2_export(weights_path, tf2_export_dir)
tflite_export_dir = os.path.join(output_dir, 'tflite')
tflite_saved_model_dir = make_tflite_export(weights_path, tflite_export_dir)
tfjs_export_dir = os.path.join(output_dir, 'tfjs')
make_tfjs_export(tflite_saved_model_dir, tfjs_export_dir)
if __name__ == '__main__':
main(sys.argv[1:])
| 7,705 | 34.84186 | 94 | py |
models | models-master/research/audioset/yamnet/features.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Feature computation for YAMNet."""
import numpy as np
import tensorflow as tf
def waveform_to_log_mel_spectrogram_patches(waveform, params):
"""Compute log mel spectrogram patches of a 1-D waveform."""
with tf.name_scope('log_mel_features'):
# waveform has shape [<# samples>]
# Convert waveform into spectrogram using a Short-Time Fourier Transform.
# Note that tf.signal.stft() uses a periodic Hann window by default.
window_length_samples = int(
round(params.sample_rate * params.stft_window_seconds))
hop_length_samples = int(
round(params.sample_rate * params.stft_hop_seconds))
fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))
num_spectrogram_bins = fft_length // 2 + 1
if params.tflite_compatible:
magnitude_spectrogram = _tflite_stft_magnitude(
signal=waveform,
frame_length=window_length_samples,
frame_step=hop_length_samples,
fft_length=fft_length)
else:
magnitude_spectrogram = tf.abs(tf.signal.stft(
signals=waveform,
frame_length=window_length_samples,
frame_step=hop_length_samples,
fft_length=fft_length))
# magnitude_spectrogram has shape [<# STFT frames>, num_spectrogram_bins]
# Convert spectrogram into log mel spectrogram.
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins=params.mel_bands,
num_spectrogram_bins=num_spectrogram_bins,
sample_rate=params.sample_rate,
lower_edge_hertz=params.mel_min_hz,
upper_edge_hertz=params.mel_max_hz)
mel_spectrogram = tf.matmul(
magnitude_spectrogram, linear_to_mel_weight_matrix)
log_mel_spectrogram = tf.math.log(mel_spectrogram + params.log_offset)
# log_mel_spectrogram has shape [<# STFT frames>, params.mel_bands]
# Frame spectrogram (shape [<# STFT frames>, params.mel_bands]) into patches
# (the input examples). Only complete frames are emitted, so if there is
# less than params.patch_window_seconds of waveform then nothing is emitted
# (to avoid this, zero-pad before processing).
spectrogram_hop_length_samples = int(
round(params.sample_rate * params.stft_hop_seconds))
spectrogram_sample_rate = params.sample_rate / spectrogram_hop_length_samples
patch_window_length_samples = int(
round(spectrogram_sample_rate * params.patch_window_seconds))
patch_hop_length_samples = int(
round(spectrogram_sample_rate * params.patch_hop_seconds))
features = tf.signal.frame(
signal=log_mel_spectrogram,
frame_length=patch_window_length_samples,
frame_step=patch_hop_length_samples,
axis=0)
    # features has shape [<# patches>, <# STFT frames in a patch>, params.mel_bands]
return log_mel_spectrogram, features
def pad_waveform(waveform, params):
"""Pads waveform with silence if needed to get an integral number of patches."""
# In order to produce one patch of log mel spectrogram input to YAMNet, we
# need at least one patch window length of waveform plus enough extra samples
# to complete the final STFT analysis window.
min_waveform_seconds = (
params.patch_window_seconds +
params.stft_window_seconds - params.stft_hop_seconds)
min_num_samples = tf.cast(min_waveform_seconds * params.sample_rate, tf.int32)
num_samples = tf.shape(waveform)[0]
num_padding_samples = tf.maximum(0, min_num_samples - num_samples)
# In addition, there might be enough waveform for one or more additional
# patches formed by hopping forward. If there are more samples than one patch,
# round up to an integral number of hops.
num_samples = tf.maximum(num_samples, min_num_samples)
num_samples_after_first_patch = num_samples - min_num_samples
hop_samples = tf.cast(params.patch_hop_seconds * params.sample_rate, tf.int32)
num_hops_after_first_patch = tf.cast(tf.math.ceil(
tf.cast(num_samples_after_first_patch, tf.float32) /
tf.cast(hop_samples, tf.float32)), tf.int32)
num_padding_samples += (
hop_samples * num_hops_after_first_patch - num_samples_after_first_patch)
padded_waveform = tf.pad(waveform, [[0, num_padding_samples]],
mode='CONSTANT', constant_values=0.0)
return padded_waveform
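# --- Illustrative usage sketch, not part of the original file ---
# Padding half a second of silence up to exactly one patch, assuming the
# sibling params.py module is importable and TF2 eager execution is enabled.
def _demo_pad_waveform():
  import params as yamnet_params  # Assumption: run from the yamnet directory.
  hparams = yamnet_params.Params()
  padded = pad_waveform(tf.zeros([8000], tf.float32), hparams)
  # One patch needs 0.96 s plus one extra STFT window minus one hop:
  # (0.96 + 0.025 - 0.010) * 16000 = 15600 samples.
  assert int(tf.shape(padded)[0]) == 15600
  return padded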
def _tflite_stft_magnitude(signal, frame_length, frame_step, fft_length):
"""TF-Lite-compatible version of tf.abs(tf.signal.stft())."""
def _hann_window():
return tf.reshape(
tf.constant(
(0.5 - 0.5 * np.cos(2 * np.pi * np.arange(0, 1.0, 1.0 / frame_length))
).astype(np.float32),
name='hann_window'), [1, frame_length])
def _dft_matrix(dft_length):
"""Calculate the full DFT matrix in NumPy."""
# See https://en.wikipedia.org/wiki/DFT_matrix
omega = (0 + 1j) * 2.0 * np.pi / float(dft_length)
# Don't include 1/sqrt(N) scaling, tf.signal.rfft doesn't apply it.
return np.exp(omega * np.outer(np.arange(dft_length), np.arange(dft_length)))
def _rdft(framed_signal, fft_length):
"""Implement real-input Discrete Fourier Transform by matmul."""
# We are right-multiplying by the DFT matrix, and we are keeping only the
# first half ("positive frequencies"). So discard the second half of rows,
# but transpose the array for right-multiplication. The DFT matrix is
# symmetric, so we could have done it more directly, but this reflects our
# intention better.
complex_dft_matrix_kept_values = _dft_matrix(fft_length)[:(
fft_length // 2 + 1), :].transpose()
real_dft_matrix = tf.constant(
np.real(complex_dft_matrix_kept_values).astype(np.float32),
name='real_dft_matrix')
imag_dft_matrix = tf.constant(
np.imag(complex_dft_matrix_kept_values).astype(np.float32),
name='imaginary_dft_matrix')
signal_frame_length = tf.shape(framed_signal)[-1]
half_pad = (fft_length - signal_frame_length) // 2
padded_frames = tf.pad(
framed_signal,
[
# Don't add any padding in the frame dimension.
[0, 0],
# Pad before and after the signal within each frame.
[half_pad, fft_length - signal_frame_length - half_pad]
],
mode='CONSTANT',
constant_values=0.0)
real_stft = tf.matmul(padded_frames, real_dft_matrix)
imag_stft = tf.matmul(padded_frames, imag_dft_matrix)
return real_stft, imag_stft
def _complex_abs(real, imag):
return tf.sqrt(tf.add(real * real, imag * imag))
framed_signal = tf.signal.frame(signal, frame_length, frame_step)
windowed_signal = framed_signal * _hann_window()
real_stft, imag_stft = _rdft(windowed_signal, fft_length)
stft_magnitude = _complex_abs(real_stft, imag_stft)
return stft_magnitude
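# Hedged sanity-check sketch (an addition, not original YAMNet code): the
# matmul-based magnitude above should match tf.abs(tf.signal.stft(...)) up to
# numerical tolerance, because centering the zero padding instead of appending
# it only changes the phase of the DFT, not its magnitude. The function name
# and default sizes (25 ms window, 10 ms hop at 16 kHz, fft_length 512) are
# assumptions added purely for illustration.
def _example_compare_stft_magnitudes(signal, frame_length=400, frame_step=160,
                                     fft_length=512):
  """Illustrative-only: max abs difference vs. the reference tf.signal.stft."""
  reference = tf.abs(tf.signal.stft(
      signal, frame_length=frame_length, frame_step=frame_step,
      fft_length=fft_length))
  tflite_compatible = _tflite_stft_magnitude(
      signal, frame_length, frame_step, fft_length)
  return tf.reduce_max(tf.abs(reference - tflite_compatible))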
| 7,490 | 44.126506 | 85 | py |
models | models-master/research/audioset/yamnet/yamnet_test.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Installation test for YAMNet."""
import numpy as np
import tensorflow as tf
import params
import yamnet
class YAMNetTest(tf.test.TestCase):
_params = None
_yamnet = None
_yamnet_classes = None
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._params = params.Params()
cls._yamnet = yamnet.yamnet_frames_model(cls._params)
cls._yamnet.load_weights('yamnet.h5')
cls._yamnet_classes = yamnet.class_names('yamnet_class_map.csv')
def clip_test(self, waveform, expected_class_name, top_n=10):
"""Run the model on the waveform, check that expected class is in top-n."""
predictions, embeddings, log_mel_spectrogram = YAMNetTest._yamnet(waveform)
clip_predictions = np.mean(predictions, axis=0)
top_n_indices = np.argsort(clip_predictions)[-top_n:]
top_n_scores = clip_predictions[top_n_indices]
top_n_class_names = YAMNetTest._yamnet_classes[top_n_indices]
top_n_predictions = list(zip(top_n_class_names, top_n_scores))
self.assertIn(expected_class_name, top_n_class_names,
'Did not find expected class {} in top {} predictions: {}'.format(
expected_class_name, top_n, top_n_predictions))
def testZeros(self):
self.clip_test(
waveform=np.zeros((int(3 * YAMNetTest._params.sample_rate),)),
expected_class_name='Silence')
def testRandom(self):
np.random.seed(51773) # Ensure repeatability.
self.clip_test(
waveform=np.random.uniform(-1.0, +1.0,
(int(3 * YAMNetTest._params.sample_rate),)),
expected_class_name='White noise')
def testSine(self):
self.clip_test(
waveform=np.sin(2 * np.pi * 440 *
np.arange(0, 3, 1 / YAMNetTest._params.sample_rate)),
expected_class_name='Sine wave')
if __name__ == '__main__':
tf.test.main()
| 2,564 | 35.126761 | 84 | py |
models | models-master/research/audioset/yamnet/params.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameters for YAMNet."""
from dataclasses import dataclass
# The following hyperparameters (except patch_hop_seconds) were used to train YAMNet,
# so expect some variability in performance if you change these. The patch hop can
# be changed arbitrarily: a smaller hop should give you more patches from the same
# clip and possibly better performance at a larger computational cost.
@dataclass(frozen=True) # Instances of this class are immutable.
class Params:
sample_rate: float = 16000.0
stft_window_seconds: float = 0.025
stft_hop_seconds: float = 0.010
mel_bands: int = 64
mel_min_hz: float = 125.0
mel_max_hz: float = 7500.0
log_offset: float = 0.001
patch_window_seconds: float = 0.96
patch_hop_seconds: float = 0.48
@property
def patch_frames(self):
return int(round(self.patch_window_seconds / self.stft_hop_seconds))
@property
def patch_bands(self):
return self.mel_bands
num_classes: int = 521
conv_padding: str = 'same'
batchnorm_center: bool = True
batchnorm_scale: bool = False
batchnorm_epsilon: float = 1e-4
classifier_activation: str = 'sigmoid'
tflite_compatible: bool = False
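# Hedged illustration (added; not part of the original file): with the defaults
# above each patch covers patch_frames = round(0.96 / 0.010) = 96 STFT frames
# and patch_bands = 64 mel bands, so the per-patch model input is a 96 x 64
# log mel spectrogram. A quick check of that arithmetic:
#   p = Params()
#   assert (p.patch_frames, p.patch_bands) == (96, 64)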
| 1,847 | 34.538462 | 85 | py |
models | models-master/research/audioset/yamnet/yamnet.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core model definition of YAMNet."""
import csv
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, layers
import features as features_lib
def _batch_norm(name, params):
def _bn_layer(layer_input):
return layers.BatchNormalization(
name=name,
center=params.batchnorm_center,
scale=params.batchnorm_scale,
epsilon=params.batchnorm_epsilon)(layer_input)
return _bn_layer
def _conv(name, kernel, stride, filters, params):
def _conv_layer(layer_input):
output = layers.Conv2D(name='{}/conv'.format(name),
filters=filters,
kernel_size=kernel,
strides=stride,
padding=params.conv_padding,
use_bias=False,
activation=None)(layer_input)
output = _batch_norm('{}/conv/bn'.format(name), params)(output)
output = layers.ReLU(name='{}/relu'.format(name))(output)
return output
return _conv_layer
def _separable_conv(name, kernel, stride, filters, params):
def _separable_conv_layer(layer_input):
output = layers.DepthwiseConv2D(name='{}/depthwise_conv'.format(name),
kernel_size=kernel,
strides=stride,
depth_multiplier=1,
padding=params.conv_padding,
use_bias=False,
activation=None)(layer_input)
output = _batch_norm('{}/depthwise_conv/bn'.format(name), params)(output)
output = layers.ReLU(name='{}/depthwise_conv/relu'.format(name))(output)
output = layers.Conv2D(name='{}/pointwise_conv'.format(name),
filters=filters,
kernel_size=(1, 1),
strides=1,
padding=params.conv_padding,
use_bias=False,
activation=None)(output)
output = _batch_norm('{}/pointwise_conv/bn'.format(name), params)(output)
output = layers.ReLU(name='{}/pointwise_conv/relu'.format(name))(output)
return output
return _separable_conv_layer
_YAMNET_LAYER_DEFS = [
# (layer_function, kernel, stride, num_filters)
(_conv, [3, 3], 2, 32),
(_separable_conv, [3, 3], 1, 64),
(_separable_conv, [3, 3], 2, 128),
(_separable_conv, [3, 3], 1, 128),
(_separable_conv, [3, 3], 2, 256),
(_separable_conv, [3, 3], 1, 256),
(_separable_conv, [3, 3], 2, 512),
(_separable_conv, [3, 3], 1, 512),
(_separable_conv, [3, 3], 1, 512),
(_separable_conv, [3, 3], 1, 512),
(_separable_conv, [3, 3], 1, 512),
(_separable_conv, [3, 3], 1, 512),
(_separable_conv, [3, 3], 2, 1024),
(_separable_conv, [3, 3], 1, 1024)
]
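# Hedged note (added for illustration, not part of the original file): the five
# stride-2 entries above give an overall stride of 32, so a 96 x 64 input patch
# is reduced to a 3 x 2 x 1024 feature map before global average pooling, which
# is what makes the embedding below 1024-dimensional.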
def yamnet(features, params):
"""Define the core YAMNet mode in Keras."""
net = layers.Reshape(
(params.patch_frames, params.patch_bands, 1),
input_shape=(params.patch_frames, params.patch_bands))(features)
for (i, (layer_fun, kernel, stride, filters)) in enumerate(_YAMNET_LAYER_DEFS):
net = layer_fun('layer{}'.format(i + 1), kernel, stride, filters, params)(net)
embeddings = layers.GlobalAveragePooling2D()(net)
logits = layers.Dense(units=params.num_classes, use_bias=True)(embeddings)
predictions = layers.Activation(activation=params.classifier_activation)(logits)
return predictions, embeddings
def yamnet_frames_model(params):
"""Defines the YAMNet waveform-to-class-scores model.
Args:
params: An instance of Params containing hyperparameters.
Returns:
A model accepting (num_samples,) waveform input and emitting:
- predictions: (num_patches, num_classes) matrix of class scores per time frame
- embeddings: (num_patches, embedding size) matrix of embeddings per time frame
- log_mel_spectrogram: (num_spectrogram_frames, num_mel_bins) spectrogram feature matrix
"""
waveform = layers.Input(batch_shape=(None,), dtype=tf.float32)
waveform_padded = features_lib.pad_waveform(waveform, params)
log_mel_spectrogram, features = features_lib.waveform_to_log_mel_spectrogram_patches(
waveform_padded, params)
predictions, embeddings = yamnet(features, params)
frames_model = Model(
name='yamnet_frames', inputs=waveform,
outputs=[predictions, embeddings, log_mel_spectrogram])
return frames_model
def class_names(class_map_csv):
"""Read the class name definition file and return a list of strings."""
if tf.is_tensor(class_map_csv):
class_map_csv = class_map_csv.numpy()
with open(class_map_csv) as csv_file:
reader = csv.reader(csv_file)
next(reader) # Skip header
return np.array([display_name for (_, _, display_name) in reader])
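# Hedged usage sketch (an addition, not part of the original file). It assumes
# `import params` alongside the imports above, plus local copies of the
# released 'yamnet.h5' weights and 'yamnet_class_map.csv':
#   p = params.Params()
#   model = yamnet_frames_model(p)
#   model.load_weights('yamnet.h5')
#   names = class_names('yamnet_class_map.csv')
#   waveform = np.sin(2 * np.pi * 440 * np.arange(0, 3, 1 / p.sample_rate))
#   scores, embeddings, log_mel = model(waveform)
#   print(names[np.mean(scores, axis=0).argmax()])  # Expected: 'Sine wave'.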
| 5,549 | 38.928058 | 92 | py |
models | models-master/research/lstm_object_detection/export_tflite_lstd_graph.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Exports an LSTM detection model to use with tf-lite.
Output file:
* A tflite compatible frozen graph - $output_directory/tflite_graph.pb
The exported graph has the following input and output nodes.
Inputs:
'input_video_tensor': a float32 tensor of shape
[unroll_length, height, width, 3] containing the normalized input image.
Note that the height and width must be compatible with the height and
width configured in the fixed_shape_image resizer options in the pipeline
config proto.
Outputs:
If add_postprocessing_op is true, the frozen graph adds a
  TFLite_Detection_PostProcess custom op node that has four outputs:
detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
locations
detection_classes: a float32 tensor of shape [1, num_boxes]
with class indices
detection_scores: a float32 tensor of shape [1, num_boxes]
with class scores
num_boxes: a float32 tensor of size 1 containing the number of detected boxes
else:
the graph has three outputs:
'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]
containing the encoded box predictions.
'raw_outputs/class_predictions': a float32 tensor of shape
[1, num_anchors, num_classes] containing the class scores for each anchor
after applying score conversion.
'anchors': a float32 constant tensor of shape [num_anchors, 4]
containing the anchor boxes.
Example Usage:
--------------
python lstm_object_detection/export_tflite_lstd_graph.py \
--pipeline_config_path path/to/lstm_pipeline.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
The expected output would be in the directory
path/to/exported_model_directory (which is created if it does not exist)
with contents:
- tflite_graph.pbtxt
- tflite_graph.pb
Config overrides (see the `config_override` flag) are text protobufs
(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override
certain fields in the provided pipeline_config_path. These are useful for
making small changes to the inference graph that differ from the training or
eval config.
Example Usage (in which we change the NMS iou_threshold to be 0.5 and
NMS score_threshold to be 0.0):
python lstm_object_detection/export_tflite_lstd_graph.py \
--pipeline_config_path path/to/lstm_pipeline.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
--config_override " \
model{ \
ssd{ \
post_processing { \
batch_non_max_suppression { \
score_threshold: 0.0 \
iou_threshold: 0.5 \
} \
} \
} \
} \
"
"""
import tensorflow.compat.v1 as tf
from lstm_object_detection import export_tflite_lstd_graph_lib
from lstm_object_detection.utils import config_util
flags = tf.app.flags
flags.DEFINE_string('output_directory', None, 'Path to write outputs.')
flags.DEFINE_string(
'pipeline_config_path', None,
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file.')
flags.DEFINE_string('trained_checkpoint_prefix', None, 'Checkpoint prefix.')
flags.DEFINE_integer('max_detections', 10,
'Maximum number of detections (boxes) to show.')
flags.DEFINE_integer('max_classes_per_detection', 1,
'Maximum number of classes to output per detection box.')
flags.DEFINE_integer(
'detections_per_class', 100,
'Number of anchors used per class in Regular Non-Max-Suppression.')
flags.DEFINE_bool('add_postprocessing_op', True,
'Add TFLite custom op for postprocessing to the graph.')
flags.DEFINE_bool(
'use_regular_nms', False,
'Flag to set postprocessing op to use Regular NMS instead of Fast NMS.')
flags.DEFINE_string(
'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig '
'text proto to override pipeline_config_path.')
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
flags.mark_flag_as_required('output_directory')
flags.mark_flag_as_required('pipeline_config_path')
flags.mark_flag_as_required('trained_checkpoint_prefix')
pipeline_config = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
export_tflite_lstd_graph_lib.export_tflite_graph(
pipeline_config,
FLAGS.trained_checkpoint_prefix,
FLAGS.output_directory,
FLAGS.add_postprocessing_op,
FLAGS.max_detections,
FLAGS.max_classes_per_detection,
use_regular_nms=FLAGS.use_regular_nms)
if __name__ == '__main__':
tf.app.run(main)
| 5,337 | 37.402878 | 80 | py |
models | models-master/research/lstm_object_detection/model_builder_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_object_detection.tensorflow.model_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from lstm_object_detection import model_builder
from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch
from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2
from object_detection.protos import pipeline_pb2
class ModelBuilderTest(tf.test.TestCase):
def create_train_model(self, model_config, lstm_config):
"""Builds a DetectionModel based on the model config.
Args:
model_config: A model.proto object containing the config for the desired
DetectionModel.
lstm_config: LstmModel config proto that specifies LSTM train/eval
configs.
Returns:
DetectionModel based on the config.
"""
return model_builder.build(model_config, lstm_config, is_training=True)
def create_eval_model(self, model_config, lstm_config):
"""Builds a DetectionModel based on the model config.
Args:
model_config: A model.proto object containing the config for the desired
DetectionModel.
lstm_config: LstmModel config proto that specifies LSTM train/eval
configs.
Returns:
DetectionModel based on the config.
"""
return model_builder.build(model_config, lstm_config, is_training=False)
def get_model_configs_from_proto(self):
"""Creates a model text proto for testing.
Returns:
A dictionary of model configs.
"""
model_text_proto = """
[lstm_object_detection.protos.lstm_model] {
train_unroll_length: 4
eval_unroll_length: 4
}
model {
ssd {
feature_extractor {
type: 'lstm_ssd_mobilenet_v1'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
negative_class_weight: 2.0
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}
}"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Merge(model_text_proto, pipeline_config)
configs = {}
configs['model'] = pipeline_config.model
configs['lstm_model'] = pipeline_config.Extensions[
internal_pipeline_pb2.lstm_model]
return configs
def get_interleaved_model_configs_from_proto(self):
"""Creates an interleaved model text proto for testing.
Returns:
A dictionary of model configs.
"""
model_text_proto = """
[lstm_object_detection.protos.lstm_model] {
train_unroll_length: 4
eval_unroll_length: 10
lstm_state_depth: 320
depth_multipliers: 1.4
depth_multipliers: 0.35
pre_bottleneck: true
low_res: true
train_interleave_method: 'RANDOM_SKIP_SMALL'
eval_interleave_method: 'SKIP3'
}
model {
ssd {
feature_extractor {
type: 'lstm_ssd_interleaved_mobilenet_v2'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
negative_class_weight: 2.0
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}
}"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Merge(model_text_proto, pipeline_config)
configs = {}
configs['model'] = pipeline_config.model
configs['lstm_model'] = pipeline_config.Extensions[
internal_pipeline_pb2.lstm_model]
return configs
def test_model_creation_from_valid_configs(self):
configs = self.get_model_configs_from_proto()
# Test model properties.
self.assertEqual(configs['model'].ssd.negative_class_weight, 2.0)
self.assertTrue(configs['model'].ssd.normalize_loc_loss_by_codesize)
self.assertEqual(configs['model'].ssd.feature_extractor.type,
'lstm_ssd_mobilenet_v1')
model = self.create_train_model(configs['model'], configs['lstm_model'])
    # Test architecture type.
self.assertIsInstance(model, lstm_ssd_meta_arch.LSTMSSDMetaArch)
# Test LSTM unroll length.
self.assertEqual(model.unroll_length, 4)
model = self.create_eval_model(configs['model'], configs['lstm_model'])
    # Test architecture type.
self.assertIsInstance(model, lstm_ssd_meta_arch.LSTMSSDMetaArch)
# Test LSTM configs.
self.assertEqual(model.unroll_length, 4)
def test_interleaved_model_creation_from_valid_configs(self):
configs = self.get_interleaved_model_configs_from_proto()
# Test model properties.
self.assertEqual(configs['model'].ssd.negative_class_weight, 2.0)
self.assertTrue(configs['model'].ssd.normalize_loc_loss_by_codesize)
self.assertEqual(configs['model'].ssd.feature_extractor.type,
'lstm_ssd_interleaved_mobilenet_v2')
model = self.create_train_model(configs['model'], configs['lstm_model'])
    # Test architecture type.
self.assertIsInstance(model, lstm_ssd_meta_arch.LSTMSSDMetaArch)
# Test LSTM configs.
self.assertEqual(model.unroll_length, 4)
self.assertEqual(model._feature_extractor.lstm_state_depth, 320)
self.assertAllClose(model._feature_extractor.depth_multipliers, (1.4, 0.35))
self.assertTrue(model._feature_extractor.pre_bottleneck)
self.assertTrue(model._feature_extractor.low_res)
self.assertEqual(model._feature_extractor.interleave_method,
'RANDOM_SKIP_SMALL')
model = self.create_eval_model(configs['model'], configs['lstm_model'])
    # Test architecture type.
self.assertIsInstance(model, lstm_ssd_meta_arch.LSTMSSDMetaArch)
# Test LSTM configs.
self.assertEqual(model.unroll_length, 10)
self.assertEqual(model._feature_extractor.lstm_state_depth, 320)
self.assertAllClose(model._feature_extractor.depth_multipliers, (1.4, 0.35))
self.assertTrue(model._feature_extractor.pre_bottleneck)
self.assertTrue(model._feature_extractor.low_res)
self.assertEqual(model._feature_extractor.interleave_method, 'SKIP3')
def test_model_creation_from_invalid_configs(self):
configs = self.get_model_configs_from_proto()
# Test model build failure with wrong input configs.
with self.assertRaises(AttributeError):
_ = self.create_train_model(configs['model'], configs['model'])
with self.assertRaises(AttributeError):
_ = self.create_eval_model(configs['model'], configs['model'])
if __name__ == '__main__':
tf.test.main()
| 9,319 | 29.759076 | 80 | py |
models | models-master/research/lstm_object_detection/test_tflite_model.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test a tflite model using random input data."""
from __future__ import print_function
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
flags.DEFINE_string('model_path', None, 'Path to model.')
FLAGS = flags.FLAGS
def main(_):
flags.mark_flag_as_required('model_path')
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path=FLAGS.model_path)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
print('input_details:', input_details)
output_details = interpreter.get_output_details()
print('output_details:', output_details)
# Test model on random input data.
input_shape = input_details[0]['shape']
  # Change the following line to feed in your own data.
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data)
if __name__ == '__main__':
tf.app.run()
| 1,797 | 32.296296 | 80 | py |
models | models-master/research/lstm_object_detection/export_tflite_lstd_graph_lib.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Exports detection models to use with tf-lite.
See export_tflite_lstd_graph.py for usage.
"""
import os
import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.tools.graph_transforms import TransformGraph
from lstm_object_detection import model_builder
from object_detection import exporter
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import post_processing_builder
from object_detection.core import box_list
_DEFAULT_NUM_CHANNELS = 3
_DEFAULT_NUM_COORD_BOX = 4
def get_const_center_size_encoded_anchors(anchors):
"""Exports center-size encoded anchors as a constant tensor.
Args:
anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor
boxes
Returns:
encoded_anchors: a float32 constant tensor of shape [num_anchors, 4]
containing the anchor boxes.
"""
anchor_boxlist = box_list.BoxList(anchors)
y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes()
num_anchors = y.get_shape().as_list()
with tf.Session() as sess:
y_out, x_out, h_out, w_out = sess.run([y, x, h, w])
encoded_anchors = tf.constant(
np.transpose(np.stack((y_out, x_out, h_out, w_out))),
dtype=tf.float32,
shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX],
name='anchors')
return encoded_anchors
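# Hedged worked example (added for illustration; not part of the original
# file): a corner-encoded anchor [ymin, xmin, ymax, xmax] = [0.2, 0.2, 0.6, 0.8]
# becomes the center-size row [y_center, x_center, h, w] = [0.4, 0.5, 0.4, 0.6]
# in the exported 'anchors' constant, since y = (ymin + ymax) / 2,
# x = (xmin + xmax) / 2, h = ymax - ymin and w = xmax - xmin.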
def append_postprocessing_op(frozen_graph_def,
max_detections,
max_classes_per_detection,
nms_score_threshold,
nms_iou_threshold,
num_classes,
scale_values,
detections_per_class=100,
use_regular_nms=False):
"""Appends postprocessing custom op.
Args:
frozen_graph_def: Frozen GraphDef for SSD model after freezing the
checkpoint
max_detections: Maximum number of detections (boxes) to show
max_classes_per_detection: Number of classes to display per detection
nms_score_threshold: Score threshold used in Non-maximal suppression in
post-processing
nms_iou_threshold: Intersection-over-union threshold used in Non-maximal
suppression in post-processing
num_classes: number of classes in SSD detector
scale_values: scale values is a dict with following key-value pairs
{y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5} that are used in decode
centersize boxes
detections_per_class: In regular NonMaxSuppression, number of anchors used
for NonMaxSuppression per class
use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
Fast NMS.
Returns:
transformed_graph_def: Frozen GraphDef with postprocessing custom op
appended
    The TFLite_Detection_PostProcess custom op node has four outputs:
detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
locations
detection_classes: a float32 tensor of shape [1, num_boxes]
with class indices
detection_scores: a float32 tensor of shape [1, num_boxes]
with class scores
num_boxes: a float32 tensor of size 1 containing the number of detected
boxes
"""
new_output = frozen_graph_def.node.add()
new_output.op = 'TFLite_Detection_PostProcess'
new_output.name = 'TFLite_Detection_PostProcess'
new_output.attr['_output_quantized'].CopyFrom(
attr_value_pb2.AttrValue(b=True))
new_output.attr['_output_types'].list.type.extend([
types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT,
types_pb2.DT_FLOAT
])
new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom(
attr_value_pb2.AttrValue(b=True))
new_output.attr['max_detections'].CopyFrom(
attr_value_pb2.AttrValue(i=max_detections))
new_output.attr['max_classes_per_detection'].CopyFrom(
attr_value_pb2.AttrValue(i=max_classes_per_detection))
new_output.attr['nms_score_threshold'].CopyFrom(
attr_value_pb2.AttrValue(f=nms_score_threshold.pop()))
new_output.attr['nms_iou_threshold'].CopyFrom(
attr_value_pb2.AttrValue(f=nms_iou_threshold.pop()))
new_output.attr['num_classes'].CopyFrom(
attr_value_pb2.AttrValue(i=num_classes))
new_output.attr['y_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop()))
new_output.attr['x_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop()))
new_output.attr['h_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop()))
new_output.attr['w_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop()))
new_output.attr['detections_per_class'].CopyFrom(
attr_value_pb2.AttrValue(i=detections_per_class))
new_output.attr['use_regular_nms'].CopyFrom(
attr_value_pb2.AttrValue(b=use_regular_nms))
new_output.input.extend(
['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors'])
# Transform the graph to append new postprocessing op
input_names = []
output_names = ['TFLite_Detection_PostProcess']
transforms = ['strip_unused_nodes']
transformed_graph_def = TransformGraph(frozen_graph_def, input_names,
output_names, transforms)
return transformed_graph_def
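# Hedged note (added for illustration): after TFLite conversion, the custom op
# appended above is conventionally read back from the interpreter as
#   boxes   = interpreter.get_tensor(output_details[0]['index'])  # [1, N, 4]
#   classes = interpreter.get_tensor(output_details[1]['index'])  # [1, N]
#   scores  = interpreter.get_tensor(output_details[2]['index'])  # [1, N]
#   count   = interpreter.get_tensor(output_details[3]['index'])  # [1]
# where N is max_detections. The output ordering here is an assumption based on
# the op's documented outputs; verify it against output_details for a specific
# converted model.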
def export_tflite_graph(pipeline_config,
trained_checkpoint_prefix,
output_dir,
add_postprocessing_op,
max_detections,
max_classes_per_detection,
detections_per_class=100,
use_regular_nms=False,
binary_graph_name='tflite_graph.pb',
txt_graph_name='tflite_graph.pbtxt'):
"""Exports a tflite compatible graph and anchors for ssd detection model.
Anchors are written to a tensor and tflite compatible graph
is written to output_dir/tflite_graph.pb.
Args:
pipeline_config: Dictionary of configuration objects. Keys are `model`,
`train_config`, `train_input_config`, `eval_config`, `eval_input_config`,
`lstm_model`. Value are the corresponding config objects.
trained_checkpoint_prefix: a file prefix for the checkpoint containing the
trained parameters of the SSD model.
output_dir: A directory to write the tflite graph and anchor file to.
    add_postprocessing_op: If true, the frozen graph adds a
      TFLite_Detection_PostProcess custom op.
max_detections: Maximum number of detections (boxes) to show
max_classes_per_detection: Number of classes to display per detection
detections_per_class: In regular NonMaxSuppression, number of anchors used
for NonMaxSuppression per class
use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
Fast NMS.
binary_graph_name: Name of the exported graph file in binary format.
txt_graph_name: Name of the exported graph file in text format.
Raises:
    ValueError: if the pipeline config contains models other than ssd, or uses
      an image resizer other than fixed_shape_resizer.
"""
model_config = pipeline_config['model']
lstm_config = pipeline_config['lstm_model']
eval_config = pipeline_config['eval_config']
tf.gfile.MakeDirs(output_dir)
if model_config.WhichOneof('model') != 'ssd':
raise ValueError('Only ssd models are supported in tflite. '
'Found {} in config'.format(
model_config.WhichOneof('model')))
num_classes = model_config.ssd.num_classes
nms_score_threshold = {
model_config.ssd.post_processing.batch_non_max_suppression.score_threshold
}
nms_iou_threshold = {
model_config.ssd.post_processing.batch_non_max_suppression.iou_threshold
}
scale_values = {}
scale_values['y_scale'] = {
model_config.ssd.box_coder.faster_rcnn_box_coder.y_scale
}
scale_values['x_scale'] = {
model_config.ssd.box_coder.faster_rcnn_box_coder.x_scale
}
scale_values['h_scale'] = {
model_config.ssd.box_coder.faster_rcnn_box_coder.height_scale
}
scale_values['w_scale'] = {
model_config.ssd.box_coder.faster_rcnn_box_coder.width_scale
}
image_resizer_config = model_config.ssd.image_resizer
image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof')
num_channels = _DEFAULT_NUM_CHANNELS
if image_resizer == 'fixed_shape_resizer':
height = image_resizer_config.fixed_shape_resizer.height
width = image_resizer_config.fixed_shape_resizer.width
if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:
num_channels = 1
shape = [lstm_config.eval_unroll_length, height, width, num_channels]
else:
raise ValueError(
        'Only fixed_shape_resizer '
'is supported with tflite. Found {}'.format(
image_resizer_config.WhichOneof('image_resizer_oneof')))
video_tensor = tf.placeholder(
tf.float32, shape=shape, name='input_video_tensor')
detection_model = model_builder.build(
model_config, lstm_config, is_training=False)
preprocessed_video, true_image_shapes = detection_model.preprocess(
tf.to_float(video_tensor))
predicted_tensors = detection_model.predict(preprocessed_video,
true_image_shapes)
# predicted_tensors = detection_model.postprocess(predicted_tensors,
# true_image_shapes)
# The score conversion occurs before the post-processing custom op
_, score_conversion_fn = post_processing_builder.build(
model_config.ssd.post_processing)
class_predictions = score_conversion_fn(
predicted_tensors['class_predictions_with_background'])
with tf.name_scope('raw_outputs'):
# 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]
# containing the encoded box predictions. Note that these are raw
# predictions and no Non-Max suppression is applied on them and
# no decode center size boxes is applied to them.
tf.identity(predicted_tensors['box_encodings'], name='box_encodings')
# 'raw_outputs/class_predictions': a float32 tensor of shape
# [1, num_anchors, num_classes] containing the class scores for each anchor
# after applying score conversion.
tf.identity(class_predictions, name='class_predictions')
# 'anchors': a float32 tensor of shape
    # [num_anchors, 4] containing the anchors as a constant node.
tf.identity(
get_const_center_size_encoded_anchors(predicted_tensors['anchors']),
name='anchors')
# Add global step to the graph, so we know the training step number when we
# evaluate the model.
tf.train.get_or_create_global_step()
# graph rewriter
is_quantized = ('graph_rewriter' in pipeline_config)
if is_quantized:
graph_rewriter_config = pipeline_config['graph_rewriter']
graph_rewriter_fn = graph_rewriter_builder.build(
graph_rewriter_config, is_training=False, is_export=True)
graph_rewriter_fn()
if model_config.ssd.feature_extractor.HasField('fpn'):
exporter.rewrite_nn_resize_op(is_quantized)
# freeze the graph
saver_kwargs = {}
if eval_config.use_moving_averages:
saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
moving_average_checkpoint = tempfile.NamedTemporaryFile()
exporter.replace_variable_values_with_moving_averages(
tf.get_default_graph(), trained_checkpoint_prefix,
moving_average_checkpoint.name)
checkpoint_to_use = moving_average_checkpoint.name
else:
checkpoint_to_use = trained_checkpoint_prefix
saver = tf.train.Saver(**saver_kwargs)
input_saver_def = saver.as_saver_def()
frozen_graph_def = exporter.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_to_use,
output_node_names=','.join([
'raw_outputs/box_encodings', 'raw_outputs/class_predictions',
'anchors'
]),
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
clear_devices=True,
output_graph='',
initializer_nodes='')
# Add new operation to do post processing in a custom op (TF Lite only)
if add_postprocessing_op:
transformed_graph_def = append_postprocessing_op(
frozen_graph_def, max_detections, max_classes_per_detection,
nms_score_threshold, nms_iou_threshold, num_classes, scale_values,
detections_per_class, use_regular_nms)
else:
# Return frozen without adding post-processing custom op
transformed_graph_def = frozen_graph_def
binary_graph = os.path.join(output_dir, binary_graph_name)
with tf.gfile.GFile(binary_graph, 'wb') as f:
f.write(transformed_graph_def.SerializeToString())
txt_graph = os.path.join(output_dir, txt_graph_name)
with tf.gfile.GFile(txt_graph, 'w') as f:
f.write(str(transformed_graph_def))
| 13,791 | 41.04878 | 80 | py |
models | models-master/research/lstm_object_detection/model_builder.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build a DetectionModel from configuration."""
from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch
from lstm_object_detection.models import lstm_ssd_interleaved_mobilenet_v2_feature_extractor
from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor
from object_detection.builders import anchor_generator_builder
from object_detection.builders import box_coder_builder
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import losses_builder
from object_detection.builders import matcher_builder
from object_detection.builders import model_builder
from object_detection.builders import post_processing_builder
from object_detection.builders import region_similarity_calculator_builder as sim_calc
from object_detection.core import target_assigner
model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP.update({
'lstm_ssd_mobilenet_v1':
lstm_ssd_mobilenet_v1_feature_extractor
.LSTMSSDMobileNetV1FeatureExtractor,
'lstm_ssd_interleaved_mobilenet_v2':
lstm_ssd_interleaved_mobilenet_v2_feature_extractor
.LSTMSSDInterleavedMobilenetV2FeatureExtractor,
})
SSD_FEATURE_EXTRACTOR_CLASS_MAP = model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP
def build(model_config, lstm_config, is_training):
"""Builds a DetectionModel based on the model config.
Args:
model_config: A model.proto object containing the config for the desired
DetectionModel.
lstm_config: LstmModel config proto that specifies LSTM train/eval configs.
is_training: True if this model is being built for training purposes.
Returns:
DetectionModel based on the config.
Raises:
ValueError: On invalid meta architecture or model.
"""
return _build_lstm_model(model_config.ssd, lstm_config, is_training)
def _build_lstm_feature_extractor(feature_extractor_config,
is_training,
lstm_config,
reuse_weights=None):
"""Builds a ssd_meta_arch.SSDFeatureExtractor based on config.
Args:
feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto.
is_training: True if this feature extractor is being built for training.
lstm_config: LSTM-SSD specific configs.
reuse_weights: If the feature extractor should reuse weights.
Returns:
ssd_meta_arch.SSDFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
feature_type = feature_extractor_config.type
depth_multiplier = feature_extractor_config.depth_multiplier
min_depth = feature_extractor_config.min_depth
pad_to_multiple = feature_extractor_config.pad_to_multiple
use_explicit_padding = feature_extractor_config.use_explicit_padding
use_depthwise = feature_extractor_config.use_depthwise
conv_hyperparams = hyperparams_builder.build(
feature_extractor_config.conv_hyperparams, is_training)
override_base_feature_extractor_hyperparams = (
feature_extractor_config.override_base_feature_extractor_hyperparams)
if feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type))
feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type]
feature_extractor = feature_extractor_class(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams, reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams)
# Extra configs for LSTM-SSD.
feature_extractor.lstm_state_depth = lstm_config.lstm_state_depth
feature_extractor.flatten_state = lstm_config.flatten_state
feature_extractor.clip_state = lstm_config.clip_state
feature_extractor.scale_state = lstm_config.scale_state
feature_extractor.is_quantized = lstm_config.is_quantized
feature_extractor.low_res = lstm_config.low_res
# Extra configs for interleaved LSTM-SSD.
if 'interleaved' in feature_extractor_config.type:
feature_extractor.pre_bottleneck = lstm_config.pre_bottleneck
feature_extractor.depth_multipliers = lstm_config.depth_multipliers
if is_training:
feature_extractor.interleave_method = lstm_config.train_interleave_method
else:
feature_extractor.interleave_method = lstm_config.eval_interleave_method
return feature_extractor
def _build_lstm_model(ssd_config, lstm_config, is_training):
"""Builds an LSTM detection model based on the model config.
Args:
ssd_config: A ssd.proto object containing the config for the desired
LSTMSSDMetaArch.
lstm_config: LstmModel config proto that specifies LSTM train/eval configs.
is_training: True if this model is being built for training purposes.
Returns:
LSTMSSDMetaArch based on the config.
Raises:
ValueError: If ssd_config.type is not recognized (i.e. not registered in
model_class_map), or if lstm_config.interleave_strategy is not recognized.
ValueError: If unroll_length is not specified in the config file.
"""
feature_extractor = _build_lstm_feature_extractor(
ssd_config.feature_extractor, is_training, lstm_config)
box_coder = box_coder_builder.build(ssd_config.box_coder)
matcher = matcher_builder.build(ssd_config.matcher)
region_similarity_calculator = sim_calc.build(
ssd_config.similarity_calculator)
num_classes = ssd_config.num_classes
ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
ssd_config.box_predictor,
is_training, num_classes)
anchor_generator = anchor_generator_builder.build(ssd_config.anchor_generator)
image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
ssd_config.post_processing)
(classification_loss, localization_loss, classification_weight,
localization_weight, miner, _, _) = losses_builder.build(ssd_config.loss)
normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
encode_background_as_zeros = ssd_config.encode_background_as_zeros
negative_class_weight = ssd_config.negative_class_weight
# Extra configs for lstm unroll length.
unroll_length = None
if 'lstm' in ssd_config.feature_extractor.type:
if is_training:
unroll_length = lstm_config.train_unroll_length
else:
unroll_length = lstm_config.eval_unroll_length
if unroll_length is None:
raise ValueError('No unroll length found in the config file')
target_assigner_instance = target_assigner.TargetAssigner(
region_similarity_calculator,
matcher,
box_coder,
negative_class_weight=negative_class_weight)
lstm_model = lstm_ssd_meta_arch.LSTMSSDMetaArch(
is_training=is_training,
anchor_generator=anchor_generator,
box_predictor=ssd_box_predictor,
box_coder=box_coder,
feature_extractor=feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=score_conversion_fn,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_weight,
localization_loss_weight=localization_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=miner,
unroll_length=unroll_length,
target_assigner_instance=target_assigner_instance)
return lstm_model
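# Hedged usage sketch (added; not part of the original file), mirroring how
# eval.py drives this builder. 'pipeline_config_path' is a placeholder for an
# LSTM-SSD pipeline .config file:
#   from lstm_object_detection.utils import config_util
#   configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
#   detection_model = build(configs['model'], configs['lstm_model'],
#                           is_training=False)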
| 8,460 | 42.839378 | 92 | py |
models | models-master/research/lstm_object_detection/evaluator.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model evaluator.
This file provides a generic evaluation method that can be used to evaluate a
DetectionModel.
"""
import tensorflow.compat.v1 as tf
from tensorflow.contrib import tfprof as contrib_tfprof
from lstm_object_detection.metrics import coco_evaluation_all_frames
from object_detection import eval_util
from object_detection.core import prefetcher
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import object_detection_evaluation
# A dictionary of metric names to classes that implement the metric. The classes
# in the dictionary must implement
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
'pascal_voc_detection_metrics':
object_detection_evaluation.PascalDetectionEvaluator,
'weighted_pascal_voc_detection_metrics':
object_detection_evaluation.WeightedPascalDetectionEvaluator,
'pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.PascalInstanceSegmentationEvaluator,
'weighted_pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
'open_images_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
'coco_detection_metrics':
coco_evaluation.CocoDetectionEvaluator,
'coco_mask_metrics':
coco_evaluation.CocoMaskEvaluator,
'coco_evaluation_all_frames':
coco_evaluation_all_frames.CocoEvaluationAllFrames,
}
EVAL_DEFAULT_METRIC = 'pascal_voc_detection_metrics'
def _create_detection_op(model, input_dict, batch):
"""Create detection ops.
Args:
model: model to perform predictions with.
input_dict: A dict holds input data.
batch: batch size for evaluation.
Returns:
Detection tensor ops.
"""
video_tensor = tf.stack(list(input_dict[fields.InputDataFields.image]))
preprocessed_video, true_image_shapes = model.preprocess(
tf.to_float(video_tensor))
if batch is not None:
prediction_dict = model.predict(preprocessed_video, true_image_shapes,
batch)
else:
prediction_dict = model.predict(preprocessed_video, true_image_shapes)
return model.postprocess(prediction_dict, true_image_shapes)
def _extract_prediction_tensors(model,
create_input_dict_fn,
ignore_groundtruth=False):
"""Restores the model in a tensorflow session.
Args:
model: model to perform predictions with.
create_input_dict_fn: function to create input tensor dictionaries.
ignore_groundtruth: whether groundtruth should be ignored.
Returns:
tensor_dict: A tensor dictionary with evaluations.
"""
input_dict = create_input_dict_fn()
batch = None
if 'batch' in input_dict:
batch = input_dict.pop('batch')
else:
prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)
input_dict = prefetch_queue.dequeue()
# consistent format for images and videos
    for key, value in input_dict.items():
input_dict[key] = (value,)
detections = _create_detection_op(model, input_dict, batch)
  # Print out analysis of the model.
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer
.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
num_frames = len(input_dict[fields.InputDataFields.image])
ret = []
for i in range(num_frames):
original_image = tf.expand_dims(input_dict[fields.InputDataFields.image][i],
0)
groundtruth = None
if not ignore_groundtruth:
groundtruth = {
fields.InputDataFields.groundtruth_boxes:
input_dict[fields.InputDataFields.groundtruth_boxes][i],
fields.InputDataFields.groundtruth_classes:
input_dict[fields.InputDataFields.groundtruth_classes][i],
}
optional_keys = (
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_group_of,
)
for opt_key in optional_keys:
if opt_key in input_dict:
groundtruth[opt_key] = input_dict[opt_key][i]
if fields.DetectionResultFields.detection_masks in detections:
groundtruth[fields.InputDataFields.groundtruth_instance_masks] = (
input_dict[fields.InputDataFields.groundtruth_instance_masks][i])
detections_frame = {
key: tf.expand_dims(value[i], 0)
        for key, value in detections.items()
}
source_id = (
batch.key[0] if batch is not None else
input_dict[fields.InputDataFields.source_id][i])
ret.append(
eval_util.result_dict_for_single_example(
original_image,
source_id,
detections_frame,
groundtruth,
class_agnostic=(fields.DetectionResultFields.detection_classes
not in detections),
scale_to_absolute=True))
return ret
def get_evaluators(eval_config, categories):
"""Returns the evaluator class according to eval_config, valid for categories.
Args:
eval_config: evaluation configurations.
categories: a list of categories to evaluate.
Returns:
    A list of instances of DetectionEvaluator.
Raises:
ValueError: if metric is not in the metric class dictionary.
"""
eval_metric_fn_keys = eval_config.metrics_set
if not eval_metric_fn_keys:
eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
evaluators_list = []
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
else:
evaluators_list.append(
EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories))
return evaluators_list
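# Hedged example (added for illustration; not part of the original file): with
# an empty eval_config.metrics_set this falls back to a single
# PascalDetectionEvaluator, while a config listing 'coco_evaluation_all_frames'
# yields [CocoEvaluationAllFrames(categories=categories)]. A minimal call,
# where the categories list is a made-up placeholder:
#   categories = [{'id': 1, 'name': 'person'}, {'id': 2, 'name': 'car'}]
#   evaluators = get_evaluators(eval_config, categories)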
def evaluate(create_input_dict_fn,
create_model_fn,
eval_config,
categories,
checkpoint_dir,
eval_dir,
graph_hook_fn=None):
"""Evaluation function for detection models.
Args:
create_input_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel.
eval_config: a eval_pb2.EvalConfig protobuf.
categories: a list of category dictionaries. Each dict in the list should
have an integer 'id' field and string 'name' field.
checkpoint_dir: directory to load the checkpoints to evaluate from.
eval_dir: directory to write evaluation metrics summary to.
graph_hook_fn: Optional function that is called after the training graph is
completely built. This is helpful to perform additional changes to the
training graph such as optimizing batchnorm. The function should modify
the default graph.
Returns:
metrics: A dictionary containing metric names and values from the latest
run.
"""
model = create_model_fn()
if eval_config.ignore_groundtruth and not eval_config.export_path:
tf.logging.fatal('If ignore_groundtruth=True then an export_path is '
'required. Aborting!!!')
tensor_dicts = _extract_prediction_tensors(
model=model,
create_input_dict_fn=create_input_dict_fn,
ignore_groundtruth=eval_config.ignore_groundtruth)
def _process_batch(tensor_dicts,
sess,
batch_index,
counters,
losses_dict=None):
"""Evaluates tensors in tensor_dicts, visualizing the first K examples.
This function calls sess.run on tensor_dicts, evaluating the original_image
tensor only on the first K examples and visualizing detections overlaid
on this original_image.
Args:
tensor_dicts: a dictionary of tensors
sess: tensorflow session
batch_index: the index of the batch amongst all batches in the run.
counters: a dictionary holding 'success' and 'skipped' fields which can
be updated to keep track of number of successful and failed runs,
respectively. If these fields are not updated, then the success/skipped
counter values shown at the end of evaluation will be incorrect.
      losses_dict: Optional dictionary of scalar loss tensors. Necessary only
        for matching function signature in third_party eval_util.py.
Returns:
result_dict: a dictionary of numpy arrays
result_losses_dict: a dictionary of scalar losses. This is empty if input
        losses_dict is None. Necessary only for matching function signature in
third_party eval_util.py.
"""
if batch_index % 10 == 0:
tf.logging.info('Running eval ops batch %d', batch_index)
if not losses_dict:
losses_dict = {}
try:
result_dicts, result_losses_dict = sess.run([tensor_dicts, losses_dict])
counters['success'] += 1
except tf.errors.InvalidArgumentError:
tf.logging.info('Skipping image')
counters['skipped'] += 1
return {}
num_images = len(tensor_dicts)
for i in range(num_images):
result_dict = result_dicts[i]
global_step = tf.train.global_step(sess, tf.train.get_global_step())
tag = 'image-%d' % (batch_index * num_images + i)
if batch_index < eval_config.num_visualizations / num_images:
eval_util.visualize_detection_results(
result_dict,
tag,
global_step,
categories=categories,
summary_dir=eval_dir,
export_dir=eval_config.visualization_export_dir,
show_groundtruth=eval_config.visualize_groundtruth_boxes,
groundtruth_box_visualization_color=eval_config.
groundtruth_box_visualization_color,
min_score_thresh=eval_config.min_score_threshold,
max_num_predictions=eval_config.max_num_boxes_to_visualize,
skip_scores=eval_config.skip_scores,
skip_labels=eval_config.skip_labels,
keep_image_id_for_visualization_export=eval_config.
keep_image_id_for_visualization_export)
if num_images > 1:
return result_dicts, result_losses_dict
else:
return result_dicts[0], result_losses_dict
variables_to_restore = tf.global_variables()
global_step = tf.train.get_or_create_global_step()
variables_to_restore.append(global_step)
if graph_hook_fn:
graph_hook_fn()
if eval_config.use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
variables_to_restore = variable_averages.variables_to_restore()
for key in variables_to_restore.keys():
if 'moving_mean' in key:
variables_to_restore[key.replace(
'moving_mean', 'moving_mean/ExponentialMovingAverage')] = (
variables_to_restore[key])
del variables_to_restore[key]
if 'moving_variance' in key:
variables_to_restore[key.replace(
'moving_variance', 'moving_variance/ExponentialMovingAverage')] = (
variables_to_restore[key])
del variables_to_restore[key]
saver = tf.train.Saver(variables_to_restore)
def _restore_latest_checkpoint(sess):
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
saver.restore(sess, latest_checkpoint)
metrics = eval_util.repeated_checkpoint_run(
tensor_dict=tensor_dicts,
summary_dir=eval_dir,
evaluators=get_evaluators(eval_config, categories),
batch_processor=_process_batch,
checkpoint_dirs=[checkpoint_dir],
variables_to_restore=None,
restore_fn=_restore_latest_checkpoint,
num_batches=eval_config.num_examples,
eval_interval_secs=eval_config.eval_interval_secs,
max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else
eval_config.max_evals
if eval_config.max_evals else None),
master=eval_config.eval_master,
save_graph=eval_config.save_graph,
save_graph_dir=(eval_dir if eval_config.save_graph else ''))
return metrics
| 13,112 | 37.795858 | 80 | py |
models | models-master/research/lstm_object_detection/export_tflite_lstd_model.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export a LSTD model in tflite format."""
import os
from absl import flags
import tensorflow.compat.v1 as tf
from lstm_object_detection.utils import config_util
flags.DEFINE_string('export_path', None, 'Path to export model.')
flags.DEFINE_string('frozen_graph_path', None, 'Path to frozen graph.')
flags.DEFINE_string(
'pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config file.')
FLAGS = flags.FLAGS
def main(_):
flags.mark_flag_as_required('export_path')
flags.mark_flag_as_required('frozen_graph_path')
flags.mark_flag_as_required('pipeline_config_path')
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
lstm_config = configs['lstm_model']
input_arrays = ['input_video_tensor']
output_arrays = [
'TFLite_Detection_PostProcess',
'TFLite_Detection_PostProcess:1',
'TFLite_Detection_PostProcess:2',
'TFLite_Detection_PostProcess:3',
]
input_shapes = {
'input_video_tensor': [lstm_config.eval_unroll_length, 320, 320, 3],
}
converter = tf.lite.TFLiteConverter.from_frozen_graph(
FLAGS.frozen_graph_path,
input_arrays,
output_arrays,
input_shapes=input_shapes)
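  # TFLite_Detection_PostProcess is a custom TFLite op, so custom ops must be
  # allowed for the conversion to succeed.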
converter.allow_custom_ops = True
tflite_model = converter.convert()
ofilename = os.path.join(FLAGS.export_path)
  with open(ofilename, 'wb') as f:
    f.write(tflite_model)
if __name__ == '__main__':
tf.app.run()
| 2,120 | 31.136364 | 80 | py |
models | models-master/research/lstm_object_detection/eval.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Evaluation executable for detection models.
This executable is used to evaluate DetectionModels. Example usage:
./eval \
--logtostderr \
--checkpoint_dir=path/to/checkpoint_dir \
--eval_dir=path/to/eval_dir \
--pipeline_config_path=pipeline_config.pbtxt
"""
import functools
import os
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from lstm_object_detection import evaluator
from lstm_object_detection import model_builder
from lstm_object_detection.inputs import seq_dataset_builder
from lstm_object_detection.utils import config_util
from object_detection.utils import label_map_util
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_boolean('eval_training_data', False,
'If training data should be evaluated for this job.')
flags.DEFINE_string('checkpoint_dir', '',
'Directory containing checkpoints to evaluate, typically '
'set to `train_dir` used in the training job.')
flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.')
flags.DEFINE_string('pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_boolean('run_once', False, 'Option to only run a single pass of '
'evaluation. Overrides the `max_evals` parameter in the '
'provided config.')
FLAGS = flags.FLAGS
def main(unused_argv):
assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
assert FLAGS.eval_dir, '`eval_dir` is missing.'
if FLAGS.pipeline_config_path:
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
else:
configs = config_util.get_configs_from_multiple_files(
model_config_path=FLAGS.model_config_path,
eval_config_path=FLAGS.eval_config_path,
eval_input_config_path=FLAGS.input_config_path)
pipeline_proto = config_util.create_pipeline_proto_from_configs(configs)
config_text = text_format.MessageToString(pipeline_proto)
tf.gfile.MakeDirs(FLAGS.eval_dir)
with tf.gfile.Open(os.path.join(FLAGS.eval_dir, 'pipeline.config'),
'wb') as f:
f.write(config_text)
model_config = configs['model']
lstm_config = configs['lstm_model']
eval_config = configs['eval_config']
input_config = configs['eval_input_config']
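  # When evaluating on training data, reuse the training input reader and the
  # training unroll length so the eval graph matches the training setup.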
if FLAGS.eval_training_data:
input_config.external_input_reader.CopyFrom(
configs['train_input_config'].external_input_reader)
lstm_config.eval_unroll_length = lstm_config.train_unroll_length
model_fn = functools.partial(
model_builder.build,
model_config=model_config,
lstm_config=lstm_config,
is_training=False)
def get_next(config, model_config, lstm_config, unroll_length):
return seq_dataset_builder.build(config, model_config, lstm_config,
unroll_length)
create_input_dict_fn = functools.partial(get_next, input_config, model_config,
lstm_config,
lstm_config.eval_unroll_length)
label_map = label_map_util.load_labelmap(input_config.label_map_path)
max_num_classes = max([item.id for item in label_map.item])
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes)
if FLAGS.run_once:
eval_config.max_evals = 1
evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
FLAGS.checkpoint_dir, FLAGS.eval_dir)
if __name__ == '__main__':
tf.app.run()
| 4,387 | 39.256881 | 80 | py |
models | models-master/research/lstm_object_detection/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/lstm_object_detection/train.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training executable for detection models.
This executable is used to train DetectionModels. There are two ways of
configuring the training job:
1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file
can be specified by --pipeline_config_path.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files can be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being trained, an
input_reader_pb2.InputReader file to specify what training data will be used and
a train_pb2.TrainConfig file to configure training parameters.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--model_config_path=model_config.pbtxt \
--train_config_path=train_config.pbtxt \
--input_config_path=train_input_config.pbtxt
"""
import functools
import json
import os
from absl import flags
import tensorflow.compat.v1 as tf
from lstm_object_detection import model_builder
from lstm_object_detection import trainer
from lstm_object_detection.inputs import seq_dataset_builder
from lstm_object_detection.utils import config_util
from object_detection.builders import preprocessor_builder
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_integer('task', 0, 'task id')
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.')
flags.DEFINE_boolean(
'clone_on_cpu', False,
'Force clones to be deployed on CPU. Note that even if '
'set to False (allowing ops to run on gpu), some ops may '
'still be run on the CPU if they have no GPU kernel.')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer '
'replicas.')
flags.DEFINE_integer(
'ps_tasks', 0, 'Number of parameter server tasks. If None, does not use '
'a parameter server.')
flags.DEFINE_string(
'train_dir', '',
'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string(
'pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
FLAGS = flags.FLAGS
def main(_):
assert FLAGS.train_dir, '`train_dir` is missing.'
if FLAGS.task == 0:
tf.gfile.MakeDirs(FLAGS.train_dir)
if FLAGS.pipeline_config_path:
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
if FLAGS.task == 0:
tf.gfile.Copy(
FLAGS.pipeline_config_path,
os.path.join(FLAGS.train_dir, 'pipeline.config'),
overwrite=True)
else:
configs = config_util.get_configs_from_multiple_files(
model_config_path=FLAGS.model_config_path,
train_config_path=FLAGS.train_config_path,
train_input_config_path=FLAGS.input_config_path)
if FLAGS.task == 0:
for name, config in [('model.config', FLAGS.model_config_path),
('train.config', FLAGS.train_config_path),
('input.config', FLAGS.input_config_path)]:
tf.gfile.Copy(
config, os.path.join(FLAGS.train_dir, name), overwrite=True)
model_config = configs['model']
lstm_config = configs['lstm_model']
train_config = configs['train_config']
input_config = configs['train_input_config']
model_fn = functools.partial(
model_builder.build,
model_config=model_config,
lstm_config=lstm_config,
is_training=True)
def get_next(config, model_config, lstm_config, unroll_length):
data_augmentation_options = [
preprocessor_builder.build(step)
for step in train_config.data_augmentation_options
]
return seq_dataset_builder.build(
config,
model_config,
lstm_config,
unroll_length,
data_augmentation_options,
batch_size=train_config.batch_size)
create_input_dict_fn = functools.partial(get_next, input_config, model_config,
lstm_config,
lstm_config.train_unroll_length)
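  # Cluster topology is read from the TF_CONFIG environment variable; when it
  # is unset, the defaults below fall back to single-machine training.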
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task_info = type('TaskSpec', (object,), task_data)
# Parameters for a single worker.
ps_tasks = 0
worker_replicas = 1
worker_job_name = 'lonely_worker'
task = 0
is_chief = True
master = ''
if cluster_data and 'worker' in cluster_data:
    # The total number of worker replicas includes the "worker" tasks and
    # the "master".
worker_replicas = len(cluster_data['worker']) + 1
if cluster_data and 'ps' in cluster_data:
ps_tasks = len(cluster_data['ps'])
if worker_replicas > 1 and ps_tasks < 1:
raise ValueError('At least 1 ps task is needed for distributed training.')
if worker_replicas >= 1 and ps_tasks > 0:
# Set up distributed training.
server = tf.train.Server(
tf.train.ClusterSpec(cluster),
protocol='grpc',
job_name=task_info.type,
task_index=task_info.index)
if task_info.type == 'ps':
server.join()
return
worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)
task = task_info.index
is_chief = (task_info.type == 'master')
master = server.target
trainer.train(create_input_dict_fn, model_fn, train_config, master, task,
FLAGS.num_clones, worker_replicas, FLAGS.clone_on_cpu, ps_tasks,
worker_job_name, is_chief, FLAGS.train_dir)
if __name__ == '__main__':
tf.app.run()
| 6,792 | 35.521505 | 80 | py |
models | models-master/research/lstm_object_detection/trainer.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model trainer.
This file provides a generic training method that can be used to train a
DetectionModel.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import ops as util_ops
from object_detection.utils import variables_helper
from deployment import model_deploy
def create_input_queue(create_tensor_dict_fn):
"""Sets up reader, prefetcher and returns input queue.
Args:
create_tensor_dict_fn: function to create tensor dictionary.
Returns:
    all_dict: A dictionary holding tensors for images, boxes, and targets.
"""
tensor_dict = create_tensor_dict_fn()
all_dict = {}
num_images = len(tensor_dict[fields.InputDataFields.image])
all_dict['batch'] = tensor_dict['batch']
del tensor_dict['batch']
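  # Flatten per-frame tensors into a single dictionary keyed by tensor name
  # plus frame index (e.g. 'image0', 'image1', ...) for downstream unpacking.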
for i in range(num_images):
suffix = str(i)
for key, val in tensor_dict.items():
all_dict[key + suffix] = val[i]
all_dict[fields.InputDataFields.image + suffix] = tf.to_float(
tf.expand_dims(all_dict[fields.InputDataFields.image + suffix], 0))
return all_dict
def get_inputs(input_queue, num_classes, merge_multiple_label_boxes=False):
"""Dequeues batch and constructs inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
merge_multiple_label_boxes: Whether to merge boxes with multiple labels
or not. Defaults to false. Merged boxes are represented with a single
box and a k-hot encoding of the multiple labels associated with the
boxes.
Returns:
images: a list of 3-D float tensor of images.
image_keys: a list of string keys for the images.
locations: a list of tensors of shape [num_boxes, 4] containing the corners
of the groundtruth boxes.
classes: a list of padded one-hot tensors containing target classes.
masks: a list of 3-D float tensors of shape [num_boxes, image_height,
image_width] containing instance masks for objects if present in the
input_queue. Else returns None.
keypoints: a list of 3-D float tensors of shape [num_boxes, num_keypoints,
2] containing keypoints for objects if present in the
input queue. Else returns None.
"""
read_data_list = input_queue
label_id_offset = 1
def extract_images_and_targets(read_data):
"""Extract images and targets from the input dict."""
suffix = 0
images = []
keys = []
locations = []
classes = []
masks = []
keypoints = []
while fields.InputDataFields.image + str(suffix) in read_data:
image = read_data[fields.InputDataFields.image + str(suffix)]
key = ''
if fields.InputDataFields.source_id in read_data:
key = read_data[fields.InputDataFields.source_id + str(suffix)]
location_gt = (
read_data[fields.InputDataFields.groundtruth_boxes + str(suffix)])
classes_gt = tf.cast(
read_data[fields.InputDataFields.groundtruth_classes + str(suffix)],
tf.int32)
classes_gt -= label_id_offset
masks_gt = read_data.get(
fields.InputDataFields.groundtruth_instance_masks + str(suffix))
keypoints_gt = read_data.get(
fields.InputDataFields.groundtruth_keypoints + str(suffix))
if merge_multiple_label_boxes:
location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
location_gt, classes_gt, num_classes)
else:
classes_gt = util_ops.padded_one_hot_encoding(
indices=classes_gt, depth=num_classes, left_pad=0)
      # Batch read input data and groundtruth. Images, locations, and classes
      # should by default have the same number of items.
images.append(image)
keys.append(key)
locations.append(location_gt)
classes.append(classes_gt)
masks.append(masks_gt)
keypoints.append(keypoints_gt)
suffix += 1
return (images, keys, locations, classes, masks, keypoints)
return extract_images_and_targets(read_data_list)
def _create_losses(input_queue, create_model_fn, train_config):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
train_config: a train_pb2.TrainConfig protobuf.
"""
detection_model = create_model_fn()
(images, _, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list, groundtruth_keypoints_list) = get_inputs(
input_queue, detection_model.num_classes,
train_config.merge_multiple_label_boxes)
preprocessed_images = []
true_image_shapes = []
for image in images:
resized_image, true_image_shape = detection_model.preprocess(image)
preprocessed_images.append(resized_image)
true_image_shapes.append(true_image_shape)
images = tf.concat(preprocessed_images, 0)
true_image_shapes = tf.concat(true_image_shapes, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
if any(keypoints is None for keypoints in groundtruth_keypoints_list):
groundtruth_keypoints_list = None
detection_model.provide_groundtruth(
groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list,
groundtruth_keypoints_list)
prediction_dict = detection_model.predict(images, true_image_shapes,
input_queue['batch'])
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
def get_restore_checkpoint_ops(restore_checkpoints, detection_model,
train_config):
"""Restore checkpoint from saved checkpoints.
Args:
    restore_checkpoints: a list of checkpoint paths to restore from.
detection_model: Object detection model built from config file.
train_config: a train_pb2.TrainConfig protobuf.
Returns:
restorers: A list ops to init the model from checkpoints.
"""
restorers = []
vars_restored = []
for restore_checkpoint in restore_checkpoints:
var_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type)
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
var_map, restore_checkpoint))
    for var_name, var in list(six.iteritems(available_var_map)):
if var in vars_restored:
tf.logging.info('Variable %s contained in multiple checkpoints',
var.op.name)
del available_var_map[var_name]
else:
vars_restored.append(var)
# Initialize from ExponentialMovingAverages if possible.
available_ema_var_map = {}
ckpt_reader = tf.train.NewCheckpointReader(restore_checkpoint)
ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map()
for var_name, var in six.iteritems(available_var_map):
var_name_ema = var_name + '/ExponentialMovingAverage'
if var_name_ema in ckpt_vars_to_shape_map:
available_ema_var_map[var_name_ema] = var
else:
available_ema_var_map[var_name] = var
available_var_map = available_ema_var_map
init_saver = tf.train.Saver(available_var_map)
if list(available_var_map.keys()):
restorers.append(init_saver)
else:
tf.logging.info('WARNING: Checkpoint %s has no restorable variables',
restore_checkpoint)
return restorers
def train(create_tensor_dict_fn,
create_model_fn,
train_config,
master,
task,
num_clones,
worker_replicas,
clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
train_dir,
graph_hook_fn=None):
"""Training function for detection models.
Args:
create_tensor_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel and generates
losses.
train_config: a train_pb2.TrainConfig protobuf.
master: BNS name of the TensorFlow master to use.
task: The task id of this training instance.
num_clones: The number of clones to run per machine.
worker_replicas: The number of work replicas to train with.
clone_on_cpu: True if clones should be forced to run on CPU.
ps_tasks: Number of parameter server tasks.
worker_job_name: Name of the worker job.
is_chief: Whether this replica is the chief replica.
train_dir: Directory to write checkpoints and training summaries to.
graph_hook_fn: Optional function that is called after the training graph is
completely built. This is helpful to perform additional changes to the
training graph such as optimizing batchnorm. The function should modify
the default graph.
"""
detection_model = create_model_fn()
with tf.Graph().as_default():
# Build a configuration specifying multi-GPU and multi-replicas.
deploy_config = model_deploy.DeploymentConfig(
num_clones=num_clones,
clone_on_cpu=clone_on_cpu,
replica_id=task,
num_replicas=worker_replicas,
num_ps_tasks=ps_tasks,
worker_job_name=worker_job_name)
# Place the global step on the device storing the variables.
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
with tf.device(deploy_config.inputs_device()):
input_queue = create_input_queue(create_tensor_dict_fn)
# Gather initial summaries.
# TODO(rathodv): See if summaries can be added/extracted from global tf
# collections so that they don't have to be passed around.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
global_summaries = set([])
model_fn = functools.partial(
_create_losses,
create_model_fn=create_model_fn,
train_config=train_config)
clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])
first_clone_scope = clones[0].scope
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
with tf.device(deploy_config.optimizer_device()):
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var)
sync_optimizer = None
if train_config.sync_replicas:
training_optimizer = tf.train.SyncReplicasOptimizer(
training_optimizer,
replicas_to_aggregate=train_config.replicas_to_aggregate,
total_num_replicas=train_config.worker_replicas)
sync_optimizer = training_optimizer
# Create ops required to initialize the model from a given checkpoint.
init_fn = None
if train_config.fine_tune_checkpoint:
restore_checkpoints = [
path.strip() for path in train_config.fine_tune_checkpoint.split(',')
]
restorers = get_restore_checkpoint_ops(restore_checkpoints,
detection_model, train_config)
def initializer_fn(sess):
for i, restorer in enumerate(restorers):
restorer.restore(sess, restore_checkpoints[i])
init_fn = initializer_fn
with tf.device(deploy_config.optimizer_device()):
regularization_losses = (
None if train_config.add_regularization_loss else [])
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones,
training_optimizer,
regularization_losses=regularization_losses)
total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.')
# Optionally multiply bias gradients by train_config.bias_grad_multiplier.
if train_config.bias_grad_multiplier:
biases_regex_list = ['.*/biases']
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars,
biases_regex_list,
multiplier=train_config.bias_grad_multiplier)
# Optionally clip gradients
if train_config.gradient_clipping_by_norm > 0:
with tf.name_scope('clip_grads'):
grads_and_vars = slim.learning.clip_gradient_norms(
grads_and_vars, train_config.gradient_clipping_by_norm)
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
update_ops.append(variable_averages.apply(moving_average_variables))
# Create gradient updates.
grad_updates = training_optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops, name='update_barrier')
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
if graph_hook_fn:
with tf.device(deploy_config.variables_device()):
graph_hook_fn()
# Add summaries.
for model_var in slim.get_model_variables():
global_summaries.add(tf.summary.histogram(model_var.op.name, model_var))
for loss_tensor in tf.losses.get_losses():
global_summaries.add(tf.summary.scalar(loss_tensor.op.name, loss_tensor))
global_summaries.add(
tf.summary.scalar('TotalLoss', tf.losses.get_total_loss()))
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(
tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, 'critic_loss'))
summaries |= global_summaries
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
    # Soft placement allows ops without a GPU implementation to run on CPU.
session_config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
# Save checkpoints regularly.
keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours
saver = tf.train.Saver(
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
slim.learning.train(
train_tensor,
logdir=train_dir,
master=master,
is_chief=is_chief,
session_config=session_config,
startup_delay_steps=train_config.startup_delay_steps,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=(train_config.num_steps
if train_config.num_steps else None),
save_summaries_secs=120,
sync_optimizer=sync_optimizer,
saver=saver)
| 15,764 | 36.987952 | 80 | py |
models | models-master/research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_ssd_interleaved_mobilenet_v2_feature_extractor."""
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import training as contrib_training
from lstm_object_detection.models import lstm_ssd_interleaved_mobilenet_v2_feature_extractor
from object_detection.models import ssd_feature_extractor_test
class LSTMSSDInterleavedMobilenetV2FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
is_quantized=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_quantized: whether to quantize the graph.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
def conv_hyperparams_fn():
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm), \
slim.arg_scope([slim.batch_norm], is_training=False) as sc:
return sc
feature_extractor = (
lstm_ssd_interleaved_mobilenet_v2_feature_extractor
.LSTMSSDInterleavedMobilenetV2FeatureExtractor(False, depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn))
feature_extractor.lstm_state_depth = int(320 * depth_multiplier)
feature_extractor.depth_multipliers = [
depth_multiplier, depth_multiplier / 4.0
]
feature_extractor.is_quantized = is_quantized
return feature_extractor
def test_feature_extractor_construct_with_expected_params(self):
def conv_hyperparams_fn():
      with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm), \
           slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3) as sc:
return sc
params = {
'is_training': True,
'depth_multiplier': .55,
'min_depth': 9,
'pad_to_multiple': 3,
'conv_hyperparams_fn': conv_hyperparams_fn,
'reuse_weights': False,
'use_explicit_padding': True,
'use_depthwise': False,
'override_base_feature_extractor_hyperparams': True}
feature_extractor = (
lstm_ssd_interleaved_mobilenet_v2_feature_extractor
.LSTMSSDInterleavedMobilenetV2FeatureExtractor(**params))
self.assertEqual(params['is_training'],
feature_extractor._is_training)
self.assertEqual(params['depth_multiplier'],
feature_extractor._depth_multiplier)
self.assertEqual(params['min_depth'],
feature_extractor._min_depth)
self.assertEqual(params['pad_to_multiple'],
feature_extractor._pad_to_multiple)
self.assertEqual(params['conv_hyperparams_fn'],
feature_extractor._conv_hyperparams_fn)
self.assertEqual(params['reuse_weights'],
feature_extractor._reuse_weights)
self.assertEqual(params['use_explicit_padding'],
feature_extractor._use_explicit_padding)
self.assertEqual(params['use_depthwise'],
feature_extractor._use_depthwise)
self.assertEqual(params['override_base_feature_extractor_hyperparams'],
(feature_extractor.
_override_base_feature_extractor_hyperparams))
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 4, 4, 640),
(2, 2, 2, 256), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_unroll10(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(10, 4, 4, 640),
(10, 2, 2, 256), (10, 1, 1, 256),
(10, 1, 1, 256), (10, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
10, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, unroll_length=10)
def test_extract_features_returns_correct_shapes_320(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 10, 10, 640),
(2, 5, 5, 256), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):
image_height = 320
image_width = 320
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 10, 10, 64),
(2, 5, 5, 32), (2, 3, 3, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 10, 10, 640),
(2, 5, 5, 256), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_names = ['MobilenetV2', 'LSTM', 'FeatureMap']
self.check_feature_extractor_variables_under_scopes(
depth_multiplier, pad_to_multiple, scope_names)
def test_has_fused_batchnorm(self):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 32
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image, unroll_length=1)
self.assertTrue(any(op.type.startswith('FusedBatchNorm')
for op in tf.get_default_graph().get_operations()))
def test_variables_for_tflite(self):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 32
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
tflite_unsupported = ['SquaredDifference']
_ = feature_extractor.extract_features(preprocessed_image, unroll_length=1)
self.assertFalse(any(op.type in tflite_unsupported
for op in tf.get_default_graph().get_operations()))
def test_output_nodes_for_tflite(self):
image_height = 64
image_width = 64
depth_multiplier = 1.0
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image, unroll_length=1)
tflite_nodes = [
'raw_inputs/init_lstm_c',
'raw_inputs/init_lstm_h',
'raw_inputs/base_endpoint',
'raw_outputs/lstm_c',
'raw_outputs/lstm_h',
'raw_outputs/base_endpoint_1',
'raw_outputs/base_endpoint_2'
]
ops_names = [op.name for op in tf.get_default_graph().get_operations()]
for node in tflite_nodes:
self.assertTrue(any(node in s for s in ops_names))
def test_fixed_concat_nodes(self):
image_height = 64
image_width = 64
depth_multiplier = 1.0
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, is_quantized=True)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image, unroll_length=1)
concat_nodes = [
'MobilenetV2_1/expanded_conv_16/project/Relu6',
'MobilenetV2_2/expanded_conv_16/project/Relu6'
]
ops_names = [op.name for op in tf.get_default_graph().get_operations()]
for node in concat_nodes:
self.assertTrue(any(node in s for s in ops_names))
def test_lstm_states(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
state_channel = 320
init_state1 = {
'lstm_state_c': tf.zeros(
[image_height // 32, image_width // 32, state_channel]),
'lstm_state_h': tf.zeros(
[image_height // 32, image_width // 32, state_channel]),
'lstm_state_step': tf.zeros([1])
}
init_state2 = {
'lstm_state_c': tf.random_uniform(
[image_height // 32, image_width // 32, state_channel]),
'lstm_state_h': tf.random_uniform(
[image_height // 32, image_width // 32, state_channel]),
'lstm_state_step': tf.zeros([1])
}
seq = {'dummy': tf.random_uniform([2, 1, 1, 1])}
stateful_reader1 = contrib_training.SequenceQueueingStateSaver(
batch_size=1,
num_unroll=1,
input_length=2,
input_key='',
input_sequences=seq,
input_context={},
initial_states=init_state1,
capacity=1)
stateful_reader2 = contrib_training.SequenceQueueingStateSaver(
batch_size=1,
num_unroll=1,
input_length=2,
input_key='',
input_sequences=seq,
input_context={},
initial_states=init_state2,
capacity=1)
image = tf.random_uniform([1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
with tf.variable_scope('zero_state'):
feature_maps1 = feature_extractor.extract_features(
image, stateful_reader1.next_batch, unroll_length=1)
with tf.variable_scope('random_state'):
feature_maps2 = feature_extractor.extract_features(
image, stateful_reader2.next_batch, unroll_length=1)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS))
sess.run([stateful_reader1.prefetch_op, stateful_reader2.prefetch_op])
maps1, maps2 = sess.run([feature_maps1, feature_maps2])
state = sess.run(stateful_reader1.next_batch.state('lstm_state_c'))
# feature maps should be different because states are different
self.assertFalse(np.all(np.equal(maps1[0], maps2[0])))
# state should no longer be zero after update
self.assertTrue(state.any())
def check_extract_features_returns_correct_shape(
self, batch_size, image_height, image_width, depth_multiplier,
pad_to_multiple, expected_feature_map_shapes, unroll_length=1):
def graph_fn(image_tensor):
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
feature_maps = feature_extractor.extract_features(
image_tensor, unroll_length=unroll_length)
return feature_maps
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor])
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_feature_extractor_variables_under_scopes(
self, depth_multiplier, pad_to_multiple, scope_names):
g = tf.Graph()
with g.as_default():
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple)
preprocessed_inputs = tf.placeholder(tf.float32, (4, 320, 320, 3))
feature_extractor.extract_features(
preprocessed_inputs, unroll_length=1)
variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
for variable in variables:
self.assertTrue(
any([
variable.name.startswith(scope_name)
for scope_name in scope_names
]), 'Variable name: ' + variable.name +
' is not under any provided scopes: ' + ','.join(scope_names))
if __name__ == '__main__':
tf.test.main()
| 15,119 | 41.832861 | 92 | py |
models | models-master/research/lstm_object_detection/models/mobilenet_defs_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_object_detection.models.mobilenet_defs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from lstm_object_detection.models import mobilenet_defs
from nets import mobilenet_v1
from nets.mobilenet import mobilenet_v2
class MobilenetV1DefsTest(tf.test.TestCase):
def test_mobilenet_v1_lite_def(self):
net, _ = mobilenet_v1.mobilenet_v1_base(
tf.placeholder(tf.float32, (10, 320, 320, 3)),
final_endpoint='Conv2d_13_pointwise',
min_depth=8,
depth_multiplier=1.0,
conv_defs=mobilenet_defs.mobilenet_v1_lite_def(1.0),
use_explicit_padding=True,
scope='MobilenetV1')
self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 1024])
def test_mobilenet_v1_lite_def_depthmultiplier_half(self):
net, _ = mobilenet_v1.mobilenet_v1_base(
tf.placeholder(tf.float32, (10, 320, 320, 3)),
final_endpoint='Conv2d_13_pointwise',
min_depth=8,
depth_multiplier=0.5,
conv_defs=mobilenet_defs.mobilenet_v1_lite_def(0.5),
use_explicit_padding=True,
scope='MobilenetV1')
self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 1024])
def test_mobilenet_v1_lite_def_depthmultiplier_2x(self):
net, _ = mobilenet_v1.mobilenet_v1_base(
tf.placeholder(tf.float32, (10, 320, 320, 3)),
final_endpoint='Conv2d_13_pointwise',
min_depth=8,
depth_multiplier=2.0,
conv_defs=mobilenet_defs.mobilenet_v1_lite_def(2.0),
use_explicit_padding=True,
scope='MobilenetV1')
self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 1024])
def test_mobilenet_v1_lite_def_low_res(self):
net, _ = mobilenet_v1.mobilenet_v1_base(
tf.placeholder(tf.float32, (10, 320, 320, 3)),
final_endpoint='Conv2d_13_pointwise',
min_depth=8,
depth_multiplier=1.0,
conv_defs=mobilenet_defs.mobilenet_v1_lite_def(1.0, low_res=True),
use_explicit_padding=True,
scope='MobilenetV1')
self.assertEqual(net.get_shape().as_list(), [10, 20, 20, 1024])
class MobilenetV2DefsTest(tf.test.TestCase):
def test_mobilenet_v2_lite_def(self):
net, features = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 320, 320, 3)),
min_depth=8,
depth_multiplier=1.0,
conv_defs=mobilenet_defs.mobilenet_v2_lite_def(),
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 320])
self._assert_contains_op('MobilenetV2/expanded_conv_16/project/Identity')
self.assertEqual(
features['layer_3/expansion_output'].get_shape().as_list(),
[10, 160, 160, 96])
self.assertEqual(
features['layer_4/expansion_output'].get_shape().as_list(),
[10, 80, 80, 144])
def test_mobilenet_v2_lite_def_is_quantized(self):
net, _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 320, 320, 3)),
min_depth=8,
depth_multiplier=1.0,
conv_defs=mobilenet_defs.mobilenet_v2_lite_def(is_quantized=True),
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 320])
self._assert_contains_op('MobilenetV2/expanded_conv_16/project/Relu6')
def test_mobilenet_v2_lite_def_low_res(self):
net, _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 320, 320, 3)),
min_depth=8,
depth_multiplier=1.0,
conv_defs=mobilenet_defs.mobilenet_v2_lite_def(low_res=True),
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(net.get_shape().as_list(), [10, 20, 20, 320])
def test_mobilenet_v2_lite_def_reduced(self):
net, features = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 320, 320, 3)),
min_depth=8,
depth_multiplier=1.0,
conv_defs=mobilenet_defs.mobilenet_v2_lite_def(reduced=True),
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 320])
self.assertEqual(
features['layer_3/expansion_output'].get_shape().as_list(),
[10, 160, 160, 48])
self.assertEqual(
features['layer_4/expansion_output'].get_shape().as_list(),
[10, 80, 80, 72])
def _assert_contains_op(self, op_name):
op_names = [op.name for op in tf.get_default_graph().get_operations()]
self.assertIn(op_name, op_names)
if __name__ == '__main__':
tf.test.main()
| 5,320 | 37.839416 | 80 | py |
models | models-master/research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTDInterleavedFeatureExtractor which interleaves multiple MobileNet V2."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.python.framework import ops as tf_ops
from lstm_object_detection.lstm import lstm_cells
from lstm_object_detection.lstm import rnn_decoder
from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch
from lstm_object_detection.models import mobilenet_defs
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
class LSTMSSDInterleavedMobilenetV2FeatureExtractor(
lstm_ssd_meta_arch.LSTMSSDInterleavedFeatureExtractor):
"""LSTM-SSD Interleaved Feature Extractor using MobilenetV2 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=True,
override_base_feature_extractor_hyperparams=False):
"""Interleaved Feature Extractor for LSTD Models with MobileNet v2.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is True.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(LSTMSSDInterleavedMobilenetV2FeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
# RANDOM_SKIP_SMALL means the training policy is random and the small model
# does not update state during training.
if self._is_training:
self._interleave_method = 'RANDOM_SKIP_SMALL'
else:
self._interleave_method = 'SKIP9'
self._flatten_state = False
self._scale_state = False
self._clip_state = True
self._pre_bottleneck = True
self._feature_map_layout = {
'from_layer': ['layer_19', '', '', '', ''],
'layer_depth': [-1, 256, 256, 256, 256],
'use_depthwise': self._use_depthwise,
'use_explicit_padding': self._use_explicit_padding,
}
self._low_res = True
self._base_network_scope = 'MobilenetV2'
def extract_base_features_large(self, preprocessed_inputs):
"""Extract the large base model features.
Variables are created under the scope of <scope>/MobilenetV2_1/
Args:
preprocessed_inputs: preprocessed input images of shape:
[batch, width, height, depth].
Returns:
net: the last feature map created from the base feature extractor.
end_points: a dictionary of feature maps created.
"""
scope_name = self._base_network_scope + '_1'
with tf.variable_scope(scope_name, reuse=self._reuse_weights) as base_scope:
net, end_points = mobilenet_v2.mobilenet_base(
preprocessed_inputs,
depth_multiplier=self._depth_multipliers[0],
conv_defs=mobilenet_defs.mobilenet_v2_lite_def(
is_quantized=self._is_quantized),
use_explicit_padding=self._use_explicit_padding,
scope=base_scope)
return net, end_points
def extract_base_features_small(self, preprocessed_inputs):
"""Extract the small base model features.
Variables are created under the scope of <scope>/MobilenetV2_2/
Args:
preprocessed_inputs: preprocessed input images of shape:
[batch, width, height, depth].
Returns:
net: the last feature map created from the base feature extractor.
end_points: a dictionary of feature maps created.
"""
scope_name = self._base_network_scope + '_2'
with tf.variable_scope(scope_name, reuse=self._reuse_weights) as base_scope:
if self._low_res:
height_small = preprocessed_inputs.get_shape().as_list()[1] // 2
width_small = preprocessed_inputs.get_shape().as_list()[2] // 2
inputs_small = tf.image.resize_images(preprocessed_inputs,
[height_small, width_small])
# Create end point handle for tflite deployment.
with tf.name_scope(None):
inputs_small = tf.identity(
inputs_small, name='normalized_input_image_tensor_small')
else:
inputs_small = preprocessed_inputs
net, end_points = mobilenet_v2.mobilenet_base(
inputs_small,
depth_multiplier=self._depth_multipliers[1],
conv_defs=mobilenet_defs.mobilenet_v2_lite_def(
is_quantized=self._is_quantized, low_res=self._low_res),
use_explicit_padding=self._use_explicit_padding,
scope=base_scope)
return net, end_points
def create_lstm_cell(self, batch_size, output_size, state_saver, state_name,
dtype=tf.float32):
"""Create the LSTM cell, and initialize state if necessary.
Args:
batch_size: input batch size.
output_size: output size of the lstm cell, [width, height].
state_saver: a state saver object with methods `state` and `save_state`.
state_name: string, the name to use with the state_saver.
dtype: dtype to initialize lstm state.
Returns:
lstm_cell: the lstm cell unit.
init_state: initial state representations.
      step: the current step from the state saver, or None if no state saver
        is provided.
"""
lstm_cell = lstm_cells.GroupedConvLSTMCell(
filter_size=(3, 3),
output_size=output_size,
num_units=max(self._min_depth, self._lstm_state_depth),
is_training=self._is_training,
activation=tf.nn.relu6,
flatten_state=self._flatten_state,
scale_state=self._scale_state,
clip_state=self._clip_state,
output_bottleneck=True,
pre_bottleneck=self._pre_bottleneck,
is_quantized=self._is_quantized,
visualize_gates=False)
if state_saver is None:
init_state = lstm_cell.init_state('lstm_state', batch_size, dtype)
step = None
else:
step = state_saver.state(state_name + '_step')
c = state_saver.state(state_name + '_c')
h = state_saver.state(state_name + '_h')
c.set_shape([batch_size] + c.get_shape().as_list()[1:])
h.set_shape([batch_size] + h.get_shape().as_list()[1:])
init_state = (c, h)
return lstm_cell, init_state, step
def extract_features(self, preprocessed_inputs, state_saver=None,
state_name='lstm_state', unroll_length=10, scope=None):
"""Extract features from preprocessed inputs.
The features include the base network features, lstm features and SSD
features, organized in the following name scope:
<scope>/MobilenetV2_1/...
<scope>/MobilenetV2_2/...
<scope>/LSTM/...
<scope>/FeatureMap/...
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of consecutive frames from video clips.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: Python string, the name to use with the state_saver.
unroll_length: number of steps to unroll the lstm.
scope: Scope for the base network of the feature extractor.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Raises:
ValueError: if interleave_method not recognized or large and small base
network output feature maps of different sizes.
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
preprocessed_inputs = ops.pad_to_multiple(
preprocessed_inputs, self._pad_to_multiple)
batch_size = preprocessed_inputs.shape[0].value // unroll_length
batch_axis = 0
nets = []
# Batch processing of mobilenet features.
with slim.arg_scope(mobilenet_v2.training_scope(
is_training=self._is_training,
bn_decay=0.9997)), \
slim.arg_scope([mobilenet.depth_multiplier],
min_depth=self._min_depth, divisible_by=8):
# Big model.
net, _ = self.extract_base_features_large(preprocessed_inputs)
nets.append(net)
large_base_feature_shape = net.shape
# Small models
net, _ = self.extract_base_features_small(preprocessed_inputs)
nets.append(net)
small_base_feature_shape = net.shape
if not (large_base_feature_shape[1] == small_base_feature_shape[1] and
large_base_feature_shape[2] == small_base_feature_shape[2]):
raise ValueError('Large and Small base network feature map dimension '
'not equal!')
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope('LSTM', reuse=self._reuse_weights):
output_size = (large_base_feature_shape[1], large_base_feature_shape[2])
lstm_cell, init_state, step = self.create_lstm_cell(
batch_size, output_size, state_saver, state_name,
dtype=preprocessed_inputs.dtype)
nets_seq = [
tf.split(net, unroll_length, axis=batch_axis) for net in nets
]
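        # The decoder interleaves the large and small feature sequences across
        # time steps according to the configured selection strategy.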
net_seq, states_out = rnn_decoder.multi_input_rnn_decoder(
nets_seq,
init_state,
lstm_cell,
step,
selection_strategy=self._interleave_method,
is_training=self._is_training,
is_quantized=self._is_quantized,
pre_bottleneck=self._pre_bottleneck,
flatten_state=self._flatten_state,
scope=None)
self._states_out = states_out
image_features = {}
if state_saver is not None:
self._step = state_saver.state(state_name + '_step')
batcher_ops = [
state_saver.save_state(state_name + '_c', states_out[-1][0]),
state_saver.save_state(state_name + '_h', states_out[-1][1]),
state_saver.save_state(state_name + '_step', self._step + 1)]
with tf_ops.control_dependencies(batcher_ops):
image_features['layer_19'] = tf.concat(net_seq, 0)
else:
image_features['layer_19'] = tf.concat(net_seq, 0)
# SSD layers.
with tf.variable_scope('FeatureMap'):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=self._feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features,
pool_residual=True)
return list(feature_maps.values())
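# A minimal usage sketch (construction mirrors the unit tests; the
# hyperparameter function and sizes below are illustrative, not fixed API):
#
#   def conv_hyperparams_fn():
#     with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm), \
#          slim.arg_scope([slim.batch_norm], is_training=False) as sc:
#       return sc
#
#   feature_extractor = LSTMSSDInterleavedMobilenetV2FeatureExtractor(
#       is_training=False, depth_multiplier=1.0, min_depth=32,
#       pad_to_multiple=1, conv_hyperparams_fn=conv_hyperparams_fn)
#   feature_extractor.lstm_state_depth = 320
#   feature_extractor.depth_multipliers = [1.0, 0.25]
#   feature_extractor.is_quantized = False
#   images = tf.placeholder(tf.float32, [4, 320, 320, 3])
#   feature_maps = feature_extractor.extract_features(
#       feature_extractor.preprocess(images), unroll_length=1)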
| 12,340 | 40.274247 | 80 | py |
models | models-master/research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.lstm_ssd_mobilenet_v1_feature_extractor."""
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import training as contrib_training
from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor as feature_extractor
from object_detection.models import ssd_feature_extractor_test
class LstmSsdMobilenetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier=1.0,
pad_to_multiple=1,
is_training=True,
use_explicit_padding=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: A float depth multiplier for feature extractor.
pad_to_multiple: The nearest multiple to zero pad the input height and
width dimensions to.
is_training: A boolean whether the network is in training mode.
use_explicit_padding: A boolean whether to use explicit padding.
Returns:
An lstm_ssd_meta_arch.LSTMSSDMobileNetV1FeatureExtractor object.
"""
min_depth = 32
extractor = (
feature_extractor.LSTMSSDMobileNetV1FeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
extractor.lstm_state_depth = int(256 * depth_multiplier)
return extractor
def test_feature_extractor_construct_with_expected_params(self):
def conv_hyperparams_fn():
      with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm), \
           slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3) as sc:
return sc
params = {
'is_training': True,
'depth_multiplier': .55,
'min_depth': 9,
'pad_to_multiple': 3,
'conv_hyperparams_fn': conv_hyperparams_fn,
'reuse_weights': False,
'use_explicit_padding': True,
'use_depthwise': False,
'override_base_feature_extractor_hyperparams': True}
extractor = (
feature_extractor.LSTMSSDMobileNetV1FeatureExtractor(**params))
self.assertEqual(params['is_training'],
extractor._is_training)
self.assertEqual(params['depth_multiplier'],
extractor._depth_multiplier)
self.assertEqual(params['min_depth'],
extractor._min_depth)
self.assertEqual(params['pad_to_multiple'],
extractor._pad_to_multiple)
self.assertEqual(params['conv_hyperparams_fn'],
extractor._conv_hyperparams_fn)
self.assertEqual(params['reuse_weights'],
extractor._reuse_weights)
self.assertEqual(params['use_explicit_padding'],
extractor._use_explicit_padding)
self.assertEqual(params['use_depthwise'],
extractor._use_depthwise)
self.assertEqual(params['override_base_feature_extractor_hyperparams'],
(extractor.
_override_base_feature_extractor_hyperparams))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
batch_size = 5
    expected_feature_map_shape = [(batch_size, 8, 8, 256),
                                  (batch_size, 4, 4, 512),
                                  (batch_size, 2, 2, 256),
                                  (batch_size, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True)
def test_preprocess_returns_correct_value_range(self):
test_image = np.random.rand(5, 128, 128, 3)
extractor = self._create_feature_extractor()
preprocessed_image = extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
scope_name = 'MobilenetV1'
g = tf.Graph()
with g.as_default():
preprocessed_inputs = tf.placeholder(tf.float32, (5, 256, 256, 3))
extractor = self._create_feature_extractor()
extractor.extract_features(preprocessed_inputs)
variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
find_scope = False
for variable in variables:
if scope_name in variable.name:
find_scope = True
break
self.assertTrue(find_scope)
def test_lstm_non_zero_state(self):
init_state = {
'lstm_state_c': tf.zeros([8, 8, 256]),
'lstm_state_h': tf.zeros([8, 8, 256]),
'lstm_state_step': tf.zeros([1])
}
seq = {'test': tf.random_uniform([3, 1, 1, 1])}
stateful_reader = contrib_training.SequenceQueueingStateSaver(
batch_size=1,
num_unroll=1,
input_length=2,
input_key='',
input_sequences=seq,
input_context={},
initial_states=init_state,
capacity=1)
extractor = self._create_feature_extractor()
image = tf.random_uniform([5, 256, 256, 3])
with tf.variable_scope('zero_state'):
feature_map = extractor.extract_features(
image, stateful_reader.next_batch)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run([stateful_reader.prefetch_op])
_ = sess.run([feature_map])
# Update states with the next batch.
state = sess.run(stateful_reader.next_batch.state('lstm_state_c'))
# State should no longer be zero after update.
self.assertTrue(state.any())
if __name__ == '__main__':
tf.test.main()
| 6,867 | 37.155556 | 101 | py |
models | models-master/research/lstm_object_detection/models/mobilenet_defs.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definitions for modified MobileNet models used in LSTD."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import mobilenet_v1
from nets.mobilenet import conv_blocks as mobilenet_convs
from nets.mobilenet import mobilenet
def mobilenet_v1_lite_def(depth_multiplier, low_res=False):
"""Conv definitions for a lite MobileNet v1 model.
Args:
depth_multiplier: float depth multiplier for MobileNet.
    low_res: Whether to use a low-resolution conv input for the interleaved
      model.
Returns:
Array of convolutions.
Raises:
ValueError: On invalid channels with provided depth multiplier.
"""
conv = mobilenet_v1.Conv
sep_conv = mobilenet_v1.DepthSepConv
def _find_target_depth(original, depth_multiplier):
# Find the target depth such that:
# int(target * depth_multiplier) == original
pseudo_target = int(original / depth_multiplier)
for target in range(pseudo_target - 1, pseudo_target + 2):
if int(target * depth_multiplier) == original:
return target
raise ValueError('Cannot have %d channels with depth multiplier %0.2f' %
(original, depth_multiplier))
return [
conv(kernel=[3, 3], stride=2, depth=32),
sep_conv(kernel=[3, 3], stride=1, depth=64),
sep_conv(kernel=[3, 3], stride=2, depth=128),
sep_conv(kernel=[3, 3], stride=1, depth=128),
sep_conv(kernel=[3, 3], stride=2, depth=256),
sep_conv(kernel=[3, 3], stride=1, depth=256),
sep_conv(kernel=[3, 3], stride=2, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1 if low_res else 2, depth=1024),
sep_conv(
kernel=[3, 3],
stride=1,
depth=int(_find_target_depth(1024, depth_multiplier)))
]
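# Illustrative note (not from the original file): _find_target_depth inverts
# the depth-multiplier rounding so the final layer still yields exactly 1024
# channels; e.g. with depth_multiplier=0.5 it returns 2048, since
# int(2048 * 0.5) == 1024. A hedged usage sketch, assuming these definitions
# are consumed through the slim builder's `conv_defs` argument (`images` is a
# placeholder input batch):
#
#   conv_defs = mobilenet_v1_lite_def(depth_multiplier=0.5)
#   net, end_points = mobilenet_v1.mobilenet_v1_base(
#       images, depth_multiplier=0.5, conv_defs=conv_defs)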
def mobilenet_v2_lite_def(reduced=False, is_quantized=False, low_res=False):
"""Conv definitions for a lite MobileNet v2 model.
Args:
    reduced: Determines the scaling factor for expanded conv. If True, a factor
      of 3 is used. If False, a factor of 6 is used.
is_quantized: Whether the model is trained in quantized mode.
low_res: Whether the input to the model is of half resolution.
Returns:
Array of convolutions.
"""
expanded_conv = mobilenet_convs.expanded_conv
expand_input = mobilenet_convs.expand_input_by_factor
op = mobilenet.op
return dict(
defaults={
          # Note: these batch norm parameters affect the architecture; that is
          # why they are here and not in training_scope.
(slim.batch_norm,): {
'center': True,
'scale': True
},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm,
'activation_fn': tf.nn.relu6
},
(expanded_conv,): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {
'padding': 'SAME'
}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16),
op(expanded_conv,
expansion_size=(expand_input(3, divisible_by=1)
if reduced else expand_input(6)),
stride=2,
num_outputs=24),
op(expanded_conv,
expansion_size=(expand_input(3, divisible_by=1)
if reduced else expand_input(6)),
stride=1,
num_outputs=24),
op(expanded_conv, stride=2, num_outputs=32),
op(expanded_conv, stride=1, num_outputs=32),
op(expanded_conv, stride=1, num_outputs=32),
op(expanded_conv, stride=2, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1 if low_res else 2, num_outputs=160),
op(expanded_conv, stride=1, num_outputs=160),
op(expanded_conv, stride=1, num_outputs=160),
op(expanded_conv,
stride=1,
num_outputs=320,
project_activation_fn=(tf.nn.relu6
if is_quantized else tf.identity))
],
)
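# Hedged usage sketch (not from the original file): these definitions are
# intended for the slim MobileNet v2 builder in the bundled `nets.mobilenet`
# package; the exact call site may differ (`images` is a placeholder input
# batch).
#
#   conv_defs = mobilenet_v2_lite_def(reduced=True)
#   net, end_points = mobilenet.mobilenet_base(images, conv_defs=conv_defs)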
| 5,568 | 37.944056 | 80 | py |
models | models-master/research/lstm_object_detection/models/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTMSSDFeatureExtractor for MobilenetV1 features."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.python.framework import ops as tf_ops
from lstm_object_detection.lstm import lstm_cells
from lstm_object_detection.lstm import rnn_decoder
from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import mobilenet_v1
class LSTMSSDMobileNetV1FeatureExtractor(
lstm_ssd_meta_arch.LSTMSSDFeatureExtractor):
"""LSTM Feature Extractor using MobilenetV1 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=True,
override_base_feature_extractor_hyperparams=False,
lstm_state_depth=256):
"""Initializes instance of MobileNetV1 Feature Extractor for LSTMSSD Models.
Args:
is_training: A boolean whether the network is in training mode.
depth_multiplier: A float depth multiplier for feature extractor.
min_depth: A number representing minimum feature extractor depth.
pad_to_multiple: The nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is True.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
      lstm_state_depth: An integer of the depth of the lstm state.
"""
super(LSTMSSDMobileNetV1FeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
self._feature_map_layout = {
'from_layer': ['Conv2d_13_pointwise_lstm', '', '', '', ''],
'layer_depth': [-1, 512, 256, 256, 128],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
self._base_network_scope = 'MobilenetV1'
self._lstm_state_depth = lstm_state_depth
def create_lstm_cell(self, batch_size, output_size, state_saver, state_name,
dtype=tf.float32):
"""Create the LSTM cell, and initialize state if necessary.
Args:
batch_size: input batch size.
output_size: output size of the lstm cell, [width, height].
state_saver: a state saver object with methods `state` and `save_state`.
state_name: string, the name to use with the state_saver.
dtype: dtype to initialize lstm state.
Returns:
lstm_cell: the lstm cell unit.
init_state: initial state representations.
      step: the current step tensor from the state saver, or None if no state
        saver is provided.
"""
lstm_cell = lstm_cells.BottleneckConvLSTMCell(
filter_size=(3, 3),
output_size=output_size,
num_units=max(self._min_depth, self._lstm_state_depth),
activation=tf.nn.relu6,
visualize_gates=False)
if state_saver is None:
init_state = lstm_cell.init_state(state_name, batch_size, dtype)
step = None
else:
step = state_saver.state(state_name + '_step')
c = state_saver.state(state_name + '_c')
h = state_saver.state(state_name + '_h')
init_state = (c, h)
return lstm_cell, init_state, step
def extract_features(self,
preprocessed_inputs,
state_saver=None,
state_name='lstm_state',
unroll_length=5,
scope=None):
"""Extracts features from preprocessed inputs.
The features include the base network features, lstm features and SSD
features, organized in the following name scope:
<parent scope>/MobilenetV1/...
<parent scope>/LSTM/...
<parent scope>/FeatureMaps/...
Args:
preprocessed_inputs: A [batch, height, width, channels] float tensor
representing a batch of consecutive frames from video clips.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: A python string for the name to use with the state_saver.
unroll_length: The number of steps to unroll the lstm.
scope: The scope for the base network of the feature extractor.
Returns:
A list of tensors where the ith tensor has shape [batch, height_i,
width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(is_training=self._is_training)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
with slim.arg_scope([slim.batch_norm], fused=False):
# Base network.
with tf.variable_scope(
scope, self._base_network_scope,
reuse=self._reuse_weights) as scope:
net, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
with slim.arg_scope(
[slim.batch_norm], fused=False, is_training=self._is_training):
# ConvLSTM layers.
batch_size = net.shape[0].value // unroll_length
with tf.variable_scope('LSTM', reuse=self._reuse_weights) as lstm_scope:
lstm_cell, init_state, _ = self.create_lstm_cell(
batch_size,
(net.shape[1].value, net.shape[2].value),
state_saver,
state_name,
dtype=preprocessed_inputs.dtype)
net_seq = list(tf.split(net, unroll_length))
          # Identities added for inputting state tensors externally.
c_ident = tf.identity(init_state[0], name='lstm_state_in_c')
h_ident = tf.identity(init_state[1], name='lstm_state_in_h')
init_state = (c_ident, h_ident)
net_seq, states_out = rnn_decoder.rnn_decoder(
net_seq, init_state, lstm_cell, scope=lstm_scope)
batcher_ops = None
self._states_out = states_out
if state_saver is not None:
self._step = state_saver.state('%s_step' % state_name)
batcher_ops = [
state_saver.save_state('%s_c' % state_name, states_out[-1][0]),
state_saver.save_state('%s_h' % state_name, states_out[-1][1]),
state_saver.save_state('%s_step' % state_name, self._step + 1)
]
with tf_ops.control_dependencies(batcher_ops):
image_features['Conv2d_13_pointwise_lstm'] = tf.concat(net_seq, 0)
# Identities added for reading output states, to be reused externally.
tf.identity(states_out[-1][0], name='lstm_state_out_c')
tf.identity(states_out[-1][1], name='lstm_state_out_h')
# SSD layers.
with tf.variable_scope('FeatureMaps', reuse=self._reuse_weights):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=self._feature_map_layout,
depth_multiplier=(self._depth_multiplier),
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
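# Hedged usage sketch (illustrative only; it mirrors the unit test for this
# module rather than a supported entry point). `my_conv_hyperparams_fn` is a
# placeholder for a user-supplied slim arg_scope builder.
#
#   extractor = LSTMSSDMobileNetV1FeatureExtractor(
#       is_training=False, depth_multiplier=1.0, min_depth=32,
#       pad_to_multiple=1, conv_hyperparams_fn=my_conv_hyperparams_fn)
#   images = tf.random_uniform([5, 256, 256, 3])
#   feature_maps = extractor.extract_features(
#       extractor.preprocess(images), unroll_length=5)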
| 9,197 | 42.386792 | 80 | py |
models | models-master/research/lstm_object_detection/meta_architectures/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/lstm_object_detection/meta_architectures/lstm_ssd_meta_arch.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM SSD Meta-architecture definition.
General tensorflow implementation of convolutional Multibox/SSD detection
models with LSTM states, for use on video data. This implementation supports
both the regular LSTM-SSD and the interleaved LSTM-SSD frameworks.
See https://arxiv.org/abs/1711.06368 and https://arxiv.org/abs/1903.10172
for details.
"""
import abc
import re
import tensorflow.compat.v1 as tf
from object_detection.core import box_list_ops
from object_detection.core import matcher
from object_detection.core import standard_fields as fields
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.utils import ops
from object_detection.utils import shape_utils
class LSTMSSDMetaArch(ssd_meta_arch.SSDMetaArch):
"""LSTM Meta-architecture definition."""
def __init__(self,
is_training,
anchor_generator,
box_predictor,
box_coder,
feature_extractor,
encode_background_as_zeros,
image_resizer_fn,
non_max_suppression_fn,
score_conversion_fn,
classification_loss,
localization_loss,
classification_loss_weight,
localization_loss_weight,
normalize_loss_by_num_matches,
hard_example_miner,
unroll_length,
target_assigner_instance,
add_summaries=True):
super(LSTMSSDMetaArch, self).__init__(
is_training=is_training,
anchor_generator=anchor_generator,
box_predictor=box_predictor,
box_coder=box_coder,
feature_extractor=feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=score_conversion_fn,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_loss_weight,
localization_loss_weight=localization_loss_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
target_assigner_instance=target_assigner_instance,
add_summaries=add_summaries)
self._unroll_length = unroll_length
@property
def unroll_length(self):
return self._unroll_length
@unroll_length.setter
def unroll_length(self, unroll_length):
self._unroll_length = unroll_length
def predict(self, preprocessed_inputs, true_image_shapes, states=None,
state_name='lstm_state', feature_scope=None):
with tf.variable_scope(self._extract_features_scope,
values=[preprocessed_inputs], reuse=tf.AUTO_REUSE):
feature_maps = self._feature_extractor.extract_features(
preprocessed_inputs, states, state_name,
unroll_length=self._unroll_length, scope=feature_scope)
feature_map_spatial_dims = self._get_feature_map_spatial_dims(feature_maps)
image_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_inputs)
self._batch_size = preprocessed_inputs.shape[0].value / self._unroll_length
self._states = states
anchors = self._anchor_generator.generate(feature_map_spatial_dims,
im_height=image_shape[1],
im_width=image_shape[2])
with tf.variable_scope('MultipleGridAnchorGenerator', reuse=tf.AUTO_REUSE):
self._anchors = box_list_ops.concatenate(anchors)
prediction_dict = self._box_predictor.predict(
feature_maps, self._anchor_generator.num_anchors_per_location())
with tf.variable_scope('Loss', reuse=tf.AUTO_REUSE):
box_encodings = tf.concat(prediction_dict['box_encodings'], axis=1)
if box_encodings.shape.ndims == 4 and box_encodings.shape[2] == 1:
box_encodings = tf.squeeze(box_encodings, axis=2)
class_predictions_with_background = tf.concat(
prediction_dict['class_predictions_with_background'], axis=1)
predictions_dict = {
'preprocessed_inputs': preprocessed_inputs,
'box_encodings': box_encodings,
'class_predictions_with_background': class_predictions_with_background,
'feature_maps': feature_maps,
'anchors': self._anchors.get(),
'states_and_outputs': self._feature_extractor.states_and_outputs,
}
    # In cases such as exporting the model, the states are always zero, so the
    # step should be ignored.
if states is not None:
predictions_dict['step'] = self._feature_extractor.step
return predictions_dict
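  # Illustrative note: predict() expects the frames of all clips stacked along
  # the batch axis, e.g. 4 clips unrolled over 5 frames arrive as a
  # [20, height, width, 3] tensor, which gives self._batch_size = 4.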
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Computes scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
scope: Optional scope name.
Returns:
a dictionary mapping loss keys (`localization_loss` and
`classification_loss`) to scalar tensors representing corresponding loss
values.
"""
with tf.name_scope(scope, 'Loss', prediction_dict.values()):
keypoints = None
if self.groundtruth_has_field(fields.BoxListFields.keypoints):
keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints)
weights = None
if self.groundtruth_has_field(fields.BoxListFields.weights):
weights = self.groundtruth_lists(fields.BoxListFields.weights)
(batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match) = self._assign_targets(
self.groundtruth_lists(fields.BoxListFields.boxes),
self.groundtruth_lists(fields.BoxListFields.classes),
keypoints, weights)
match_list = [matcher.Match(match) for match in tf.unstack(batch_match)]
if self._add_summaries:
self._summarize_target_assignment(
self.groundtruth_lists(fields.BoxListFields.boxes), match_list)
location_losses = self._localization_loss(
prediction_dict['box_encodings'],
batch_reg_targets,
ignore_nan_targets=True,
weights=batch_reg_weights)
cls_losses = ops.reduce_sum_trailing_dimensions(
self._classification_loss(
prediction_dict['class_predictions_with_background'],
batch_cls_targets,
weights=batch_cls_weights),
ndims=2)
if self._hard_example_miner:
(loc_loss_list, cls_loss_list) = self._apply_hard_mining(
location_losses, cls_losses, prediction_dict, match_list)
localization_loss = tf.reduce_sum(tf.stack(loc_loss_list))
classification_loss = tf.reduce_sum(tf.stack(cls_loss_list))
if self._add_summaries:
self._hard_example_miner.summarize()
else:
if self._add_summaries:
class_ids = tf.argmax(batch_cls_targets, axis=2)
flattened_class_ids = tf.reshape(class_ids, [-1])
flattened_classification_losses = tf.reshape(cls_losses, [-1])
self._summarize_anchor_classification_loss(
flattened_class_ids, flattened_classification_losses)
localization_loss = tf.reduce_sum(location_losses)
classification_loss = tf.reduce_sum(cls_losses)
# Optionally normalize by number of positive matches
normalizer = tf.constant(1.0, dtype=tf.float32)
if self._normalize_loss_by_num_matches:
normalizer = tf.maximum(tf.to_float(tf.reduce_sum(batch_reg_weights)),
1.0)
with tf.name_scope('localization_loss'):
localization_loss_normalizer = normalizer
if self._normalize_loc_loss_by_codesize:
localization_loss_normalizer *= self._box_coder.code_size
localization_loss = ((self._localization_loss_weight / (
localization_loss_normalizer)) * localization_loss)
with tf.name_scope('classification_loss'):
classification_loss = ((self._classification_loss_weight / normalizer) *
classification_loss)
loss_dict = {
'localization_loss': localization_loss,
'classification_loss': classification_loss
}
return loss_dict
def restore_map(self, fine_tune_checkpoint_type='lstm'):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
Args:
      fine_tune_checkpoint_type: the type of checkpoint to restore from: either
        an SSD/LSTM detection checkpoint (with compatible variable names) or a
        classification checkpoint for initialization prior to training.
        Available options: `classification`, `detection`, `interleaved`,
        `interleaved_pretrain`, and `lstm`.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
Raises:
      ValueError: if fine_tune_checkpoint_type is not among `classification`,
        `detection`, `interleaved`, `interleaved_pretrain`, or `lstm`.
"""
if fine_tune_checkpoint_type not in [
'classification', 'detection', 'interleaved', 'lstm',
'interleaved_pretrain'
]:
      raise ValueError('Unsupported fine_tune_checkpoint_type: {}'.format(
fine_tune_checkpoint_type))
self._restored_networks += 1
base_network_scope = self.get_base_network_scope()
if base_network_scope:
scope_to_replace = '{0}_{1}'.format(base_network_scope,
self._restored_networks)
interleaved_model = False
for variable in tf.global_variables():
if scope_to_replace in variable.op.name:
interleaved_model = True
break
variables_to_restore = {}
for variable in tf.global_variables():
var_name = variable.op.name
if 'global_step' in var_name:
continue
# Remove FeatureExtractor prefix for classification checkpoints.
if (fine_tune_checkpoint_type == 'classification' or
fine_tune_checkpoint_type == 'interleaved_pretrain'):
var_name = (
re.split('^' + self._extract_features_scope + '/', var_name)[-1])
# When loading from single frame detection checkpoints, we need to
# remap FeatureMaps variable names.
if ('FeatureMaps' in var_name and
fine_tune_checkpoint_type == 'detection'):
var_name = var_name.replace('FeatureMaps',
self.get_base_network_scope())
# Load interleaved checkpoint specifically.
if interleaved_model: # Interleaved LSTD.
if 'interleaved' in fine_tune_checkpoint_type:
variables_to_restore[var_name] = variable
else:
# Restore non-base layers from the first checkpoint only.
if self._restored_networks == 1:
if base_network_scope + '_' not in var_name: # LSTM and FeatureMap
variables_to_restore[var_name] = variable
if scope_to_replace in var_name:
var_name = var_name.replace(scope_to_replace, base_network_scope)
variables_to_restore[var_name] = variable
else:
# Restore from the first model of interleaved checkpoints
if 'interleaved' in fine_tune_checkpoint_type:
var_name = var_name.replace(self.get_base_network_scope(),
self.get_base_network_scope() + '_1', 1)
variables_to_restore[var_name] = variable
return variables_to_restore
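  # Illustrative example (an assumption, not from the original file): with the
  # default 'FeatureExtractor' scope and
  # fine_tune_checkpoint_type='classification', the graph variable
  # 'FeatureExtractor/MobilenetV1/Conv2d_0/weights' is restored from the
  # checkpoint entry 'MobilenetV1/Conv2d_0/weights'.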
def get_base_network_scope(self):
"""Returns the variable scope of the base network.
Returns:
The variable scope of the feature extractor base network, e.g. MobilenetV1
"""
return self._feature_extractor.get_base_network_scope()
class LSTMSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""LSTM SSD Meta-architecture Feature Extractor definition."""
__metaclass__ = abc.ABCMeta
@property
def clip_state(self):
return self._clip_state
@clip_state.setter
def clip_state(self, clip_state):
self._clip_state = clip_state
@property
def depth_multipliers(self):
return self._depth_multipliers
@depth_multipliers.setter
def depth_multipliers(self, depth_multipliers):
self._depth_multipliers = depth_multipliers
@property
def lstm_state_depth(self):
return self._lstm_state_depth
@lstm_state_depth.setter
def lstm_state_depth(self, lstm_state_depth):
self._lstm_state_depth = lstm_state_depth
@property
def is_quantized(self):
return self._is_quantized
@is_quantized.setter
def is_quantized(self, is_quantized):
self._is_quantized = is_quantized
@property
def interleaved(self):
return False
@property
def states_and_outputs(self):
"""LSTM states and outputs.
This variable includes both LSTM states {C_t} and outputs {h_t}.
Returns:
states_and_outputs: A list of 4-D float tensors, including the lstm state
and output at each timestep.
"""
return self._states_out
@property
def step(self):
return self._step
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
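  # Worked example (illustrative): pixel value 0 maps to -1.0, 127.5 maps to
  # 0.0, and 255 maps to 1.0, so inputs in [0, 255] land in [-1, 1].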
def get_base_network_scope(self):
"""Returns the variable scope of the base network.
Returns:
The variable scope of the base network, e.g. MobilenetV1
"""
return self._base_network_scope
@abc.abstractmethod
def create_lstm_cell(self, batch_size, output_size, state_saver, state_name):
"""Create the LSTM cell, and initialize state if necessary.
Args:
batch_size: input batch size.
output_size: output size of the lstm cell, [width, height].
state_saver: a state saver object with methods `state` and `save_state`.
state_name: string, the name to use with the state_saver.
Returns:
lstm_cell: the lstm cell unit.
init_state: initial state representations.
      step: the current step tensor from the state saver, or None if no state
        saver is provided.
"""
pass
class LSTMSSDInterleavedFeatureExtractor(LSTMSSDFeatureExtractor):
"""LSTM SSD Meta-architecture Interleaved Feature Extractor definition."""
__metaclass__ = abc.ABCMeta
@property
def pre_bottleneck(self):
return self._pre_bottleneck
@pre_bottleneck.setter
def pre_bottleneck(self, pre_bottleneck):
self._pre_bottleneck = pre_bottleneck
@property
def low_res(self):
return self._low_res
@low_res.setter
def low_res(self, low_res):
self._low_res = low_res
@property
def interleaved(self):
return True
@property
def interleave_method(self):
return self._interleave_method
@interleave_method.setter
def interleave_method(self, interleave_method):
self._interleave_method = interleave_method
@abc.abstractmethod
def extract_base_features_large(self, preprocessed_inputs):
"""Extract the large base model features.
Args:
preprocessed_inputs: preprocessed input images of shape:
[batch, width, height, depth].
Returns:
net: the last feature map created from the base feature extractor.
end_points: a dictionary of feature maps created.
"""
pass
@abc.abstractmethod
def extract_base_features_small(self, preprocessed_inputs):
"""Extract the small base model features.
Args:
preprocessed_inputs: preprocessed input images of shape:
[batch, width, height, depth].
Returns:
net: the last feature map created from the base feature extractor.
end_points: a dictionary of feature maps created.
"""
pass
| 17,317 | 36.323276 | 80 | py |
models | models-master/research/lstm_object_detection/meta_architectures/lstm_ssd_meta_arch_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for meta_architectures.lstm_ssd_meta_arch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from lstm_object_detection.lstm import lstm_cells
from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch
from object_detection.core import anchor_generator
from object_detection.core import box_list
from object_detection.core import losses
from object_detection.core import post_processing
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.models import feature_map_generators
from object_detection.utils import test_case
from object_detection.utils import test_utils
MAX_TOTAL_NUM_BOXES = 5
NUM_CLASSES = 1
class FakeLSTMFeatureExtractor(
lstm_ssd_meta_arch.LSTMSSDFeatureExtractor):
def __init__(self):
super(FakeLSTMFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=1.0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams_fn=self.scope_fn)
self._lstm_state_depth = 256
def scope_fn(self):
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu6) as sc:
return sc
def create_lstm_cell(self):
pass
def extract_features(self, preprocessed_inputs, state_saver=None,
state_name='lstm_state', unroll_length=5, scope=None):
with tf.variable_scope('mock_model'):
net = slim.conv2d(inputs=preprocessed_inputs, num_outputs=32,
kernel_size=1, scope='layer1')
image_features = {'last_layer': net}
self._states_out = {}
feature_map_layout = {
'from_layer': ['last_layer'],
'layer_depth': [-1],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=(self._depth_multiplier),
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
class FakeLSTMInterleavedFeatureExtractor(
lstm_ssd_meta_arch.LSTMSSDInterleavedFeatureExtractor):
def __init__(self):
super(FakeLSTMInterleavedFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=1.0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams_fn=self.scope_fn)
self._lstm_state_depth = 256
def scope_fn(self):
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu6) as sc:
return sc
def create_lstm_cell(self):
pass
def extract_base_features_large(self, preprocessed_inputs):
with tf.variable_scope('base_large'):
net = slim.conv2d(inputs=preprocessed_inputs, num_outputs=32,
kernel_size=1, scope='layer1')
return net
def extract_base_features_small(self, preprocessed_inputs):
with tf.variable_scope('base_small'):
net = slim.conv2d(inputs=preprocessed_inputs, num_outputs=32,
kernel_size=1, scope='layer1')
return net
def extract_features(self, preprocessed_inputs, state_saver=None,
state_name='lstm_state', unroll_length=5, scope=None):
with tf.variable_scope('mock_model'):
net_large = self.extract_base_features_large(preprocessed_inputs)
net_small = self.extract_base_features_small(preprocessed_inputs)
net = slim.conv2d(
inputs=tf.concat([net_large, net_small], axis=3),
num_outputs=32,
kernel_size=1,
scope='layer1')
image_features = {'last_layer': net}
self._states_out = {}
feature_map_layout = {
'from_layer': ['last_layer'],
'layer_depth': [-1],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=(self._depth_multiplier),
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):
"""Sets up a simple 2x2 anchor grid on the unit square."""
def name_scope(self):
return 'MockAnchorGenerator'
def num_anchors_per_location(self):
return [1]
def _generate(self, feature_map_shape_list, im_height, im_width):
return [box_list.BoxList(
tf.constant([[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[1., 1., 1.5, 1.5] # Anchor that is outside clip_window.
], tf.float32))]
def num_anchors(self):
return 4
class LSTMSSDMetaArchTest(test_case.TestCase):
def _create_model(self,
interleaved=False,
apply_hard_mining=True,
normalize_loc_loss_by_codesize=False,
add_background_class=True,
random_example_sampling=False,
use_expected_classification_loss_under_sampling=False,
min_num_negative_samples=1,
desired_negative_sampling_ratio=3,
unroll_length=1):
num_classes = NUM_CLASSES
is_training = False
mock_anchor_generator = MockAnchorGenerator2x2()
mock_box_predictor = test_utils.MockBoxPredictor(is_training, num_classes)
mock_box_coder = test_utils.MockBoxCoder()
if interleaved:
fake_feature_extractor = FakeLSTMInterleavedFeatureExtractor()
else:
fake_feature_extractor = FakeLSTMFeatureExtractor()
mock_matcher = test_utils.MockMatcher()
region_similarity_calculator = sim_calc.IouSimilarity()
encode_background_as_zeros = False
def image_resizer_fn(image):
return [tf.identity(image), tf.shape(image)]
classification_loss = losses.WeightedSigmoidClassificationLoss()
localization_loss = losses.WeightedSmoothL1LocalizationLoss()
non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=-20.0,
iou_thresh=1.0,
max_size_per_class=5,
max_total_size=MAX_TOTAL_NUM_BOXES)
classification_loss_weight = 1.0
localization_loss_weight = 1.0
negative_class_weight = 1.0
normalize_loss_by_num_matches = False
hard_example_miner = None
if apply_hard_mining:
# This hard example miner is expected to be a no-op.
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=None,
iou_threshold=1.0)
target_assigner_instance = target_assigner.TargetAssigner(
region_similarity_calculator,
mock_matcher,
mock_box_coder,
negative_class_weight=negative_class_weight)
code_size = 4
model = lstm_ssd_meta_arch.LSTMSSDMetaArch(
is_training=is_training,
anchor_generator=mock_anchor_generator,
box_predictor=mock_box_predictor,
box_coder=mock_box_coder,
feature_extractor=fake_feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=tf.identity,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_loss_weight,
localization_loss_weight=localization_loss_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
unroll_length=unroll_length,
target_assigner_instance=target_assigner_instance,
add_summaries=False)
return model, num_classes, mock_anchor_generator.num_anchors(), code_size
def _get_value_for_matching_key(self, dictionary, suffix):
for key in dictionary.keys():
if key.endswith(suffix):
return dictionary[key]
raise ValueError('key not found {}'.format(suffix))
def test_predict_returns_correct_items_and_sizes(self):
batch_size = 3
height = width = 2
num_unroll = 1
graph = tf.Graph()
with graph.as_default():
model, num_classes, num_anchors, code_size = self._create_model()
preprocessed_images = tf.random_uniform(
[batch_size * num_unroll, height, width, 3],
minval=-1.,
maxval=1.)
true_image_shapes = tf.tile(
[[height, width, 3]], [batch_size, 1])
prediction_dict = model.predict(preprocessed_images, true_image_shapes)
self.assertIn('preprocessed_inputs', prediction_dict)
self.assertIn('box_encodings', prediction_dict)
self.assertIn('class_predictions_with_background', prediction_dict)
self.assertIn('feature_maps', prediction_dict)
self.assertIn('anchors', prediction_dict)
self.assertAllEqual(
[batch_size * num_unroll, height, width, 3],
prediction_dict['preprocessed_inputs'].shape.as_list())
self.assertAllEqual(
[batch_size * num_unroll, num_anchors, code_size],
prediction_dict['box_encodings'].shape.as_list())
self.assertAllEqual(
[batch_size * num_unroll, num_anchors, num_classes + 1],
prediction_dict['class_predictions_with_background'].shape.as_list())
self.assertAllEqual(
[num_anchors, code_size],
prediction_dict['anchors'].shape.as_list())
def test_interleaved_predict_returns_correct_items_and_sizes(self):
batch_size = 3
height = width = 2
num_unroll = 1
graph = tf.Graph()
with graph.as_default():
model, num_classes, num_anchors, code_size = self._create_model(
interleaved=True)
preprocessed_images = tf.random_uniform(
[batch_size * num_unroll, height, width, 3],
minval=-1.,
maxval=1.)
true_image_shapes = tf.tile(
[[height, width, 3]], [batch_size, 1])
prediction_dict = model.predict(preprocessed_images, true_image_shapes)
self.assertIn('preprocessed_inputs', prediction_dict)
self.assertIn('box_encodings', prediction_dict)
self.assertIn('class_predictions_with_background', prediction_dict)
self.assertIn('feature_maps', prediction_dict)
self.assertIn('anchors', prediction_dict)
self.assertAllEqual(
[batch_size * num_unroll, height, width, 3],
prediction_dict['preprocessed_inputs'].shape.as_list())
self.assertAllEqual(
[batch_size * num_unroll, num_anchors, code_size],
prediction_dict['box_encodings'].shape.as_list())
self.assertAllEqual(
[batch_size * num_unroll, num_anchors, num_classes + 1],
prediction_dict['class_predictions_with_background'].shape.as_list())
self.assertAllEqual(
[num_anchors, code_size],
prediction_dict['anchors'].shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 11,991 | 36.358255 | 80 | py |
models | models-master/research/lstm_object_detection/metrics/coco_evaluation_all_frames_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for video_object_detection.metrics.coco_video_evaluation."""
import numpy as np
import tensorflow.compat.v1 as tf
from lstm_object_detection.metrics import coco_evaluation_all_frames
from object_detection.core import standard_fields
class CocoEvaluationAllFramesTest(tf.test.TestCase):
def testGroundtruthAndDetectionsDisagreeOnAllFrames(self):
"""Tests that mAP is calculated on several different frame results."""
category_list = [{'id': 0, 'name': 'dog'}, {'id': 1, 'name': 'cat'}]
video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames(
category_list)
video_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict=[{
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1])
}, {
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1])
}])
video_evaluator.add_single_detected_image_info(
image_id='image1',
        # Detections disagree with the groundtruth on all frames except the
        # last one.
detections_dict=[{
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
}, {
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
}])
metrics = video_evaluator.evaluate()
self.assertNotEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
def testGroundtruthAndDetections(self):
"""Tests that mAP is calculated correctly on GT and Detections."""
category_list = [{'id': 0, 'name': 'dog'}, {'id': 1, 'name': 'cat'}]
video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames(
category_list)
video_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict=[{
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1])
}])
video_evaluator.add_single_ground_truth_image_info(
image_id='image2',
groundtruth_dict=[{
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1])
}])
video_evaluator.add_single_ground_truth_image_info(
image_id='image3',
groundtruth_dict=[{
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 100., 100., 120.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1])
}])
video_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict=[{
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
}])
video_evaluator.add_single_detected_image_info(
image_id='image2',
detections_dict=[{
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
}])
video_evaluator.add_single_detected_image_info(
image_id='image3',
detections_dict=[{
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 100., 100., 120.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
}])
metrics = video_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
def testMissingDetectionResults(self):
"""Tests if groundtrue is missing, raises ValueError."""
category_list = [{'id': 0, 'name': 'dog'}]
video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames(
category_list)
video_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict=[{
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1])
}])
with self.assertRaisesRegexp(ValueError,
r'Missing groundtruth for image-frame id:.*'):
video_evaluator.add_single_detected_image_info(
image_id='image3',
detections_dict=[{
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
}])
if __name__ == '__main__':
tf.test.main()
| 6,730 | 41.872611 | 80 | py |
models | models-master/research/lstm_object_detection/metrics/coco_evaluation_all_frames.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for evaluating video object detections with COCO metrics."""
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_evaluation
from object_detection.metrics import coco_tools
class CocoEvaluationAllFrames(coco_evaluation.CocoDetectionEvaluator):
"""Class to evaluate COCO detection metrics for frame sequences.
The class overrides two functions: add_single_ground_truth_image_info and
add_single_detected_image_info.
  For sequence video detection evaluation, the entire groundtruth_dict is
  iterated over, so every unrolled frame in an LSTM training sample is
  considered. Therefore, groundtruth and detection results of all frames are
  added for the evaluation. This is used when all the frames are labeled in
  the video object detection training job.
"""
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Add groundtruth results of all frames to the eval pipeline.
This method overrides the function defined in the base class.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A list of dictionary containing -
InputDataFields.groundtruth_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_boxes] containing 1-indexed groundtruth classes for the boxes.
InputDataFields.groundtruth_is_crowd (optional): integer numpy array of
shape [num_boxes] containing iscrowd flag for groundtruth boxes.
"""
for idx, gt in enumerate(groundtruth_dict):
if not gt:
continue
image_frame_id = '{}_{}'.format(image_id, idx)
if image_frame_id in self._image_ids:
tf.logging.warning(
'Ignoring ground truth with image id %s since it was '
'previously added', image_frame_id)
continue
self._groundtruth_list.extend(
coco_tools.ExportSingleImageGroundtruthToCoco(
image_id=image_frame_id,
next_annotation_id=self._annotation_id,
category_id_set=self._category_id_set,
groundtruth_boxes=gt[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_classes=gt[
standard_fields.InputDataFields.groundtruth_classes]))
self._annotation_id += (
gt[standard_fields.InputDataFields.groundtruth_boxes].shape[0])
# Boolean to indicate whether a detection has been added for this image.
self._image_ids[image_frame_id] = False
def add_single_detected_image_info(self, image_id, detections_dict):
"""Add detection results of all frames to the eval pipeline.
This method overrides the function defined in the base class.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A list of dictionary containing -
DetectionResultFields.detection_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` detection boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
DetectionResultFields.detection_scores: float32 numpy array of shape
[num_boxes] containing detection scores for the boxes.
DetectionResultFields.detection_classes: integer numpy array of shape
[num_boxes] containing 1-indexed detection classes for the boxes.
Raises:
ValueError: If groundtruth for the image_id is not available.
"""
for idx, det in enumerate(detections_dict):
if not det:
continue
image_frame_id = '{}_{}'.format(image_id, idx)
if image_frame_id not in self._image_ids:
raise ValueError(
'Missing groundtruth for image-frame id: {}'.format(image_frame_id))
if self._image_ids[image_frame_id]:
tf.logging.warning(
'Ignoring detection with image id %s since it was '
'previously added', image_frame_id)
continue
self._detection_boxes_list.extend(
coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id=image_frame_id,
category_id_set=self._category_id_set,
detection_boxes=det[
standard_fields.DetectionResultFields.detection_boxes],
detection_scores=det[
standard_fields.DetectionResultFields.detection_scores],
detection_classes=det[
standard_fields.DetectionResultFields.detection_classes]))
self._image_ids[image_frame_id] = True
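# Hedged usage sketch (illustrative only; it mirrors the unit tests for this
# class). The gt_frame*/det_frame* names are placeholders for the per-frame
# dictionaries described in the docstrings above.
#
#   evaluator = CocoEvaluationAllFrames([{'id': 0, 'name': 'dog'}])
#   evaluator.add_single_ground_truth_image_info(
#       'video0', [gt_frame0, gt_frame1])
#   evaluator.add_single_detected_image_info(
#       'video0', [det_frame0, det_frame1])
#   metrics = evaluator.evaluate()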
| 5,494 | 42.96 | 80 | py |
models | models-master/research/lstm_object_detection/metrics/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/lstm_object_detection/protos/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/lstm_object_detection/lstm/rnn_decoder.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom RNN decoder."""
import tensorflow.compat.v1 as tf
import lstm_object_detection.lstm.utils as lstm_utils
class _NoVariableScope(object):
def __enter__(self):
return
def __exit__(self, exc_type, exc_value, traceback):
return False
def rnn_decoder(decoder_inputs,
initial_state,
cell,
loop_function=None,
scope=None):
"""RNN decoder for the LSTM-SSD model.
This decoder returns a list of all states, rather than only the final state.
Args:
    decoder_inputs: A list of 4D Tensors with shape
      [batch_size, height, width, depth].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
scope: optional VariableScope for the created subgraph.
Returns:
A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 4D Tensors with
        shape [batch_size, height, width, depth] containing generated outputs.
states: A list of the same length as decoder_inputs of the state of each
cell at each time-step. It is a 2D Tensor of shape
[batch_size x cell.state_size].
"""
with tf.variable_scope(scope) if scope else _NoVariableScope():
state_tuple = initial_state
outputs = []
states = []
prev = None
for local_step, decoder_input in enumerate(decoder_inputs):
if loop_function is not None and prev is not None:
with tf.variable_scope('loop_function', reuse=True):
decoder_input = loop_function(prev, local_step)
output, state_tuple = cell(decoder_input, state_tuple)
outputs.append(output)
states.append(state_tuple)
if loop_function is not None:
prev = output
return outputs, states
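# Hedged usage sketch (illustrative only): this mirrors how the LSTM-SSD
# feature extractors drive rnn_decoder, splitting a [batch * unroll, h, w, c]
# feature map into per-step inputs. `lstm_cell` is assumed to be a
# BottleneckConvLSTMCell from lstm_object_detection.lstm.lstm_cells.
#
#   net_seq = list(tf.split(net, unroll_length))
#   init_state = lstm_cell.init_state('lstm_state', batch_size, tf.float32)
#   outputs, states = rnn_decoder(net_seq, init_state, lstm_cell)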
def multi_input_rnn_decoder(decoder_inputs,
initial_state,
cell,
sequence_step,
selection_strategy='RANDOM',
is_training=None,
is_quantized=False,
preprocess_fn_list=None,
pre_bottleneck=False,
flatten_state=False,
scope=None):
"""RNN decoder for the Interleaved LSTM-SSD model.
This decoder takes multiple sequences of inputs and selects the input to feed
to the rnn at each timestep using its selection_strategy, which can be random,
learned, or deterministic.
This decoder returns a list of all states, rather than only the final state.
Args:
decoder_inputs: A list of lists of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
sequence_step: Tensor [batch_size] of the step number of the first elements
in the sequence.
selection_strategy: Method for picking the decoder_input to use at each
timestep. Must be 'RANDOM', 'SKIPX' for integer X, where X is the number
of times to use the second input before using the first.
is_training: boolean, whether the network is training. When using learned
selection, attempts exploration if training.
is_quantized: flag to enable/disable quantization mode.
preprocess_fn_list: List of functions accepting two tensor arguments: one
timestep of decoder_inputs and the lstm state. If not None,
decoder_inputs[i] will be updated with preprocess_fn[i] at the start of
each timestep.
pre_bottleneck: if True, use separate bottleneck weights for each sequence.
Useful when input sequences have differing numbers of channels. Final
bottlenecks will have the same dimension.
flatten_state: Whether the LSTM state is flattened.
scope: optional VariableScope for the created subgraph.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
states: A list of the same length as decoder_inputs of the state of each
cell at each time-step. It is a 2D Tensor of shape
[batch_size x cell.state_size].
Raises:
ValueError: If selection_strategy is not recognized or unexpected unroll
length.
"""
if flatten_state and len(decoder_inputs[0]) > 1:
raise ValueError('In export mode, unroll length should not be more than 1')
with tf.variable_scope(scope) if scope else _NoVariableScope():
state_tuple = initial_state
outputs = []
states = []
batch_size = decoder_inputs[0][0].shape[0].value
num_sequences = len(decoder_inputs)
sequence_length = len(decoder_inputs[0])
for local_step in range(sequence_length):
for sequence_index in range(num_sequences):
if preprocess_fn_list is not None:
decoder_inputs[sequence_index][local_step] = (
preprocess_fn_list[sequence_index](
decoder_inputs[sequence_index][local_step], state_tuple[0]))
if pre_bottleneck:
decoder_inputs[sequence_index][local_step] = cell.pre_bottleneck(
inputs=decoder_inputs[sequence_index][local_step],
state=state_tuple[1],
input_index=sequence_index)
action = generate_action(selection_strategy, local_step, sequence_step,
[batch_size, 1, 1, 1])
inputs, _ = (
select_inputs(decoder_inputs, action, local_step, is_training,
is_quantized))
# Mark base network endpoints under raw_inputs/
with tf.name_scope(None):
inputs = tf.identity(inputs, 'raw_inputs/base_endpoint')
output, state_tuple_out = cell(inputs, state_tuple)
state_tuple = select_state(state_tuple, state_tuple_out, action)
outputs.append(output)
states.append(state_tuple)
return outputs, states
def generate_action(selection_strategy, local_step, sequence_step,
action_shape):
"""Generate current (binary) action based on selection strategy.
Args:
selection_strategy: Method for picking the decoder_input to use at each
timestep. Must be 'RANDOM', 'SKIPX' for integer X, where X is the number
of times to use the second input before using the first.
local_step: Tensor [batch_size] of the step number within the current
unrolled batch.
sequence_step: Tensor [batch_size] of the step number of the first elements
in the sequence.
action_shape: The shape of action tensor to be generated.
Returns:
    A tensor of shape action_shape; each element is an individual action.
Raises:
ValueError: if selection_strategy is not supported or if 'SKIP' is not
      followed by an integer.
"""
if selection_strategy.startswith('RANDOM'):
action = tf.random.uniform(action_shape, maxval=2, dtype=tf.int32)
action = tf.minimum(action, 1)
# First step always runs large network.
if local_step == 0 and sequence_step is not None:
action *= tf.minimum(
tf.reshape(tf.cast(sequence_step, tf.int32), action_shape), 1)
elif selection_strategy.startswith('SKIP'):
inter_count = int(selection_strategy[4:])
if local_step % (inter_count + 1) == 0:
action = tf.zeros(action_shape)
else:
action = tf.ones(action_shape)
else:
raise ValueError('Selection strategy %s not recognized' %
selection_strategy)
return tf.cast(action, tf.int32)
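# --- Illustrative sketch added for exposition; not part of the original
# library. It mirrors the 'SKIP' branch above: steps 0, inter_count + 1,
# 2 * (inter_count + 1), ... use the first (large) input (action 0) and all
# other steps use the second (small) input (action 1). The helper name is
# hypothetical.
def _example_skip_schedule(num_steps, inter_count=2):
  """Returns the per-step action pattern of a SKIP<inter_count> strategy."""
  return [0 if step % (inter_count + 1) == 0 else 1
          for step in range(num_steps)]
# Example: _example_skip_schedule(6) == [0, 1, 1, 0, 1, 1]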
def select_inputs(decoder_inputs, action, local_step, is_training, is_quantized,
get_alt_inputs=False):
"""Selects sequence from decoder_inputs based on 1D actions.
Given multiple input batches, creates a single output batch by
  selecting the action[i]-th input for the i-th batch element.
Args:
decoder_inputs: A 2-D list of tensor inputs.
action: A tensor of shape [batch_size]. Each element corresponds to an index
of decoder_inputs to choose.
local_step: The current timestep.
is_training: boolean, whether the network is training. When using learned
selection, attempts exploration if training.
is_quantized: flag to enable/disable quantization mode.
get_alt_inputs: Whether the non-chosen inputs should also be returned.
Returns:
    A tuple (inputs, inputs_alt), where inputs is the selected batch and
    inputs_alt holds the elements that were not chosen if get_alt_inputs is
    True, otherwise None.
Raises:
ValueError: if the decoder inputs contains other than two sequences.
"""
num_seqs = len(decoder_inputs)
  if num_seqs != 2:
raise ValueError('Currently only supports two sets of inputs.')
stacked_inputs = tf.stack(
[decoder_inputs[seq_index][local_step] for seq_index in range(num_seqs)],
axis=-1)
action_index = tf.one_hot(action, num_seqs)
selected_inputs = (
lstm_utils.quantize_op(stacked_inputs * action_index, is_training,
is_quantized, scope='quant_selected_inputs'))
inputs = tf.reduce_sum(selected_inputs, axis=-1)
inputs_alt = None
# Only works for 2 models.
if get_alt_inputs:
# Reverse of action_index.
action_index_alt = tf.one_hot(action, num_seqs, on_value=0.0, off_value=1.0)
selected_inputs = (
lstm_utils.quantize_op(stacked_inputs * action_index_alt, is_training,
is_quantized, scope='quant_selected_inputs_alt'))
inputs_alt = tf.reduce_sum(selected_inputs, axis=-1)
return inputs, inputs_alt
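# --- Illustrative sketch added for exposition; not part of the original
# library. A NumPy analogue of the one-hot selection above, assuming exactly
# two candidate inputs and a 1-D integer action per batch element. All names
# here are hypothetical.
def _example_select_inputs_numpy(first, second, action):
  """Selects, per batch element, `first` (action 0) or `second` (action 1)."""
  import numpy as np  # Local import keeps the sketch self-contained.
  stacked = np.stack([first, second], axis=-1)  # [..., 2]
  one_hot = np.eye(2)[action]  # [batch, 2]
  # Broadcast the mask over the spatial/channel axes before reducing.
  one_hot = one_hot.reshape((-1,) + (1,) * (stacked.ndim - 2) + (2,))
  return (stacked * one_hot).sum(axis=-1)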
def select_state(previous_state, new_state, action):
"""Select state given action.
  Currently only supports binary actions. If the action is 0, the state was
  generated by the large model, so we update the state. If the action is 1,
  the state came from the small model and, in the interleaved model, we skip
  this state update.
Args:
previous_state: A state tuple representing state from previous step.
new_state: A state tuple representing newly computed state.
action: A tensor the same shape as state.
Returns:
A state tuple selected based on the given action.
"""
action = tf.cast(action, tf.float32)
state_c = previous_state[0] * action + new_state[0] * (1 - action)
state_h = previous_state[1] * action + new_state[1] * (1 - action)
return (state_c, state_h)
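# --- Worked example added for exposition; not part of the original library.
# select_state() above implements, element-wise:
#   state = previous_state * action + new_state * (1 - action)
# With a hypothetical scalar state, previous = 5.0 and new = 7.0:
#   action = 1 (small model ran)  ->  5.0 * 1 + 7.0 * 0 = 5.0  (keep old state)
#   action = 0 (large model ran)  ->  5.0 * 0 + 7.0 * 1 = 7.0  (take new state)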
| 11,709 | 42.37037 | 80 | py |
models | models-master/research/lstm_object_detection/lstm/utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantization related ops for LSTM."""
from __future__ import absolute_import
from __future__ import division
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from tensorflow.python.training import moving_averages
def _quant_var(
name,
initializer_val,
vars_collection=tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
):
"""Create an var for storing the min/max quantization range."""
return contrib_framework.model_variable(
name,
shape=[],
initializer=tf.constant_initializer(initializer_val),
collections=[vars_collection],
trainable=False)
def quantizable_concat(inputs,
axis,
is_training,
is_quantized=True,
default_min=0,
default_max=6,
ema_decay=0.999,
scope='quantized_concat'):
"""Concat replacement with quantization option.
Allows concat inputs to share the same min max ranges,
from experimental/gazelle/synthetic/model/tpu/utils.py.
Args:
inputs: list of tensors to concatenate.
axis: dimension along which to concatenate.
is_training: true if the graph is a training graph.
is_quantized: flag to enable/disable quantization.
default_min: default min value for fake quant op.
default_max: default max value for fake quant op.
ema_decay: the moving average decay for the quantization variables.
scope: Optional scope for variable_scope.
Returns:
Tensor resulting from concatenation of input tensors
"""
if is_quantized:
with tf.variable_scope(scope):
tf.logging.info('inputs: {}'.format(inputs))
for t in inputs:
tf.logging.info(t)
min_var = _quant_var('min', default_min)
max_var = _quant_var('max', default_max)
if not is_training:
# If we are building an eval graph just use the values in the variables.
quant_inputs = [
tf.fake_quant_with_min_max_vars(t, min_var, max_var) for t in inputs
]
tf.logging.info('min_val: {}'.format(min_var))
tf.logging.info('max_val: {}'.format(max_var))
else:
concat_tensors = tf.concat(inputs, axis=axis)
tf.logging.info('concat_tensors: {}'.format(concat_tensors))
# TFLite requires that 0.0 is always in the [min; max] range.
range_min = tf.minimum(
tf.reduce_min(concat_tensors), 0.0, name='SafeQuantRangeMin')
range_max = tf.maximum(
tf.reduce_max(concat_tensors), 0.0, name='SafeQuantRangeMax')
        # Otherwise we need to keep track of the moving averages of the min
        # and max of the elements of the input tensor.
min_val = moving_averages.assign_moving_average(
min_var,
range_min,
ema_decay,
name='AssignMinEma')
max_val = moving_averages.assign_moving_average(
max_var,
range_max,
ema_decay,
name='AssignMaxEma')
tf.logging.info('min_val: {}'.format(min_val))
tf.logging.info('max_val: {}'.format(max_val))
quant_inputs = [
tf.fake_quant_with_min_max_vars(t, min_val, max_val) for t in inputs
]
tf.logging.info('quant_inputs: {}'.format(quant_inputs))
outputs = tf.concat(quant_inputs, axis=axis)
tf.logging.info('outputs: {}'.format(outputs))
else:
outputs = tf.concat(inputs, axis=axis)
return outputs
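# --- Illustrative usage sketch added for exposition; not part of the original
# library. A hypothetical caller that replaces a plain tf.concat so that both
# branches share a single fake-quant range during quantization-aware training.
def _example_quantizable_concat(features_a, features_b, is_training):
  return quantizable_concat(
      [features_a, features_b],
      axis=3,
      is_training=is_training,
      is_quantized=True,
      scope='example_quantized_concat')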
def quantizable_separable_conv2d(inputs,
num_outputs,
kernel_size,
is_quantized=True,
depth_multiplier=1,
stride=1,
activation_fn=tf.nn.relu6,
normalizer_fn=None,
weights_initializer=None,
pointwise_initializer=None,
scope=None):
"""Quantization friendly backward compatible separable conv2d.
  This op has the same API as separable_conv2d. The main difference is that an
  additional BiasAdd is manually inserted after the depthwise conv, such that
  the depthwise bias does not have a name conflict with the pointwise bias. The
  motivation for this op is that the quantization script needs a BiasAdd in
  order to recognize the op, which a native call to separable_conv2d does not
  create for the depthwise conv.
Args:
inputs: A tensor of size [batch_size, height, width, channels].
    num_outputs: The number of pointwise convolution output filters. If it is
      None, the pointwise convolution stage is skipped.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
filters. Can be an int if both values are the same.
is_quantized: flag to enable/disable quantization.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to num_filters_in * depth_multiplier.
stride: A list of length 2: [stride_height, stride_width], specifying the
depthwise convolution stride. Can be an int if both strides are the same.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of biases.
weights_initializer: An initializer for the depthwise weights.
pointwise_initializer: An initializer for the pointwise weights.
scope: Optional scope for variable_scope.
Returns:
    Tensor resulting from the separable convolution.
"""
if is_quantized:
outputs = contrib_layers.separable_conv2d(
inputs,
None,
kernel_size,
depth_multiplier=depth_multiplier,
stride=1,
activation_fn=None,
normalizer_fn=None,
biases_initializer=None,
weights_initializer=weights_initializer,
pointwise_initializer=None,
scope=scope)
outputs = contrib_layers.bias_add(
outputs, trainable=True, scope='%s_bias' % scope)
outputs = contrib_layers.conv2d(
outputs,
num_outputs, [1, 1],
activation_fn=activation_fn,
stride=stride,
normalizer_fn=normalizer_fn,
weights_initializer=pointwise_initializer,
scope=scope)
else:
outputs = contrib_layers.separable_conv2d(
inputs,
num_outputs,
kernel_size,
depth_multiplier=depth_multiplier,
stride=stride,
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
weights_initializer=weights_initializer,
pointwise_initializer=pointwise_initializer,
scope=scope)
return outputs
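# --- Illustrative usage sketch added for exposition; not part of the original
# library. A hypothetical call mirroring how this quantization-friendly op
# would stand in for slim.separable_conv2d in a feature extractor.
def _example_quantizable_separable(features, is_quantized=True):
  return quantizable_separable_conv2d(
      features,
      num_outputs=64,
      kernel_size=[3, 3],
      is_quantized=is_quantized,
      scope='example_separable')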
def quantize_op(inputs,
is_training=True,
is_quantized=True,
default_min=0,
default_max=6,
ema_decay=0.999,
scope='quant'):
"""Inserts a fake quantization op after inputs.
Args:
inputs: A tensor of size [batch_size, height, width, channels].
is_training: true if the graph is a training graph.
is_quantized: flag to enable/disable quantization.
default_min: default min value for fake quant op.
default_max: default max value for fake quant op.
ema_decay: the moving average decay for the quantization variables.
scope: Optional scope for variable_scope.
Returns:
Tensor resulting from quantizing the input tensors.
"""
if not is_quantized:
return inputs
with tf.variable_scope(scope):
min_var = _quant_var('min', default_min)
max_var = _quant_var('max', default_max)
if not is_training:
# Just use variables in the checkpoint.
return tf.fake_quant_with_min_max_vars(inputs, min_var, max_var)
# While training, collect EMAs of ranges seen, store in min_var, max_var.
# TFLite requires that 0.0 is always in the [min; max] range.
range_min = tf.minimum(tf.reduce_min(inputs), 0.0, 'SafeQuantRangeMin')
# We set the lower_bound of max_range to prevent range collapse.
range_max = tf.maximum(tf.reduce_max(inputs), 1e-5, 'SafeQuantRangeMax')
min_val = moving_averages.assign_moving_average(
min_var, range_min, ema_decay, name='AssignMinEma')
max_val = moving_averages.assign_moving_average(
max_var, range_max, ema_decay, name='AssignMaxEma')
return tf.fake_quant_with_min_max_vars(inputs, min_val, max_val)
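# --- Worked example added for exposition; not part of the original library.
# assign_moving_average() above applies: var -= (1 - decay) * (var - value).
# With ema_decay = 0.999, a stored max_var of 5.0 and an observed range_max of
# 7.0, a single update gives 5.0 - 0.001 * (5.0 - 7.0) = 5.002, so the tracked
# quantization range drifts slowly toward the observed activation range.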
def fixed_quantize_op(inputs, is_quantized=True,
fixed_min=0.0, fixed_max=6.0, scope='quant'):
"""Inserts a fake quantization op with fixed range after inputs.
Args:
inputs: A tensor of size [batch_size, height, width, channels].
is_quantized: flag to enable/disable quantization.
fixed_min: fixed min value for fake quant op.
fixed_max: fixed max value for fake quant op.
scope: Optional scope for variable_scope.
Returns:
Tensor resulting from quantizing the input tensors.
"""
if not is_quantized:
return inputs
with tf.variable_scope(scope):
# Just use fixed quantization range.
return tf.fake_quant_with_min_max_args(inputs, fixed_min, fixed_max)
| 10,134 | 38.282946 | 80 | py |
models | models-master/research/lstm_object_detection/lstm/rnn_decoder_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_object_detection.lstm.rnn_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import rnn as contrib_rnn
from lstm_object_detection.lstm import rnn_decoder
class MockRnnCell(contrib_rnn.RNNCell):
def __init__(self, input_size, num_units):
self._input_size = input_size
self._num_units = num_units
self._filter_size = [3, 3]
def __call__(self, inputs, state_tuple):
outputs = tf.concat([inputs, state_tuple[0]], axis=3)
new_state_tuple = (tf.multiply(state_tuple[0], 2), state_tuple[1])
return outputs, new_state_tuple
def state_size(self):
return self._num_units
def output_size(self):
return self._input_size + self._num_units
def pre_bottleneck(self, inputs, state, input_index):
with tf.variable_scope('bottleneck_%d' % input_index, reuse=tf.AUTO_REUSE):
inputs = contrib_layers.separable_conv2d(
tf.concat([inputs, state], 3),
self._input_size,
self._filter_size,
depth_multiplier=1,
activation_fn=tf.nn.relu6,
normalizer_fn=None)
return inputs
class RnnDecoderTest(tf.test.TestCase):
def test_rnn_decoder_single_unroll(self):
batch_size = 2
num_unroll = 1
num_units = 64
width = 8
height = 10
input_channels = 128
initial_state = tf.random_normal((batch_size, width, height, num_units))
inputs = tf.random_normal([batch_size, width, height, input_channels])
rnn_cell = MockRnnCell(input_channels, num_units)
outputs, states = rnn_decoder.rnn_decoder(
decoder_inputs=[inputs] * num_unroll,
initial_state=(initial_state, initial_state),
cell=rnn_cell)
self.assertEqual(len(outputs), num_unroll)
self.assertEqual(len(states), num_unroll)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
results = sess.run((outputs, states, inputs, initial_state))
outputs_results = results[0]
states_results = results[1]
inputs_results = results[2]
initial_states_results = results[3]
self.assertEqual(outputs_results[0].shape,
(batch_size, width, height, input_channels + num_units))
self.assertAllEqual(
outputs_results[0],
np.concatenate((inputs_results, initial_states_results), axis=3))
self.assertEqual(states_results[0][0].shape,
(batch_size, width, height, num_units))
self.assertEqual(states_results[0][1].shape,
(batch_size, width, height, num_units))
self.assertAllEqual(states_results[0][0],
np.multiply(initial_states_results, 2.0))
self.assertAllEqual(states_results[0][1], initial_states_results)
def test_rnn_decoder_multiple_unroll(self):
batch_size = 2
num_unroll = 3
num_units = 64
width = 8
height = 10
input_channels = 128
initial_state = tf.random_normal((batch_size, width, height, num_units))
inputs = tf.random_normal([batch_size, width, height, input_channels])
rnn_cell = MockRnnCell(input_channels, num_units)
outputs, states = rnn_decoder.rnn_decoder(
decoder_inputs=[inputs] * num_unroll,
initial_state=(initial_state, initial_state),
cell=rnn_cell)
self.assertEqual(len(outputs), num_unroll)
self.assertEqual(len(states), num_unroll)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
results = sess.run((outputs, states, inputs, initial_state))
outputs_results = results[0]
states_results = results[1]
inputs_results = results[2]
initial_states_results = results[3]
for i in range(num_unroll):
previous_state = ([initial_states_results, initial_states_results]
if i == 0 else states_results[i - 1])
self.assertEqual(
outputs_results[i].shape,
(batch_size, width, height, input_channels + num_units))
self.assertAllEqual(
outputs_results[i],
np.concatenate((inputs_results, previous_state[0]), axis=3))
self.assertEqual(states_results[i][0].shape,
(batch_size, width, height, num_units))
self.assertEqual(states_results[i][1].shape,
(batch_size, width, height, num_units))
self.assertAllEqual(states_results[i][0],
np.multiply(previous_state[0], 2.0))
self.assertAllEqual(states_results[i][1], previous_state[1])
class MultiInputRnnDecoderTest(tf.test.TestCase):
def test_rnn_decoder_single_unroll(self):
batch_size = 2
num_unroll = 1
num_units = 12
width = 8
height = 10
input_channels_large = 24
input_channels_small = 12
bottleneck_channels = 20
initial_state_c = tf.random_normal((batch_size, width, height, num_units))
initial_state_h = tf.random_normal((batch_size, width, height, num_units))
initial_state = (initial_state_c, initial_state_h)
inputs_large = tf.random_normal(
[batch_size, width, height, input_channels_large])
inputs_small = tf.random_normal(
[batch_size, width, height, input_channels_small])
rnn_cell = MockRnnCell(bottleneck_channels, num_units)
outputs, states = rnn_decoder.multi_input_rnn_decoder(
decoder_inputs=[[inputs_large] * num_unroll,
[inputs_small] * num_unroll],
initial_state=initial_state,
cell=rnn_cell,
sequence_step=tf.zeros([batch_size]),
pre_bottleneck=True)
self.assertEqual(len(outputs), num_unroll)
self.assertEqual(len(states), num_unroll)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
results = sess.run(
(outputs, states, inputs_large, inputs_small, initial_state))
outputs_results = results[0]
states_results = results[1]
initial_states_results = results[4]
self.assertEqual(
outputs_results[0].shape,
(batch_size, width, height, bottleneck_channels + num_units))
self.assertEqual(states_results[0][0].shape,
(batch_size, width, height, num_units))
self.assertEqual(states_results[0][1].shape,
(batch_size, width, height, num_units))
# The first step should always update state.
self.assertAllEqual(states_results[0][0],
np.multiply(initial_states_results[0], 2))
self.assertAllEqual(states_results[0][1], initial_states_results[1])
def test_rnn_decoder_multiple_unroll(self):
batch_size = 2
num_unroll = 3
num_units = 12
width = 8
height = 10
input_channels_large = 24
input_channels_small = 12
bottleneck_channels = 20
initial_state_c = tf.random_normal((batch_size, width, height, num_units))
initial_state_h = tf.random_normal((batch_size, width, height, num_units))
initial_state = (initial_state_c, initial_state_h)
inputs_large = tf.random_normal(
[batch_size, width, height, input_channels_large])
inputs_small = tf.random_normal(
[batch_size, width, height, input_channels_small])
rnn_cell = MockRnnCell(bottleneck_channels, num_units)
outputs, states = rnn_decoder.multi_input_rnn_decoder(
decoder_inputs=[[inputs_large] * num_unroll,
[inputs_small] * num_unroll],
initial_state=initial_state,
cell=rnn_cell,
sequence_step=tf.zeros([batch_size]),
pre_bottleneck=True)
self.assertEqual(len(outputs), num_unroll)
self.assertEqual(len(states), num_unroll)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
results = sess.run(
(outputs, states, inputs_large, inputs_small, initial_state))
outputs_results = results[0]
states_results = results[1]
initial_states_results = results[4]
# The first step should always update state.
self.assertAllEqual(states_results[0][0],
np.multiply(initial_states_results[0], 2))
self.assertAllEqual(states_results[0][1], initial_states_results[1])
for i in range(num_unroll):
self.assertEqual(
outputs_results[i].shape,
(batch_size, width, height, bottleneck_channels + num_units))
self.assertEqual(states_results[i][0].shape,
(batch_size, width, height, num_units))
self.assertEqual(states_results[i][1].shape,
(batch_size, width, height, num_units))
def test_rnn_decoder_multiple_unroll_with_skip(self):
batch_size = 2
num_unroll = 5
num_units = 12
width = 8
height = 10
input_channels_large = 24
input_channels_small = 12
bottleneck_channels = 20
skip = 2
initial_state_c = tf.random_normal((batch_size, width, height, num_units))
initial_state_h = tf.random_normal((batch_size, width, height, num_units))
initial_state = (initial_state_c, initial_state_h)
inputs_large = tf.random_normal(
[batch_size, width, height, input_channels_large])
inputs_small = tf.random_normal(
[batch_size, width, height, input_channels_small])
rnn_cell = MockRnnCell(bottleneck_channels, num_units)
outputs, states = rnn_decoder.multi_input_rnn_decoder(
decoder_inputs=[[inputs_large] * num_unroll,
[inputs_small] * num_unroll],
initial_state=initial_state,
cell=rnn_cell,
sequence_step=tf.zeros([batch_size]),
pre_bottleneck=True,
selection_strategy='SKIP%d' % skip)
self.assertEqual(len(outputs), num_unroll)
self.assertEqual(len(states), num_unroll)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
results = sess.run(
(outputs, states, inputs_large, inputs_small, initial_state))
outputs_results = results[0]
states_results = results[1]
initial_states_results = results[4]
for i in range(num_unroll):
self.assertEqual(
outputs_results[i].shape,
(batch_size, width, height, bottleneck_channels + num_units))
self.assertEqual(states_results[i][0].shape,
(batch_size, width, height, num_units))
self.assertEqual(states_results[i][1].shape,
(batch_size, width, height, num_units))
previous_state = (
initial_states_results if i == 0 else states_results[i - 1])
# State only updates during key frames
if i % (skip + 1) == 0:
self.assertAllEqual(states_results[i][0],
np.multiply(previous_state[0], 2))
self.assertAllEqual(states_results[i][1], previous_state[1])
else:
self.assertAllEqual(states_results[i][0], previous_state[0])
self.assertAllEqual(states_results[i][1], previous_state[1])
if __name__ == '__main__':
tf.test.main()
| 11,883 | 37.710098 | 80 | py |
models | models-master/research/lstm_object_detection/lstm/lstm_cells_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_object_detection.lstm.lstm_cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from lstm_object_detection.lstm import lstm_cells
class BottleneckConvLstmCellsTest(tf.test.TestCase):
def test_run_lstm_cell(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 15
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
learned_state = False
inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32)
cell = lstm_cells.BottleneckConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units)
init_state = cell.init_state(
state_name, batch_size, dtype, learned_state)
output, state_tuple = cell(inputs, init_state)
self.assertAllEqual([4, 10, 10, 15], output.shape.as_list())
self.assertAllEqual([4, 10, 10, 15], state_tuple[0].shape.as_list())
self.assertAllEqual([4, 10, 10, 15], state_tuple[1].shape.as_list())
def test_run_lstm_cell_with_flattened_state(self):
filter_size = [3, 3]
output_dim = 10
output_size = [output_dim] * 2
num_units = 15
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
learned_state = False
inputs = tf.zeros([batch_size, output_dim, output_dim, 3], dtype=tf.float32)
cell = lstm_cells.BottleneckConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
flatten_state=True)
init_state = cell.init_state(
state_name, batch_size, dtype, learned_state)
output, state_tuple = cell(inputs, init_state)
self.assertAllEqual([4, 10, 10, 15], output.shape.as_list())
self.assertAllEqual([4, 1500], state_tuple[0].shape.as_list())
self.assertAllEqual([4, 1500], state_tuple[1].shape.as_list())
def test_run_lstm_cell_with_output_bottleneck(self):
filter_size = [3, 3]
output_dim = 10
output_size = [output_dim] * 2
num_units = 15
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
learned_state = False
inputs = tf.zeros([batch_size, output_dim, output_dim, 3], dtype=tf.float32)
cell = lstm_cells.BottleneckConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
output_bottleneck=True)
init_state = cell.init_state(
state_name, batch_size, dtype, learned_state)
output, state_tuple = cell(inputs, init_state)
self.assertAllEqual([4, 10, 10, 30], output.shape.as_list())
self.assertAllEqual([4, 10, 10, 15], state_tuple[0].shape.as_list())
self.assertAllEqual([4, 10, 10, 15], state_tuple[1].shape.as_list())
def test_get_init_state(self):
filter_size = [3, 3]
output_dim = 10
output_size = [output_dim] * 2
num_units = 15
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
learned_state = False
cell = lstm_cells.BottleneckConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units)
init_c, init_h = cell.init_state(
state_name, batch_size, dtype, learned_state)
self.assertEqual(tf.float32, init_c.dtype)
self.assertEqual(tf.float32, init_h.dtype)
with self.test_session() as sess:
init_c_res, init_h_res = sess.run([init_c, init_h])
self.assertAllClose(np.zeros((4, 10, 10, 15)), init_c_res)
self.assertAllClose(np.zeros((4, 10, 10, 15)), init_h_res)
def test_get_init_learned_state(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 15
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
learned_state = True
cell = lstm_cells.BottleneckConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units)
init_c, init_h = cell.init_state(
state_name, batch_size, dtype, learned_state)
self.assertEqual(tf.float32, init_c.dtype)
self.assertEqual(tf.float32, init_h.dtype)
self.assertAllEqual([4, 10, 10, 15], init_c.shape.as_list())
self.assertAllEqual([4, 10, 10, 15], init_h.shape.as_list())
def test_unroll(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 15
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
unroll = 10
learned_state = False
inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32)
cell = lstm_cells.BottleneckConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units)
state = cell.init_state(
state_name, batch_size, dtype, learned_state)
for step in range(unroll):
output, state = cell(inputs, state)
self.assertAllEqual([4, 10, 10, 15], output.shape.as_list())
self.assertAllEqual([4, 10, 10, 15], state[0].shape.as_list())
self.assertAllEqual([4, 10, 10, 15], state[1].shape.as_list())
def test_prebottleneck(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 15
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
unroll = 10
learned_state = False
inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32)
inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32)
cell = lstm_cells.BottleneckConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
pre_bottleneck=True)
state = cell.init_state(
state_name, batch_size, dtype, learned_state)
for step in range(unroll):
if step % 2 == 0:
inputs = cell.pre_bottleneck(inputs_large, state[1], 0)
else:
inputs = cell.pre_bottleneck(inputs_small, state[1], 1)
output, state = cell(inputs, state)
self.assertAllEqual([4, 10, 10, 15], output.shape.as_list())
self.assertAllEqual([4, 10, 10, 15], state[0].shape.as_list())
self.assertAllEqual([4, 10, 10, 15], state[1].shape.as_list())
def test_flatten_state(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 15
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
unroll = 10
learned_state = False
inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32)
inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32)
cell = lstm_cells.BottleneckConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
pre_bottleneck=True,
flatten_state=True)
state = cell.init_state(
state_name, batch_size, dtype, learned_state)
for step in range(unroll):
if step % 2 == 0:
inputs = cell.pre_bottleneck(inputs_large, state[1], 0)
else:
inputs = cell.pre_bottleneck(inputs_small, state[1], 1)
output, state = cell(inputs, state)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output_result, state_result = sess.run([output, state])
self.assertAllEqual((4, 10, 10, 15), output_result.shape)
self.assertAllEqual((4, 10*10*15), state_result[0].shape)
self.assertAllEqual((4, 10*10*15), state_result[1].shape)
class GroupedConvLstmCellsTest(tf.test.TestCase):
def test_run_lstm_cell(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 16
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
learned_state = False
inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32)
cell = lstm_cells.GroupedConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
is_training=True)
init_state = cell.init_state(
state_name, batch_size, dtype, learned_state)
output, state_tuple = cell(inputs, init_state)
self.assertAllEqual([4, 10, 10, 16], output.shape.as_list())
self.assertAllEqual([4, 10, 10, 16], state_tuple[0].shape.as_list())
self.assertAllEqual([4, 10, 10, 16], state_tuple[1].shape.as_list())
def test_run_lstm_cell_with_output_bottleneck(self):
filter_size = [3, 3]
output_dim = 10
output_size = [output_dim] * 2
num_units = 16
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
learned_state = False
inputs = tf.zeros([batch_size, output_dim, output_dim, 3], dtype=tf.float32)
cell = lstm_cells.GroupedConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
is_training=True,
output_bottleneck=True)
init_state = cell.init_state(
state_name, batch_size, dtype, learned_state)
output, state_tuple = cell(inputs, init_state)
self.assertAllEqual([4, 10, 10, 32], output.shape.as_list())
self.assertAllEqual([4, 10, 10, 16], state_tuple[0].shape.as_list())
self.assertAllEqual([4, 10, 10, 16], state_tuple[1].shape.as_list())
def test_get_init_state(self):
filter_size = [3, 3]
output_dim = 10
output_size = [output_dim] * 2
num_units = 16
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
learned_state = False
cell = lstm_cells.GroupedConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
is_training=True)
init_c, init_h = cell.init_state(
state_name, batch_size, dtype, learned_state)
self.assertEqual(tf.float32, init_c.dtype)
self.assertEqual(tf.float32, init_h.dtype)
with self.test_session() as sess:
init_c_res, init_h_res = sess.run([init_c, init_h])
self.assertAllClose(np.zeros((4, 10, 10, 16)), init_c_res)
self.assertAllClose(np.zeros((4, 10, 10, 16)), init_h_res)
def test_get_init_learned_state(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 16
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
learned_state = True
cell = lstm_cells.GroupedConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
is_training=True)
init_c, init_h = cell.init_state(
state_name, batch_size, dtype, learned_state)
self.assertEqual(tf.float32, init_c.dtype)
self.assertEqual(tf.float32, init_h.dtype)
self.assertAllEqual([4, 10, 10, 16], init_c.shape.as_list())
self.assertAllEqual([4, 10, 10, 16], init_h.shape.as_list())
def test_unroll(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 16
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
unroll = 10
learned_state = False
inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32)
cell = lstm_cells.GroupedConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
is_training=True)
state = cell.init_state(
state_name, batch_size, dtype, learned_state)
for step in range(unroll):
output, state = cell(inputs, state)
self.assertAllEqual([4, 10, 10, 16], output.shape.as_list())
self.assertAllEqual([4, 10, 10, 16], state[0].shape.as_list())
self.assertAllEqual([4, 10, 10, 16], state[1].shape.as_list())
def test_prebottleneck(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 16
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
unroll = 10
learned_state = False
inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32)
inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32)
cell = lstm_cells.GroupedConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
is_training=True,
pre_bottleneck=True)
state = cell.init_state(
state_name, batch_size, dtype, learned_state)
for step in range(unroll):
if step % 2 == 0:
inputs = cell.pre_bottleneck(inputs_large, state[1], 0)
else:
inputs = cell.pre_bottleneck(inputs_small, state[1], 1)
output, state = cell(inputs, state)
self.assertAllEqual([4, 10, 10, 16], output.shape.as_list())
self.assertAllEqual([4, 10, 10, 16], state[0].shape.as_list())
self.assertAllEqual([4, 10, 10, 16], state[1].shape.as_list())
def test_flatten_state(self):
filter_size = [3, 3]
output_size = [10, 10]
num_units = 16
state_name = 'lstm_state'
batch_size = 4
dtype = tf.float32
unroll = 10
learned_state = False
inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32)
inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32)
cell = lstm_cells.GroupedConvLSTMCell(
filter_size=filter_size,
output_size=output_size,
num_units=num_units,
is_training=True,
pre_bottleneck=True,
flatten_state=True)
state = cell.init_state(
state_name, batch_size, dtype, learned_state)
for step in range(unroll):
if step % 2 == 0:
inputs = cell.pre_bottleneck(inputs_large, state[1], 0)
else:
inputs = cell.pre_bottleneck(inputs_small, state[1], 1)
output, state = cell(inputs, state)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output_result, state_result = sess.run([output, state])
self.assertAllEqual((4, 10, 10, 16), output_result.shape)
self.assertAllEqual((4, 10*10*16), state_result[0].shape)
self.assertAllEqual((4, 10*10*16), state_result[1].shape)
if __name__ == '__main__':
tf.test.main()
| 14,237 | 33.474576 | 80 | py |
models | models-master/research/lstm_object_detection/lstm/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/lstm_object_detection/lstm/utils_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_object_detection.lstm.utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from lstm_object_detection.lstm import utils
class QuantizableUtilsTest(tf.test.TestCase):
def test_quantizable_concat_is_training(self):
inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32)
inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32)
concat_in_train = utils.quantizable_concat([inputs_1, inputs_2],
axis=3,
is_training=True)
self.assertAllEqual([4, 10, 10, 3], concat_in_train.shape.as_list())
self._check_min_max_ema(tf.get_default_graph())
self._check_min_max_vars(tf.get_default_graph())
def test_quantizable_concat_inference(self):
inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32)
inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32)
concat_in_train = utils.quantizable_concat([inputs_1, inputs_2],
axis=3,
is_training=False)
self.assertAllEqual([4, 10, 10, 3], concat_in_train.shape.as_list())
self._check_no_min_max_ema(tf.get_default_graph())
self._check_min_max_vars(tf.get_default_graph())
def test_quantizable_concat_not_quantized_is_training(self):
inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32)
inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32)
concat_in_train = utils.quantizable_concat([inputs_1, inputs_2],
axis=3,
is_training=True,
is_quantized=False)
self.assertAllEqual([4, 10, 10, 3], concat_in_train.shape.as_list())
self._check_no_min_max_ema(tf.get_default_graph())
self._check_no_min_max_vars(tf.get_default_graph())
def test_quantizable_concat_not_quantized_inference(self):
inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32)
inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32)
concat_in_train = utils.quantizable_concat([inputs_1, inputs_2],
axis=3,
is_training=False,
is_quantized=False)
self.assertAllEqual([4, 10, 10, 3], concat_in_train.shape.as_list())
self._check_no_min_max_ema(tf.get_default_graph())
self._check_no_min_max_vars(tf.get_default_graph())
def test_quantize_op_is_training(self):
inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32)
outputs = utils.quantize_op(inputs)
self.assertAllEqual(inputs.shape.as_list(), outputs.shape.as_list())
self._check_min_max_ema(tf.get_default_graph())
self._check_min_max_vars(tf.get_default_graph())
def test_quantize_op_inference(self):
inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32)
outputs = utils.quantize_op(inputs, is_training=False)
self.assertAllEqual(inputs.shape.as_list(), outputs.shape.as_list())
self._check_no_min_max_ema(tf.get_default_graph())
self._check_min_max_vars(tf.get_default_graph())
def test_fixed_quantize_op(self):
inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32)
outputs = utils.fixed_quantize_op(inputs)
self.assertAllEqual(inputs.shape.as_list(), outputs.shape.as_list())
self._check_no_min_max_ema(tf.get_default_graph())
self._check_no_min_max_vars(tf.get_default_graph())
def _check_min_max_vars(self, graph):
op_types = [op.type for op in graph.get_operations()]
self.assertTrue(
any('FakeQuantWithMinMaxVars' in op_type for op_type in op_types))
def _check_min_max_ema(self, graph):
op_names = [op.name for op in graph.get_operations()]
self.assertTrue(any('AssignMinEma' in name for name in op_names))
self.assertTrue(any('AssignMaxEma' in name for name in op_names))
self.assertTrue(any('SafeQuantRangeMin' in name for name in op_names))
self.assertTrue(any('SafeQuantRangeMax' in name for name in op_names))
def _check_no_min_max_vars(self, graph):
op_types = [op.type for op in graph.get_operations()]
self.assertFalse(
any('FakeQuantWithMinMaxVars' in op_type for op_type in op_types))
def _check_no_min_max_ema(self, graph):
op_names = [op.name for op in graph.get_operations()]
self.assertFalse(any('AssignMinEma' in name for name in op_names))
self.assertFalse(any('AssignMaxEma' in name for name in op_names))
self.assertFalse(any('SafeQuantRangeMin' in name for name in op_names))
self.assertFalse(any('SafeQuantRangeMax' in name for name in op_names))
class QuantizableSeparableConv2dTest(tf.test.TestCase):
def test_quantizable_separable_conv2d(self):
inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32)
num_outputs = 64
kernel_size = [3, 3]
scope = 'QuantSeparable'
outputs = utils.quantizable_separable_conv2d(
inputs, num_outputs, kernel_size, scope=scope)
self.assertAllEqual([4, 10, 10, num_outputs], outputs.shape.as_list())
self._check_depthwise_bias_add(tf.get_default_graph(), scope)
def test_quantizable_separable_conv2d_not_quantized(self):
inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32)
num_outputs = 64
kernel_size = [3, 3]
scope = 'QuantSeparable'
outputs = utils.quantizable_separable_conv2d(
inputs, num_outputs, kernel_size, is_quantized=False, scope=scope)
self.assertAllEqual([4, 10, 10, num_outputs], outputs.shape.as_list())
self._check_no_depthwise_bias_add(tf.get_default_graph(), scope)
def _check_depthwise_bias_add(self, graph, scope):
op_names = [op.name for op in graph.get_operations()]
self.assertTrue(
any('%s_bias/BiasAdd' % scope in name for name in op_names))
def _check_no_depthwise_bias_add(self, graph, scope):
op_names = [op.name for op in graph.get_operations()]
self.assertFalse(
any('%s_bias/BiasAdd' % scope in name for name in op_names))
if __name__ == '__main__':
tf.test.main()
| 6,843 | 44.626667 | 80 | py |
models | models-master/research/lstm_object_detection/lstm/lstm_cells.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BottleneckConvLSTMCell implementation."""
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
import lstm_object_detection.lstm.utils as lstm_utils
class BottleneckConvLSTMCell(contrib_rnn.RNNCell):
"""Basic LSTM recurrent network cell using separable convolutions.
The implementation is based on:
Mobile Video Object Detection with Temporally-Aware Feature Maps
https://arxiv.org/abs/1711.06368.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
This LSTM first projects inputs to the size of the output before doing gate
computations. This saves params unless the input is less than a third of the
state size channel-wise.
"""
def __init__(self,
filter_size,
output_size,
num_units,
forget_bias=1.0,
activation=tf.tanh,
flatten_state=False,
clip_state=False,
output_bottleneck=False,
pre_bottleneck=False,
visualize_gates=False):
"""Initializes the basic LSTM cell.
Args:
filter_size: collection, conv filter size.
output_size: collection, the width/height dimensions of the cell/output.
num_units: int, The number of channels in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
activation: Activation function of the inner states.
flatten_state: if True, state tensor will be flattened and stored as a 2-d
tensor. Use for exporting the model to tfmini.
clip_state: if True, clip state between [-6, 6].
output_bottleneck: if True, the cell bottleneck will be concatenated to
the cell output.
      pre_bottleneck: if True, cell assumes that bottlenecking was performed
before the function was called.
visualize_gates: if True, add histogram summaries of all gates and outputs
to tensorboard.
"""
self._filter_size = list(filter_size)
self._output_size = list(output_size)
self._num_units = num_units
self._forget_bias = forget_bias
self._activation = activation
self._viz_gates = visualize_gates
self._flatten_state = flatten_state
self._clip_state = clip_state
self._output_bottleneck = output_bottleneck
self._pre_bottleneck = pre_bottleneck
self._param_count = self._num_units
for dim in self._output_size:
self._param_count *= dim
@property
def state_size(self):
return contrib_rnn.LSTMStateTuple(self._output_size + [self._num_units],
self._output_size + [self._num_units])
@property
def state_size_flat(self):
return contrib_rnn.LSTMStateTuple([self._param_count], [self._param_count])
@property
def output_size(self):
return self._output_size + [self._num_units]
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM) with bottlenecking.
Args:
inputs: Input tensor at the current timestep.
state: Tuple of tensors, the state and output at the previous timestep.
scope: Optional scope.
Returns:
A tuple where the first element is the LSTM output and the second is
      an LSTMStateTuple of the state at the current timestep.
"""
scope = scope or 'conv_lstm_cell'
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
c, h = state
# unflatten state if necessary
if self._flatten_state:
c = tf.reshape(c, [-1] + self.output_size)
h = tf.reshape(h, [-1] + self.output_size)
# summary of input passed into cell
if self._viz_gates:
slim.summaries.add_histogram_summary(inputs, 'cell_input')
if self._pre_bottleneck:
bottleneck = inputs
else:
bottleneck = slim.separable_conv2d(
tf.concat([inputs, h], 3),
self._num_units,
self._filter_size,
depth_multiplier=1,
activation_fn=self._activation,
normalizer_fn=None,
scope='bottleneck')
if self._viz_gates:
slim.summaries.add_histogram_summary(bottleneck, 'bottleneck')
concat = slim.separable_conv2d(
bottleneck,
4 * self._num_units,
self._filter_size,
depth_multiplier=1,
activation_fn=None,
normalizer_fn=None,
scope='gates')
i, j, f, o = tf.split(concat, 4, 3)
new_c = (
c * tf.sigmoid(f + self._forget_bias) +
tf.sigmoid(i) * self._activation(j))
if self._clip_state:
new_c = tf.clip_by_value(new_c, -6, 6)
new_h = self._activation(new_c) * tf.sigmoid(o)
# summary of cell output and new state
if self._viz_gates:
slim.summaries.add_histogram_summary(new_h, 'cell_output')
slim.summaries.add_histogram_summary(new_c, 'cell_state')
output = new_h
if self._output_bottleneck:
output = tf.concat([new_h, bottleneck], axis=3)
# reflatten state to store it
if self._flatten_state:
new_c = tf.reshape(new_c, [-1, self._param_count])
new_h = tf.reshape(new_h, [-1, self._param_count])
return output, contrib_rnn.LSTMStateTuple(new_c, new_h)
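  # --- Worked equations added for exposition; not part of the original code.
  # The gate math in __call__ above is the standard LSTM update, with a
  # separable conv producing 4 * num_units channels split into (i, j, f, o):
  #   c_t = c_{t-1} * sigmoid(f + forget_bias) + sigmoid(i) * activation(j)
  #   h_t = activation(c_t) * sigmoid(o)
  # where activation defaults to tf.tanh.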
def init_state(self, state_name, batch_size, dtype, learned_state=False):
"""Creates an initial state compatible with this cell.
Args:
state_name: name of the state tensor
batch_size: model batch size
dtype: dtype for the tensor values i.e. tf.float32
learned_state: whether the initial state should be learnable. If false,
the initial state is set to all 0's
Returns:
The created initial state.
"""
state_size = (
self.state_size_flat if self._flatten_state else self.state_size)
# list of 2 zero tensors or variables tensors, depending on if
# learned_state is true
# pylint: disable=g-long-ternary,g-complex-comprehension
ret_flat = [(contrib_variables.model_variable(
state_name + str(i),
shape=s,
dtype=dtype,
initializer=tf.truncated_normal_initializer(stddev=0.03))
if learned_state else tf.zeros(
[batch_size] + s, dtype=dtype, name=state_name))
for i, s in enumerate(state_size)]
# duplicates initial state across the batch axis if it's learned
if learned_state:
ret_flat = [
tf.stack([tensor
for i in range(int(batch_size))])
for tensor in ret_flat
]
for s, r in zip(state_size, ret_flat):
r.set_shape([None] + s)
return tf.nest.pack_sequence_as(structure=[1, 1], flat_sequence=ret_flat)
def pre_bottleneck(self, inputs, state, input_index):
"""Apply pre-bottleneck projection to inputs.
Pre-bottleneck operation maps features of different channels into the same
dimension. The purpose of this op is to share the features from both large
and small models in the same LSTM cell.
Args:
inputs: 4D Tensor with shape [batch_size x width x height x input_size].
state: 4D Tensor with shape [batch_size x width x height x state_size].
input_index: integer index indicating which base features the inputs
        correspond to.
Returns:
inputs: pre-bottlenecked inputs.
Raises:
ValueError: If pre_bottleneck is not set or inputs is not rank 4.
"""
# Sometimes state is a tuple, in which case it cannot be modified, e.g.
# during training, tf.contrib.training.SequenceQueueingStateSaver
# returns the state as a tuple. This should not be an issue since we
# only need to modify state[1] during export, when state should be a
# list.
if len(inputs.shape) != 4:
raise ValueError('Expect rank 4 feature tensor.')
if not self._flatten_state and len(state.shape) != 4:
raise ValueError('Expect rank 4 state tensor.')
if self._flatten_state and len(state.shape) != 2:
raise ValueError('Expect rank 2 state tensor when flatten_state is set.')
with tf.name_scope(None):
state = tf.identity(state, name='raw_inputs/init_lstm_h')
if self._flatten_state:
batch_size = inputs.shape[0]
height = inputs.shape[1]
width = inputs.shape[2]
state = tf.reshape(state, [batch_size, height, width, -1])
with tf.variable_scope('conv_lstm_cell', reuse=tf.AUTO_REUSE):
scope_name = 'bottleneck_%d' % input_index
inputs = slim.separable_conv2d(
tf.concat([inputs, state], 3),
self.output_size[-1],
self._filter_size,
depth_multiplier=1,
activation_fn=tf.nn.relu6,
normalizer_fn=None,
scope=scope_name)
# For exporting inference graph, we only mark the first timestep.
with tf.name_scope(None):
inputs = tf.identity(
inputs, name='raw_outputs/base_endpoint_%d' % (input_index + 1))
return inputs
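# --- Illustrative usage sketch added for exposition; not part of the original
# library. A minimal, hypothetical unroll of BottleneckConvLSTMCell; the tests
# in lstm_cells_test.py earlier in this directory exercise the same pattern.
def _example_unroll_bottleneck_cell(inputs, unroll_length=3, num_units=16):
  batch_size, height, width = inputs.shape.as_list()[:3]
  cell = BottleneckConvLSTMCell(
      filter_size=[3, 3], output_size=[height, width], num_units=num_units)
  state = cell.init_state('lstm_state', batch_size, tf.float32,
                          learned_state=False)
  outputs = []
  for _ in range(unroll_length):
    output, state = cell(inputs, state)
    outputs.append(output)
  return outputs, state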
class GroupedConvLSTMCell(contrib_rnn.RNNCell):
"""Basic LSTM recurrent network cell using separable convolutions.
The implementation is based on: https://arxiv.org/abs/1903.10172.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
This LSTM first projects inputs to the size of the output before doing gate
computations. This saves params unless the input is less than a third of the
state size channel-wise. Computation of bottlenecks and gates is divided
into independent groups for further savings.
"""
def __init__(self,
filter_size,
output_size,
num_units,
is_training,
forget_bias=1.0,
activation=tf.tanh,
use_batch_norm=False,
flatten_state=False,
groups=4,
clip_state=False,
scale_state=False,
output_bottleneck=False,
pre_bottleneck=False,
is_quantized=False,
visualize_gates=False,
conv_op_overrides=None):
"""Initialize the basic LSTM cell.
Args:
filter_size: collection, conv filter size
output_size: collection, the width/height dimensions of the cell/output
num_units: int, The number of channels in the LSTM cell.
is_training: Whether the LSTM is in training mode.
forget_bias: float, The bias added to forget gates (see above).
activation: Activation function of the inner states.
use_batch_norm: if True, use batch norm after convolution
flatten_state: if True, state tensor will be flattened and stored as a 2-d
tensor. Use for exporting the model to tfmini
groups: Number of groups to split the state into. Must evenly divide
num_units.
clip_state: if True, clips state between [-6, 6].
scale_state: if True, scales state so that all values are under 6 at all
times.
output_bottleneck: if True, the cell bottleneck will be concatenated to
the cell output.
      pre_bottleneck: if True, cell assumes that bottlenecking was performed
before the function was called.
is_quantized: if True, the model is in quantize mode, which requires
quantization friendly concat and separable_conv2d ops.
visualize_gates: if True, add histogram summaries of all gates and outputs
to tensorboard
conv_op_overrides: A list of convolutional operations that override the
        'bottleneck' and 'convolution' layers before the LSTM gates. If None,
        the original implementation of separable_conv will be used. The length
        of the list should be two.
Raises:
ValueError: when both clip_state and scale_state are enabled.
"""
if clip_state and scale_state:
raise ValueError('clip_state and scale_state cannot both be enabled.')
self._filter_size = list(filter_size)
self._output_size = list(output_size)
self._num_units = num_units
self._is_training = is_training
self._forget_bias = forget_bias
self._activation = activation
self._use_batch_norm = use_batch_norm
self._viz_gates = visualize_gates
self._flatten_state = flatten_state
self._param_count = self._num_units
self._groups = groups
self._scale_state = scale_state
self._clip_state = clip_state
self._output_bottleneck = output_bottleneck
self._pre_bottleneck = pre_bottleneck
self._is_quantized = is_quantized
for dim in self._output_size:
self._param_count *= dim
self._conv_op_overrides = conv_op_overrides
if self._conv_op_overrides and len(self._conv_op_overrides) != 2:
      raise ValueError('Bottleneck and Convolutional layers should be '
                       'overridden together')
@property
def state_size(self):
return contrib_rnn.LSTMStateTuple(self._output_size + [self._num_units],
self._output_size + [self._num_units])
@property
def state_size_flat(self):
return contrib_rnn.LSTMStateTuple([self._param_count], [self._param_count])
@property
def output_size(self):
return self._output_size + [self._num_units]
@property
def filter_size(self):
return self._filter_size
@property
def num_groups(self):
return self._groups
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM) with bottlenecking.
Includes logic for quantization-aware training. Note that all concats and
activations use fixed ranges unless stated otherwise.
Args:
inputs: Input tensor at the current timestep.
state: Tuple of tensors, the state at the previous timestep.
scope: Optional scope.
Returns:
A tuple where the first element is the LSTM output and the second is
      an LSTMStateTuple of the state at the current timestep.
"""
scope = scope or 'conv_lstm_cell'
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
c, h = state
# Set nodes to be under raw_inputs/ name scope for tfmini export.
with tf.name_scope(None):
c = tf.identity(c, name='raw_inputs/init_lstm_c')
# When pre_bottleneck is enabled, input h handle is in rnn_decoder.py
if not self._pre_bottleneck:
h = tf.identity(h, name='raw_inputs/init_lstm_h')
# unflatten state if necessary
if self._flatten_state:
c = tf.reshape(c, [-1] + self.output_size)
h = tf.reshape(h, [-1] + self.output_size)
c_list = tf.split(c, self._groups, axis=3)
if self._pre_bottleneck:
inputs_list = tf.split(inputs, self._groups, axis=3)
else:
h_list = tf.split(h, self._groups, axis=3)
out_bottleneck = []
out_c = []
out_h = []
# summary of input passed into cell
if self._viz_gates:
slim.summaries.add_histogram_summary(inputs, 'cell_input')
for k in range(self._groups):
if self._pre_bottleneck:
bottleneck = inputs_list[k]
else:
if self._conv_op_overrides:
bottleneck_fn = self._conv_op_overrides[0]
else:
bottleneck_fn = functools.partial(
lstm_utils.quantizable_separable_conv2d,
kernel_size=self._filter_size,
activation_fn=self._activation)
if self._use_batch_norm:
b_x = bottleneck_fn(
inputs=inputs,
num_outputs=self._num_units // self._groups,
is_quantized=self._is_quantized,
depth_multiplier=1,
normalizer_fn=None,
scope='bottleneck_%d_x' % k)
b_h = bottleneck_fn(
inputs=h_list[k],
num_outputs=self._num_units // self._groups,
is_quantized=self._is_quantized,
depth_multiplier=1,
normalizer_fn=None,
scope='bottleneck_%d_h' % k)
b_x = slim.batch_norm(
b_x,
scale=True,
is_training=self._is_training,
scope='BatchNorm_%d_X' % k)
b_h = slim.batch_norm(
b_h,
scale=True,
is_training=self._is_training,
scope='BatchNorm_%d_H' % k)
bottleneck = b_x + b_h
else:
# All concats use fixed quantization ranges to prevent rescaling
# at inference. Both |inputs| and |h_list| are tensors resulting
# from Relu6 operations so we fix the ranges to [0, 6].
bottleneck_concat = lstm_utils.quantizable_concat(
[inputs, h_list[k]],
axis=3,
is_training=False,
is_quantized=self._is_quantized,
scope='bottleneck_%d/quantized_concat' % k)
bottleneck = bottleneck_fn(
inputs=bottleneck_concat,
num_outputs=self._num_units // self._groups,
is_quantized=self._is_quantized,
depth_multiplier=1,
normalizer_fn=None,
scope='bottleneck_%d' % k)
if self._conv_op_overrides:
conv_fn = self._conv_op_overrides[1]
else:
conv_fn = functools.partial(
lstm_utils.quantizable_separable_conv2d,
kernel_size=self._filter_size,
activation_fn=None)
concat = conv_fn(
inputs=bottleneck,
num_outputs=4 * self._num_units // self._groups,
is_quantized=self._is_quantized,
depth_multiplier=1,
normalizer_fn=None,
scope='concat_conv_%d' % k)
# Since there is no activation in the previous separable conv, we
# quantize here. A starting range of [-6, 6] is used because the
# tensors are input to a Sigmoid function that saturates at these
# ranges.
concat = lstm_utils.quantize_op(
concat,
is_training=self._is_training,
default_min=-6,
default_max=6,
is_quantized=self._is_quantized,
scope='gates_%d/act_quant' % k)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(concat, 4, 3)
f_add = f + self._forget_bias
f_add = lstm_utils.quantize_op(
f_add,
is_training=self._is_training,
default_min=-6,
default_max=6,
is_quantized=self._is_quantized,
scope='forget_gate_%d/add_quant' % k)
f_act = tf.sigmoid(f_add)
a = c_list[k] * f_act
a = lstm_utils.quantize_op(
a,
is_training=self._is_training,
is_quantized=self._is_quantized,
scope='forget_gate_%d/mul_quant' % k)
i_act = tf.sigmoid(i)
j_act = self._activation(j)
# The quantization range is fixed for the relu6 to ensure that zero
# is exactly representable.
j_act = lstm_utils.fixed_quantize_op(
j_act,
fixed_min=0.0,
fixed_max=6.0,
is_quantized=self._is_quantized,
scope='new_input_%d/act_quant' % k)
b = i_act * j_act
b = lstm_utils.quantize_op(
b,
is_training=self._is_training,
is_quantized=self._is_quantized,
scope='input_gate_%d/mul_quant' % k)
new_c = a + b
# The quantization range is fixed to [0, 6] due to an optimization in
        # TFLite. The order of operations is as follows:
# Add -> FakeQuant -> Relu6 -> FakeQuant -> Concat.
# The fakequant ranges to the concat must be fixed to ensure all inputs
# to the concat have the same range, removing the need for rescaling.
# The quantization ranges input to the relu6 are propagated to its
# output. Any mismatch between these two ranges will cause an error.
new_c = lstm_utils.fixed_quantize_op(
new_c,
fixed_min=0.0,
fixed_max=6.0,
is_quantized=self._is_quantized,
scope='new_c_%d/add_quant' % k)
if not self._is_quantized:
if self._scale_state:
normalizer = tf.maximum(1.0,
tf.reduce_max(new_c, axis=(1, 2, 3)) / 6)
new_c /= tf.reshape(normalizer, [tf.shape(new_c)[0], 1, 1, 1])
elif self._clip_state:
new_c = tf.clip_by_value(new_c, -6, 6)
new_c_act = self._activation(new_c)
# The quantization range is fixed for the relu6 to ensure that zero
# is exactly representable.
new_c_act = lstm_utils.fixed_quantize_op(
new_c_act,
fixed_min=0.0,
fixed_max=6.0,
is_quantized=self._is_quantized,
scope='new_c_%d/act_quant' % k)
o_act = tf.sigmoid(o)
new_h = new_c_act * o_act
# The quantization range is fixed since it is input to a concat.
# A range of [0, 6] is used since |new_h| is a product of ranges [0, 6]
# and [0, 1].
new_h_act = lstm_utils.fixed_quantize_op(
new_h,
fixed_min=0.0,
fixed_max=6.0,
is_quantized=self._is_quantized,
scope='new_h_%d/act_quant' % k)
out_bottleneck.append(bottleneck)
out_c.append(new_c_act)
out_h.append(new_h_act)
# Since all inputs to the below concats are already quantized, we can use
# a regular concat operation.
new_c = tf.concat(out_c, axis=3)
new_h = tf.concat(out_h, axis=3)
# |bottleneck| is input to a concat with |new_h|. We must use
# quantizable_concat() with a fixed range that matches |new_h|.
bottleneck = lstm_utils.quantizable_concat(
out_bottleneck,
axis=3,
is_training=False,
is_quantized=self._is_quantized,
scope='out_bottleneck/quantized_concat')
# summary of cell output and new state
if self._viz_gates:
slim.summaries.add_histogram_summary(new_h, 'cell_output')
slim.summaries.add_histogram_summary(new_c, 'cell_state')
output = new_h
if self._output_bottleneck:
output = lstm_utils.quantizable_concat(
[new_h, bottleneck],
axis=3,
is_training=False,
is_quantized=self._is_quantized,
scope='new_output/quantized_concat')
# reflatten state to store it
if self._flatten_state:
new_c = tf.reshape(new_c, [-1, self._param_count], name='lstm_c')
new_h = tf.reshape(new_h, [-1, self._param_count], name='lstm_h')
# Set nodes to be under raw_outputs/ name scope for tfmini export.
with tf.name_scope(None):
new_c = tf.identity(new_c, name='raw_outputs/lstm_c')
new_h = tf.identity(new_h, name='raw_outputs/lstm_h')
states_and_output = contrib_rnn.LSTMStateTuple(new_c, new_h)
return output, states_and_output
def init_state(self, state_name, batch_size, dtype, learned_state=False):
"""Creates an initial state compatible with this cell.
Args:
state_name: name of the state tensor
batch_size: model batch size
dtype: dtype for the tensor values i.e. tf.float32
learned_state: whether the initial state should be learnable. If false,
the initial state is set to all 0's
Returns:
ret: the created initial state
"""
state_size = (
self.state_size_flat if self._flatten_state else self.state_size)
    # list of 2 zero tensors or variable tensors,
    # depending on whether learned_state is true
# pylint: disable=g-long-ternary,g-complex-comprehension
ret_flat = [(contrib_variables.model_variable(
state_name + str(i),
shape=s,
dtype=dtype,
initializer=tf.truncated_normal_initializer(stddev=0.03))
if learned_state else tf.zeros(
[batch_size] + s, dtype=dtype, name=state_name))
for i, s in enumerate(state_size)]
# duplicates initial state across the batch axis if it's learned
if learned_state:
ret_flat = [tf.stack([tensor for i in range(int(batch_size))])
for tensor in ret_flat]
    # reshape each flat state tensor back to its full [-1] + state shape
    ret_flat = [
        tf.reshape(r, [-1] + s) for s, r in zip(state_size, ret_flat)
    ]
ret = tf.nest.pack_sequence_as(structure=[1, 1], flat_sequence=ret_flat)
return ret
def pre_bottleneck(self, inputs, state, input_index):
"""Apply pre-bottleneck projection to inputs.
Pre-bottleneck operation maps features of different channels into the same
dimension. The purpose of this op is to share the features from both large
and small models in the same LSTM cell.
Args:
inputs: 4D Tensor with shape [batch_size x width x height x input_size].
state: 4D Tensor with shape [batch_size x width x height x state_size].
input_index: integer index indicating which base features the inputs
        correspond to.
Returns:
inputs: pre-bottlenecked inputs.
Raises:
ValueError: If pre_bottleneck is not set or inputs is not rank 4.
"""
# Sometimes state is a tuple, in which case it cannot be modified, e.g.
# during training, tf.contrib.training.SequenceQueueingStateSaver
# returns the state as a tuple. This should not be an issue since we
# only need to modify state[1] during export, when state should be a
# list.
if not self._pre_bottleneck:
raise ValueError('Only applied when pre_bottleneck is set to true.')
if len(inputs.shape) != 4:
raise ValueError('Expect a rank 4 feature tensor.')
if not self._flatten_state and len(state.shape) != 4:
raise ValueError('Expect rank 4 state tensor.')
if self._flatten_state and len(state.shape) != 2:
raise ValueError('Expect rank 2 state tensor when flatten_state is set.')
with tf.name_scope(None):
state = tf.identity(
state, name='raw_inputs/init_lstm_h_%d' % (input_index + 1))
if self._flatten_state:
batch_size = inputs.shape[0]
height = inputs.shape[1]
width = inputs.shape[2]
state = tf.reshape(state, [batch_size, height, width, -1])
with tf.variable_scope('conv_lstm_cell', reuse=tf.AUTO_REUSE):
state_split = tf.split(state, self._groups, axis=3)
with tf.variable_scope('bottleneck_%d' % input_index):
bottleneck_out = []
for k in range(self._groups):
with tf.variable_scope('group_%d' % k):
bottleneck_out.append(
lstm_utils.quantizable_separable_conv2d(
lstm_utils.quantizable_concat(
[inputs, state_split[k]],
axis=3,
is_training=self._is_training,
is_quantized=self._is_quantized,
scope='quantized_concat'),
self.output_size[-1] / self._groups,
self._filter_size,
is_quantized=self._is_quantized,
depth_multiplier=1,
activation_fn=tf.nn.relu6,
normalizer_fn=None,
scope='project'))
inputs = lstm_utils.quantizable_concat(
bottleneck_out,
axis=3,
is_training=self._is_training,
is_quantized=self._is_quantized,
scope='bottleneck_out/quantized_concat')
# For exporting inference graph, we only mark the first timestep.
with tf.name_scope(None):
inputs = tf.identity(
inputs, name='raw_outputs/base_endpoint_%d' % (input_index + 1))
return inputs
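# Illustrative usage sketch (the `cell` instance, `features` tensor and batch
# size below are assumed, not defined in this file); the cell is driven one
# timestep at a time:
#
#   state = cell.init_state('lstm_state', batch_size=4, dtype=tf.float32)
#   # When pre_bottleneck is enabled, project the features for an input first:
#   #   features = cell.pre_bottleneck(features, state[1], input_index=0)
#   output, state = cell(features, state)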
| 28,682 | 38.02449 | 82 | py |
models | models-master/research/lstm_object_detection/utils/config_util.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Added functionality to load from pipeline config for lstm framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from lstm_object_detection.protos import input_reader_google_pb2 # pylint: disable=unused-import
from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2
from object_detection.protos import pipeline_pb2
from object_detection.utils import config_util
def get_configs_from_pipeline_file(pipeline_config_path):
"""Reads configuration from a pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text
proto.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`, `lstm_model`.
Value are the corresponding config objects.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(pipeline_config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
if pipeline_config.HasExtension(internal_pipeline_pb2.lstm_model):
configs["lstm_model"] = pipeline_config.Extensions[
internal_pipeline_pb2.lstm_model]
return configs
def create_pipeline_proto_from_configs(configs):
"""Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.
This function nearly performs the inverse operation of
get_configs_from_pipeline_file(). Instead of returning a file path, it returns
a `TrainEvalPipelineConfig` object.
Args:
configs: Dictionary of configs. See get_configs_from_pipeline_file().
Returns:
A fully populated pipeline_pb2.TrainEvalPipelineConfig.
"""
pipeline_config = config_util.create_pipeline_proto_from_configs(configs)
if "lstm_model" in configs:
pipeline_config.Extensions[internal_pipeline_pb2.lstm_model].CopyFrom(
configs["lstm_model"])
return pipeline_config
def get_configs_from_multiple_files(model_config_path="",
train_config_path="",
train_input_config_path="",
eval_config_path="",
eval_input_config_path="",
lstm_config_path=""):
"""Reads training configuration from multiple config files.
Args:
model_config_path: Path to model_pb2.DetectionModel.
train_config_path: Path to train_pb2.TrainConfig.
train_input_config_path: Path to input_reader_pb2.InputReader.
eval_config_path: Path to eval_pb2.EvalConfig.
eval_input_config_path: Path to input_reader_pb2.InputReader.
lstm_config_path: Path to pipeline_pb2.LstmModel.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`, `lstm_model`.
Key/Values are returned only for valid (non-empty) strings.
"""
configs = config_util.get_configs_from_multiple_files(
model_config_path=model_config_path,
train_config_path=train_config_path,
train_input_config_path=train_input_config_path,
eval_config_path=eval_config_path,
eval_input_config_path=eval_input_config_path)
if lstm_config_path:
lstm_config = internal_pipeline_pb2.LstmModel()
with tf.gfile.GFile(lstm_config_path, "r") as f:
text_format.Merge(f.read(), lstm_config)
configs["lstm_model"] = lstm_config
return configs
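# Illustrative usage sketch (the config path below is hypothetical):
#
#   configs = get_configs_from_pipeline_file('/path/to/lstm_ssd_pipeline.config')
#   lstm_config = configs['lstm_model']
#   # ... modify configs in place as needed ...
#   pipeline_proto = create_pipeline_proto_from_configs(configs)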
| 4,369 | 39.841121 | 97 | py |
models | models-master/research/lstm_object_detection/utils/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/lstm_object_detection/utils/config_util_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.config_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2
from lstm_object_detection.utils import config_util
from object_detection.protos import pipeline_pb2
def _write_config(config, config_path):
"""Writes a config object to disk."""
config_text = text_format.MessageToString(config)
with tf.gfile.Open(config_path, "wb") as f:
f.write(config_text)
class ConfigUtilTest(tf.test.TestCase):
def test_get_configs_from_pipeline_file(self):
"""Test that proto configs can be read from pipeline config file."""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
pipeline_config.Extensions[
internal_pipeline_pb2.lstm_model].train_unroll_length = 5
pipeline_config.Extensions[
internal_pipeline_pb2.lstm_model].eval_unroll_length = 10
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
self.assertProtoEquals(pipeline_config.model, configs["model"])
self.assertProtoEquals(pipeline_config.train_config,
configs["train_config"])
self.assertProtoEquals(pipeline_config.train_input_reader,
configs["train_input_config"])
self.assertProtoEquals(pipeline_config.eval_config, configs["eval_config"])
self.assertProtoEquals(pipeline_config.eval_input_reader,
configs["eval_input_configs"])
self.assertProtoEquals(
pipeline_config.Extensions[internal_pipeline_pb2.lstm_model],
configs["lstm_model"])
def test_create_pipeline_proto_from_configs(self):
"""Tests that proto can be reconstructed from configs dictionary."""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
pipeline_config.Extensions[
internal_pipeline_pb2.lstm_model].train_unroll_length = 5
pipeline_config.Extensions[
internal_pipeline_pb2.lstm_model].eval_unroll_length = 10
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
pipeline_config_reconstructed = (
config_util.create_pipeline_proto_from_configs(configs))
self.assertEqual(pipeline_config, pipeline_config_reconstructed)
if __name__ == "__main__":
tf.test.main()
| 3,986 | 40.968421 | 80 | py |
models | models-master/research/lstm_object_detection/inputs/tf_sequence_example_decoder.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Sequence Example proto decoder.
A decoder to decode string tensors containing serialized
tensorflow.SequenceExample protos.
"""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import data_decoder
from object_detection.core import standard_fields as fields
tfexample_decoder = slim.tfexample_decoder
class BoundingBoxSequence(tfexample_decoder.ItemHandler):
"""An ItemHandler that concatenates SparseTensors to Bounding Boxes.
"""
def __init__(self, keys=None, prefix=None, return_dense=True,
default_value=-1.0):
"""Initialize the bounding box handler.
Args:
keys: A list of four key names representing the ymin, xmin, ymax, xmax
in the Example or SequenceExample.
prefix: An optional prefix for each of the bounding box keys in the
Example or SequenceExample. If provided, `prefix` is prepended to each
key in `keys`.
      return_dense: if True, returns a dense tensor; if False, returns a
sparse tensor.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if keys is not `None` and also not a list of exactly 4 keys
"""
if keys is None:
keys = ['ymin', 'xmin', 'ymax', 'xmax']
elif len(keys) != 4:
raise ValueError('BoundingBoxSequence expects 4 keys but got {}'.format(
len(keys)))
self._prefix = prefix
self._keys = keys
self._full_keys = [prefix + k for k in keys]
self._return_dense = return_dense
self._default_value = default_value
super(BoundingBoxSequence, self).__init__(self._full_keys)
def tensors_to_item(self, keys_to_tensors):
"""Maps the given dictionary of tensors to a concatenated list of bboxes.
Args:
keys_to_tensors: a mapping of TF-Example keys to parsed tensors.
Returns:
[time, num_boxes, 4] tensor of bounding box coordinates, in order
[y_min, x_min, y_max, x_max]. Whether the tensor is a SparseTensor
or a dense Tensor is determined by the return_dense parameter. Empty
positions in the sparse tensor are filled with -1.0 values.
"""
sides = []
for key in self._full_keys:
value = keys_to_tensors[key]
expanded_dims = tf.concat(
[tf.to_int64(tf.shape(value)),
tf.constant([1], dtype=tf.int64)], 0)
side = tf.sparse_reshape(value, expanded_dims)
sides.append(side)
bounding_boxes = tf.sparse_concat(2, sides)
if self._return_dense:
bounding_boxes = tf.sparse_tensor_to_dense(
bounding_boxes, default_value=self._default_value)
return bounding_boxes
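# Example: with prefix='bbox/', the BoundingBoxSequence handler reads the sparse
# 'bbox/ymin', 'bbox/xmin', 'bbox/ymax' and 'bbox/xmax' feature lists, reshapes
# each from [time, num_boxes] to [time, num_boxes, 1], and concatenates them
# along the last axis into a [time, num_boxes, 4] tensor in
# [ymin, xmin, ymax, xmax] order, filling empty positions with default_value
# when return_dense is True.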
class TFSequenceExampleDecoder(data_decoder.DataDecoder):
"""Tensorflow Sequence Example proto decoder."""
def __init__(self):
"""Constructor sets keys_to_features and items_to_handlers."""
self.keys_to_context_features = {
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/filename':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/key/sha256':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/source_id':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/height':
tf.FixedLenFeature((), tf.int64, 1),
'image/width':
tf.FixedLenFeature((), tf.int64, 1),
}
self.keys_to_features = {
'image/encoded': tf.FixedLenSequenceFeature((), tf.string),
'bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'bbox/label/index': tf.VarLenFeature(dtype=tf.int64),
'bbox/label/string': tf.VarLenFeature(tf.string),
'area': tf.VarLenFeature(tf.float32),
'is_crowd': tf.VarLenFeature(tf.int64),
'difficult': tf.VarLenFeature(tf.int64),
'group_of': tf.VarLenFeature(tf.int64),
}
self.items_to_handlers = {
fields.InputDataFields.image:
tfexample_decoder.Image(
image_key='image/encoded',
format_key='image/format',
channels=3,
repeated=True),
fields.InputDataFields.source_id: (
tfexample_decoder.Tensor('image/source_id')),
fields.InputDataFields.key: (
tfexample_decoder.Tensor('image/key/sha256')),
fields.InputDataFields.filename: (
tfexample_decoder.Tensor('image/filename')),
# Object boxes and classes.
fields.InputDataFields.groundtruth_boxes:
BoundingBoxSequence(prefix='bbox/'),
fields.InputDataFields.groundtruth_classes: (
tfexample_decoder.Tensor('bbox/label/index')),
fields.InputDataFields.groundtruth_area:
tfexample_decoder.Tensor('area'),
fields.InputDataFields.groundtruth_is_crowd: (
tfexample_decoder.Tensor('is_crowd')),
fields.InputDataFields.groundtruth_difficult: (
tfexample_decoder.Tensor('difficult')),
fields.InputDataFields.groundtruth_group_of: (
tfexample_decoder.Tensor('group_of'))
}
def decode(self, tf_seq_example_string_tensor, items=None):
"""Decodes serialized tf.SequenceExample and returns a tensor dictionary.
Args:
tf_seq_example_string_tensor: A string tensor holding a serialized
tensorflow example proto.
items: The list of items to decode. These must be a subset of the item
keys in self._items_to_handlers. If `items` is left as None, then all
of the items in self._items_to_handlers are decoded.
Returns:
A dictionary of the following tensors.
      fields.InputDataFields.image - 4D uint8 tensor of shape
        [seq, None, None, 3] containing the decoded image sequence.
fields.InputDataFields.source_id - string tensor containing original
image id.
fields.InputDataFields.key - string tensor with unique sha256 hash key.
fields.InputDataFields.filename - string tensor with original dataset
filename.
fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape
[None, 4] containing box corners.
fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape
[None] containing classes for the boxes.
fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape
[None] containing object mask area in pixel squared.
fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape
[None] indicating if the boxes enclose a crowd.
fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape
[None] indicating if the boxes represent `difficult` instances.
"""
serialized_example = tf.reshape(tf_seq_example_string_tensor, shape=[])
decoder = TFSequenceExampleDecoderHelper(self.keys_to_context_features,
self.keys_to_features,
self.items_to_handlers)
if not items:
items = decoder.list_items()
tensors = decoder.decode(serialized_example, items=items)
tensor_dict = dict(zip(items, tensors))
return tensor_dict
class TFSequenceExampleDecoderHelper(data_decoder.DataDecoder):
"""A decoder helper class for TensorFlow SequenceExamples.
To perform this decoding operation, a SequenceExampleDecoder is given a list
  of ItemHandlers. Each ItemHandler indicates the set of features it requires.
"""
def __init__(self, keys_to_context_features, keys_to_sequence_features,
items_to_handlers):
"""Constructs the decoder.
Args:
keys_to_context_features: A dictionary from TF-SequenceExample context
keys to either tf.VarLenFeature or tf.FixedLenFeature instances.
See tensorflow's parsing_ops.py.
keys_to_sequence_features: A dictionary from TF-SequenceExample sequence
keys to either tf.VarLenFeature or tf.FixedLenSequenceFeature instances.
items_to_handlers: A dictionary from items (strings) to ItemHandler
instances. Note that the ItemHandler's are provided the keys that they
use to return the final item Tensors.
Raises:
ValueError: If the same key is present for context features and sequence
features.
"""
unique_keys = set()
unique_keys.update(keys_to_context_features)
unique_keys.update(keys_to_sequence_features)
if len(unique_keys) != (
len(keys_to_context_features) + len(keys_to_sequence_features)):
# This situation is ambiguous in the decoder's keys_to_tensors variable.
raise ValueError('Context and sequence keys are not unique. \n'
' Context keys: %s \n Sequence keys: %s' %
(list(keys_to_context_features.keys()),
list(keys_to_sequence_features.keys())))
self._keys_to_context_features = keys_to_context_features
self._keys_to_sequence_features = keys_to_sequence_features
self._items_to_handlers = items_to_handlers
def list_items(self):
"""Returns keys of items."""
return self._items_to_handlers.keys()
def decode(self, serialized_example, items=None):
"""Decodes the given serialized TF-SequenceExample.
Args:
serialized_example: A serialized TF-SequenceExample tensor.
items: The list of items to decode. These must be a subset of the item
keys in self._items_to_handlers. If `items` is left as None, then all
of the items in self._items_to_handlers are decoded.
Returns:
The decoded items, a list of tensor.
"""
context, feature_list = tf.parse_single_sequence_example(
serialized_example, self._keys_to_context_features,
self._keys_to_sequence_features)
# Reshape non-sparse elements just once:
for k in self._keys_to_context_features:
v = self._keys_to_context_features[k]
if isinstance(v, tf.FixedLenFeature):
context[k] = tf.reshape(context[k], v.shape)
if not items:
items = self._items_to_handlers.keys()
outputs = []
for item in items:
handler = self._items_to_handlers[item]
keys_to_tensors = {
key: context[key] if key in context else feature_list[key]
for key in handler.keys
}
outputs.append(handler.tensors_to_item(keys_to_tensors))
return outputs
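# Illustrative usage sketch (`serialized_seq_example` is assumed to be a string
# tensor read from a TFRecord of tf.SequenceExample protos):
#
#   decoder = TFSequenceExampleDecoder()
#   tensor_dict = decoder.decode(serialized_seq_example)
#   images = tensor_dict[fields.InputDataFields.image]
#   boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]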
| 11,225 | 41.522727 | 80 | py |
models | models-master/research/lstm_object_detection/inputs/tf_sequence_example_decoder_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_object_detection.tf_sequence_example_decoder."""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import parsing_ops
from lstm_object_detection.inputs import tf_sequence_example_decoder
from object_detection.core import standard_fields as fields
class TFSequenceExampleDecoderTest(tf.test.TestCase):
"""Tests for sequence example decoder."""
def _EncodeImage(self, image_tensor, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_encoded = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_encoded
def _DecodeImage(self, image_encoded, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_decoded = tf.image.decode_jpeg(tf.constant(image_encoded)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_decoded
def testDecodeJpegImageAndBoundingBox(self):
"""Test if the decoder can correctly decode the image and bounding box.
    A random image (represented as an image tensor) is first encoded and then
    decoded to produce the groundtruth image. The same encoded image is passed
    through a sequence example and decoded by the decoder under test. The
    groundtruth image and the decoded image are expected to be equal. Similar
    checks are also applied to labels such as the bounding box.
"""
image_tensor = np.random.randint(256, size=(256, 256, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
decoded_jpeg = self._DecodeImage(encoded_jpeg)
sequence_example = example_pb2.SequenceExample(
feature_lists=feature_pb2.FeatureLists(
feature_list={
'image/encoded':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[encoded_jpeg])),
]),
'bbox/xmin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'bbox/xmax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
'bbox/ymin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'bbox/ymax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
})).SerializeToString()
example_decoder = tf_sequence_example_decoder.TFSequenceExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(sequence_example))
# Test tensor dict image dimension.
self.assertAllEqual(
(tensor_dict[fields.InputDataFields.image].get_shape().as_list()),
[None, None, None, 3])
with self.test_session() as sess:
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
tensor_dict[fields.InputDataFields.image])
tensor_dict[fields.InputDataFields.groundtruth_boxes] = tf.squeeze(
tensor_dict[fields.InputDataFields.groundtruth_boxes])
tensor_dict = sess.run(tensor_dict)
# Test decoded image.
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
# Test decoded bounding box.
self.assertAllEqual([0.0, 0.0, 1.0, 1.0],
tensor_dict[fields.InputDataFields.groundtruth_boxes])
if __name__ == '__main__':
tf.test.main()
| 4,820 | 41.289474 | 80 | py |
models | models-master/research/lstm_object_detection/inputs/seq_dataset_builder.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""tf.data.Dataset builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wish to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
from lstm_object_detection.inputs import tf_sequence_example_decoder
from lstm_object_detection.protos import input_reader_google_pb2
from object_detection.core import preprocessor
from object_detection.core import preprocessor_cache
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.utils import ops as util_ops
parallel_reader = slim.parallel_reader
# TODO(yinxiao): Make the following variable into configurable proto.
# Padding size for the labeled objects in each frame. Here we assume each
# frame has a total number of objects less than _PADDING_SIZE.
_PADDING_SIZE = 30
def _build_training_batch_dict(batch_sequences_with_states, unroll_length,
batch_size):
"""Builds training batch samples.
Args:
batch_sequences_with_states: A batch_sequences_with_states object.
unroll_length: Unrolled length for LSTM training.
batch_size: Batch size for queue outputs.
Returns:
A dictionary of tensors based on items in input_reader_config.
"""
seq_tensors_dict = {
fields.InputDataFields.image: [],
fields.InputDataFields.groundtruth_boxes: [],
fields.InputDataFields.groundtruth_classes: [],
'batch': batch_sequences_with_states,
}
for i in range(unroll_length):
for j in range(batch_size):
filtered_dict = util_ops.filter_groundtruth_with_nan_box_coordinates({
fields.InputDataFields.groundtruth_boxes: (
batch_sequences_with_states.sequences['groundtruth_boxes'][j][i]),
fields.InputDataFields.groundtruth_classes: (
batch_sequences_with_states.sequences['groundtruth_classes'][j][i]
),
})
filtered_dict = util_ops.retain_groundtruth_with_positive_classes(
filtered_dict)
seq_tensors_dict[fields.InputDataFields.image].append(
batch_sequences_with_states.sequences['image'][j][i])
seq_tensors_dict[fields.InputDataFields.groundtruth_boxes].append(
filtered_dict[fields.InputDataFields.groundtruth_boxes])
seq_tensors_dict[fields.InputDataFields.groundtruth_classes].append(
filtered_dict[fields.InputDataFields.groundtruth_classes])
seq_tensors_dict[fields.InputDataFields.image] = tuple(
seq_tensors_dict[fields.InputDataFields.image])
seq_tensors_dict[fields.InputDataFields.groundtruth_boxes] = tuple(
seq_tensors_dict[fields.InputDataFields.groundtruth_boxes])
seq_tensors_dict[fields.InputDataFields.groundtruth_classes] = tuple(
seq_tensors_dict[fields.InputDataFields.groundtruth_classes])
return seq_tensors_dict
def build(input_reader_config,
model_config,
lstm_config,
unroll_length,
data_augmentation_options=None,
batch_size=1):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: An input_reader_builder.InputReader object.
model_config: A model.proto object containing the config for the desired
DetectionModel.
lstm_config: LSTM specific configs.
unroll_length: Unrolled length for LSTM training.
data_augmentation_options: A list of tuples, where each tuple contains a
data augmentation function and a dictionary containing arguments and their
values (see preprocessor.py).
batch_size: Batch size for queue outputs.
Returns:
A dictionary of tensors based on items in the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
external_reader_config = input_reader_config.external_input_reader
external_input_reader_config = external_reader_config.Extensions[
input_reader_google_pb2.GoogleInputReader.google_input_reader]
input_reader_type = external_input_reader_config.WhichOneof('input_reader')
if input_reader_type == 'tf_record_video_input_reader':
config = external_input_reader_config.tf_record_video_input_reader
reader_type_class = tf.TFRecordReader
else:
raise ValueError(
'Unsupported reader in input_reader_config: %s' % input_reader_type)
if not config.input_path:
raise ValueError('At least one input path must be specified in '
'`input_reader_config`.')
key, value = parallel_reader.parallel_read(
config.input_path[:], # Convert `RepeatedScalarContainer` to list.
reader_class=reader_type_class,
num_epochs=(input_reader_config.num_epochs
if input_reader_config.num_epochs else None),
num_readers=input_reader_config.num_readers,
shuffle=input_reader_config.shuffle,
dtypes=[tf.string, tf.string],
capacity=input_reader_config.queue_capacity,
min_after_dequeue=input_reader_config.min_after_dequeue)
# TODO(yinxiao): Add loading instance mask option.
decoder = tf_sequence_example_decoder.TFSequenceExampleDecoder()
keys_to_decode = [
fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes
]
tensor_dict = decoder.decode(value, items=keys_to_decode)
tensor_dict['image'].set_shape([None, None, None, 3])
tensor_dict['groundtruth_boxes'].set_shape([None, None, 4])
height = model_config.ssd.image_resizer.fixed_shape_resizer.height
width = model_config.ssd.image_resizer.fixed_shape_resizer.width
# If data augmentation is specified in the config file, the preprocessor
# will be called here to augment the data as specified. Most common
# augmentations include horizontal flip and cropping.
if data_augmentation_options:
images_pre = tf.split(tensor_dict['image'], config.video_length, axis=0)
bboxes_pre = tf.split(
tensor_dict['groundtruth_boxes'], config.video_length, axis=0)
labels_pre = tf.split(
tensor_dict['groundtruth_classes'], config.video_length, axis=0)
images_proc, bboxes_proc, labels_proc = [], [], []
cache = preprocessor_cache.PreprocessorCache()
for i, _ in enumerate(images_pre):
image_dict = {
fields.InputDataFields.image:
images_pre[i],
fields.InputDataFields.groundtruth_boxes:
tf.squeeze(bboxes_pre[i], axis=0),
fields.InputDataFields.groundtruth_classes:
tf.squeeze(labels_pre[i], axis=0),
}
image_dict = preprocessor.preprocess(
image_dict,
data_augmentation_options,
func_arg_map=preprocessor.get_default_func_arg_map(),
preprocess_vars_cache=cache)
# Pads detection count to _PADDING_SIZE.
image_dict[fields.InputDataFields.groundtruth_boxes] = tf.pad(
image_dict[fields.InputDataFields.groundtruth_boxes],
[[0, _PADDING_SIZE], [0, 0]])
image_dict[fields.InputDataFields.groundtruth_boxes] = tf.slice(
image_dict[fields.InputDataFields.groundtruth_boxes], [0, 0],
[_PADDING_SIZE, -1])
image_dict[fields.InputDataFields.groundtruth_classes] = tf.pad(
image_dict[fields.InputDataFields.groundtruth_classes],
[[0, _PADDING_SIZE]])
image_dict[fields.InputDataFields.groundtruth_classes] = tf.slice(
image_dict[fields.InputDataFields.groundtruth_classes], [0],
[_PADDING_SIZE])
images_proc.append(image_dict[fields.InputDataFields.image])
bboxes_proc.append(image_dict[fields.InputDataFields.groundtruth_boxes])
labels_proc.append(image_dict[fields.InputDataFields.groundtruth_classes])
tensor_dict['image'] = tf.concat(images_proc, axis=0)
tensor_dict['groundtruth_boxes'] = tf.stack(bboxes_proc, axis=0)
tensor_dict['groundtruth_classes'] = tf.stack(labels_proc, axis=0)
else:
# Pads detection count to _PADDING_SIZE per frame.
tensor_dict['groundtruth_boxes'] = tf.pad(
tensor_dict['groundtruth_boxes'], [[0, 0], [0, _PADDING_SIZE], [0, 0]])
tensor_dict['groundtruth_boxes'] = tf.slice(
tensor_dict['groundtruth_boxes'], [0, 0, 0], [-1, _PADDING_SIZE, -1])
tensor_dict['groundtruth_classes'] = tf.pad(
tensor_dict['groundtruth_classes'], [[0, 0], [0, _PADDING_SIZE]])
tensor_dict['groundtruth_classes'] = tf.slice(
tensor_dict['groundtruth_classes'], [0, 0], [-1, _PADDING_SIZE])
tensor_dict['image'], _ = preprocessor.resize_image(
tensor_dict['image'], new_height=height, new_width=width)
num_steps = config.video_length / unroll_length
init_states = {
'lstm_state_c':
tf.zeros([height / 32, width / 32, lstm_config.lstm_state_depth]),
'lstm_state_h':
tf.zeros([height / 32, width / 32, lstm_config.lstm_state_depth]),
'lstm_state_step':
tf.constant(num_steps, shape=[]),
}
batch = sqss.batch_sequences_with_states(
input_key=key,
input_sequences=tensor_dict,
input_context={},
input_length=None,
initial_states=init_states,
num_unroll=unroll_length,
batch_size=batch_size,
num_threads=batch_size,
make_keys_unique=True,
capacity=batch_size * batch_size)
return _build_training_batch_dict(batch, unroll_length, batch_size)
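# Illustrative usage sketch (the proto objects below are assumed to be parsed
# from an LSTM object detection pipeline config):
#
#   tensor_dict = build(input_reader_proto, model_config, lstm_config,
#                       unroll_length=4, batch_size=8)
#   # tensor_dict[fields.InputDataFields.image] is a tuple of per-frame tensors.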
| 10,531 | 42.341564 | 93 | py |
models | models-master/research/lstm_object_detection/inputs/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/lstm_object_detection/inputs/seq_dataset_builder_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset_builder."""
import os
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from lstm_object_detection.inputs import seq_dataset_builder
from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2
from object_detection.builders import preprocessor_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import preprocessor_pb2
class DatasetBuilderTest(tf.test.TestCase):
def _create_tf_record(self):
path = os.path.join(self.get_temp_dir(), 'tfrecord')
writer = tf.python_io.TFRecordWriter(path)
image_tensor = np.random.randint(255, size=(16, 16, 3)).astype(np.uint8)
with self.test_session():
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
sequence_example = example_pb2.SequenceExample(
context=feature_pb2.Features(
feature={
'image/format':
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=['jpeg'.encode('utf-8')])),
'image/height':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[16])),
'image/width':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[16])),
}),
feature_lists=feature_pb2.FeatureLists(
feature_list={
'image/encoded':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[encoded_jpeg])),
]),
'image/object/bbox/xmin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'image/object/bbox/xmax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
'image/object/bbox/ymin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'image/object/bbox/ymax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
'image/object/class/label':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[2]))
]),
}))
writer.write(sequence_example.SerializeToString())
writer.close()
return path
def _get_model_configs_from_proto(self):
"""Creates a model text proto for testing.
Returns:
A dictionary of model configs.
"""
model_text_proto = """
[lstm_object_detection.protos.lstm_model] {
train_unroll_length: 4
eval_unroll_length: 4
}
model {
ssd {
feature_extractor {
type: 'lstm_mobilenet_v1_fpn'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
negative_class_weight: 2.0
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 32
width: 32
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}
}"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Merge(model_text_proto, pipeline_config)
configs = {}
configs['model'] = pipeline_config.model
configs['lstm_model'] = pipeline_config.Extensions[
internal_pipeline_pb2.lstm_model]
return configs
def _get_data_augmentation_preprocessor_proto(self):
preprocessor_text_proto = """
random_horizontal_flip {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
return preprocessor_proto
def _create_training_dict(self, tensor_dict):
image_dict = {}
all_dict = {}
all_dict['batch'] = tensor_dict.pop('batch')
for i, _ in enumerate(tensor_dict[fields.InputDataFields.image]):
for key, val in tensor_dict.items():
image_dict[key] = val[i]
image_dict[fields.InputDataFields.image] = tf.to_float(
tf.expand_dims(image_dict[fields.InputDataFields.image], 0))
suffix = str(i)
for key, val in image_dict.items():
all_dict[key + suffix] = val
return all_dict
def _get_input_proto(self, input_reader):
return """
external_input_reader {
[lstm_object_detection.protos.GoogleInputReader.google_input_reader] {
%s: {
input_path: '{0}'
data_type: TF_SEQUENCE_EXAMPLE
video_length: 4
}
}
}
""" % input_reader
def test_video_input_reader(self):
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(
self._get_input_proto('tf_record_video_input_reader'),
input_reader_proto)
configs = self._get_model_configs_from_proto()
tensor_dict = seq_dataset_builder.build(
input_reader_proto,
configs['model'],
configs['lstm_model'],
unroll_length=1)
all_dict = self._create_training_dict(tensor_dict)
self.assertEqual((1, 32, 32, 3), all_dict['image0'].shape)
self.assertEqual(4, all_dict['groundtruth_boxes0'].shape[1])
def test_build_with_data_augmentation(self):
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(
self._get_input_proto('tf_record_video_input_reader'),
input_reader_proto)
configs = self._get_model_configs_from_proto()
data_augmentation_options = [
preprocessor_builder.build(
self._get_data_augmentation_preprocessor_proto())
]
tensor_dict = seq_dataset_builder.build(
input_reader_proto,
configs['model'],
configs['lstm_model'],
unroll_length=1,
data_augmentation_options=data_augmentation_options)
all_dict = self._create_training_dict(tensor_dict)
self.assertEqual((1, 32, 32, 3), all_dict['image0'].shape)
self.assertEqual(4, all_dict['groundtruth_boxes0'].shape[1])
def test_raises_error_without_input_paths(self):
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
configs = self._get_model_configs_from_proto()
with self.assertRaises(ValueError):
_ = seq_dataset_builder.build(
input_reader_proto,
configs['model'],
configs['lstm_model'],
unroll_length=1)
if __name__ == '__main__':
tf.test.main()
| 9,180 | 31.441696 | 80 | py |
models | models-master/research/lstm_object_detection/builders/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/lstm_object_detection/builders/graph_rewriter_builder.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom version for quantized training and evaluation functions.
The main difference between this and the third_party graph_rewriter_builder.py
is that this version uses experimental_create_training_graph which allows the
customization of freeze_bn_delay.
"""
import re
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def build(graph_rewriter_config,
quant_overrides_config=None,
is_training=True,
is_export=False):
"""Returns a function that modifies default graph based on options.
Args:
graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto.
quant_overrides_config: quant_overrides_pb2.QuantOverrides proto.
is_training: whether in training or eval mode.
is_export: whether exporting the graph.
"""
def graph_rewrite_fn():
"""Function to quantize weights and activation of the default graph."""
if (graph_rewriter_config.quantization.weight_bits != 8 or
graph_rewriter_config.quantization.activation_bits != 8):
raise ValueError('Only 8bit quantization is supported')
graph = tf.get_default_graph()
# Insert custom quant ops.
if quant_overrides_config is not None:
input_to_ops_map = input_to_ops.InputToOps(graph)
for q in quant_overrides_config.quant_configs:
producer = graph.get_operation_by_name(q.op_name)
if producer is None:
raise ValueError('Op name does not exist in graph.')
context = _get_context_from_op(producer)
consumers = input_to_ops_map.ConsumerOperations(producer)
if q.fixed_range:
_insert_fixed_quant_op(
context,
q.quant_op_name,
producer,
consumers,
init_min=q.min,
init_max=q.max,
quant_delay=q.delay if is_training else 0)
else:
raise ValueError('Learned ranges are not yet supported.')
# Quantize the graph by inserting quantize ops for weights and activations
if is_training:
contrib_quantize.experimental_create_training_graph(
input_graph=graph,
quant_delay=graph_rewriter_config.quantization.delay,
freeze_bn_delay=graph_rewriter_config.quantization.delay)
else:
contrib_quantize.experimental_create_eval_graph(
input_graph=graph,
quant_delay=graph_rewriter_config.quantization.delay
if not is_export else 0)
contrib_layers.summarize_collection('quant_vars')
return graph_rewrite_fn
def _get_context_from_op(op):
"""Gets the root context name from the op name."""
context_re = re.search(r'^(.*)/([^/]+)', op.name)
if context_re:
return context_re.group(1)
return ''
def _insert_fixed_quant_op(context,
name,
producer,
consumers,
init_min=-6.0,
init_max=6.0,
quant_delay=None):
"""Adds a fake quant op with fixed ranges.
Args:
context: The parent scope of the op to be quantized.
name: The name of the fake quant op.
producer: The producer op to be quantized.
consumers: The consumer ops to the producer op.
init_min: The minimum range for the fake quant op.
init_max: The maximum range for the fake quant op.
quant_delay: Number of steps to wait before activating the fake quant op.
Raises:
ValueError: When producer operation is not directly connected to the
consumer operation.
"""
name_prefix = name if not context else context + '/' + name
inputs = producer.outputs[0]
quant = quant_ops.FixedQuantize(
inputs, init_min=init_min, init_max=init_max, scope=name_prefix)
if quant_delay and quant_delay > 0:
activate_quant = math_ops.greater_equal(
common.CreateOrGetQuantizationStep(),
quant_delay,
name=name_prefix + '/activate_quant')
quant = control_flow_ops.cond(
activate_quant,
lambda: quant,
lambda: inputs,
name=name_prefix + '/delayed_quant')
if consumers:
tensors_modified_count = common.RerouteTensor(
quant, inputs, can_modify=consumers)
# Some operations can have multiple output tensors going to the same
# consumer. Since consumers is a set, we need to ensure that
# tensors_modified_count is greater than or equal to the length of the set
# of consumers.
if tensors_modified_count < len(consumers):
raise ValueError('No inputs quantized for ops: [%s]' % ', '.join(
[consumer.name for consumer in consumers]))
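# Illustrative usage sketch (the proto objects below are assumed to be parsed
# from a pipeline config):
#
#   graph_rewrite_fn = build(graph_rewriter_proto, quant_overrides_proto,
#                            is_training=True)
#   graph_rewrite_fn()  # inserts quantization ops into the default graph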
| 5,658 | 37.236486 | 80 | py |
models | models-master/research/lstm_object_detection/builders/graph_rewriter_builder_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_rewriter_builder."""
import mock
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from lstm_object_detection.builders import graph_rewriter_builder
from lstm_object_detection.protos import quant_overrides_pb2
from object_detection.protos import graph_rewriter_pb2
class QuantizationBuilderTest(tf.test.TestCase):
def testQuantizationBuilderSetsUpCorrectTrainArguments(self):
with mock.patch.object(
contrib_quantize,
'experimental_create_training_graph') as mock_quant_fn:
with mock.patch.object(contrib_layers,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
graph_rewriter_proto.quantization.weight_bits = 8
graph_rewriter_proto.quantization.activation_bits = 8
graph_rewrite_fn = graph_rewriter_builder.build(
graph_rewriter_proto, is_training=True)
graph_rewrite_fn()
_, kwargs = mock_quant_fn.call_args
self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
self.assertEqual(kwargs['quant_delay'], 10)
mock_summarize_col.assert_called_with('quant_vars')
def testQuantizationBuilderSetsUpCorrectEvalArguments(self):
with mock.patch.object(contrib_quantize,
'experimental_create_eval_graph') as mock_quant_fn:
with mock.patch.object(contrib_layers,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
graph_rewrite_fn = graph_rewriter_builder.build(
graph_rewriter_proto, is_training=False)
graph_rewrite_fn()
_, kwargs = mock_quant_fn.call_args
self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
mock_summarize_col.assert_called_with('quant_vars')
def testQuantizationBuilderAddsQuantOverride(self):
graph = ops.Graph()
with graph.as_default():
self._buildGraph()
quant_overrides_proto = quant_overrides_pb2.QuantOverrides()
quant_config = quant_overrides_proto.quant_configs.add()
quant_config.op_name = 'test_graph/add_ab'
quant_config.quant_op_name = 'act_quant'
quant_config.fixed_range = True
quant_config.min = 0
quant_config.max = 6
quant_config.delay = 100
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
graph_rewriter_proto.quantization.weight_bits = 8
graph_rewriter_proto.quantization.activation_bits = 8
graph_rewrite_fn = graph_rewriter_builder.build(
graph_rewriter_proto,
quant_overrides_config=quant_overrides_proto,
is_training=True)
graph_rewrite_fn()
act_quant_found = False
quant_delay_found = False
for op in graph.get_operations():
if (quant_config.quant_op_name in op.name and
op.type == 'FakeQuantWithMinMaxArgs'):
act_quant_found = True
min_val = op.get_attr('min')
max_val = op.get_attr('max')
self.assertEqual(min_val, quant_config.min)
self.assertEqual(max_val, quant_config.max)
if ('activate_quant' in op.name and
quant_config.quant_op_name in op.name and op.type == 'Const'):
tensor = op.get_attr('value')
if tensor.int64_val[0] == quant_config.delay:
quant_delay_found = True
self.assertTrue(act_quant_found)
self.assertTrue(quant_delay_found)
def _buildGraph(self, scope='test_graph'):
with ops.name_scope(scope):
a = tf.constant(10, dtype=dtypes.float32, name='input_a')
b = tf.constant(20, dtype=dtypes.float32, name='input_b')
ab = tf.add(a, b, name='add_ab')
c = tf.constant(30, dtype=dtypes.float32, name='input_c')
abc = tf.multiply(ab, c, name='mul_ab_c')
return abc
if __name__ == '__main__':
tf.test.main()
| 4,956 | 41.008475 | 80 | py |
models | models-master/research/lfads/distributions.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import numpy as np
import tensorflow as tf
from utils import linear, log_sum_exp
class Poisson(object):
"""Poisson distributon
Computes the log probability under the model.
"""
def __init__(self, log_rates):
""" Create Poisson distributions with log_rates parameters.
Args:
log_rates: a tensor-like list of log rates underlying the Poisson dist.
"""
self.logr = log_rates
def logp(self, bin_counts):
"""Compute the log probability for the counts in the bin, under the model.
Args:
bin_counts: array-like integer counts
Returns:
The log-probability under the Poisson models for each element of
bin_counts.
"""
k = tf.to_float(bin_counts)
# log poisson(k, r) = log(r^k * e^(-r) / k!) = k log(r) - r - log k!
# log poisson(k, r=exp(x)) = k * x - exp(x) - lgamma(k + 1)
return k * self.logr - tf.exp(self.logr) - tf.lgamma(k + 1)
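# Usage sketch (illustrative values, not part of the original API): given log
# rates, Poisson(log_rates).logp(counts) returns one log-probability per bin.
#   log_rates = tf.log(tf.constant([[2.0, 5.0]]))
#   counts = tf.constant([[1, 4]])
#   logp_per_bin = Poisson(log_rates).logp(counts)  # shape [1, 2]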
def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):
"""Log-likelihood under a Gaussian distribution with diagonal covariance.
Returns the log-likelihood for each dimension. One should sum the
results for the log-likelihood under the full multidimensional model.
Args:
z: The value to compute the log-likelihood.
mu: The mean of the Gaussian
logvar: The log variance of the Gaussian.
Returns:
The log-likelihood under the Gaussian model.
"""
return -0.5 * (logvar + np.log(2*np.pi) + \
tf.square((z-mu)/tf.exp(0.5*logvar)))
def gaussian_pos_log_likelihood(unused_mean, logvar, noise):
"""Gaussian log-likelihood function for a posterior in VAE
Note: This function is specialized for a posterior distribution that has the
form of z = mean + sigma * noise.
Args:
unused_mean: ignore
logvar: The log variance of the distribution
noise: The noise used in the sampling of the posterior.
Returns:
The log-likelihood under the Gaussian model.
"""
# ln N(z; mean, sigma) = - ln(sigma) - 0.5 ln 2pi - noise^2 / 2
return - 0.5 * (logvar + np.log(2 * np.pi) + tf.square(noise))
class Gaussian(object):
"""Base class for Gaussian distribution classes."""
pass
class DiagonalGaussian(Gaussian):
"""Diagonal Gaussian with different constant mean and variances in each
dimension.
"""
def __init__(self, batch_size, z_size, mean, logvar):
"""Create a diagonal gaussian distribution.
Args:
batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
mean: The N-D mean of the distribution.
logvar: The N-D log variance of the diagonal distribution.
"""
size__xz = [None, z_size]
self.mean = mean # bxn already
self.logvar = logvar # bxn already
self.noise = noise = tf.random_normal(tf.shape(logvar))
self.sample = mean + tf.exp(0.5 * logvar) * noise
mean.set_shape(size__xz)
logvar.set_shape(size__xz)
self.sample.set_shape(size__xz)
def logp(self, z=None):
"""Compute the log-likelihood under the distribution.
Args:
z (optional): value to compute likelihood for, if None, use sample.
Returns:
The likelihood of z under the model.
"""
if z is None:
z = self.sample
# This is needed to make sure that the gradients are simple.
# The value of the function shouldn't change.
if z == self.sample:
return gaussian_pos_log_likelihood(self.mean, self.logvar, self.noise)
return diag_gaussian_log_likelihood(z, self.mean, self.logvar)
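# Usage sketch (illustrative, assuming mean/logvar tensors of shape
# [batch_size, z_size]): dist = DiagonalGaussian(batch_size, z_size, mean,
# logvar) draws dist.sample = mean + exp(0.5 * logvar) * noise once at graph
# construction time, and dist.logp() evaluates the log-likelihood of that
# sample via the reparameterized form above.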
class LearnableDiagonalGaussian(Gaussian):
"""Diagonal Gaussian whose mean and variance are learned parameters."""
def __init__(self, batch_size, z_size, name, mean_init=0.0,
var_init=1.0, var_min=0.0, var_max=1000000.0):
"""Create a learnable diagonal gaussian distribution.
Args:
batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
name: prefix name for the mean and log TF variables.
mean_init (optional): The N-D mean initialization of the distribution.
var_init (optional): The N-D variance initialization of the diagonal
distribution.
var_min (optional): The minimum value the learned variance can take in any
dimension.
var_max (optional): The maximum value the learned variance can take in any
dimension.
"""
size_1xn = [1, z_size]
size__xn = [None, z_size]
size_bx1 = tf.stack([batch_size, 1])
assert var_init > 0.0, "Problems"
assert var_max >= var_min, "Problems"
assert var_init >= var_min, "Problems"
assert var_max >= var_init, "Problems"
z_mean_1xn = tf.get_variable(name=name+"/mean", shape=size_1xn,
initializer=tf.constant_initializer(mean_init))
self.mean_bxn = mean_bxn = tf.tile(z_mean_1xn, size_bx1)
mean_bxn.set_shape(size__xn) # tile loses shape
log_var_init = np.log(var_init)
if var_max > var_min:
var_is_trainable = True
else:
var_is_trainable = False
z_logvar_1xn = \
tf.get_variable(name=(name+"/logvar"), shape=size_1xn,
initializer=tf.constant_initializer(log_var_init),
trainable=var_is_trainable)
if var_is_trainable:
z_logit_var_1xn = tf.exp(z_logvar_1xn)
z_var_1xn = tf.nn.sigmoid(z_logit_var_1xn)*(var_max-var_min) + var_min
z_logvar_1xn = tf.log(z_var_1xn)
logvar_bxn = tf.tile(z_logvar_1xn, size_bx1)
self.logvar_bxn = logvar_bxn
self.noise_bxn = noise_bxn = tf.random_normal(tf.shape(logvar_bxn))
self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn
def logp(self, z=None):
"""Compute the log-likelihood under the distribution.
Args:
z (optional): value to compute likelihood for, if None, use sample.
Returns:
The likelihood of z under the model.
"""
if z is None:
z = self.sample
# This is needed to make sure that the gradients are simple.
# The value of the function shouldn't change.
if z == self.sample_bxn:
return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn,
self.noise_bxn)
return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)
@property
def mean(self):
return self.mean_bxn
@property
def logvar(self):
return self.logvar_bxn
@property
def sample(self):
return self.sample_bxn
class DiagonalGaussianFromInput(Gaussian):
"""Diagonal Gaussian whose mean and variance are conditioned on other
variables.
Note: the parameters to convert from input to the learned mean and log
variance are held in this class.
"""
def __init__(self, x_bxu, z_size, name, var_min=0.0):
"""Create an input dependent diagonal Gaussian distribution.
Args:
x_bxu: The input tensor from which the mean and variance are computed,
via a linear transformation of x. I.e.
mu = Wx + b, log(var) = Mx + c
z_size: The size of the distribution.
name: The name to prefix to learned variables.
var_min (optional): Minimal variance allowed. This is an additional
way to control the amount of information getting through the stochastic
layer.
"""
size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size])
self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name+"/mean"))
logvar_bxn = linear(x_bxu, z_size, name=(name+"/logvar"))
if var_min > 0.0:
logvar_bxn = tf.log(tf.exp(logvar_bxn) + var_min)
self.logvar_bxn = logvar_bxn
self.noise_bxn = noise_bxn = tf.random_normal(size_bxn)
self.noise_bxn.set_shape([None, z_size])
self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn
def logp(self, z=None):
"""Compute the log-likelihood under the distribution.
Args:
z (optional): value to compute likelihood for, if None, use sample.
Returns:
The likelihood of z under the model.
"""
if z is None:
z = self.sample
# This is needed to make sure that the gradients are simple.
# The value of the function shouldn't change.
if z == self.sample_bxn:
return gaussian_pos_log_likelihood(self.mean_bxn,
self.logvar_bxn, self.noise_bxn)
return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)
@property
def mean(self):
return self.mean_bxn
@property
def logvar(self):
return self.logvar_bxn
@property
def sample(self):
return self.sample_bxn
class GaussianProcess:
"""Base class for Gaussian processes."""
pass
class LearnableAutoRegressive1Prior(GaussianProcess):
"""AR(1) model where autocorrelation and process variance are learned
parameters. Assumed zero mean.
"""
def __init__(self, batch_size, z_size,
autocorrelation_taus, noise_variances,
do_train_prior_ar_atau, do_train_prior_ar_nvar,
num_steps, name):
"""Create a learnable autoregressive (1) process.
Args:
batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
autocorrelation_taus: The auto correlation time constant of the AR(1)
process.
A value of 0 is uncorrelated gaussian noise.
noise_variances: The variance of the additive noise, *not* the process
variance.
do_train_prior_ar_atau: Train or leave as constant, the autocorrelation?
do_train_prior_ar_nvar: Train or leave as constant, the noise variance?
num_steps: Number of steps to run the process.
name: The name to prefix to learned TF variables.
"""
# Note the use of the plural in all of these quantities. This is intended
# to mark that even though a sample z_t from the posterior is thought of as a
# single sample of a multidimensional gaussian, the prior is actually
# thought of as U AR(1) processes, where U is the dimension of the inferred
# input.
size_bx1 = tf.stack([batch_size, 1])
size__xu = [None, z_size]
# process variance, the variance at time t over all instantiations of AR(1)
# with these parameters.
log_evar_inits_1xu = tf.expand_dims(tf.log(noise_variances), 0)
self.logevars_1xu = logevars_1xu = \
tf.Variable(log_evar_inits_1xu, name=name+"/logevars", dtype=tf.float32,
trainable=do_train_prior_ar_nvar)
self.logevars_bxu = logevars_bxu = tf.tile(logevars_1xu, size_bx1)
logevars_bxu.set_shape(size__xu) # tile loses shape
# \tau, which is the autocorrelation time constant of the AR(1) process
log_atau_inits_1xu = tf.expand_dims(tf.log(autocorrelation_taus), 0)
self.logataus_1xu = logataus_1xu = \
tf.Variable(log_atau_inits_1xu, name=name+"/logatau", dtype=tf.float32,
trainable=do_train_prior_ar_atau)
# phi in x_t = \mu + phi x_tm1 + \eps
# phi = exp(-1/tau)
# phi = exp(-1/exp(logtau))
# phi = exp(-exp(-logtau))
phis_1xu = tf.exp(-tf.exp(-logataus_1xu))
self.phis_bxu = phis_bxu = tf.tile(phis_1xu, size_bx1)
phis_bxu.set_shape(size__xu)
# process noise
# pvar = evar / (1- phi^2)
# logpvar = log ( exp(logevar) / (1 - phi^2) )
# logpvar = logevar - log(1-phi^2)
# logpvar = logevar - (log(1-phi) + log(1+phi))
self.logpvars_1xu = \
logevars_1xu - tf.log(1.0-phis_1xu) - tf.log(1.0+phis_1xu)
self.logpvars_bxu = logpvars_bxu = tf.tile(self.logpvars_1xu, size_bx1)
logpvars_bxu.set_shape(size__xu)
# process mean (zero, but included for completeness)
self.pmeans_bxu = pmeans_bxu = tf.zeros_like(phis_bxu)
# For sampling from the prior during de-novo generation.
self.means_t = means_t = [None] * num_steps
self.logvars_t = logvars_t = [None] * num_steps
self.samples_t = samples_t = [None] * num_steps
self.gaussians_t = gaussians_t = [None] * num_steps
sample_bxu = tf.zeros_like(phis_bxu)
for t in range(num_steps):
# process variance used here to make process completely stationary
if t == 0:
logvar_pt_bxu = self.logpvars_bxu
else:
logvar_pt_bxu = self.logevars_bxu
z_mean_pt_bxu = pmeans_bxu + phis_bxu * sample_bxu
gaussians_t[t] = DiagonalGaussian(batch_size, z_size,
mean=z_mean_pt_bxu,
logvar=logvar_pt_bxu)
sample_bxu = gaussians_t[t].sample
samples_t[t] = sample_bxu
logvars_t[t] = logvar_pt_bxu
means_t[t] = z_mean_pt_bxu
def logp_t(self, z_t_bxu, z_tm1_bxu=None):
"""Compute the log-likelihood under the distribution for a given time t,
not the whole sequence.
Args:
z_t_bxu: sample to compute likelihood for at time t.
z_tm1_bxu (optional): sample condition probability of z_t upon.
Returns:
The likelihood of p_t under the model at time t. i.e.
p(z_t|z_tm1_bxu) = N(z_tm1_bxu * phis, eps^2)
"""
if z_tm1_bxu is None:
return diag_gaussian_log_likelihood(z_t_bxu, self.pmeans_bxu,
self.logpvars_bxu)
else:
means_t_bxu = self.pmeans_bxu + self.phis_bxu * z_tm1_bxu
logp_tgtm1_bxu = diag_gaussian_log_likelihood(z_t_bxu,
means_t_bxu,
self.logevars_bxu)
return logp_tgtm1_bxu
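# Worked example of the parameterization above (approximate, for intuition
# only): with autocorrelation_taus = 10.0 and noise_variances = 0.1 (the
# defaults used in run_lfads.py), phi = exp(-1/10) ~= 0.905, so the process
# variance is evar / (1 - phi^2) ~= 0.1 / 0.181 ~= 0.55, which is what the
# t == 0 branch of the sampling loop uses to keep the process stationary.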
class KLCost_GaussianGaussian(object):
"""log p(x|z) + KL(q||p) terms for Gaussian posterior and Gaussian prior. See
eqn 10 and Appendix B in VAE for latter term,
http://arxiv.org/abs/1312.6114
The log p(x|z) term is the reconstruction error under the model.
The KL term represents the penalty for passing information from the encoder
to the decoder.
To sample KL(q||p), we simply sample
ln q - ln p
by drawing samples from q and averaging.
"""
def __init__(self, zs, prior_zs):
"""Create a lower bound in three parts, normalized reconstruction
cost, normalized KL divergence cost, and their sum.
E_q[ln p(z_i | z_{i+1}) / q(z_i | x)]
\int q(z) ln p(z) dz = - 0.5 ln(2pi) - 0.5 \sum (ln(sigma_p^2) + \
sigma_q^2 / sigma_p^2 + (mean_p - mean_q)^2 / sigma_p^2)
\int q(z) ln q(z) dz = - 0.5 ln(2pi) - 0.5 \sum (ln(sigma_q^2) + 1)
Args:
zs: posterior z ~ q(z|x)
prior_zs: prior zs
"""
# L = -KL + log p(x|z), to maximize bound on likelihood
# -L = KL - log p(x|z), to minimize bound on NLL
# so 'KL cost' is positive KL divergence
kl_b = 0.0
for z, prior_z in zip(zs, prior_zs):
assert isinstance(z, Gaussian)
assert isinstance(prior_z, Gaussian)
# ln(2pi) terms cancel
kl_b += 0.5 * tf.reduce_sum(
prior_z.logvar - z.logvar
+ tf.exp(z.logvar - prior_z.logvar)
+ tf.square((z.mean - prior_z.mean) / tf.exp(0.5 * prior_z.logvar))
- 1.0, [1])
self.kl_cost_b = kl_b
self.kl_cost = tf.reduce_mean(kl_b)
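# The sum above is the standard closed-form KL between two diagonal Gaussians,
# per dimension:
#   KL(q||p) = 0.5 * (log(sp^2) - log(sq^2) + sq^2/sp^2 + (mq - mp)^2/sp^2 - 1)
# with q the posterior (z) and p the prior (prior_z); the ln(2pi) terms cancel
# as noted in the code.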
class KLCost_GaussianGaussianProcessSampled(object):
""" log p(x|z) + KL(q||p) terms for Gaussian posterior and Gaussian process
prior via sampling.
The log p(x|z) term is the reconstruction error under the model.
The KL term represents the penalty for passing information from the encoder
to the decoder.
To sample KL(q||p), we simply sample
ln q - ln p
by drawing samples from q and averaging.
"""
def __init__(self, post_zs, prior_z_process):
"""Create a lower bound in three parts, normalized reconstruction
cost, normalized KL divergence cost, and their sum.
Args:
post_zs: posterior z ~ q(z|x)
prior_z_process: prior AR(1) process
"""
assert len(post_zs) > 1, "GP is for time, need more than 1 time step."
assert isinstance(prior_z_process, GaussianProcess), "Must use GP."
# L = -KL + log p(x|z), to maximize bound on likelihood
# -L = KL - log p(x|z), to minimize bound on NLL
# so 'KL cost' is positive KL divergence
z0_bxu = post_zs[0].sample
logq_bxu = post_zs[0].logp(z0_bxu)
logp_bxu = prior_z_process.logp_t(z0_bxu)
z_tm1_bxu = z0_bxu
for z_t in post_zs[1:]:
# posterior is independent in time, prior is not
z_t_bxu = z_t.sample
logq_bxu += z_t.logp(z_t_bxu)
logp_bxu += prior_z_process.logp_t(z_t_bxu, z_tm1_bxu)
z_tm1_bxu = z_t_bxu
kl_bxu = logq_bxu - logp_bxu
kl_b = tf.reduce_sum(kl_bxu, [1])
self.kl_cost_b = kl_b
self.kl_cost = tf.reduce_mean(kl_b)
| 17,394 | 34.212551 | 80 | py |
models | models-master/research/lfads/run_lfads.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lfads import LFADS
import numpy as np
import os
import tensorflow as tf
import re
import utils
import sys
MAX_INT = sys.maxsize
# Lots of hyperparameters, but most are pretty insensitive. The
# explanation of these hyperparameters is found below, in the flags
# section.
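# A hypothetical invocation using the default data paths defined below:
#   python run_lfads.py --kind=train \
#     --data_dir=/tmp/rnn_synth_data_v1.0/ \
#     --data_filename_stem=chaotic_rnn_inputs_g1p5 \
#     --lfads_save_dir=/tmp/lfads_chaotic_rnn_inputs_g1p5
# Every flag has a default, so flags are only needed to override these values.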
CHECKPOINT_PB_LOAD_NAME = "checkpoint"
CHECKPOINT_NAME = "lfads_vae"
CSV_LOG = "fitlog"
OUTPUT_FILENAME_STEM = ""
DEVICE = "gpu:0" # "cpu:0", or other gpus, e.g. "gpu:1"
MAX_CKPT_TO_KEEP = 5
MAX_CKPT_TO_KEEP_LVE = 5
PS_NEXAMPLES_TO_PROCESS = MAX_INT # if larger than number of examples, process all
EXT_INPUT_DIM = 0
IC_DIM = 64
FACTORS_DIM = 50
IC_ENC_DIM = 128
GEN_DIM = 200
GEN_CELL_INPUT_WEIGHT_SCALE = 1.0
GEN_CELL_REC_WEIGHT_SCALE = 1.0
CELL_WEIGHT_SCALE = 1.0
BATCH_SIZE = 128
LEARNING_RATE_INIT = 0.01
LEARNING_RATE_DECAY_FACTOR = 0.95
LEARNING_RATE_STOP = 0.00001
LEARNING_RATE_N_TO_COMPARE = 6
INJECT_EXT_INPUT_TO_GEN = False
DO_TRAIN_IO_ONLY = False
DO_TRAIN_ENCODER_ONLY = False
DO_RESET_LEARNING_RATE = False
FEEDBACK_FACTORS_OR_RATES = "factors"
DO_TRAIN_READIN = True
# Calibrated just above the average value for the rnn synthetic data.
MAX_GRAD_NORM = 200.0
CELL_CLIP_VALUE = 5.0
KEEP_PROB = 0.95
TEMPORAL_SPIKE_JITTER_WIDTH = 0
OUTPUT_DISTRIBUTION = 'poisson' # 'poisson' or 'gaussian'
NUM_STEPS_FOR_GEN_IC = MAX_INT # set to num_steps if greater than num_steps
DATA_DIR = "/tmp/rnn_synth_data_v1.0/"
DATA_FILENAME_STEM = "chaotic_rnn_inputs_g1p5"
LFADS_SAVE_DIR = "/tmp/lfads_chaotic_rnn_inputs_g1p5/"
CO_DIM = 1
DO_CAUSAL_CONTROLLER = False
DO_FEED_FACTORS_TO_CONTROLLER = True
CONTROLLER_INPUT_LAG = 1
PRIOR_AR_AUTOCORRELATION = 10.0
PRIOR_AR_PROCESS_VAR = 0.1
DO_TRAIN_PRIOR_AR_ATAU = True
DO_TRAIN_PRIOR_AR_NVAR = True
CI_ENC_DIM = 128
CON_DIM = 128
CO_PRIOR_VAR_SCALE = 0.1
KL_INCREASE_STEPS = 2000
L2_INCREASE_STEPS = 2000
L2_GEN_SCALE = 2000.0
L2_CON_SCALE = 0.0
# scale of regularizer on time correlation of inferred inputs
CO_MEAN_CORR_SCALE = 0.0
KL_IC_WEIGHT = 1.0
KL_CO_WEIGHT = 1.0
KL_START_STEP = 0
L2_START_STEP = 0
IC_PRIOR_VAR_MIN = 0.1
IC_PRIOR_VAR_SCALE = 0.1
IC_PRIOR_VAR_MAX = 0.1
IC_POST_VAR_MIN = 0.0001 # protection from KL blowing up
flags = tf.app.flags
flags.DEFINE_string("kind", "train",
"Type of model to build {train, \
posterior_sample_and_average, \
posterior_push_mean, \
prior_sample, write_model_params")
flags.DEFINE_string("output_dist", OUTPUT_DISTRIBUTION,
"Type of output distribution, 'poisson' or 'gaussian'")
flags.DEFINE_boolean("allow_gpu_growth", False,
"If true, only allocate amount of memory needed for \
Session. Otherwise, use full GPU memory.")
# DATA
flags.DEFINE_string("data_dir", DATA_DIR, "Data for training")
flags.DEFINE_string("data_filename_stem", DATA_FILENAME_STEM,
"Filename stem for data dictionaries.")
flags.DEFINE_string("lfads_save_dir", LFADS_SAVE_DIR, "model save dir")
flags.DEFINE_string("checkpoint_pb_load_name", CHECKPOINT_PB_LOAD_NAME,
"Name of checkpoint files, use 'checkpoint_lve' for best \
error")
flags.DEFINE_string("checkpoint_name", CHECKPOINT_NAME,
"Name of checkpoint files (.ckpt appended)")
flags.DEFINE_string("output_filename_stem", OUTPUT_FILENAME_STEM,
"Name of output file (postfix will be added)")
flags.DEFINE_string("device", DEVICE,
"Which device to use (default: \"gpu:0\", can also be \
\"cpu:0\", \"gpu:1\", etc)")
flags.DEFINE_string("csv_log", CSV_LOG,
"Name of file to keep running log of fit likelihoods, \
etc (.csv appended)")
flags.DEFINE_integer("max_ckpt_to_keep", MAX_CKPT_TO_KEEP,
"Max # of checkpoints to keep (rolling)")
flags.DEFINE_integer("ps_nexamples_to_process", PS_NEXAMPLES_TO_PROCESS,
"Number of examples to process for posterior sample and \
average (not number of samples to average over).")
flags.DEFINE_integer("max_ckpt_to_keep_lve", MAX_CKPT_TO_KEEP_LVE,
"Max # of checkpoints to keep for lowest validation error \
models (rolling)")
flags.DEFINE_integer("ext_input_dim", EXT_INPUT_DIM, "Dimension of external \
inputs")
flags.DEFINE_integer("num_steps_for_gen_ic", NUM_STEPS_FOR_GEN_IC,
"Number of steps to train the generator initial conditon.")
# If there are observed inputs, there are two ways to add that observed
# input to the model. The first is by treating as something to be
# inferred, and thus encoding the observed input via the encoders, and then
# input to the generator via the "inferred inputs" channel. Second, one
# can input the input directly into the generator. This has the downside
# of making the generation process strictly dependent on knowing the
# observed input for any generated trial.
flags.DEFINE_boolean("inject_ext_input_to_gen",
INJECT_EXT_INPUT_TO_GEN,
"Should observed inputs be input to model via encoders, \
or injected directly into generator?")
# CELL
# The combined recurrent and input weights of the encoder and
# controller cells are by default set to scale at ws/sqrt(#inputs),
# with ws=1.0. You can change this scaling with this parameter.
flags.DEFINE_float("cell_weight_scale", CELL_WEIGHT_SCALE,
"Input scaling for input weights in generator.")
# GENERATION
# Note that the dimension of the initial conditions is separated from the
# dimensions of the generator initial conditions (and a linear matrix will
# adapt the shapes if necessary). This is just another way to control
# complexity. In all likelihood, setting the ic dims to the size of the
# generator hidden state is just fine.
flags.DEFINE_integer("ic_dim", IC_DIM, "Dimension of h0")
# Setting the dimensions of the factors to something smaller than the data
# dimension is a way to get a reduced dimensionality representation of your
# data.
flags.DEFINE_integer("factors_dim", FACTORS_DIM,
"Number of factors from generator")
flags.DEFINE_integer("ic_enc_dim", IC_ENC_DIM,
"Cell hidden size, encoder of h0")
# Controlling the size of the generator is one way to control complexity of
# the dynamics (there is also l2, which will squeeze out unnecessary
# dynamics also). The modern deep learning approach is to make these cells
# as large as tolerable (from a waiting perspective), and then regularize
# them to death with drop out or whatever. I don't know if this is correct
# for the LFADS application or not.
flags.DEFINE_integer("gen_dim", GEN_DIM,
"Cell hidden size, generator.")
# The weights of the generator cell by default set to scale at
# ws/sqrt(#inputs), with ws=1.0. You can change ws for
# the input weights or the recurrent weights with these hyperparameters.
flags.DEFINE_float("gen_cell_input_weight_scale", GEN_CELL_INPUT_WEIGHT_SCALE,
"Input scaling for input weights in generator.")
flags.DEFINE_float("gen_cell_rec_weight_scale", GEN_CELL_REC_WEIGHT_SCALE,
"Input scaling for rec weights in generator.")
# KL DISTRIBUTIONS
# If you don't know what you are doing here, please leave this alone; the
# defaults should be fine for most cases, regardless of other parameters.
#
# If you don't want the prior variance to be learned, set the
# following values to the same thing: ic_prior_var_min,
# ic_prior_var_scale, ic_prior_var_max. The prior mean will be
# learned regardless.
flags.DEFINE_float("ic_prior_var_min", IC_PRIOR_VAR_MIN,
"Minimum variance in posterior h0 codes.")
flags.DEFINE_float("ic_prior_var_scale", IC_PRIOR_VAR_SCALE,
"Variance of ic prior distribution")
flags.DEFINE_float("ic_prior_var_max", IC_PRIOR_VAR_MAX,
"Maximum variance of IC prior distribution.")
# If you really want to limit the information from encoder to decoder,
# Increase ic_post_var_min above 0.0.
flags.DEFINE_float("ic_post_var_min", IC_POST_VAR_MIN,
"Minimum variance of IC posterior distribution.")
flags.DEFINE_float("co_prior_var_scale", CO_PRIOR_VAR_SCALE,
"Variance of control input prior distribution.")
flags.DEFINE_float("prior_ar_atau", PRIOR_AR_AUTOCORRELATION,
"Initial autocorrelation of AR(1) priors.")
flags.DEFINE_float("prior_ar_nvar", PRIOR_AR_PROCESS_VAR,
"Initial noise variance for AR(1) priors.")
flags.DEFINE_boolean("do_train_prior_ar_atau", DO_TRAIN_PRIOR_AR_ATAU,
"Is the value for atau an init, or the constant value?")
flags.DEFINE_boolean("do_train_prior_ar_nvar", DO_TRAIN_PRIOR_AR_NVAR,
"Is the value for noise variance an init, or the constant \
value?")
# CONTROLLER
# This parameter critically controls whether or not there is a controller
# (along with controller encoders placed into the LFADS graph. If CO_DIM >
# 1, that means there is a 1 dimensional controller outputs, if equal to 0,
# then no controller.
flags.DEFINE_integer("co_dim", CO_DIM,
"Number of control net outputs (>0 builds that graph).")
# The controller will be more powerful if it can see the encoding of the entire
# trial. However, this allows the controller to create inferred inputs that are
# acausal with respect to the actual data generation process. E.g. the data
# generator could have an input at time t, but the controller, after seeing the
# entirety of the trial could infer that the input is coming a little before
# time t, because there are no restrictions on the data the controller sees.
# One can force the controller to be causal (with respect to perturbations in
# the data generator) so that it only sees forward encodings of the data at time
# t that originate at times before or at time t. One can also control the data
# the controller sees by using an input lag (forward encoding at time [t-tlag]
# for controller input at time t. The same can be done in the reverse direction
# (controller input at time t from reverse encoding at time [t+tlag], in the
# case of an acausal controller). Setting this lag > 0 (even lag=1) can be a
# powerful way of avoiding very spiky decodes. Finally, one can manually control
# whether the factors at time t-1 are fed to the controller at time t.
#
# If you don't care about any of this, and just want to smooth your data, set
# do_causal_controller = False
# do_feed_factors_to_controller = True
# causal_input_lag = 0
flags.DEFINE_boolean("do_causal_controller",
DO_CAUSAL_CONTROLLER,
"Restrict the controller create only causal inferred \
inputs?")
# Strictly speaking, feeding either the factors or the rates to the controller
# violates causality, since the g0 gets to see all the data. This may or may not
# be only a theoretical concern.
flags.DEFINE_boolean("do_feed_factors_to_controller",
DO_FEED_FACTORS_TO_CONTROLLER,
"Should factors[t-1] be input to controller at time t?")
flags.DEFINE_string("feedback_factors_or_rates", FEEDBACK_FACTORS_OR_RATES,
"Feedback the factors or the rates to the controller? \
Acceptable values: 'factors' or 'rates'.")
flags.DEFINE_integer("controller_input_lag", CONTROLLER_INPUT_LAG,
"Time lag on the encoding to controller t-lag for \
forward, t+lag for reverse.")
flags.DEFINE_integer("ci_enc_dim", CI_ENC_DIM,
"Cell hidden size, encoder of control inputs")
flags.DEFINE_integer("con_dim", CON_DIM,
"Cell hidden size, controller")
# OPTIMIZATION
flags.DEFINE_integer("batch_size", BATCH_SIZE,
"Batch size to use during training.")
flags.DEFINE_float("learning_rate_init", LEARNING_RATE_INIT,
"Learning rate initial value")
flags.DEFINE_float("learning_rate_decay_factor", LEARNING_RATE_DECAY_FACTOR,
"Learning rate decay, decay by this fraction every so \
often.")
flags.DEFINE_float("learning_rate_stop", LEARNING_RATE_STOP,
"The lr is adaptively reduced, stop training at this value.")
# Rather than putting the learning rate on an exponentially decreasing
# schedule, the current algorithm pays attention to the training cost, and if
# it isn't regularly decreasing, it will decrease the learning rate. So far,
# it works fine, though it is not perfect.
flags.DEFINE_integer("learning_rate_n_to_compare", LEARNING_RATE_N_TO_COMPARE,
"Number of previous costs current cost has to be worse \
than, to lower learning rate.")
# This sets a value, above which, the gradients will be clipped. This hp
# is extremely useful to avoid an infrequent, but highly pathological
# problem whereby the gradient is so large that it destroys the
# optimziation by setting parameters too large, leading to a vicious cycle
# that ends in NaNs. If it's too large, it's useless, if it's too small,
# it essentially becomes the learning rate. It's pretty insensitive, though.
flags.DEFINE_float("max_grad_norm", MAX_GRAD_NORM,
"Max norm of gradient before clipping.")
# If your optimizations start "NaN-ing out", reduce this value so that
# the values of the network don't grow out of control. Typically, once
# this parameter is set to a reasonable value, one stops having numerical
# problems.
flags.DEFINE_float("cell_clip_value", CELL_CLIP_VALUE,
"Max value recurrent cell can take before being clipped.")
# This flag is used for an experiment where one sees if training a model with
# many days data can be used to learn the dynamics from a held-out days data.
# If you don't care about that particular experiment, this flag should always be
# false.
flags.DEFINE_boolean("do_train_io_only", DO_TRAIN_IO_ONLY,
"Train only the input (readin) and output (readout) \
affine functions.")
# This flag is used for an experiment where one wants to know if the dynamics
# learned by the generator generalize across conditions. In that case, you might
# train up a model on one set of data, and then only further train the encoder
# on another set of data (the conditions to be tested) so that the model is
# forced to use the same dynamics to describe that data. If you don't care about
# that particular experiment, this flag should always be false.
flags.DEFINE_boolean("do_train_encoder_only", DO_TRAIN_ENCODER_ONLY,
"Train only the encoder weights.")
flags.DEFINE_boolean("do_reset_learning_rate", DO_RESET_LEARNING_RATE,
"Reset the learning rate to initial value.")
# for multi-session "stitching" models, the per-session readin matrices map from
# neurons to input factors which are fed into the shared encoder. These are
# initialized by alignment_matrix_cxf and alignment_bias_c in the input .h5
# files. They can be fixed or made trainable.
flags.DEFINE_boolean("do_train_readin", DO_TRAIN_READIN, "Whether to train the \
readin matrices and bias vectors. False leaves them fixed \
at their initial values specified by the alignment \
matrices and vectors.")
# OVERFITTING
# Dropout is done on the input data, on controller inputs (from
# encoder), on outputs from generator to factors.
flags.DEFINE_float("keep_prob", KEEP_PROB, "Dropout keep probability.")
# It appears that the system will happily fit spikes (blessing or
# curse, depending). You may not want this. Jittering the spikes a
# bit will help (-/+ bin size, as specified here).
flags.DEFINE_integer("temporal_spike_jitter_width",
TEMPORAL_SPIKE_JITTER_WIDTH,
"Shuffle spikes around this window.")
# General note about helping ascribe controller inputs vs dynamics:
#
# If controller is heavily penalized, then it won't have any output.
# If dynamics are heavily penalized, then generator won't make
# dynamics. Note this l2 penalty is only on the recurrent portion of
# the RNNs, as dropout is also available, penalizing the feed-forward
# connections.
flags.DEFINE_float("l2_gen_scale", L2_GEN_SCALE,
"L2 regularization cost for the generator only.")
flags.DEFINE_float("l2_con_scale", L2_CON_SCALE,
"L2 regularization cost for the controller only.")
flags.DEFINE_float("co_mean_corr_scale", CO_MEAN_CORR_SCALE,
"Cost of correlation (thru time)in the means of \
controller output.")
# UNDERFITTING
# If the primary task of LFADS is "filtering" of data and not
# generation, then it is possible that the KL penalty is too strong.
# Empirically, we have found this to be the case. So we add a
# hyperparameter in front of the two KL terms (one for the initial
# conditions to the generator, the other for the controller outputs).
# You should always think of the default values as 1.0, and that
# leads to a standard VAE formulation whereby the numbers that are
# optimized are a lower-bound on the log-likelihood of the data. When
# these 2 HPs deviate from 1.0, one cannot make any statement about
# what those LL lower bounds mean anymore, and they cannot be compared
# (AFAIK).
flags.DEFINE_float("kl_ic_weight", KL_IC_WEIGHT,
"Strength of KL weight on initial conditions KL penatly.")
flags.DEFINE_float("kl_co_weight", KL_CO_WEIGHT,
"Strength of KL weight on controller output KL penalty.")
# Sometimes the task can be sufficiently hard to learn that the
# optimizer takes the 'easy route', and simply minimizes the KL
# divergence, setting it to near zero, and the optimization gets
# stuck. These two parameters will help avoid that by getting the
# optimization to 'latch' on to the main optimization, and only
# turning on the regularizers later.
flags.DEFINE_integer("kl_start_step", KL_START_STEP,
"Start increasing weight after this many steps.")
# training passes, not epochs, increase by 0.5 every kl_increase_steps
flags.DEFINE_integer("kl_increase_steps", KL_INCREASE_STEPS,
"Increase weight of kl cost to avoid local minimum.")
# Same story for l2 regularizer. One wants a simple generator, for scientific
# reasons, but not at the expense of hosing the optimization.
flags.DEFINE_integer("l2_start_step", L2_START_STEP,
"Start increasing l2 weight after this many steps.")
flags.DEFINE_integer("l2_increase_steps", L2_INCREASE_STEPS,
"Increase weight of l2 cost to avoid local minimum.")
FLAGS = flags.FLAGS
def build_model(hps, kind="train", datasets=None):
"""Builds a model from either random initialization, or saved parameters.
Args:
hps: The hyper parameters for the model.
kind: (optional) The kind of model to build. Training vs inference require
different graphs.
datasets: The datasets structure (see top of lfads.py).
Returns:
an LFADS model.
"""
build_kind = kind
if build_kind == "write_model_params":
build_kind = "train"
with tf.variable_scope("LFADS", reuse=None):
model = LFADS(hps, kind=build_kind, datasets=datasets)
if not os.path.exists(hps.lfads_save_dir):
print("Save directory %s does not exist, creating it." % hps.lfads_save_dir)
os.makedirs(hps.lfads_save_dir)
cp_pb_ln = hps.checkpoint_pb_load_name
cp_pb_ln = 'checkpoint' if cp_pb_ln == "" else cp_pb_ln
if cp_pb_ln == 'checkpoint':
print("Loading latest training checkpoint in: ", hps.lfads_save_dir)
saver = model.seso_saver
elif cp_pb_ln == 'checkpoint_lve':
print("Loading lowest validation checkpoint in: ", hps.lfads_save_dir)
saver = model.lve_saver
else:
print("Loading checkpoint: ", cp_pb_ln, ", in: ", hps.lfads_save_dir)
saver = model.seso_saver
ckpt = tf.train.get_checkpoint_state(hps.lfads_save_dir,
latest_filename=cp_pb_ln)
session = tf.get_default_session()
print("ckpt: ", ckpt)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
if kind in ["posterior_sample_and_average", "posterior_push_mean",
"prior_sample", "write_model_params"]:
print("Possible error!!! You are running ", kind, " on a newly \
initialized model!")
# cannot print ckpt.model_check_point path if no ckpt
print("Are you sure you sure a checkpoint in ", hps.lfads_save_dir,
" exists?")
tf.global_variables_initializer().run()
if ckpt:
train_step_str = re.search('-[0-9]+$', ckpt.model_checkpoint_path).group()
else:
train_step_str = '-0'
fname = 'hyperparameters' + train_step_str + '.txt'
hp_fname = os.path.join(hps.lfads_save_dir, fname)
hps_for_saving = jsonify_dict(hps)
utils.write_data(hp_fname, hps_for_saving, use_json=True)
return model
def jsonify_dict(d):
"""Turns python booleans into strings so hps dict can be written in json.
Creates a shallow-copied dictionary first, then accomplishes string
conversion.
Args:
d: hyperparameter dictionary
Returns: hyperparameter dictionary with bool's as strings
"""
d2 = d.copy() # shallow copy is fine by assumption of d being shallow
def jsonify_bool(boolean_value):
if boolean_value:
return "true"
else:
return "false"
for key in d2.keys():
if isinstance(d2[key], bool):
d2[key] = jsonify_bool(d2[key])
return d2
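# For example, jsonify_dict({'do_train_readin': True, 'co_dim': 1}) returns
# {'do_train_readin': 'true', 'co_dim': 1}; only boolean values are converted.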
def build_hyperparameter_dict(flags):
"""Simple script for saving hyper parameters. Under the hood the
flags structure isn't a dictionary, so it has to be simplified since we
want to be able to view the file as text.
Args:
flags: From tf.app.flags
Returns:
dictionary of hyper parameters (ignoring other flag types).
"""
d = {}
# Data
d['output_dist'] = flags.output_dist
d['data_dir'] = flags.data_dir
d['lfads_save_dir'] = flags.lfads_save_dir
d['checkpoint_pb_load_name'] = flags.checkpoint_pb_load_name
d['checkpoint_name'] = flags.checkpoint_name
d['output_filename_stem'] = flags.output_filename_stem
d['max_ckpt_to_keep'] = flags.max_ckpt_to_keep
d['max_ckpt_to_keep_lve'] = flags.max_ckpt_to_keep_lve
d['ps_nexamples_to_process'] = flags.ps_nexamples_to_process
d['ext_input_dim'] = flags.ext_input_dim
d['data_filename_stem'] = flags.data_filename_stem
d['device'] = flags.device
d['csv_log'] = flags.csv_log
d['num_steps_for_gen_ic'] = flags.num_steps_for_gen_ic
d['inject_ext_input_to_gen'] = flags.inject_ext_input_to_gen
# Cell
d['cell_weight_scale'] = flags.cell_weight_scale
# Generation
d['ic_dim'] = flags.ic_dim
d['factors_dim'] = flags.factors_dim
d['ic_enc_dim'] = flags.ic_enc_dim
d['gen_dim'] = flags.gen_dim
d['gen_cell_input_weight_scale'] = flags.gen_cell_input_weight_scale
d['gen_cell_rec_weight_scale'] = flags.gen_cell_rec_weight_scale
# KL distributions
d['ic_prior_var_min'] = flags.ic_prior_var_min
d['ic_prior_var_scale'] = flags.ic_prior_var_scale
d['ic_prior_var_max'] = flags.ic_prior_var_max
d['ic_post_var_min'] = flags.ic_post_var_min
d['co_prior_var_scale'] = flags.co_prior_var_scale
d['prior_ar_atau'] = flags.prior_ar_atau
d['prior_ar_nvar'] = flags.prior_ar_nvar
d['do_train_prior_ar_atau'] = flags.do_train_prior_ar_atau
d['do_train_prior_ar_nvar'] = flags.do_train_prior_ar_nvar
# Controller
d['do_causal_controller'] = flags.do_causal_controller
d['controller_input_lag'] = flags.controller_input_lag
d['do_feed_factors_to_controller'] = flags.do_feed_factors_to_controller
d['feedback_factors_or_rates'] = flags.feedback_factors_or_rates
d['co_dim'] = flags.co_dim
d['ci_enc_dim'] = flags.ci_enc_dim
d['con_dim'] = flags.con_dim
d['co_mean_corr_scale'] = flags.co_mean_corr_scale
# Optimization
d['batch_size'] = flags.batch_size
d['learning_rate_init'] = flags.learning_rate_init
d['learning_rate_decay_factor'] = flags.learning_rate_decay_factor
d['learning_rate_stop'] = flags.learning_rate_stop
d['learning_rate_n_to_compare'] = flags.learning_rate_n_to_compare
d['max_grad_norm'] = flags.max_grad_norm
d['cell_clip_value'] = flags.cell_clip_value
d['do_train_io_only'] = flags.do_train_io_only
d['do_train_encoder_only'] = flags.do_train_encoder_only
d['do_reset_learning_rate'] = flags.do_reset_learning_rate
d['do_train_readin'] = flags.do_train_readin
# Overfitting
d['keep_prob'] = flags.keep_prob
d['temporal_spike_jitter_width'] = flags.temporal_spike_jitter_width
d['l2_gen_scale'] = flags.l2_gen_scale
d['l2_con_scale'] = flags.l2_con_scale
# Underfitting
d['kl_ic_weight'] = flags.kl_ic_weight
d['kl_co_weight'] = flags.kl_co_weight
d['kl_start_step'] = flags.kl_start_step
d['kl_increase_steps'] = flags.kl_increase_steps
d['l2_start_step'] = flags.l2_start_step
d['l2_increase_steps'] = flags.l2_increase_steps
d['_clip_value'] = 80 # bounds the tf.exp to avoid INF
return d
class hps_dict_to_obj(dict):
"""Helper class allowing us to access hps dictionary more easily."""
def __getattr__(self, key):
if key in self:
return self[key]
else:
assert False, ("%s does not exist." % key)
def __setattr__(self, key, value):
self[key] = value
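# Example (illustrative): hps = hps_dict_to_obj({'batch_size': 128}) allows
# hps.batch_size as well as hps['batch_size']; accessing a missing key trips
# the assertion in __getattr__ above.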
def train(hps, datasets):
"""Train the LFADS model.
Args:
hps: The dictionary of hyperparameters.
datasets: A dictionary of data dictionaries. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
"""
model = build_model(hps, kind="train", datasets=datasets)
if hps.do_reset_learning_rate:
sess = tf.get_default_session()
sess.run(model.learning_rate.initializer)
model.train_model(datasets)
def write_model_runs(hps, datasets, output_fname=None, push_mean=False):
"""Run the model on the data in data_dict, and save the computed values.
LFADS generates a number of outputs for each examples, and these are all
saved. They are:
The mean and variance of the prior of g0.
The mean and variance of approximate posterior of g0.
The control inputs (if enabled)
The initial conditions, g0, for all examples.
The generator states for all time.
The factors for all time.
The rates for all time.
Args:
hps: The dictionary of hyperparameters.
datasets: A dictionary of data dictionaries. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
output_fname (optional): output filename stem to write the model runs.
push_mean: if False (default), generates batch_size samples for each trial
and averages the results. if True, runs each trial once without noise,
pushing the posterior mean initial conditions and control inputs through
the trained model. False is used for posterior_sample_and_average, True
is used for posterior_push_mean.
"""
model = build_model(hps, kind=hps.kind, datasets=datasets)
model.write_model_runs(datasets, output_fname, push_mean)
def write_model_samples(hps, datasets, dataset_name=None, output_fname=None):
"""Use the prior distribution to generate samples from the model.
Generates batch_size number of samples (set through FLAGS).
LFADS generates a number of outputs for each examples, and these are all
saved. They are:
The mean and variance of the prior of g0.
The control inputs (if enabled)
The initial conditions, g0, for all examples.
The generator states for all time.
The factors for all time.
The output distribution parameters (e.g. rates) for all time.
Args:
hps: The dictionary of hyperparameters.
datasets: A dictionary of data dictionaries. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
dataset_name: The name of the dataset to grab the factors -> rates
alignment matrices from. Only a concern with models trained on
multi-session data. By default, uses the first dataset in the data dict.
output_fname: The name prefix of the file in which to save the generated
samples.
"""
if not output_fname:
output_fname = "model_runs_" + hps.kind
else:
output_fname = output_fname + "model_runs_" + hps.kind
if not dataset_name:
dataset_name = datasets.keys()[0]
else:
if dataset_name not in datasets.keys():
raise ValueError("Invalid dataset name '%s'."%(dataset_name))
model = build_model(hps, kind=hps.kind, datasets=datasets)
model.write_model_samples(dataset_name, output_fname)
def write_model_parameters(hps, output_fname=None, datasets=None):
"""Save all the model parameters
Save all the parameters to hps.lfads_save_dir.
Args:
hps: The dictionary of hyperparameters.
output_fname: The prefix of the file in which to save the generated
samples.
datasets: A dictionary of data dictionaries. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
"""
if not output_fname:
output_fname = "model_params"
else:
output_fname = output_fname + "_model_params"
fname = os.path.join(hps.lfads_save_dir, output_fname)
print("Writing model parameters to: ", fname)
# save the optimizer params as well
model = build_model(hps, kind="write_model_params", datasets=datasets)
model_params = model.eval_model_parameters(use_nested=False,
include_strs="LFADS")
utils.write_data(fname, model_params, compression=None)
print("Done.")
def clean_data_dict(data_dict):
"""Add some key/value pairs to the data dict, if they are missing.
Args:
data_dict - dictionary containing data for LFADS
Returns:
data_dict with some keys filled in, if they are absent.
"""
keys = ['train_truth', 'train_ext_input', 'valid_data',
'valid_truth', 'valid_ext_input', 'valid_train']
for k in keys:
if k not in data_dict:
data_dict[k] = None
return data_dict
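# For example, a data_dict containing only 'train_data' comes back with
# 'train_truth', 'train_ext_input', 'valid_data', 'valid_truth',
# 'valid_ext_input' and 'valid_train' all filled in as None.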
def load_datasets(data_dir, data_filename_stem):
"""Load the datasets from a specified directory.
Example files look like
>data_dir/my_dataset_first_day
>data_dir/my_dataset_second_day
If my_dataset (filename) stem is in the directory, the read routine will try
and load it. The datasets dictionary will then look like
dataset['first_day'] -> (first day data dictionary)
dataset['second_day'] -> (second day data dictionary)
Args:
data_dir: The directory from which to load the datasets.
data_filename_stem: The stem of the filename for the datasets.
Returns:
datasets: a dataset dictionary, with one name->data dictionary pair for
each dataset file.
"""
print("Reading data from ", data_dir)
datasets = utils.read_datasets(data_dir, data_filename_stem)
for k, data_dict in datasets.items():
datasets[k] = clean_data_dict(data_dict)
train_total_size = len(data_dict['train_data'])
if train_total_size == 0:
print("Did not load training set.")
else:
print("Found training set with number examples: ", train_total_size)
valid_total_size = len(data_dict['valid_data'])
if valid_total_size == 0:
print("Did not load validation set.")
else:
print("Found validation set with number examples: ", valid_total_size)
return datasets
def main(_):
"""Get this whole shindig off the ground."""
d = build_hyperparameter_dict(FLAGS)
hps = hps_dict_to_obj(d) # hyper parameters
kind = FLAGS.kind
# Read the data, if necessary.
train_set = valid_set = None
if kind in ["train", "posterior_sample_and_average", "posterior_push_mean",
"prior_sample", "write_model_params"]:
datasets = load_datasets(hps.data_dir, hps.data_filename_stem)
else:
raise ValueError('Kind {} is not supported.'.format(kind))
# infer the dataset names and dataset dimensions from the loaded files
hps.kind = kind # needs to be added here, cuz not saved as hyperparam
hps.dataset_names = []
hps.dataset_dims = {}
for key in datasets:
hps.dataset_names.append(key)
hps.dataset_dims[key] = datasets[key]['data_dim']
# also store down the dimensionality of the data
# - just pull from one set, required to be same for all sets
hps.num_steps = datasets.values()[0]['num_steps']
hps.ndatasets = len(hps.dataset_names)
if hps.num_steps_for_gen_ic > hps.num_steps:
hps.num_steps_for_gen_ic = hps.num_steps
# Build and run the model, for varying purposes.
config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
if FLAGS.allow_gpu_growth:
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
with sess.as_default():
with tf.device(hps.device):
if kind == "train":
train(hps, datasets)
elif kind == "posterior_sample_and_average":
write_model_runs(hps, datasets, hps.output_filename_stem,
push_mean=False)
elif kind == "posterior_push_mean":
write_model_runs(hps, datasets, hps.output_filename_stem,
push_mean=True)
elif kind == "prior_sample":
write_model_samples(hps, datasets, hps.output_filename_stem)
elif kind == "write_model_params":
write_model_parameters(hps, hps.output_filename_stem, datasets)
else:
assert False, ("Kind %s is not implemented. " % kind)
if __name__ == "__main__":
tf.app.run()
| 34,738 | 41.572304 | 82 | py |
models | models-master/research/lfads/utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import os
import h5py
import json
import numpy as np
import tensorflow as tf
def log_sum_exp(x_k):
"""Computes log \sum exp in a numerically stable way.
log ( sum_i exp(x_i) )
log ( sum_i exp(x_i - m + m) ), with m = max(x_i)
log ( sum_i exp(x_i - m)*exp(m) )
log ( sum_i exp(x_i - m) ) + m
Args:
x_k: k-dimensional list of arguments to log_sum_exp.
Returns:
log_sum_exp of the arguments.
"""
m = tf.reduce_max(x_k)
x1_k = x_k - m
u_k = tf.exp(x1_k)
z = tf.reduce_sum(u_k)
return tf.log(z) + m
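# Numerical-stability sketch (illustrative): for x_k = [1000., 1000.] the
# naive tf.log(tf.reduce_sum(tf.exp(x_k))) overflows to inf, whereas
# log_sum_exp(x_k) evaluates to 1000 + log(2) ~= 1000.693.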
def linear(x, out_size, do_bias=True, alpha=1.0, identity_if_possible=False,
normalized=False, name=None, collections=None):
"""Linear (affine) transformation, y = x W + b, for a variety of
configurations.
Args:
x: The input tensor to the transformation.
out_size: The integer size of non-batch output dimension.
do_bias (optional): Add a learnable bias vector to the operation.
alpha (optional): A multiplicative scaling for the weight initialization
of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}.
identity_if_possible (optional): just return identity,
if x.shape[1] == out_size.
normalized (optional): Option to divide out by the norms of the rows of W.
name (optional): The name prefix to add to variables.
collections (optional): List of additional collections. (Placed in
tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.)
Returns:
In the equation, y = x W + b, returns the tensorflow op that yields y.
"""
in_size = int(x.get_shape()[1]) # from Dimension(10) -> 10
stddev = alpha/np.sqrt(float(in_size))
mat_init = tf.random_normal_initializer(0.0, stddev)
wname = (name + "/W") if name else "/W"
if identity_if_possible and in_size == out_size:
# Sometimes linear layers are nothing more than size adapters.
return tf.identity(x, name=(wname+'_ident'))
W,b = init_linear(in_size, out_size, do_bias=do_bias, alpha=alpha,
normalized=normalized, name=name, collections=collections)
if do_bias:
return tf.matmul(x, W) + b
else:
return tf.matmul(x, W)
def init_linear(in_size, out_size, do_bias=True, mat_init_value=None,
bias_init_value=None, alpha=1.0, identity_if_possible=False,
normalized=False, name=None, collections=None, trainable=True):
"""Linear (affine) transformation, y = x W + b, for a variety of
configurations.
Args:
in_size: The integer size of the non-batch input dimension. [(x),y]
out_size: The integer size of non-batch output dimension. [x,(y)]
do_bias (optional): Add a (learnable) bias vector to the operation,
if false, b will be None
mat_init_value (optional): numpy constant for matrix initialization, if None
, do random, with additional parameters.
alpha (optional): A multiplicative scaling for the weight initialization
of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}.
identity_if_possible (optional): just return identity,
if x.shape[1] == out_size.
normalized (optional): Option to divide out by the norms of the rows of W.
name (optional): The name prefix to add to variables.
collections (optional): List of additional collections. (Placed in
tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.)
Returns:
In the equation, y = x W + b, returns the pair (W, b).
"""
if mat_init_value is not None and mat_init_value.shape != (in_size, out_size):
raise ValueError(
'Provided mat_init_value must have shape [%d, %d].'%(in_size, out_size))
if bias_init_value is not None and bias_init_value.shape != (1,out_size):
raise ValueError(
'Provided bias_init_value must have shape [1,%d].'%(out_size,))
if mat_init_value is None:
stddev = alpha/np.sqrt(float(in_size))
mat_init = tf.random_normal_initializer(0.0, stddev)
wname = (name + "/W") if name else "/W"
if identity_if_possible and in_size == out_size:
return (tf.constant(np.eye(in_size).astype(np.float32)),
tf.zeros(in_size))
# Note the use of get_variable vs. tf.Variable. this is because get_variable
# does not allow the initialization of the variable with a value.
if normalized:
w_collections = [tf.GraphKeys.GLOBAL_VARIABLES, "norm-variables"]
if collections:
w_collections += collections
if mat_init_value is not None:
w = tf.Variable(mat_init_value, name=wname, collections=w_collections,
trainable=trainable)
else:
w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
collections=w_collections, trainable=trainable)
w = tf.nn.l2_normalize(w, dim=0) # x W, so xW_j = \sum_i x_bi W_ij
else:
w_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
if collections:
w_collections += collections
if mat_init_value is not None:
w = tf.Variable(mat_init_value, name=wname, collections=w_collections,
trainable=trainable)
else:
w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
collections=w_collections, trainable=trainable)
b = None
if do_bias:
b_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
if collections:
b_collections += collections
bname = (name + "/b") if name else "/b"
if bias_init_value is None:
b = tf.get_variable(bname, [1, out_size],
initializer=tf.zeros_initializer(),
collections=b_collections,
trainable=trainable)
else:
b = tf.Variable(bias_init_value, name=bname,
collections=b_collections,
trainable=trainable)
return (w, b)
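# Usage sketch (hypothetical name): W, b = init_linear(100, 20, name="readout")
# builds a [100, 20] weight matrix initialized with stddev alpha/sqrt(100) and
# a [1, 20] zero bias; pass do_bias=False to get b = None.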
def write_data(data_fname, data_dict, use_json=False, compression=None):
"""Write data in HD5F format.
Args:
data_fname: The filename of teh file in which to write the data.
data_dict: The dictionary of data to write. The keys are strings
and the values are numpy arrays.
use_json (optional): human readable format for simple items
compression (optional): The compression to use for h5py (disabled by
default because the library borks on scalars, otherwise try 'gzip').
"""
dir_name = os.path.dirname(data_fname)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if use_json:
the_file = open(data_fname,'wb')
json.dump(data_dict, the_file)
the_file.close()
else:
try:
with h5py.File(data_fname, 'w') as hf:
for k, v in data_dict.items():
clean_k = k.replace('/', '_')
if clean_k != k:
print('Warning: saving variable with name: ', k, ' as ', clean_k)
else:
print('Saving variable with name: ', clean_k)
hf.create_dataset(clean_k, data=v, compression=compression)
except IOError:
print("Cannot open %s for writing.", data_fname)
raise
def read_data(data_fname):
""" Read saved data in HDF5 format.
Args:
data_fname: The filename of the file from which to read the data.
Returns:
A dictionary whose keys will vary depending on dataset (but should
always contain the keys 'train_data' and 'valid_data') and whose
values are numpy arrays.
"""
try:
with h5py.File(data_fname, 'r') as hf:
data_dict = {k: np.array(v) for k, v in hf.items()}
return data_dict
except IOError:
print("Cannot open %s for reading." % data_fname)
raise
def write_datasets(data_path, data_fname_stem, dataset_dict, compression=None):
"""Write datasets in HD5F format.
This function assumes the dataset_dict is a mapping ( string ->
to data_dict ). It calls write_data for each data dictionary,
post-fixing the data filename with the key of the dataset.
Args:
data_path: The path to the save directory.
data_fname_stem: The filename stem of the file in which to write the data.
dataset_dict: The dictionary of datasets. The keys are strings
and the values data dictionaries (str -> numpy arrays) associations.
compression (optional): The compression to use for h5py (disabled by
default because the library borks on scalars, otherwise try 'gzip').
"""
full_name_stem = os.path.join(data_path, data_fname_stem)
for s, data_dict in dataset_dict.items():
write_data(full_name_stem + "_" + s, data_dict, compression=compression)
def read_datasets(data_path, data_fname_stem):
"""Read dataset sin HD5F format.
This function assumes the dataset_dict is a mapping ( string ->
to data_dict ). It calls write_data for each data dictionary,
post-fixing the data filename with the key of the dataset.
Args:
data_path: The path to the save directory.
data_fname_stem: The filename stem of the file in which to write the data.
"""
dataset_dict = {}
fnames = os.listdir(data_path)
print ('loading data from ' + data_path + ' with stem ' + data_fname_stem)
for fname in fnames:
if fname.startswith(data_fname_stem):
data_dict = read_data(os.path.join(data_path,fname))
idx = len(data_fname_stem) + 1
key = fname[idx:]
data_dict['data_dim'] = data_dict['train_data'].shape[2]
data_dict['num_steps'] = data_dict['train_data'].shape[1]
dataset_dict[key] = data_dict
if len(dataset_dict) == 0:
raise ValueError("Failed to load any datasets, are you sure that the "
"'--data_dir' and '--data_filename_stem' flag values "
"are correct?")
print (str(len(dataset_dict)) + ' datasets loaded')
return dataset_dict
# NUMPY utility functions
def list_t_bxn_to_list_b_txn(values_t_bxn):
"""Convert a length T list of BxN numpy tensors of length B list of TxN numpy
tensors.
Args:
values_t_bxn: The length T list of BxN numpy tensors.
Returns:
The length B list of TxN numpy tensors.
"""
T = len(values_t_bxn)
B, N = values_t_bxn[0].shape
values_b_txn = []
for b in range(B):
values_pb_txn = np.zeros([T,N])
for t in range(T):
values_pb_txn[t,:] = values_t_bxn[t][b,:]
values_b_txn.append(values_pb_txn)
return values_b_txn
def list_t_bxn_to_tensor_bxtxn(values_t_bxn):
"""Convert a length T list of BxN numpy tensors to single numpy tensor with
shape BxTxN.
Args:
values_t_bxn: The length T list of BxN numpy tensors.
Returns:
values_bxtxn: The BxTxN numpy tensor.
"""
T = len(values_t_bxn)
B, N = values_t_bxn[0].shape
values_bxtxn = np.zeros([B,T,N])
for t in range(T):
values_bxtxn[:,t,:] = values_t_bxn[t]
return values_bxtxn
def tensor_bxtxn_to_list_t_bxn(tensor_bxtxn):
"""Convert a numpy tensor with shape BxTxN to a length T list of numpy tensors
with shape BxN.
Args:
tensor_bxtxn: The BxTxN numpy tensor.
Returns:
A length T list of numpy tensors with shape BxN.
"""
values_t_bxn = []
B, T, N = tensor_bxtxn.shape
for t in range(T):
values_t_bxn.append(np.squeeze(tensor_bxtxn[:,t,:]))
return values_t_bxn
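# Shape round trip (illustrative): a length-T list of [B, N] arrays becomes a
# [B, T, N] array via list_t_bxn_to_tensor_bxtxn, and
# tensor_bxtxn_to_list_t_bxn converts it back to a length-T list. Note the
# np.squeeze above also drops singleton dimensions, so the round trip only
# preserves shapes when B > 1 and N > 1.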
def flatten(list_of_lists):
"""Takes a list of lists and returns a list of the elements.
Args:
list_of_lists: List of lists.
Returns:
flat_list: Flattened list.
flat_list_idxs: Flattened list indices.
"""
flat_list = []
flat_list_idxs = []
start_idx = 0
for item in list_of_lists:
if isinstance(item, list):
flat_list += item
l = len(item)
idxs = range(start_idx, start_idx+l)
start_idx = start_idx+l
else: # a value
flat_list.append(item)
idxs = [start_idx]
start_idx += 1
flat_list_idxs.append(idxs)
return flat_list, flat_list_idxs
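# Example of flatten() behavior (illustrative values; under Python 2, range()
# returns a plain list, so the index groups are lists too):
#   flatten([[1, 2], 3, [4]])  # -> ([1, 2, 3, 4], [[0, 1], [2], [3]])
# flat_list_idxs records where each original item landed in flat_list.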
| 12,449 | 32.831522 | 80 | py |
models | models-master/research/lfads/plot_lfads.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
def _plot_item(W, name, full_name, nspaces):
plt.figure()
if W.shape == ():
print(name, ": ", W)
elif W.shape[0] == 1:
plt.stem(W.T)
plt.title(full_name)
elif W.shape[1] == 1:
plt.stem(W)
plt.title(full_name)
else:
    plt.imshow(np.abs(W), interpolation='nearest', cmap='jet')
plt.colorbar()
plt.title(full_name)
def all_plot(d, full_name="", exclude="", nspaces=0):
"""Recursively plot all the LFADS model parameters in the nested
dictionary."""
for k, v in d.iteritems():
this_name = full_name+"/"+k
if isinstance(v, dict):
all_plot(v, full_name=this_name, exclude=exclude, nspaces=nspaces+4)
else:
if exclude == "" or exclude not in this_name:
_plot_item(v, name=k, full_name=full_name+"/"+k, nspaces=nspaces+4)
def plot_time_series(vals_bxtxn, bidx=None, n_to_plot=np.inf, scale=1.0,
color='r', title=None):
if bidx is None:
vals_txn = np.mean(vals_bxtxn, axis=0)
else:
vals_txn = vals_bxtxn[bidx,:,:]
T, N = vals_txn.shape
if n_to_plot > N:
n_to_plot = N
plt.plot(vals_txn[:,0:n_to_plot] + scale*np.array(range(n_to_plot)),
color=color, lw=1.0)
plt.axis('tight')
if title:
plt.title(title)
def plot_lfads_timeseries(data_bxtxn, model_vals, ext_input_bxtxi=None,
truth_bxtxn=None, bidx=None, output_dist="poisson",
conversion_factor=1.0, subplot_cidx=0,
col_title=None):
n_to_plot = 10
scale = 1.0
nrows = 7
plt.subplot(nrows,2,1+subplot_cidx)
if output_dist == 'poisson':
rates = means = conversion_factor * model_vals['output_dist_params']
plot_time_series(rates, bidx, n_to_plot=n_to_plot, scale=scale,
title=col_title + " rates (LFADS - red, Truth - black)")
elif output_dist == 'gaussian':
means_vars = model_vals['output_dist_params']
means, vars = np.split(means_vars,2, axis=2) # bxtxn
stds = np.sqrt(vars)
plot_time_series(means, bidx, n_to_plot=n_to_plot, scale=scale,
title=col_title + " means (LFADS - red, Truth - black)")
plot_time_series(means+stds, bidx, n_to_plot=n_to_plot, scale=scale,
color='c')
plot_time_series(means-stds, bidx, n_to_plot=n_to_plot, scale=scale,
color='c')
else:
    assert False, 'NIY'
if truth_bxtxn is not None:
plot_time_series(truth_bxtxn, bidx, n_to_plot=n_to_plot, color='k',
scale=scale)
input_title = ""
if "controller_outputs" in model_vals.keys():
input_title += " Controller Output"
plt.subplot(nrows,2,3+subplot_cidx)
u_t = model_vals['controller_outputs'][0:-1]
plot_time_series(u_t, bidx, n_to_plot=n_to_plot, color='c', scale=1.0,
title=col_title + input_title)
if ext_input_bxtxi is not None:
input_title += " External Input"
plot_time_series(ext_input_bxtxi, n_to_plot=n_to_plot, color='b',
scale=scale, title=col_title + input_title)
plt.subplot(nrows,2,5+subplot_cidx)
plot_time_series(means, bidx,
n_to_plot=n_to_plot, scale=1.0,
title=col_title + " Spikes (LFADS - red, Spikes - black)")
plot_time_series(data_bxtxn, bidx, n_to_plot=n_to_plot, color='k', scale=1.0)
plt.subplot(nrows,2,7+subplot_cidx)
plot_time_series(model_vals['factors'], bidx, n_to_plot=n_to_plot, color='b',
scale=2.0, title=col_title + " Factors")
plt.subplot(nrows,2,9+subplot_cidx)
plot_time_series(model_vals['gen_states'], bidx, n_to_plot=n_to_plot,
color='g', scale=1.0, title=col_title + " Generator State")
if bidx is not None:
data_nxt = data_bxtxn[bidx,:,:].T
params_nxt = model_vals['output_dist_params'][bidx,:,:].T
else:
data_nxt = np.mean(data_bxtxn, axis=0).T
params_nxt = np.mean(model_vals['output_dist_params'], axis=0).T
if output_dist == 'poisson':
means_nxt = params_nxt
elif output_dist == 'gaussian': # (means+vars) x time
means_nxt = np.vsplit(params_nxt,2)[0] # get means
else:
assert "NIY"
plt.subplot(nrows,2,11+subplot_cidx)
plt.imshow(data_nxt, aspect='auto', interpolation='nearest')
plt.title(col_title + ' Data')
plt.subplot(nrows,2,13+subplot_cidx)
plt.imshow(means_nxt, aspect='auto', interpolation='nearest')
plt.title(col_title + ' Means')
def plot_lfads(train_bxtxd, train_model_vals,
train_ext_input_bxtxi=None, train_truth_bxtxd=None,
valid_bxtxd=None, valid_model_vals=None,
valid_ext_input_bxtxi=None, valid_truth_bxtxd=None,
bidx=None, cf=1.0, output_dist='poisson'):
# Plotting
f = plt.figure(figsize=(18,20), tight_layout=True)
plot_lfads_timeseries(train_bxtxd, train_model_vals,
train_ext_input_bxtxi,
truth_bxtxn=train_truth_bxtxd,
conversion_factor=cf, bidx=bidx,
output_dist=output_dist, col_title='Train')
plot_lfads_timeseries(valid_bxtxd, valid_model_vals,
valid_ext_input_bxtxi,
truth_bxtxn=valid_truth_bxtxd,
conversion_factor=cf, bidx=bidx,
output_dist=output_dist,
subplot_cidx=1, col_title='Valid')
  # Convert the figure to a numpy array of width x height x 3 (last dim is RGB)
f.canvas.draw()
data = np.fromstring(f.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data_wxhx3 = data.reshape(f.canvas.get_width_height()[::-1] + (3,))
plt.close()
return data_wxhx3
| 6,564 | 35.071429 | 80 | py |
models | models-master/research/lfads/lfads.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""
LFADS - Latent Factor Analysis via Dynamical Systems.
LFADS is an unsupervised method to decompose time series data into
various factors, such as an initial condition, a generative
dynamical system, control inputs to that generator, and a low
dimensional description of the observed data, called the factors.
Additionally, the observations have a noise model (in this case
Poisson), so a denoised version of the observations is also created
(e.g. underlying rates of a Poisson distribution given the observed
event counts).
The main data structure being passed around is a dataset. This is a dictionary
of data dictionaries.
DATASET: The top level dictionary is simply name (string -> dictionary).
The nested dictionary is the DATA DICTIONARY, which has the following keys:
'train_data' and 'valid_data', whose values are the corresponding training
and validation data with shape
ExTxD, E - # examples, T - # time steps, D - # dimensions in data.
The data dictionary also has a few more keys:
  'train_ext_input' and 'valid_ext_input', if there are known external inputs
to the system being modeled, these take on dimensions:
ExTxI, E - # examples, T - # time steps, I = # dimensions in input.
'alignment_matrix_cxf' - If you are using multiple days data, it's possible
that one can align the channels (see manuscript). If so each dataset will
contain this matrix, which will be used for both the input adapter and the
output adapter for each dataset. These matrices, if provided, must be of
size [data_dim x factors] where data_dim is the number of neurons recorded
on that day, and factors is chosen and set through the '--factors' flag.
'alignment_bias_c' - See alignment_matrix_cxf. This bias will used to
the offset for the alignment transformation. It will *subtract* off the
bias from the data, so pca style inits can align factors across sessions.
If one runs LFADS on data where the true rates are known for some trials,
(say simulated, testing data, as in the example shipped with the paper), then
one can add three more fields for plotting purposes. These are 'train_truth'
and 'valid_truth', and 'conversion_factor'. These have the same dimensions as
'train_data', and 'valid_data' but represent the underlying rates of the
observations. Finally, if one needs to convert scale for plotting the true
underlying firing rates, there is the 'conversion_factor' key.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import tensorflow as tf
from distributions import LearnableDiagonalGaussian, DiagonalGaussianFromInput
from distributions import diag_gaussian_log_likelihood
from distributions import KLCost_GaussianGaussian, Poisson
from distributions import LearnableAutoRegressive1Prior
from distributions import KLCost_GaussianGaussianProcessSampled
from utils import init_linear, linear, list_t_bxn_to_tensor_bxtxn, write_data
from utils import log_sum_exp, flatten
from plot_lfads import plot_lfads
class GRU(object):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
"""
def __init__(self, num_units, forget_bias=1.0, weight_scale=1.0,
clip_value=np.inf, collections=None):
"""Create a GRU object.
Args:
num_units: Number of units in the GRU.
forget_bias (optional): Hack to help learning.
weight_scale (optional): Weights are scaled by ws/sqrt(#inputs), with
ws being the weight scale.
clip_value (optional): If the recurrent values grow above this value,
clip them.
collections (optional): List of additional collections variables should
belong to.
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._weight_scale = weight_scale
self._clip_value = clip_value
self._collections = collections
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@property
def state_multiplier(self):
return 1
def output_from_state(self, state):
"""Return the output portion of the state."""
return state
def __call__(self, inputs, state, scope=None):
"""Gated recurrent unit (GRU) function.
Args:
inputs: A 2D batch x input_dim tensor of inputs.
state: The previous state from the last time step.
scope (optional): TF variable scope for defined GRU variables.
Returns:
A tuple (state, state), where state is the newly computed state at time t.
It is returned twice to respect an interface that works for LSTMs.
"""
x = inputs
h = state
if inputs is not None:
xh = tf.concat(axis=1, values=[x, h])
else:
xh = h
with tf.variable_scope(scope or type(self).__name__): # "GRU"
with tf.variable_scope("Gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
r, u = tf.split(axis=1, num_or_size_splits=2, value=linear(xh,
2 * self._num_units,
alpha=self._weight_scale,
name="xh_2_ru",
collections=self._collections))
r, u = tf.sigmoid(r), tf.sigmoid(u + self._forget_bias)
with tf.variable_scope("Candidate"):
xrh = tf.concat(axis=1, values=[x, r * h])
c = tf.tanh(linear(xrh, self._num_units, name="xrh_2_c",
collections=self._collections))
new_h = u * h + (1 - u) * c
new_h = tf.clip_by_value(new_h, -self._clip_value, self._clip_value)
return new_h, new_h
class GenGRU(object):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
This version is specialized for the generator, but isn't as fast, so
we have two. Note this allows for l2 regularization on the recurrent
weights, but also implicitly rescales the inputs via the 1/sqrt(input)
scaling in the linear helper routine to be large magnitude, if there are
fewer inputs than recurrent state.
"""
def __init__(self, num_units, forget_bias=1.0,
input_weight_scale=1.0, rec_weight_scale=1.0, clip_value=np.inf,
input_collections=None, recurrent_collections=None):
"""Create a GRU object.
Args:
num_units: Number of units in the GRU.
forget_bias (optional): Hack to help learning.
input_weight_scale (optional): Weights are scaled ws/sqrt(#inputs), with
ws being the weight scale.
rec_weight_scale (optional): Weights are scaled ws/sqrt(#inputs),
with ws being the weight scale.
clip_value (optional): If the recurrent values grow above this value,
clip them.
input_collections (optional): List of additional collections variables
that input->rec weights should belong to.
recurrent_collections (optional): List of additional collections variables
that rec->rec weights should belong to.
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._input_weight_scale = input_weight_scale
self._rec_weight_scale = rec_weight_scale
self._clip_value = clip_value
self._input_collections = input_collections
self._rec_collections = recurrent_collections
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@property
def state_multiplier(self):
return 1
def output_from_state(self, state):
"""Return the output portion of the state."""
return state
def __call__(self, inputs, state, scope=None):
"""Gated recurrent unit (GRU) function.
Args:
inputs: A 2D batch x input_dim tensor of inputs.
state: The previous state from the last time step.
scope (optional): TF variable scope for defined GRU variables.
Returns:
A tuple (state, state), where state is the newly computed state at time t.
It is returned twice to respect an interface that works for LSTMs.
"""
x = inputs
h = state
with tf.variable_scope(scope or type(self).__name__): # "GRU"
with tf.variable_scope("Gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
r_x = u_x = 0.0
if x is not None:
r_x, u_x = tf.split(axis=1, num_or_size_splits=2, value=linear(x,
2 * self._num_units,
alpha=self._input_weight_scale,
do_bias=False,
name="x_2_ru",
normalized=False,
collections=self._input_collections))
r_h, u_h = tf.split(axis=1, num_or_size_splits=2, value=linear(h,
2 * self._num_units,
do_bias=True,
alpha=self._rec_weight_scale,
name="h_2_ru",
collections=self._rec_collections))
r = r_x + r_h
u = u_x + u_h
r, u = tf.sigmoid(r), tf.sigmoid(u + self._forget_bias)
with tf.variable_scope("Candidate"):
c_x = 0.0
if x is not None:
c_x = linear(x, self._num_units, name="x_2_c", do_bias=False,
alpha=self._input_weight_scale,
normalized=False,
collections=self._input_collections)
c_rh = linear(r*h, self._num_units, name="rh_2_c", do_bias=True,
alpha=self._rec_weight_scale,
collections=self._rec_collections)
c = tf.tanh(c_x + c_rh)
new_h = u * h + (1 - u) * c
new_h = tf.clip_by_value(new_h, -self._clip_value, self._clip_value)
return new_h, new_h
class LFADS(object):
"""LFADS - Latent Factor Analysis via Dynamical Systems.
LFADS is an unsupervised method to decompose time series data into
various factors, such as an initial condition, a generative
dynamical system, inferred inputs to that generator, and a low
dimensional description of the observed data, called the factors.
Additionally, the observations have a noise model (in this case
Poisson), so a denoised version of the observations is also created
(e.g. underlying rates of a Poisson distribution given the observed
event counts).
"""
def __init__(self, hps, kind="train", datasets=None):
"""Create an LFADS model.
train - a model for training, sampling of posteriors is used
posterior_sample_and_average - sample from the posterior, this is used
for evaluating the expected value of the outputs of LFADS, given a
specific input, by averaging over multiple samples from the approx
posterior. Also used for the lower bound on the negative
log-likelihood using IWAE error (Importance Weighed Auto-encoder).
This is the denoising operation.
prior_sample - a model for generation - sampling from priors is used
Args:
hps: The dictionary of hyper parameters.
kind: The type of model to build (see above).
datasets: A dictionary of named data_dictionaries, see top of lfads.py
"""
print("Building graph...")
all_kinds = ['train', 'posterior_sample_and_average', 'posterior_push_mean',
'prior_sample']
assert kind in all_kinds, 'Wrong kind'
if hps.feedback_factors_or_rates == "rates":
assert len(hps.dataset_names) == 1, \
"Multiple datasets not supported for rate feedback."
num_steps = hps.num_steps
ic_dim = hps.ic_dim
co_dim = hps.co_dim
ext_input_dim = hps.ext_input_dim
cell_class = GRU
gen_cell_class = GenGRU
def makelambda(v): # Used with tf.case
return lambda: v
# Define the data placeholder, and deal with all parts of the graph
# that are dataset dependent.
self.dataName = tf.placeholder(tf.string, shape=())
# The batch_size to be inferred from data, as normal.
# Additionally, the data_dim will be inferred as well, allowing for a
# single placeholder for all datasets, regardless of data dimension.
if hps.output_dist == 'poisson':
# Enforce correct dtype
assert np.issubdtype(
datasets[hps.dataset_names[0]]['train_data'].dtype, int), \
"Data dtype must be int for poisson output distribution"
data_dtype = tf.int32
elif hps.output_dist == 'gaussian':
assert np.issubdtype(
datasets[hps.dataset_names[0]]['train_data'].dtype, float), \
"Data dtype must be float for gaussian output dsitribution"
data_dtype = tf.float32
else:
assert False, "NIY"
self.dataset_ph = dataset_ph = tf.placeholder(data_dtype,
[None, num_steps, None],
name="data")
self.train_step = tf.get_variable("global_step", [], tf.int64,
tf.zeros_initializer(),
trainable=False)
self.hps = hps
ndatasets = hps.ndatasets
factors_dim = hps.factors_dim
self.preds = preds = [None] * ndatasets
self.fns_in_fac_Ws = fns_in_fac_Ws = [None] * ndatasets
    self.fns_in_fac_bs = fns_in_fac_bs = [None] * ndatasets
self.fns_out_fac_Ws = fns_out_fac_Ws = [None] * ndatasets
self.fns_out_fac_bs = fns_out_fac_bs = [None] * ndatasets
self.datasetNames = dataset_names = hps.dataset_names
self.ext_inputs = ext_inputs = None
if len(dataset_names) == 1: # single session
if 'alignment_matrix_cxf' in datasets[dataset_names[0]].keys():
used_in_factors_dim = factors_dim
in_identity_if_poss = False
else:
used_in_factors_dim = hps.dataset_dims[dataset_names[0]]
in_identity_if_poss = True
else: # multisession
used_in_factors_dim = factors_dim
in_identity_if_poss = False
for d, name in enumerate(dataset_names):
data_dim = hps.dataset_dims[name]
in_mat_cxf = None
in_bias_1xf = None
align_bias_1xc = None
if datasets and 'alignment_matrix_cxf' in datasets[name].keys():
dataset = datasets[name]
if hps.do_train_readin:
print("Initializing trainable readin matrix with alignment matrix" \
" provided for dataset:", name)
else:
print("Setting non-trainable readin matrix to alignment matrix" \
" provided for dataset:", name)
in_mat_cxf = dataset['alignment_matrix_cxf'].astype(np.float32)
if in_mat_cxf.shape != (data_dim, factors_dim):
raise ValueError("""Alignment matrix must have dimensions %d x %d
(data_dim x factors_dim), but currently has %d x %d."""%
(data_dim, factors_dim, in_mat_cxf.shape[0],
in_mat_cxf.shape[1]))
if datasets and 'alignment_bias_c' in datasets[name].keys():
dataset = datasets[name]
if hps.do_train_readin:
print("Initializing trainable readin bias with alignment bias " \
"provided for dataset:", name)
else:
print("Setting non-trainable readin bias to alignment bias " \
"provided for dataset:", name)
align_bias_c = dataset['alignment_bias_c'].astype(np.float32)
align_bias_1xc = np.expand_dims(align_bias_c, axis=0)
if align_bias_1xc.shape[1] != data_dim:
raise ValueError("""Alignment bias must have dimensions %d
(data_dim), but currently has %d."""%
(data_dim, in_mat_cxf.shape[0]))
if in_mat_cxf is not None and align_bias_1xc is not None:
# (data - alignment_bias) * W_in
# data * W_in - alignment_bias * W_in
# So b = -alignment_bias * W_in to accommodate PCA style offset.
in_bias_1xf = -np.dot(align_bias_1xc, in_mat_cxf)
if hps.do_train_readin:
        # Only add to the IO_transformations collection if we want the readin
        # to be learnable, because the IO_transformations collection will be
        # trained when do_train_io_only is set.
collections_readin=['IO_transformations']
else:
collections_readin=None
in_fac_lin = init_linear(data_dim, used_in_factors_dim,
do_bias=True,
mat_init_value=in_mat_cxf,
bias_init_value=in_bias_1xf,
identity_if_possible=in_identity_if_poss,
normalized=False, name="x_2_infac_"+name,
collections=collections_readin,
trainable=hps.do_train_readin)
in_fac_W, in_fac_b = in_fac_lin
fns_in_fac_Ws[d] = makelambda(in_fac_W)
fns_in_fac_bs[d] = makelambda(in_fac_b)
with tf.variable_scope("glm"):
out_identity_if_poss = False
if len(dataset_names) == 1 and \
factors_dim == hps.dataset_dims[dataset_names[0]]:
out_identity_if_poss = True
for d, name in enumerate(dataset_names):
data_dim = hps.dataset_dims[name]
in_mat_cxf = None
if datasets and 'alignment_matrix_cxf' in datasets[name].keys():
dataset = datasets[name]
in_mat_cxf = dataset['alignment_matrix_cxf'].astype(np.float32)
if datasets and 'alignment_bias_c' in datasets[name].keys():
dataset = datasets[name]
align_bias_c = dataset['alignment_bias_c'].astype(np.float32)
align_bias_1xc = np.expand_dims(align_bias_c, axis=0)
out_mat_fxc = None
out_bias_1xc = None
if in_mat_cxf is not None:
out_mat_fxc = in_mat_cxf.T
if align_bias_1xc is not None:
out_bias_1xc = align_bias_1xc
if hps.output_dist == 'poisson':
out_fac_lin = init_linear(factors_dim, data_dim, do_bias=True,
mat_init_value=out_mat_fxc,
bias_init_value=out_bias_1xc,
identity_if_possible=out_identity_if_poss,
normalized=False,
name="fac_2_logrates_"+name,
collections=['IO_transformations'])
out_fac_W, out_fac_b = out_fac_lin
elif hps.output_dist == 'gaussian':
out_fac_lin_mean = \
init_linear(factors_dim, data_dim, do_bias=True,
mat_init_value=out_mat_fxc,
bias_init_value=out_bias_1xc,
normalized=False,
name="fac_2_means_"+name,
collections=['IO_transformations'])
out_fac_W_mean, out_fac_b_mean = out_fac_lin_mean
mat_init_value = np.zeros([factors_dim, data_dim]).astype(np.float32)
bias_init_value = np.ones([1, data_dim]).astype(np.float32)
out_fac_lin_logvar = \
init_linear(factors_dim, data_dim, do_bias=True,
mat_init_value=mat_init_value,
bias_init_value=bias_init_value,
normalized=False,
name="fac_2_logvars_"+name,
collections=['IO_transformations'])
out_fac_W_mean, out_fac_b_mean = out_fac_lin_mean
out_fac_W_logvar, out_fac_b_logvar = out_fac_lin_logvar
out_fac_W = tf.concat(
axis=1, values=[out_fac_W_mean, out_fac_W_logvar])
out_fac_b = tf.concat(
axis=1, values=[out_fac_b_mean, out_fac_b_logvar])
else:
assert False, "NIY"
preds[d] = tf.equal(tf.constant(name), self.dataName)
data_dim = hps.dataset_dims[name]
fns_out_fac_Ws[d] = makelambda(out_fac_W)
fns_out_fac_bs[d] = makelambda(out_fac_b)
pf_pairs_in_fac_Ws = zip(preds, fns_in_fac_Ws)
pf_pairs_in_fac_bs = zip(preds, fns_in_fac_bs)
pf_pairs_out_fac_Ws = zip(preds, fns_out_fac_Ws)
pf_pairs_out_fac_bs = zip(preds, fns_out_fac_bs)
this_in_fac_W = tf.case(pf_pairs_in_fac_Ws, exclusive=True)
this_in_fac_b = tf.case(pf_pairs_in_fac_bs, exclusive=True)
this_out_fac_W = tf.case(pf_pairs_out_fac_Ws, exclusive=True)
this_out_fac_b = tf.case(pf_pairs_out_fac_bs, exclusive=True)
# External inputs (not changing by dataset, by definition).
if hps.ext_input_dim > 0:
self.ext_input = tf.placeholder(tf.float32,
[None, num_steps, ext_input_dim],
name="ext_input")
else:
self.ext_input = None
ext_input_bxtxi = self.ext_input
self.keep_prob = keep_prob = tf.placeholder(tf.float32, [], "keep_prob")
self.batch_size = batch_size = int(hps.batch_size)
self.learning_rate = tf.Variable(float(hps.learning_rate_init),
trainable=False, name="learning_rate")
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * hps.learning_rate_decay_factor)
# Dropout the data.
dataset_do_bxtxd = tf.nn.dropout(tf.to_float(dataset_ph), keep_prob)
if hps.ext_input_dim > 0:
ext_input_do_bxtxi = tf.nn.dropout(ext_input_bxtxi, keep_prob)
else:
ext_input_do_bxtxi = None
# ENCODERS
def encode_data(dataset_bxtxd, enc_cell, name, forward_or_reverse,
num_steps_to_encode):
"""Encode data for LFADS
Args:
      dataset_bxtxd - the data to encode, as a 3D tensor, with dims
        batch x time x data dims.
enc_cell: encoder cell
name: name of encoder
forward_or_reverse: string, encode in forward or reverse direction
num_steps_to_encode: number of steps to encode, 0:num_steps_to_encode
Returns:
encoded data as a list with num_steps_to_encode items, in order
"""
if forward_or_reverse == "forward":
dstr = "_fwd"
time_fwd_or_rev = range(num_steps_to_encode)
else:
dstr = "_rev"
time_fwd_or_rev = reversed(range(num_steps_to_encode))
with tf.variable_scope(name+"_enc"+dstr, reuse=False):
enc_state = tf.tile(
tf.Variable(tf.zeros([1, enc_cell.state_size]),
name=name+"_enc_t0"+dstr), tf.stack([batch_size, 1]))
enc_state.set_shape([None, enc_cell.state_size]) # tile loses shape
enc_outs = [None] * num_steps_to_encode
for i, t in enumerate(time_fwd_or_rev):
with tf.variable_scope(name+"_enc"+dstr, reuse=True if i > 0 else None):
dataset_t_bxd = dataset_bxtxd[:,t,:]
in_fac_t_bxf = tf.matmul(dataset_t_bxd, this_in_fac_W) + this_in_fac_b
in_fac_t_bxf.set_shape([None, used_in_factors_dim])
if ext_input_dim > 0 and not hps.inject_ext_input_to_gen:
ext_input_t_bxi = ext_input_do_bxtxi[:,t,:]
enc_input_t_bxfpe = tf.concat(
axis=1, values=[in_fac_t_bxf, ext_input_t_bxi])
else:
enc_input_t_bxfpe = in_fac_t_bxf
enc_out, enc_state = enc_cell(enc_input_t_bxfpe, enc_state)
enc_outs[t] = enc_out
return enc_outs
# Encode initial condition means and variances
# ([x_T, x_T-1, ... x_0] and [x_0, x_1, ... x_T] -> g0/c0)
self.ic_enc_fwd = [None] * num_steps
self.ic_enc_rev = [None] * num_steps
if ic_dim > 0:
enc_ic_cell = cell_class(hps.ic_enc_dim,
weight_scale=hps.cell_weight_scale,
clip_value=hps.cell_clip_value)
ic_enc_fwd = encode_data(dataset_do_bxtxd, enc_ic_cell,
"ic", "forward",
hps.num_steps_for_gen_ic)
ic_enc_rev = encode_data(dataset_do_bxtxd, enc_ic_cell,
"ic", "reverse",
hps.num_steps_for_gen_ic)
self.ic_enc_fwd = ic_enc_fwd
self.ic_enc_rev = ic_enc_rev
# Encoder control input means and variances, bi-directional encoding so:
# ([x_T, x_T-1, ..., x_0] and [x_0, x_1 ... x_T] -> u_t)
self.ci_enc_fwd = [None] * num_steps
self.ci_enc_rev = [None] * num_steps
if co_dim > 0:
enc_ci_cell = cell_class(hps.ci_enc_dim,
weight_scale=hps.cell_weight_scale,
clip_value=hps.cell_clip_value)
ci_enc_fwd = encode_data(dataset_do_bxtxd, enc_ci_cell,
"ci", "forward",
hps.num_steps)
if hps.do_causal_controller:
ci_enc_rev = None
else:
ci_enc_rev = encode_data(dataset_do_bxtxd, enc_ci_cell,
"ci", "reverse",
hps.num_steps)
self.ci_enc_fwd = ci_enc_fwd
self.ci_enc_rev = ci_enc_rev
# STOCHASTIC LATENT VARIABLES, priors and posteriors
# (initial conditions g0, and control inputs, u_t)
# Note that zs represent all the stochastic latent variables.
with tf.variable_scope("z", reuse=False):
self.prior_zs_g0 = None
self.posterior_zs_g0 = None
self.g0s_val = None
if ic_dim > 0:
self.prior_zs_g0 = \
LearnableDiagonalGaussian(batch_size, ic_dim, name="prior_g0",
mean_init=0.0,
var_min=hps.ic_prior_var_min,
var_init=hps.ic_prior_var_scale,
var_max=hps.ic_prior_var_max)
ic_enc = tf.concat(axis=1, values=[ic_enc_fwd[-1], ic_enc_rev[0]])
ic_enc = tf.nn.dropout(ic_enc, keep_prob)
self.posterior_zs_g0 = \
DiagonalGaussianFromInput(ic_enc, ic_dim, "ic_enc_2_post_g0",
var_min=hps.ic_post_var_min)
if kind in ["train", "posterior_sample_and_average",
"posterior_push_mean"]:
zs_g0 = self.posterior_zs_g0
else:
zs_g0 = self.prior_zs_g0
if kind in ["train", "posterior_sample_and_average", "prior_sample"]:
self.g0s_val = zs_g0.sample
else:
self.g0s_val = zs_g0.mean
# Priors for controller, 'co' for controller output
self.prior_zs_co = prior_zs_co = [None] * num_steps
self.posterior_zs_co = posterior_zs_co = [None] * num_steps
self.zs_co = zs_co = [None] * num_steps
self.prior_zs_ar_con = None
if co_dim > 0:
# Controller outputs
autocorrelation_taus = [hps.prior_ar_atau for x in range(hps.co_dim)]
noise_variances = [hps.prior_ar_nvar for x in range(hps.co_dim)]
self.prior_zs_ar_con = prior_zs_ar_con = \
LearnableAutoRegressive1Prior(batch_size, hps.co_dim,
autocorrelation_taus,
noise_variances,
hps.do_train_prior_ar_atau,
hps.do_train_prior_ar_nvar,
num_steps, "u_prior_ar1")
# CONTROLLER -> GENERATOR -> RATES
# (u(t) -> gen(t) -> factors(t) -> rates(t) -> p(x_t|z_t) )
self.controller_outputs = u_t = [None] * num_steps
self.con_ics = con_state = None
self.con_states = con_states = [None] * num_steps
self.con_outs = con_outs = [None] * num_steps
self.gen_inputs = gen_inputs = [None] * num_steps
if co_dim > 0:
# gen_cell_class here for l2 penalty recurrent weights
# didn't split the cell_weight scale here, because I doubt it matters
con_cell = gen_cell_class(hps.con_dim,
input_weight_scale=hps.cell_weight_scale,
rec_weight_scale=hps.cell_weight_scale,
clip_value=hps.cell_clip_value,
recurrent_collections=['l2_con_reg'])
with tf.variable_scope("con", reuse=False):
self.con_ics = tf.tile(
tf.Variable(tf.zeros([1, hps.con_dim*con_cell.state_multiplier]),
name="c0"),
tf.stack([batch_size, 1]))
self.con_ics.set_shape([None, con_cell.state_size]) # tile loses shape
con_states[-1] = self.con_ics
gen_cell = gen_cell_class(hps.gen_dim,
input_weight_scale=hps.gen_cell_input_weight_scale,
rec_weight_scale=hps.gen_cell_rec_weight_scale,
clip_value=hps.cell_clip_value,
recurrent_collections=['l2_gen_reg'])
with tf.variable_scope("gen", reuse=False):
if ic_dim == 0:
self.gen_ics = tf.tile(
tf.Variable(tf.zeros([1, gen_cell.state_size]), name="g0"),
tf.stack([batch_size, 1]))
else:
self.gen_ics = linear(self.g0s_val, gen_cell.state_size,
identity_if_possible=True,
name="g0_2_gen_ic")
self.gen_states = gen_states = [None] * num_steps
self.gen_outs = gen_outs = [None] * num_steps
gen_states[-1] = self.gen_ics
gen_outs[-1] = gen_cell.output_from_state(gen_states[-1])
self.factors = factors = [None] * num_steps
factors[-1] = linear(gen_outs[-1], factors_dim, do_bias=False,
normalized=True, name="gen_2_fac")
self.rates = rates = [None] * num_steps
# rates[-1] is collected to potentially feed back to controller
with tf.variable_scope("glm", reuse=False):
if hps.output_dist == 'poisson':
log_rates_t0 = tf.matmul(factors[-1], this_out_fac_W) + this_out_fac_b
log_rates_t0.set_shape([None, None])
rates[-1] = tf.exp(log_rates_t0) # rate
rates[-1].set_shape([None, hps.dataset_dims[hps.dataset_names[0]]])
elif hps.output_dist == 'gaussian':
mean_n_logvars = tf.matmul(factors[-1],this_out_fac_W) + this_out_fac_b
mean_n_logvars.set_shape([None, None])
means_t_bxd, logvars_t_bxd = tf.split(axis=1, num_or_size_splits=2,
value=mean_n_logvars)
rates[-1] = means_t_bxd
else:
assert False, "NIY"
# We support multiple output distributions, for example Poisson, and also
# Gaussian. In these two cases respectively, there are one and two
    # parameters (rates vs. mean and variance). So the output_dist_params
    # tensor will have variable sizes via tf.concat and tf.split, along the
    # 1st dimension. So in the case of gaussian, for example, it'll be
# batch x (D+D), where each D dims is the mean, and then variances,
# respectively. For a distribution with 3 parameters, it would be
# batch x (D+D+D).
self.output_dist_params = dist_params = [None] * num_steps
self.log_p_xgz_b = log_p_xgz_b = 0.0 # log P(x|z)
for t in range(num_steps):
# Controller
if co_dim > 0:
# Build inputs for controller
tlag = t - hps.controller_input_lag
if tlag < 0:
con_in_f_t = tf.zeros_like(ci_enc_fwd[0])
else:
con_in_f_t = ci_enc_fwd[tlag]
if hps.do_causal_controller:
# If controller is causal (wrt to data generation process), then it
# cannot see future data. Thus, excluding ci_enc_rev[t] is obvious.
# Less obvious is the need to exclude factors[t-1]. This arises
# because information flows from g0 through factors to the controller
# input. The g0 encoding is backwards, so we must necessarily exclude
# the factors in order to keep the controller input purely from a
# forward encoding (however unlikely it is that
# g0->factors->controller channel might actually be used in this way).
con_in_list_t = [con_in_f_t]
else:
tlag_rev = t + hps.controller_input_lag
if tlag_rev >= num_steps:
# better than zeros
con_in_r_t = tf.zeros_like(ci_enc_rev[0])
else:
con_in_r_t = ci_enc_rev[tlag_rev]
con_in_list_t = [con_in_f_t, con_in_r_t]
if hps.do_feed_factors_to_controller:
if hps.feedback_factors_or_rates == "factors":
con_in_list_t.append(factors[t-1])
elif hps.feedback_factors_or_rates == "rates":
con_in_list_t.append(rates[t-1])
else:
assert False, "NIY"
con_in_t = tf.concat(axis=1, values=con_in_list_t)
con_in_t = tf.nn.dropout(con_in_t, keep_prob)
with tf.variable_scope("con", reuse=True if t > 0 else None):
con_outs[t], con_states[t] = con_cell(con_in_t, con_states[t-1])
posterior_zs_co[t] = \
DiagonalGaussianFromInput(con_outs[t], co_dim,
name="con_to_post_co")
if kind == "train":
u_t[t] = posterior_zs_co[t].sample
elif kind == "posterior_sample_and_average":
u_t[t] = posterior_zs_co[t].sample
elif kind == "posterior_push_mean":
u_t[t] = posterior_zs_co[t].mean
else:
u_t[t] = prior_zs_ar_con.samples_t[t]
# Inputs to the generator (controller output + external input)
if ext_input_dim > 0 and hps.inject_ext_input_to_gen:
ext_input_t_bxi = ext_input_do_bxtxi[:,t,:]
if co_dim > 0:
gen_inputs[t] = tf.concat(axis=1, values=[u_t[t], ext_input_t_bxi])
else:
gen_inputs[t] = ext_input_t_bxi
else:
gen_inputs[t] = u_t[t]
# Generator
data_t_bxd = dataset_ph[:,t,:]
with tf.variable_scope("gen", reuse=True if t > 0 else None):
gen_outs[t], gen_states[t] = gen_cell(gen_inputs[t], gen_states[t-1])
gen_outs[t] = tf.nn.dropout(gen_outs[t], keep_prob)
with tf.variable_scope("gen", reuse=True): # ic defined it above
factors[t] = linear(gen_outs[t], factors_dim, do_bias=False,
normalized=True, name="gen_2_fac")
with tf.variable_scope("glm", reuse=True if t > 0 else None):
if hps.output_dist == 'poisson':
log_rates_t = tf.matmul(factors[t], this_out_fac_W) + this_out_fac_b
log_rates_t.set_shape([None, None])
          rates[t] = dist_params[t] = tf.exp(
              tf.clip_by_value(log_rates_t, -hps.cell_clip_value,
                               hps.cell_clip_value)) # rates feed back
rates[t].set_shape([None, hps.dataset_dims[hps.dataset_names[0]]])
loglikelihood_t = Poisson(log_rates_t).logp(data_t_bxd)
elif hps.output_dist == 'gaussian':
mean_n_logvars = tf.matmul(factors[t],this_out_fac_W) + this_out_fac_b
mean_n_logvars.set_shape([None, None])
means_t_bxd, logvars_t_bxd = tf.split(axis=1, num_or_size_splits=2,
value=mean_n_logvars)
rates[t] = means_t_bxd # rates feed back to controller
dist_params[t] = tf.concat(
              axis=1, values=[means_t_bxd,
                              tf.exp(tf.clip_by_value(logvars_t_bxd,
                                                      -hps.cell_clip_value,
                                                      hps.cell_clip_value))])
loglikelihood_t = \
diag_gaussian_log_likelihood(data_t_bxd,
means_t_bxd, logvars_t_bxd)
else:
assert False, "NIY"
log_p_xgz_b += tf.reduce_sum(loglikelihood_t, [1])
# Correlation of inferred inputs cost.
self.corr_cost = tf.constant(0.0)
if hps.co_mean_corr_scale > 0.0:
all_sum_corr = []
for i in range(hps.co_dim):
for j in range(i+1, hps.co_dim):
sum_corr_ij = tf.constant(0.0)
for t in range(num_steps):
u_mean_t = posterior_zs_co[t].mean
sum_corr_ij += u_mean_t[:,i]*u_mean_t[:,j]
all_sum_corr.append(0.5 * tf.square(sum_corr_ij))
self.corr_cost = tf.reduce_mean(all_sum_corr) # div by batch and by n*(n-1)/2 pairs
# Variational Lower Bound on posterior, p(z|x), plus reconstruction cost.
# KL and reconstruction costs are normalized only by batch size, not by
# dimension, or by time steps.
kl_cost_g0_b = tf.zeros_like(batch_size, dtype=tf.float32)
kl_cost_co_b = tf.zeros_like(batch_size, dtype=tf.float32)
self.kl_cost = tf.constant(0.0) # VAE KL cost
self.recon_cost = tf.constant(0.0) # VAE reconstruction cost
self.nll_bound_vae = tf.constant(0.0)
self.nll_bound_iwae = tf.constant(0.0) # for eval with IWAE cost.
if kind in ["train", "posterior_sample_and_average", "posterior_push_mean"]:
kl_cost_g0_b = 0.0
kl_cost_co_b = 0.0
if ic_dim > 0:
g0_priors = [self.prior_zs_g0]
g0_posts = [self.posterior_zs_g0]
kl_cost_g0_b = KLCost_GaussianGaussian(g0_posts, g0_priors).kl_cost_b
kl_cost_g0_b = hps.kl_ic_weight * kl_cost_g0_b
if co_dim > 0:
kl_cost_co_b = \
KLCost_GaussianGaussianProcessSampled(
posterior_zs_co, prior_zs_ar_con).kl_cost_b
kl_cost_co_b = hps.kl_co_weight * kl_cost_co_b
# L = -KL + log p(x|z), to maximize bound on likelihood
# -L = KL - log p(x|z), to minimize bound on NLL
# so 'reconstruction cost' is negative log likelihood
self.recon_cost = - tf.reduce_mean(log_p_xgz_b)
self.kl_cost = tf.reduce_mean(kl_cost_g0_b + kl_cost_co_b)
lb_on_ll_b = log_p_xgz_b - kl_cost_g0_b - kl_cost_co_b
# VAE error averages outside the log
self.nll_bound_vae = -tf.reduce_mean(lb_on_ll_b)
# IWAE error averages inside the log
k = tf.cast(tf.shape(log_p_xgz_b)[0], tf.float32)
iwae_lb_on_ll = -tf.log(k) + log_sum_exp(lb_on_ll_b)
self.nll_bound_iwae = -iwae_lb_on_ll
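      # In symbols, with k = batch size, b indexing batch elements, and the
      # KL terms weighted as above:
      #   lb_on_ll_b     = log p(x_b|z_b) - KL(q(g0|x_b)||p(g0))
      #                                   - KL(q(u|x_b)||p(u))
      #   nll_bound_vae  = -mean_b lb_on_ll_b
      #   nll_bound_iwae = -log(mean_b exp(lb_on_ll_b))
      # The IWAE bound averages inside the log (log_sum_exp(...) - log k),
      # giving a tighter bound on the negative log likelihood.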
# L2 regularization on the generator, normalized by number of parameters.
self.l2_cost = tf.constant(0.0)
if self.hps.l2_gen_scale > 0.0 or self.hps.l2_con_scale > 0.0:
l2_costs = []
l2_numels = []
l2_reg_var_lists = [tf.get_collection('l2_gen_reg'),
tf.get_collection('l2_con_reg')]
l2_reg_scales = [self.hps.l2_gen_scale, self.hps.l2_con_scale]
for l2_reg_vars, l2_scale in zip(l2_reg_var_lists, l2_reg_scales):
for v in l2_reg_vars:
numel = tf.reduce_prod(tf.concat(axis=0, values=tf.shape(v)))
numel_f = tf.cast(numel, tf.float32)
l2_numels.append(numel_f)
v_l2 = tf.reduce_sum(v*v)
l2_costs.append(0.5 * l2_scale * v_l2)
self.l2_cost = tf.add_n(l2_costs) / tf.add_n(l2_numels)
# Compute the cost for training, part of the graph regardless.
# The KL cost can be problematic at the beginning of optimization,
# so we allow an exponential increase in weighting the KL from 0
# to 1.
self.kl_decay_step = tf.maximum(self.train_step - hps.kl_start_step, 0)
self.l2_decay_step = tf.maximum(self.train_step - hps.l2_start_step, 0)
kl_decay_step_f = tf.cast(self.kl_decay_step, tf.float32)
l2_decay_step_f = tf.cast(self.l2_decay_step, tf.float32)
kl_increase_steps_f = tf.cast(hps.kl_increase_steps, tf.float32)
l2_increase_steps_f = tf.cast(hps.l2_increase_steps, tf.float32)
self.kl_weight = kl_weight = \
tf.minimum(kl_decay_step_f / kl_increase_steps_f, 1.0)
self.l2_weight = l2_weight = \
tf.minimum(l2_decay_step_f / l2_increase_steps_f, 1.0)
self.timed_kl_cost = kl_weight * self.kl_cost
self.timed_l2_cost = l2_weight * self.l2_cost
self.weight_corr_cost = hps.co_mean_corr_scale * self.corr_cost
self.cost = self.recon_cost + self.timed_kl_cost + \
self.timed_l2_cost + self.weight_corr_cost
if kind != "train":
# save every so often
self.seso_saver = tf.train.Saver(tf.global_variables(),
max_to_keep=hps.max_ckpt_to_keep)
# lowest validation error
self.lve_saver = tf.train.Saver(tf.global_variables(),
max_to_keep=hps.max_ckpt_to_keep_lve)
return
# OPTIMIZATION
# train the io matrices only
if self.hps.do_train_io_only:
self.train_vars = tvars = \
tf.get_collection('IO_transformations',
scope=tf.get_variable_scope().name)
# train the encoder only
elif self.hps.do_train_encoder_only:
tvars1 = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope='LFADS/ic_enc_*')
tvars2 = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope='LFADS/z/ic_enc_*')
self.train_vars = tvars = tvars1 + tvars2
# train all variables
else:
self.train_vars = tvars = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=tf.get_variable_scope().name)
print("done.")
print("Model Variables (to be optimized): ")
total_params = 0
for i in range(len(tvars)):
shape = tvars[i].get_shape().as_list()
print(" ", i, tvars[i].name, shape)
total_params += np.prod(shape)
print("Total model parameters: ", total_params)
grads = tf.gradients(self.cost, tvars)
grads, grad_global_norm = tf.clip_by_global_norm(grads, hps.max_grad_norm)
opt = tf.train.AdamOptimizer(self.learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-01)
self.grads = grads
self.grad_global_norm = grad_global_norm
self.train_op = opt.apply_gradients(
zip(grads, tvars), global_step=self.train_step)
self.seso_saver = tf.train.Saver(tf.global_variables(),
max_to_keep=hps.max_ckpt_to_keep)
# lowest validation error
self.lve_saver = tf.train.Saver(tf.global_variables(),
                                    max_to_keep=hps.max_ckpt_to_keep_lve)
# SUMMARIES, used only during training.
# example summary
self.example_image = tf.placeholder(tf.float32, shape=[1,None,None,3],
name='image_tensor')
self.example_summ = tf.summary.image("LFADS example", self.example_image,
collections=["example_summaries"])
# general training summaries
self.lr_summ = tf.summary.scalar("Learning rate", self.learning_rate)
self.kl_weight_summ = tf.summary.scalar("KL weight", self.kl_weight)
self.l2_weight_summ = tf.summary.scalar("L2 weight", self.l2_weight)
self.corr_cost_summ = tf.summary.scalar("Corr cost", self.weight_corr_cost)
self.grad_global_norm_summ = tf.summary.scalar("Gradient global norm",
self.grad_global_norm)
if hps.co_dim > 0:
self.atau_summ = [None] * hps.co_dim
self.pvar_summ = [None] * hps.co_dim
for c in range(hps.co_dim):
self.atau_summ[c] = \
tf.summary.scalar("AR Autocorrelation taus " + str(c),
tf.exp(self.prior_zs_ar_con.logataus_1xu[0,c]))
self.pvar_summ[c] = \
tf.summary.scalar("AR Variances " + str(c),
tf.exp(self.prior_zs_ar_con.logpvars_1xu[0,c]))
# cost summaries, separated into different collections for
# training vs validation. We make placeholders for these, because
# even though the graph computes these costs on a per-batch basis,
# we want to report the more reliable metric of per-epoch cost.
kl_cost_ph = tf.placeholder(tf.float32, shape=[], name='kl_cost_ph')
self.kl_t_cost_summ = tf.summary.scalar("KL cost (train)", kl_cost_ph,
collections=["train_summaries"])
self.kl_v_cost_summ = tf.summary.scalar("KL cost (valid)", kl_cost_ph,
collections=["valid_summaries"])
l2_cost_ph = tf.placeholder(tf.float32, shape=[], name='l2_cost_ph')
self.l2_cost_summ = tf.summary.scalar("L2 cost", l2_cost_ph,
collections=["train_summaries"])
recon_cost_ph = tf.placeholder(tf.float32, shape=[], name='recon_cost_ph')
self.recon_t_cost_summ = tf.summary.scalar("Reconstruction cost (train)",
recon_cost_ph,
collections=["train_summaries"])
self.recon_v_cost_summ = tf.summary.scalar("Reconstruction cost (valid)",
recon_cost_ph,
collections=["valid_summaries"])
total_cost_ph = tf.placeholder(tf.float32, shape=[], name='total_cost_ph')
self.cost_t_summ = tf.summary.scalar("Total cost (train)", total_cost_ph,
collections=["train_summaries"])
self.cost_v_summ = tf.summary.scalar("Total cost (valid)", total_cost_ph,
collections=["valid_summaries"])
self.kl_cost_ph = kl_cost_ph
self.l2_cost_ph = l2_cost_ph
self.recon_cost_ph = recon_cost_ph
self.total_cost_ph = total_cost_ph
# Merged summaries, for easy coding later.
self.merged_examples = tf.summary.merge_all(key="example_summaries")
self.merged_generic = tf.summary.merge_all() # default key is 'summaries'
self.merged_train = tf.summary.merge_all(key="train_summaries")
self.merged_valid = tf.summary.merge_all(key="valid_summaries")
session = tf.get_default_session()
self.logfile = os.path.join(hps.lfads_save_dir, "lfads_log")
self.writer = tf.summary.FileWriter(self.logfile)
def build_feed_dict(self, train_name, data_bxtxd, ext_input_bxtxi=None,
keep_prob=None):
"""Build the feed dictionary, handles cases where there is no value defined.
Args:
train_name: The key into the datasets, to set the tf.case statement for
the proper readin / readout matrices.
data_bxtxd: The data tensor.
ext_input_bxtxi (optional): The external input tensor.
keep_prob: The drop out keep probability.
Returns:
The feed dictionary with TF tensors as keys and data as values, for use
with tf.Session.run()
"""
feed_dict = {}
B, T, _ = data_bxtxd.shape
feed_dict[self.dataName] = train_name
feed_dict[self.dataset_ph] = data_bxtxd
if self.ext_input is not None and ext_input_bxtxi is not None:
feed_dict[self.ext_input] = ext_input_bxtxi
if keep_prob is None:
feed_dict[self.keep_prob] = self.hps.keep_prob
else:
feed_dict[self.keep_prob] = keep_prob
return feed_dict
@staticmethod
def get_batch(data_extxd, ext_input_extxi=None, batch_size=None,
example_idxs=None):
"""Get a batch of data, either randomly chosen, or specified directly.
Args:
data_extxd: The data to model, numpy tensors with shape:
# examples x # time steps x # dimensions
ext_input_extxi (optional): The external inputs, numpy tensor with shape:
# examples x # time steps x # external input dimensions
batch_size: The size of the batch to return.
example_idxs (optional): The example indices used to select examples.
Returns:
A tuple with two parts:
1. Batched data numpy tensor with shape:
batch_size x # time steps x # dimensions
2. Batched external input numpy tensor with shape:
batch_size x # time steps x # external input dims
"""
assert batch_size is not None or example_idxs is not None, "Problems"
E, T, D = data_extxd.shape
if example_idxs is None:
example_idxs = np.random.choice(E, batch_size)
ext_input_bxtxi = None
if ext_input_extxi is not None:
ext_input_bxtxi = ext_input_extxi[example_idxs,:,:]
return data_extxd[example_idxs,:,:], ext_input_bxtxi
@staticmethod
def example_idxs_mod_batch_size(nexamples, batch_size):
"""Given a number of examples, E, and a batch_size, B, generate indices
    [0, 1, 2, ... B-1;
     B, B+1, ... 2*B-1;
     ...
    ]
returning those indices as a 2-dim tensor shaped like E/B x B. Note that
shape is only correct if E % B == 0. If not, then an extra row is generated
    so that the remainder of examples is included. The extra examples are
    drawn randomly from the full set of examples (see
    randomize_example_idxs_mod_batch_size for fully randomized batching).
Args:
nexamples: The number of examples to batch up.
batch_size: The size of the batch.
Returns:
2-dim tensor as described above.
"""
bmrem = batch_size - (nexamples % batch_size)
bmrem_examples = []
if bmrem < batch_size:
#bmrem_examples = np.zeros(bmrem, dtype=np.int32)
ridxs = np.random.permutation(nexamples)[0:bmrem].astype(np.int32)
bmrem_examples = np.sort(ridxs)
example_idxs = range(nexamples) + list(bmrem_examples)
example_idxs_e_x_edivb = np.reshape(example_idxs, [-1, batch_size])
return example_idxs_e_x_edivb, bmrem
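  # e.g. (illustrative): example_idxs_mod_batch_size(10, 4) returns a 3 x 4
  # index array plus bmrem=2; the last row holds indices 8, 9 followed by two
  # randomly chosen fill-in indices.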
@staticmethod
def randomize_example_idxs_mod_batch_size(nexamples, batch_size):
"""Indices 1:nexamples, randomized, in 2D form of
shape = (nexamples / batch_size) x batch_size. The remainder
is managed by drawing randomly from 1:nexamples.
Args:
nexamples: Number of examples to randomize.
batch_size: Number of elements in batch.
Returns:
      The randomized, properly shaped indices.
"""
assert nexamples > batch_size, "Problems"
bmrem = batch_size - nexamples % batch_size
bmrem_examples = []
if bmrem < batch_size:
bmrem_examples = np.random.choice(range(nexamples),
size=bmrem, replace=False)
example_idxs = range(nexamples) + list(bmrem_examples)
mixed_example_idxs = np.random.permutation(example_idxs)
example_idxs_e_x_edivb = np.reshape(mixed_example_idxs, [-1, batch_size])
return example_idxs_e_x_edivb, bmrem
def shuffle_spikes_in_time(self, data_bxtxd):
"""Shuffle the spikes in the temporal dimension. This is useful to
help the LFADS system avoid overfitting to individual spikes or fast
oscillations found in the data that are irrelevant to behavior. A
pure 'tabula rasa' approach would avoid this, but LFADS is sensitive
enough to pick up dynamics that you may not want.
Args:
data_bxtxd: Numpy array of spike count data to be shuffled.
Returns:
S_bxtxd, a numpy array with the same dimensions and contents as
data_bxtxd, but shuffled appropriately.
"""
B, T, N = data_bxtxd.shape
w = self.hps.temporal_spike_jitter_width
if w == 0:
return data_bxtxd
max_counts = np.max(data_bxtxd)
S_bxtxd = np.zeros([B,T,N])
    # Intuitively, shuffle spike occurrences, 0 or 1, but since we have counts,
# Do it over and over again up to the max count.
for mc in range(1,max_counts+1):
idxs = np.nonzero(data_bxtxd >= mc)
data_ones = np.zeros_like(data_bxtxd)
data_ones[data_bxtxd >= mc] = 1
nfound = len(idxs[0])
shuffles_incrs_in_time = np.random.randint(-w, w, size=nfound)
shuffle_tidxs = idxs[1].copy()
shuffle_tidxs += shuffles_incrs_in_time
# Reflect on the boundaries to not lose mass.
shuffle_tidxs[shuffle_tidxs < 0] = -shuffle_tidxs[shuffle_tidxs < 0]
shuffle_tidxs[shuffle_tidxs > T-1] = \
(T-1)-(shuffle_tidxs[shuffle_tidxs > T-1] -(T-1))
for iii in zip(idxs[0], shuffle_tidxs, idxs[2]):
S_bxtxd[iii] += 1
return S_bxtxd
def shuffle_and_flatten_datasets(self, datasets, kind='train'):
"""Since LFADS supports multiple datasets in the same dynamical model,
we have to be careful to use all the data in a single training epoch. But
    since the datasets may have different data dimensionality, we cannot batch
examples from data dictionaries together. Instead, we generate random
batches within each data dictionary, and then randomize these batches
while holding onto the dataname, so that when it's time to feed
the graph, the correct in/out matrices can be selected, per batch.
Args:
datasets: A dict of data dicts. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
kind: 'train' or 'valid'
Returns:
A flat list, in which each element is a pair ('name', indices).
"""
batch_size = self.hps.batch_size
ndatasets = len(datasets)
random_example_idxs = {}
epoch_idxs = {}
all_name_example_idx_pairs = []
kind_data = kind + '_data'
for name, data_dict in datasets.items():
nexamples, ntime, data_dim = data_dict[kind_data].shape
epoch_idxs[name] = 0
random_example_idxs, _ = \
self.randomize_example_idxs_mod_batch_size(nexamples, batch_size)
epoch_size = random_example_idxs.shape[0]
names = [name] * epoch_size
all_name_example_idx_pairs += zip(names, random_example_idxs)
np.random.shuffle(all_name_example_idx_pairs) # shuffle in place
return all_name_example_idx_pairs
def train_epoch(self, datasets, batch_size=None, do_save_ckpt=True):
"""Train the model through the entire dataset once.
Args:
datasets: A dict of data dicts. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
batch_size (optional): The batch_size to use.
do_save_ckpt (optional): Should the routine save a checkpoint on this
training epoch?
Returns:
A tuple with 6 float values:
(total cost of the epoch, epoch reconstruction cost,
epoch kl cost, KL weight used this training epoch,
total l2 cost on generator, and the corresponding weight).
"""
ops_to_eval = [self.cost, self.recon_cost,
self.kl_cost, self.kl_weight,
self.l2_cost, self.l2_weight,
self.train_op]
collected_op_values = self.run_epoch(datasets, ops_to_eval, kind="train")
total_cost = total_recon_cost = total_kl_cost = 0.0
# normalizing by batch done in distributions.py
epoch_size = len(collected_op_values)
for op_values in collected_op_values:
total_cost += op_values[0]
total_recon_cost += op_values[1]
total_kl_cost += op_values[2]
kl_weight = collected_op_values[-1][3]
l2_cost = collected_op_values[-1][4]
l2_weight = collected_op_values[-1][5]
epoch_total_cost = total_cost / epoch_size
epoch_recon_cost = total_recon_cost / epoch_size
epoch_kl_cost = total_kl_cost / epoch_size
if do_save_ckpt:
session = tf.get_default_session()
checkpoint_path = os.path.join(self.hps.lfads_save_dir,
self.hps.checkpoint_name + '.ckpt')
self.seso_saver.save(session, checkpoint_path,
global_step=self.train_step)
return epoch_total_cost, epoch_recon_cost, epoch_kl_cost, \
kl_weight, l2_cost, l2_weight
def run_epoch(self, datasets, ops_to_eval, kind="train", batch_size=None,
do_collect=True, keep_prob=None):
"""Run the model through the entire dataset once.
Args:
datasets: A dict of data dicts. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
ops_to_eval: A list of tensorflow operations that will be evaluated in
the tf.session.run() call.
batch_size (optional): The batch_size to use.
do_collect (optional): Should the routine collect all session.run
output as a list, and return it?
keep_prob (optional): The dropout keep probability.
Returns:
A list of lists, the internal list is the return for the ops for each
session.run() call. The outer list collects over the epoch.
"""
hps = self.hps
all_name_example_idx_pairs = \
self.shuffle_and_flatten_datasets(datasets, kind)
kind_data = kind + '_data'
kind_ext_input = kind + '_ext_input'
total_cost = total_recon_cost = total_kl_cost = 0.0
session = tf.get_default_session()
epoch_size = len(all_name_example_idx_pairs)
evaled_ops_list = []
for name, example_idxs in all_name_example_idx_pairs:
data_dict = datasets[name]
data_extxd = data_dict[kind_data]
if hps.output_dist == 'poisson' and hps.temporal_spike_jitter_width > 0:
data_extxd = self.shuffle_spikes_in_time(data_extxd)
ext_input_extxi = data_dict[kind_ext_input]
data_bxtxd, ext_input_bxtxi = self.get_batch(data_extxd, ext_input_extxi,
example_idxs=example_idxs)
feed_dict = self.build_feed_dict(name, data_bxtxd, ext_input_bxtxi,
keep_prob=keep_prob)
evaled_ops_np = session.run(ops_to_eval, feed_dict=feed_dict)
if do_collect:
evaled_ops_list.append(evaled_ops_np)
return evaled_ops_list
def summarize_all(self, datasets, summary_values):
"""Plot and summarize stuff in tensorboard.
Note that everything done in the current function is otherwise done on
a single, randomly selected dataset (except for summary_values, which are
passed in.)
Args:
datasets, the dictionary of datasets used in the study.
summary_values: These summary values are created from the training loop,
and so summarize the entire set of datasets.
"""
hps = self.hps
tr_kl_cost = summary_values['tr_kl_cost']
tr_recon_cost = summary_values['tr_recon_cost']
tr_total_cost = summary_values['tr_total_cost']
kl_weight = summary_values['kl_weight']
l2_weight = summary_values['l2_weight']
l2_cost = summary_values['l2_cost']
has_any_valid_set = summary_values['has_any_valid_set']
i = summary_values['nepochs']
session = tf.get_default_session()
train_summ, train_step = session.run([self.merged_train,
self.train_step],
feed_dict={self.l2_cost_ph:l2_cost,
self.kl_cost_ph:tr_kl_cost,
self.recon_cost_ph:tr_recon_cost,
self.total_cost_ph:tr_total_cost})
self.writer.add_summary(train_summ, train_step)
if has_any_valid_set:
ev_kl_cost = summary_values['ev_kl_cost']
ev_recon_cost = summary_values['ev_recon_cost']
ev_total_cost = summary_values['ev_total_cost']
eval_summ = session.run(self.merged_valid,
feed_dict={self.kl_cost_ph:ev_kl_cost,
self.recon_cost_ph:ev_recon_cost,
self.total_cost_ph:ev_total_cost})
self.writer.add_summary(eval_summ, train_step)
print("Epoch:%d, step:%d (TRAIN, VALID): total: %.2f, %.2f\
recon: %.2f, %.2f, kl: %.2f, %.2f, l2: %.5f,\
kl weight: %.2f, l2 weight: %.2f" % \
(i, train_step, tr_total_cost, ev_total_cost,
tr_recon_cost, ev_recon_cost, tr_kl_cost, ev_kl_cost,
l2_cost, kl_weight, l2_weight))
csv_outstr = "epoch,%d, step,%d, total,%.2f,%.2f, \
recon,%.2f,%.2f, kl,%.2f,%.2f, l2,%.5f, \
klweight,%.2f, l2weight,%.2f\n"% \
(i, train_step, tr_total_cost, ev_total_cost,
tr_recon_cost, ev_recon_cost, tr_kl_cost, ev_kl_cost,
l2_cost, kl_weight, l2_weight)
else:
print("Epoch:%d, step:%d TRAIN: total: %.2f recon: %.2f, kl: %.2f,\
l2: %.5f, kl weight: %.2f, l2 weight: %.2f" % \
(i, train_step, tr_total_cost, tr_recon_cost, tr_kl_cost,
l2_cost, kl_weight, l2_weight))
csv_outstr = "epoch,%d, step,%d, total,%.2f, recon,%.2f, kl,%.2f, \
l2,%.5f, klweight,%.2f, l2weight,%.2f\n"% \
(i, train_step, tr_total_cost, tr_recon_cost,
tr_kl_cost, l2_cost, kl_weight, l2_weight)
if self.hps.csv_log:
csv_file = os.path.join(self.hps.lfads_save_dir, self.hps.csv_log+'.csv')
with open(csv_file, "a") as myfile:
myfile.write(csv_outstr)
def plot_single_example(self, datasets):
"""Plot an image relating to a randomly chosen, specific example. We use
posterior sample and average by taking one example, and filling a whole
batch with that example, sample from the posterior, and then average the
quantities.
"""
hps = self.hps
all_data_names = datasets.keys()
data_name = np.random.permutation(all_data_names)[0]
data_dict = datasets[data_name]
has_valid_set = True if data_dict['valid_data'] is not None else False
cf = 1.0 # plotting concern
# posterior sample and average here
E, _, _ = data_dict['train_data'].shape
eidx = np.random.choice(E)
example_idxs = eidx * np.ones(hps.batch_size, dtype=np.int32)
train_data_bxtxd, train_ext_input_bxtxi = \
self.get_batch(data_dict['train_data'], data_dict['train_ext_input'],
example_idxs=example_idxs)
truth_train_data_bxtxd = None
if 'train_truth' in data_dict and data_dict['train_truth'] is not None:
truth_train_data_bxtxd, _ = self.get_batch(data_dict['train_truth'],
example_idxs=example_idxs)
cf = data_dict['conversion_factor']
# plotter does averaging
train_model_values = self.eval_model_runs_batch(data_name,
train_data_bxtxd,
train_ext_input_bxtxi,
do_average_batch=False)
train_step = train_model_values['train_steps']
feed_dict = self.build_feed_dict(data_name, train_data_bxtxd,
train_ext_input_bxtxi, keep_prob=1.0)
session = tf.get_default_session()
generic_summ = session.run(self.merged_generic, feed_dict=feed_dict)
self.writer.add_summary(generic_summ, train_step)
valid_data_bxtxd = valid_model_values = valid_ext_input_bxtxi = None
truth_valid_data_bxtxd = None
if has_valid_set:
E, _, _ = data_dict['valid_data'].shape
eidx = np.random.choice(E)
example_idxs = eidx * np.ones(hps.batch_size, dtype=np.int32)
valid_data_bxtxd, valid_ext_input_bxtxi = \
self.get_batch(data_dict['valid_data'],
data_dict['valid_ext_input'],
example_idxs=example_idxs)
if 'valid_truth' in data_dict and data_dict['valid_truth'] is not None:
truth_valid_data_bxtxd, _ = self.get_batch(data_dict['valid_truth'],
example_idxs=example_idxs)
else:
truth_valid_data_bxtxd = None
# plotter does averaging
valid_model_values = self.eval_model_runs_batch(data_name,
valid_data_bxtxd,
valid_ext_input_bxtxi,
do_average_batch=False)
example_image = plot_lfads(train_bxtxd=train_data_bxtxd,
train_model_vals=train_model_values,
train_ext_input_bxtxi=train_ext_input_bxtxi,
train_truth_bxtxd=truth_train_data_bxtxd,
valid_bxtxd=valid_data_bxtxd,
valid_model_vals=valid_model_values,
valid_ext_input_bxtxi=valid_ext_input_bxtxi,
valid_truth_bxtxd=truth_valid_data_bxtxd,
bidx=None, cf=cf, output_dist=hps.output_dist)
example_image = np.expand_dims(example_image, axis=0)
example_summ = session.run(self.merged_examples,
feed_dict={self.example_image : example_image})
self.writer.add_summary(example_summ)
def train_model(self, datasets):
"""Train the model, print per-epoch information, and save checkpoints.
Loop over training epochs. The function that actually does the
training is train_epoch. This function iterates over the training
data, one epoch at a time. The learning rate schedule is such
that it will stay the same until the cost goes up in comparison to
the last few values, then it will drop.
Args:
datasets: A dict of data dicts. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
"""
hps = self.hps
has_any_valid_set = False
for data_dict in datasets.values():
if data_dict['valid_data'] is not None:
has_any_valid_set = True
break
session = tf.get_default_session()
lr = session.run(self.learning_rate)
lr_stop = hps.learning_rate_stop
i = -1
train_costs = []
valid_costs = []
ev_total_cost = ev_recon_cost = ev_kl_cost = 0.0
lowest_ev_cost = np.Inf
while True:
i += 1
      do_save_ckpt = (i % 10 == 0)
tr_total_cost, tr_recon_cost, tr_kl_cost, kl_weight, l2_cost, l2_weight = \
self.train_epoch(datasets, do_save_ckpt=do_save_ckpt)
# Evaluate the validation cost, and potentially save. Note that this
# routine will not save a validation checkpoint until the kl weight and
# l2 weights are equal to 1.0.
if has_any_valid_set:
ev_total_cost, ev_recon_cost, ev_kl_cost = \
self.eval_cost_epoch(datasets, kind='valid')
valid_costs.append(ev_total_cost)
# > 1 may give more consistent results, but not the actual lowest vae.
# == 1 gives the lowest vae seen so far.
n_lve = 1
run_avg_lve = np.mean(valid_costs[-n_lve:])
# conditions for saving checkpoints:
# KL weight must have finished stepping (>=1.0), AND
# L2 weight must have finished stepping OR L2 is not being used, AND
# the current run has a lower LVE than previous runs AND
        #      len(valid_costs) > n_lve, i.e. enough validation costs have
        #        been recorded for the running average over n_lve to be valid.
if kl_weight >= 1.0 and \
(l2_weight >= 1.0 or \
(self.hps.l2_gen_scale == 0.0 and self.hps.l2_con_scale == 0.0)) \
and (len(valid_costs) > n_lve and run_avg_lve < lowest_ev_cost):
lowest_ev_cost = run_avg_lve
checkpoint_path = os.path.join(self.hps.lfads_save_dir,
self.hps.checkpoint_name + '_lve.ckpt')
self.lve_saver.save(session, checkpoint_path,
global_step=self.train_step,
latest_filename='checkpoint_lve')
# Plot and summarize.
values = {'nepochs':i, 'has_any_valid_set': has_any_valid_set,
'tr_total_cost':tr_total_cost, 'ev_total_cost':ev_total_cost,
'tr_recon_cost':tr_recon_cost, 'ev_recon_cost':ev_recon_cost,
'tr_kl_cost':tr_kl_cost, 'ev_kl_cost':ev_kl_cost,
'l2_weight':l2_weight, 'kl_weight':kl_weight,
'l2_cost':l2_cost}
self.summarize_all(datasets, values)
self.plot_single_example(datasets)
# Manage learning rate.
train_res = tr_total_cost
n_lr = hps.learning_rate_n_to_compare
if len(train_costs) > n_lr and train_res > np.max(train_costs[-n_lr:]):
_ = session.run(self.learning_rate_decay_op)
lr = session.run(self.learning_rate)
print(" Decreasing learning rate to %f." % lr)
# Force the system to run n_lr times while at this lr.
train_costs.append(np.inf)
else:
train_costs.append(train_res)
if lr < lr_stop:
print("Stopping optimization based on learning rate criteria.")
break
def eval_cost_epoch(self, datasets, kind='train', ext_input_extxi=None,
batch_size=None):
"""Evaluate the cost of the epoch.
Args:
      datasets: The dictionary of datasets (training and validation) used for
        training and evaluation of the model.
      kind: 'train' or 'valid', selecting which data split to evaluate.
Returns:
a 3 tuple of costs:
(epoch total cost, epoch reconstruction cost, epoch KL cost)
"""
ops_to_eval = [self.cost, self.recon_cost, self.kl_cost]
collected_op_values = self.run_epoch(datasets, ops_to_eval, kind=kind,
keep_prob=1.0)
total_cost = total_recon_cost = total_kl_cost = 0.0
# normalizing by batch done in distributions.py
epoch_size = len(collected_op_values)
for op_values in collected_op_values:
total_cost += op_values[0]
total_recon_cost += op_values[1]
total_kl_cost += op_values[2]
epoch_total_cost = total_cost / epoch_size
epoch_recon_cost = total_recon_cost / epoch_size
epoch_kl_cost = total_kl_cost / epoch_size
return epoch_total_cost, epoch_recon_cost, epoch_kl_cost
def eval_model_runs_batch(self, data_name, data_bxtxd, ext_input_bxtxi=None,
do_eval_cost=False, do_average_batch=False):
"""Returns all the goodies for the entire model, per batch.
    data_bxtxd and ext_input_bxtxi may have fewer than batch_size examples
    along the first (batch) dimension, in which case this routine handles the
    padding and truncation automatically.
Args:
data_name: The name of the data dict, to select which in/out matrices
to use.
data_bxtxd: Numpy array training data with shape:
batch_size x # time steps x # dimensions
ext_input_bxtxi: Numpy array training external input with shape:
batch_size x # time steps x # external input dims
      do_eval_cost (optional): If true, evaluate the IWAE (Importance Weighted
        Autoencoder) log-likelihood bound, instead of the VAE version.
do_average_batch (optional): average over the batch, useful for getting
good IWAE costs, and model outputs for a single data point.
Returns:
A dictionary with the outputs of the model decoder, namely:
        prior g0 mean, prior g0 variance, approx. posterior mean, approx.
        posterior variance, the generator initial conditions, the control inputs (if
enabled), the state of the generator, the factors, and the rates.
"""
session = tf.get_default_session()
# if fewer than batch_size provided, pad to batch_size
hps = self.hps
batch_size = hps.batch_size
E, _, _ = data_bxtxd.shape
if E < hps.batch_size:
data_bxtxd = np.pad(data_bxtxd, ((0, hps.batch_size-E), (0, 0), (0, 0)),
mode='constant', constant_values=0)
if ext_input_bxtxi is not None:
ext_input_bxtxi = np.pad(ext_input_bxtxi,
((0, hps.batch_size-E), (0, 0), (0, 0)),
mode='constant', constant_values=0)
feed_dict = self.build_feed_dict(data_name, data_bxtxd,
ext_input_bxtxi, keep_prob=1.0)
# Non-temporal signals will be batch x dim.
# Temporal signals are list length T with elements batch x dim.
tf_vals = [self.gen_ics, self.gen_states, self.factors,
self.output_dist_params]
tf_vals.append(self.cost)
tf_vals.append(self.nll_bound_vae)
tf_vals.append(self.nll_bound_iwae)
tf_vals.append(self.train_step) # not train_op!
if self.hps.ic_dim > 0:
tf_vals += [self.prior_zs_g0.mean, self.prior_zs_g0.logvar,
self.posterior_zs_g0.mean, self.posterior_zs_g0.logvar]
if self.hps.co_dim > 0:
tf_vals.append(self.controller_outputs)
tf_vals_flat, fidxs = flatten(tf_vals)
np_vals_flat = session.run(tf_vals_flat, feed_dict=feed_dict)
ff = 0
gen_ics = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
gen_states = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
factors = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
out_dist_params = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
costs = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
nll_bound_vaes = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
nll_bound_iwaes = [np_vals_flat[f] for f in fidxs[ff]]; ff +=1
train_steps = [np_vals_flat[f] for f in fidxs[ff]]; ff +=1
if self.hps.ic_dim > 0:
prior_g0_mean = [np_vals_flat[f] for f in fidxs[ff]]; ff +=1
prior_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
post_g0_mean = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
post_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
if self.hps.co_dim > 0:
controller_outputs = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
# [0] are to take out the non-temporal items from lists
gen_ics = gen_ics[0]
costs = costs[0]
nll_bound_vaes = nll_bound_vaes[0]
nll_bound_iwaes = nll_bound_iwaes[0]
train_steps = train_steps[0]
# Convert to full tensors, not lists of tensors in time dim.
gen_states = list_t_bxn_to_tensor_bxtxn(gen_states)
factors = list_t_bxn_to_tensor_bxtxn(factors)
out_dist_params = list_t_bxn_to_tensor_bxtxn(out_dist_params)
if self.hps.ic_dim > 0:
# select first time point
prior_g0_mean = prior_g0_mean[0]
prior_g0_logvar = prior_g0_logvar[0]
post_g0_mean = post_g0_mean[0]
post_g0_logvar = post_g0_logvar[0]
if self.hps.co_dim > 0:
controller_outputs = list_t_bxn_to_tensor_bxtxn(controller_outputs)
# slice out the trials in case < batch_size provided
if E < hps.batch_size:
idx = np.arange(E)
gen_ics = gen_ics[idx, :]
gen_states = gen_states[idx, :]
factors = factors[idx, :, :]
out_dist_params = out_dist_params[idx, :, :]
if self.hps.ic_dim > 0:
prior_g0_mean = prior_g0_mean[idx, :]
prior_g0_logvar = prior_g0_logvar[idx, :]
post_g0_mean = post_g0_mean[idx, :]
post_g0_logvar = post_g0_logvar[idx, :]
if self.hps.co_dim > 0:
controller_outputs = controller_outputs[idx, :, :]
if do_average_batch:
gen_ics = np.mean(gen_ics, axis=0)
gen_states = np.mean(gen_states, axis=0)
factors = np.mean(factors, axis=0)
out_dist_params = np.mean(out_dist_params, axis=0)
if self.hps.ic_dim > 0:
prior_g0_mean = np.mean(prior_g0_mean, axis=0)
prior_g0_logvar = np.mean(prior_g0_logvar, axis=0)
post_g0_mean = np.mean(post_g0_mean, axis=0)
post_g0_logvar = np.mean(post_g0_logvar, axis=0)
if self.hps.co_dim > 0:
controller_outputs = np.mean(controller_outputs, axis=0)
model_vals = {}
model_vals['gen_ics'] = gen_ics
model_vals['gen_states'] = gen_states
model_vals['factors'] = factors
model_vals['output_dist_params'] = out_dist_params
model_vals['costs'] = costs
model_vals['nll_bound_vaes'] = nll_bound_vaes
model_vals['nll_bound_iwaes'] = nll_bound_iwaes
model_vals['train_steps'] = train_steps
if self.hps.ic_dim > 0:
model_vals['prior_g0_mean'] = prior_g0_mean
model_vals['prior_g0_logvar'] = prior_g0_logvar
model_vals['post_g0_mean'] = post_g0_mean
model_vals['post_g0_logvar'] = post_g0_logvar
if self.hps.co_dim > 0:
model_vals['controller_outputs'] = controller_outputs
return model_vals
def eval_model_runs_avg_epoch(self, data_name, data_extxd,
ext_input_extxi=None):
"""Returns all the expected value for goodies for the entire model.
The expected value is taken over hidden (z) variables, namely the initial
conditions and the control inputs. The expected value is approximate, and
    accomplished by drawing (batch_size) samples for every example.
Args:
data_name: The name of the data dict, to select which in/out matrices
to use.
data_extxd: Numpy array training data with shape:
# examples x # time steps x # dimensions
ext_input_extxi (optional): Numpy array training external input with
shape: # examples x # time steps x # external input dims
Returns:
A dictionary with the averaged outputs of the model decoder, namely:
        prior g0 mean, prior g0 variance, approx. posterior mean, approx.
        posterior variance, the generator initial conditions, the control inputs (if
enabled), the state of the generator, the factors, and the output
distribution parameters, e.g. (rates or mean and variances).
"""
hps = self.hps
batch_size = hps.batch_size
E, T, D = data_extxd.shape
E_to_process = hps.ps_nexamples_to_process
if E_to_process > E:
E_to_process = E
if hps.ic_dim > 0:
prior_g0_mean = np.zeros([E_to_process, hps.ic_dim])
prior_g0_logvar = np.zeros([E_to_process, hps.ic_dim])
post_g0_mean = np.zeros([E_to_process, hps.ic_dim])
post_g0_logvar = np.zeros([E_to_process, hps.ic_dim])
if hps.co_dim > 0:
controller_outputs = np.zeros([E_to_process, T, hps.co_dim])
gen_ics = np.zeros([E_to_process, hps.gen_dim])
gen_states = np.zeros([E_to_process, T, hps.gen_dim])
factors = np.zeros([E_to_process, T, hps.factors_dim])
if hps.output_dist == 'poisson':
out_dist_params = np.zeros([E_to_process, T, D])
elif hps.output_dist == 'gaussian':
out_dist_params = np.zeros([E_to_process, T, D+D])
else:
assert False, "NIY"
costs = np.zeros(E_to_process)
nll_bound_vaes = np.zeros(E_to_process)
nll_bound_iwaes = np.zeros(E_to_process)
train_steps = np.zeros(E_to_process)
for es_idx in range(E_to_process):
print("Running %d of %d." % (es_idx+1, E_to_process))
example_idxs = es_idx * np.ones(batch_size, dtype=np.int32)
data_bxtxd, ext_input_bxtxi = self.get_batch(data_extxd,
ext_input_extxi,
batch_size=batch_size,
example_idxs=example_idxs)
model_values = self.eval_model_runs_batch(data_name, data_bxtxd,
ext_input_bxtxi,
do_eval_cost=True,
do_average_batch=True)
if self.hps.ic_dim > 0:
prior_g0_mean[es_idx,:] = model_values['prior_g0_mean']
prior_g0_logvar[es_idx,:] = model_values['prior_g0_logvar']
post_g0_mean[es_idx,:] = model_values['post_g0_mean']
post_g0_logvar[es_idx,:] = model_values['post_g0_logvar']
gen_ics[es_idx,:] = model_values['gen_ics']
if self.hps.co_dim > 0:
controller_outputs[es_idx,:,:] = model_values['controller_outputs']
gen_states[es_idx,:,:] = model_values['gen_states']
factors[es_idx,:,:] = model_values['factors']
out_dist_params[es_idx,:,:] = model_values['output_dist_params']
costs[es_idx] = model_values['costs']
nll_bound_vaes[es_idx] = model_values['nll_bound_vaes']
nll_bound_iwaes[es_idx] = model_values['nll_bound_iwaes']
train_steps[es_idx] = model_values['train_steps']
print('bound nll(vae): %.3f, bound nll(iwae): %.3f' \
% (nll_bound_vaes[es_idx], nll_bound_iwaes[es_idx]))
model_runs = {}
if self.hps.ic_dim > 0:
model_runs['prior_g0_mean'] = prior_g0_mean
model_runs['prior_g0_logvar'] = prior_g0_logvar
model_runs['post_g0_mean'] = post_g0_mean
model_runs['post_g0_logvar'] = post_g0_logvar
model_runs['gen_ics'] = gen_ics
if self.hps.co_dim > 0:
model_runs['controller_outputs'] = controller_outputs
model_runs['gen_states'] = gen_states
model_runs['factors'] = factors
model_runs['output_dist_params'] = out_dist_params
model_runs['costs'] = costs
model_runs['nll_bound_vaes'] = nll_bound_vaes
model_runs['nll_bound_iwaes'] = nll_bound_iwaes
model_runs['train_steps'] = train_steps
return model_runs
def eval_model_runs_push_mean(self, data_name, data_extxd,
ext_input_extxi=None):
"""Returns values of interest for the model by pushing the means through
The mean values for both initial conditions and the control inputs are
pushed through the model instead of sampling (as is done in
eval_model_runs_avg_epoch).
This is a quick and approximate version of estimating these values instead
of sampling from the posterior many times and then averaging those values of
interest.
Internally, a total of batch_size trials are run through the model at once.
Args:
data_name: The name of the data dict, to select which in/out matrices
to use.
data_extxd: Numpy array training data with shape:
# examples x # time steps x # dimensions
ext_input_extxi (optional): Numpy array training external input with
shape: # examples x # time steps x # external input dims
Returns:
A dictionary with the estimated outputs of the model decoder, namely:
        prior g0 mean, prior g0 variance, approx. posterior mean, approx.
        posterior variance, the generator initial conditions, the control inputs (if
enabled), the state of the generator, the factors, and the output
distribution parameters, e.g. (rates or mean and variances).
"""
hps = self.hps
batch_size = hps.batch_size
E, T, D = data_extxd.shape
E_to_process = hps.ps_nexamples_to_process
if E_to_process > E:
print("Setting number of posterior samples to process to : ", E)
E_to_process = E
if hps.ic_dim > 0:
prior_g0_mean = np.zeros([E_to_process, hps.ic_dim])
prior_g0_logvar = np.zeros([E_to_process, hps.ic_dim])
post_g0_mean = np.zeros([E_to_process, hps.ic_dim])
post_g0_logvar = np.zeros([E_to_process, hps.ic_dim])
if hps.co_dim > 0:
controller_outputs = np.zeros([E_to_process, T, hps.co_dim])
gen_ics = np.zeros([E_to_process, hps.gen_dim])
gen_states = np.zeros([E_to_process, T, hps.gen_dim])
factors = np.zeros([E_to_process, T, hps.factors_dim])
if hps.output_dist == 'poisson':
out_dist_params = np.zeros([E_to_process, T, D])
elif hps.output_dist == 'gaussian':
out_dist_params = np.zeros([E_to_process, T, D+D])
else:
assert False, "NIY"
costs = np.zeros(E_to_process)
nll_bound_vaes = np.zeros(E_to_process)
nll_bound_iwaes = np.zeros(E_to_process)
train_steps = np.zeros(E_to_process)
# generator that will yield 0:N in groups of per items, e.g.
# (0:per-1), (per:2*per-1), ..., with the last group containing <= per items
# this will be used to feed per=batch_size trials into the model at a time
def trial_batches(N, per):
for i in range(0, N, per):
yield np.arange(i, min(i+per, N), dtype=np.int32)
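    # Illustrative note (not part of the original code): with hypothetical
    # values N=7 and per=3, trial_batches(7, 3) yields array([0, 1, 2]),
    # then array([3, 4, 5]), then array([6]).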
for batch_idx, es_idx in enumerate(trial_batches(E_to_process,
hps.batch_size)):
print("Running trial batch %d with %d trials" % (batch_idx+1,
len(es_idx)))
data_bxtxd, ext_input_bxtxi = self.get_batch(data_extxd,
ext_input_extxi,
batch_size=batch_size,
example_idxs=es_idx)
model_values = self.eval_model_runs_batch(data_name, data_bxtxd,
ext_input_bxtxi,
do_eval_cost=True,
do_average_batch=False)
if self.hps.ic_dim > 0:
prior_g0_mean[es_idx,:] = model_values['prior_g0_mean']
prior_g0_logvar[es_idx,:] = model_values['prior_g0_logvar']
post_g0_mean[es_idx,:] = model_values['post_g0_mean']
post_g0_logvar[es_idx,:] = model_values['post_g0_logvar']
gen_ics[es_idx,:] = model_values['gen_ics']
if self.hps.co_dim > 0:
controller_outputs[es_idx,:,:] = model_values['controller_outputs']
gen_states[es_idx,:,:] = model_values['gen_states']
factors[es_idx,:,:] = model_values['factors']
out_dist_params[es_idx,:,:] = model_values['output_dist_params']
# TODO
# model_values['costs'] and other costs come out as scalars, summed over
# all the trials in the batch. what we want is the per-trial costs
costs[es_idx] = model_values['costs']
nll_bound_vaes[es_idx] = model_values['nll_bound_vaes']
nll_bound_iwaes[es_idx] = model_values['nll_bound_iwaes']
train_steps[es_idx] = model_values['train_steps']
model_runs = {}
if self.hps.ic_dim > 0:
model_runs['prior_g0_mean'] = prior_g0_mean
model_runs['prior_g0_logvar'] = prior_g0_logvar
model_runs['post_g0_mean'] = post_g0_mean
model_runs['post_g0_logvar'] = post_g0_logvar
model_runs['gen_ics'] = gen_ics
if self.hps.co_dim > 0:
model_runs['controller_outputs'] = controller_outputs
model_runs['gen_states'] = gen_states
model_runs['factors'] = factors
model_runs['output_dist_params'] = out_dist_params
# You probably do not want the LL associated values when pushing the mean
# instead of sampling.
model_runs['costs'] = costs
model_runs['nll_bound_vaes'] = nll_bound_vaes
model_runs['nll_bound_iwaes'] = nll_bound_iwaes
model_runs['train_steps'] = train_steps
return model_runs
def write_model_runs(self, datasets, output_fname=None, push_mean=False):
"""Run the model on the data in data_dict, and save the computed values.
    LFADS generates a number of outputs for each example, and these are all
saved. They are:
The mean and variance of the prior of g0.
The mean and variance of approximate posterior of g0.
The control inputs (if enabled).
The initial conditions, g0, for all examples.
The generator states for all time.
The factors for all time.
The output distribution parameters (e.g. rates) for all time.
Args:
datasets: A dictionary of named data_dictionaries, see top of lfads.py
output_fname: a file name stem for the output files.
push_mean: If False (default), generates batch_size samples for each trial
and averages the results. if True, runs each trial once without noise,
pushing the posterior mean initial conditions and control inputs through
the trained model. False is used for posterior_sample_and_average, True
is used for posterior_push_mean.
"""
hps = self.hps
kind = hps.kind
for data_name, data_dict in datasets.items():
data_tuple = [('train', data_dict['train_data'],
data_dict['train_ext_input']),
('valid', data_dict['valid_data'],
data_dict['valid_ext_input'])]
for data_kind, data_extxd, ext_input_extxi in data_tuple:
if not output_fname:
fname = "model_runs_" + data_name + '_' + data_kind + '_' + kind
else:
fname = output_fname + data_name + '_' + data_kind + '_' + kind
print("Writing data for %s data and kind %s." % (data_name, data_kind))
if push_mean:
model_runs = self.eval_model_runs_push_mean(data_name, data_extxd,
ext_input_extxi)
else:
model_runs = self.eval_model_runs_avg_epoch(data_name, data_extxd,
ext_input_extxi)
full_fname = os.path.join(hps.lfads_save_dir, fname)
write_data(full_fname, model_runs, compression='gzip')
print("Done.")
def write_model_samples(self, dataset_name, output_fname=None):
"""Use the prior distribution to generate batch_size number of samples
from the model.
LFADS generates a number of outputs for each sample, and these are all
saved. They are:
The mean and variance of the prior of g0.
The control inputs (if enabled).
The initial conditions, g0, for all examples.
The generator states for all time.
The factors for all time.
The output distribution parameters (e.g. rates) for all time.
Args:
dataset_name: The name of the dataset to grab the factors -> rates
alignment matrices from.
output_fname: The name of the file in which to save the generated
samples.
"""
hps = self.hps
batch_size = hps.batch_size
print("Generating %d samples" % (batch_size))
tf_vals = [self.factors, self.gen_states, self.gen_ics,
self.cost, self.output_dist_params]
if hps.ic_dim > 0:
tf_vals += [self.prior_zs_g0.mean, self.prior_zs_g0.logvar]
if hps.co_dim > 0:
tf_vals += [self.prior_zs_ar_con.samples_t]
tf_vals_flat, fidxs = flatten(tf_vals)
session = tf.get_default_session()
feed_dict = {}
feed_dict[self.dataName] = dataset_name
feed_dict[self.keep_prob] = 1.0
np_vals_flat = session.run(tf_vals_flat, feed_dict=feed_dict)
ff = 0
factors = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
gen_states = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
gen_ics = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
costs = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
output_dist_params = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
if hps.ic_dim > 0:
prior_g0_mean = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
prior_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
if hps.co_dim > 0:
prior_zs_ar_con = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
# [0] are to take out the non-temporal items from lists
gen_ics = gen_ics[0]
costs = costs[0]
# Convert to full tensors, not lists of tensors in time dim.
gen_states = list_t_bxn_to_tensor_bxtxn(gen_states)
factors = list_t_bxn_to_tensor_bxtxn(factors)
output_dist_params = list_t_bxn_to_tensor_bxtxn(output_dist_params)
if hps.ic_dim > 0:
prior_g0_mean = prior_g0_mean[0]
prior_g0_logvar = prior_g0_logvar[0]
if hps.co_dim > 0:
prior_zs_ar_con = list_t_bxn_to_tensor_bxtxn(prior_zs_ar_con)
model_vals = {}
model_vals['gen_ics'] = gen_ics
model_vals['gen_states'] = gen_states
model_vals['factors'] = factors
model_vals['output_dist_params'] = output_dist_params
model_vals['costs'] = costs.reshape(1)
if hps.ic_dim > 0:
model_vals['prior_g0_mean'] = prior_g0_mean
model_vals['prior_g0_logvar'] = prior_g0_logvar
if hps.co_dim > 0:
model_vals['prior_zs_ar_con'] = prior_zs_ar_con
full_fname = os.path.join(hps.lfads_save_dir, output_fname)
write_data(full_fname, model_vals, compression='gzip')
print("Done.")
@staticmethod
def eval_model_parameters(use_nested=True, include_strs=None):
"""Evaluate and return all of the TF variables in the model.
Args:
      use_nested (optional): For returning values, use a nested dictionary,
        based on variable scoping, or return all variables in a flat dictionary.
include_strs (optional): A list of strings to use as a filter, to reduce the
number of variables returned. A variable name must contain at least one
string in include_strs as a sub-string in order to be returned.
Returns:
The parameters of the model. This can be in a flat
dictionary, or a nested dictionary, where the nesting is by variable
scope.
"""
all_tf_vars = tf.global_variables()
session = tf.get_default_session()
all_tf_vars_eval = session.run(all_tf_vars)
vars_dict = {}
strs = ["LFADS"]
if include_strs:
strs += include_strs
for i, (var, var_eval) in enumerate(zip(all_tf_vars, all_tf_vars_eval)):
      if any(s in var.name for s in strs):
if not isinstance(var_eval, np.ndarray): # for H5PY
print(var.name, """ is not numpy array, saving as numpy array
with value: """, var_eval, type(var_eval))
e = np.array(var_eval)
print(e, type(e))
else:
e = var_eval
vars_dict[var.name] = e
if not use_nested:
return vars_dict
var_names = vars_dict.keys()
nested_vars_dict = {}
current_dict = nested_vars_dict
for v, var_name in enumerate(var_names):
var_split_name_list = var_name.split('/')
split_name_list_len = len(var_split_name_list)
current_dict = nested_vars_dict
for p, part in enumerate(var_split_name_list):
if p < split_name_list_len - 1:
if part in current_dict:
current_dict = current_dict[part]
else:
current_dict[part] = {}
current_dict = current_dict[part]
else:
current_dict[part] = vars_dict[var_name]
return nested_vars_dict
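    # Illustrative note (not part of the original code): with a hypothetical
    # variable named 'LFADS/ic_enc/W:0' holding value w, the flat dictionary
    # entry {'LFADS/ic_enc/W:0': w} becomes the nested dictionary
    # {'LFADS': {'ic_enc': {'W:0': w}}}, one nesting level per '/'-scope.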
@staticmethod
def spikify_rates(rates_bxtxd):
"""Randomly spikify underlying rates according a Poisson distribution
Args:
rates_bxtxd: A numpy tensor with shape:
Returns:
A numpy array with the same shape as rates_bxtxd, but with the event
counts.
"""
B,T,N = rates_bxtxd.shape
assert all([B > 0, N > 0]), "problems"
    # Because the rate changes at every (b, t, n) entry, draw each Poisson
    # count separately in nested loops.
spikes_bxtxd = np.zeros([B,T,N], dtype=np.int32)
for b in range(B):
for t in range(T):
for n in range(N):
rate = rates_bxtxd[b,t,n]
count = np.random.poisson(rate)
spikes_bxtxd[b,t,n] = count
return spikes_bxtxd
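    # Note (illustrative, not part of the original code): np.random.poisson
    # broadcasts over array-valued rates, so the nested loops above compute
    # the same thing as
    #   spikes_bxtxd = np.random.poisson(rates_bxtxd).astype(np.int32)
    # which is typically much faster for large tensors.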
| 94,581 | 42.566099 | 128 | py |
models | models-master/research/lfads/synth_data/generate_labeled_rnn_data.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import os
import h5py
import numpy as np
from six.moves import xrange
from synthetic_data_utils import generate_data, generate_rnn
from synthetic_data_utils import get_train_n_valid_inds
from synthetic_data_utils import nparray_and_transpose
from synthetic_data_utils import spikify_data, split_list_by_inds
import tensorflow as tf
from utils import write_datasets
DATA_DIR = "rnn_synth_data_v1.0"
flags = tf.app.flags
flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/",
"Directory for saving data.")
flags.DEFINE_string("datafile_name", "conditioned_rnn_data",
"Name of data file for input case.")
flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.")
flags.DEFINE_float("T", 1.0, "Time in seconds to generate.")
flags.DEFINE_integer("C", 400, "Number of conditions")
flags.DEFINE_integer("N", 50, "Number of units for the RNN")
flags.DEFINE_float("train_percentage", 4.0/5.0,
"Percentage of train vs validation trials")
flags.DEFINE_integer("nreplications", 10,
"Number of spikifications of the same underlying rates.")
flags.DEFINE_float("g", 1.5, "Complexity of dynamics")
flags.DEFINE_float("x0_std", 1.0,
"Volume from which to pull initial conditions (affects diversity of dynamics.")
flags.DEFINE_float("tau", 0.025, "Time constant of RNN")
flags.DEFINE_float("dt", 0.010, "Time bin")
flags.DEFINE_float("max_firing_rate", 30.0, "Map 1.0 of RNN to a spikes per second")
FLAGS = flags.FLAGS
rng = np.random.RandomState(seed=FLAGS.synth_data_seed)
rnn_rngs = [np.random.RandomState(seed=FLAGS.synth_data_seed+1),
np.random.RandomState(seed=FLAGS.synth_data_seed+2)]
T = FLAGS.T
C = FLAGS.C
N = FLAGS.N
nreplications = FLAGS.nreplications
E = nreplications * C
train_percentage = FLAGS.train_percentage
ntimesteps = int(T / FLAGS.dt)
rnn_a = generate_rnn(rnn_rngs[0], N, FLAGS.g, FLAGS.tau, FLAGS.dt,
FLAGS.max_firing_rate)
rnn_b = generate_rnn(rnn_rngs[1], N, FLAGS.g, FLAGS.tau, FLAGS.dt,
FLAGS.max_firing_rate)
rnns = [rnn_a, rnn_b]
# pick which RNN is used on each trial
rnn_to_use = rng.randint(2, size=E)
ext_input = np.repeat(np.expand_dims(rnn_to_use, axis=1), ntimesteps, axis=1)
ext_input = np.expand_dims(ext_input, axis=2) # these are "a's" in the paper
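# Illustrative shape note (not part of the original script): after the
# expand_dims/repeat calls above, ext_input has shape (E, ntimesteps, 1),
# i.e. one scalar RNN-identity input per trial and time step.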
x0s = []
condition_labels = []
condition_number = 0
for c in range(C):
x0 = FLAGS.x0_std * rng.randn(N, 1)
x0s.append(np.tile(x0, nreplications))
for ns in range(nreplications):
condition_labels.append(condition_number)
condition_number += 1
x0s = np.concatenate(x0s, axis=1)
P_nxn = rng.randn(N, N) / np.sqrt(N)
# generate trials for both RNNs
rates_a, x0s_a, _ = generate_data(rnn_a, T=T, E=E, x0s=x0s, P_sxn=P_nxn,
input_magnitude=0.0, input_times=None)
spikes_a = spikify_data(rates_a, rng, rnn_a['dt'], rnn_a['max_firing_rate'])
rates_b, x0s_b, _ = generate_data(rnn_b, T=T, E=E, x0s=x0s, P_sxn=P_nxn,
input_magnitude=0.0, input_times=None)
spikes_b = spikify_data(rates_b, rng, rnn_b['dt'], rnn_b['max_firing_rate'])
# not the best way to do this but E is small enough
rates = []
spikes = []
for trial in xrange(E):
if rnn_to_use[trial] == 0:
rates.append(rates_a[trial])
spikes.append(spikes_a[trial])
else:
rates.append(rates_b[trial])
spikes.append(spikes_b[trial])
# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
nreplications)
rates_train, rates_valid = split_list_by_inds(rates, train_inds, valid_inds)
spikes_train, spikes_valid = split_list_by_inds(spikes, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = split_list_by_inds(
condition_labels, train_inds, valid_inds)
ext_input_train, ext_input_valid = split_list_by_inds(
ext_input, train_inds, valid_inds)
rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
spikes_train = nparray_and_transpose(spikes_train)
spikes_valid = nparray_and_transpose(spikes_valid)
# add train_ext_input and valid_ext input
data = {'train_truth': rates_train,
'valid_truth': rates_valid,
'train_data' : spikes_train,
'valid_data' : spikes_valid,
'train_ext_input' : np.array(ext_input_train),
'valid_ext_input': np.array(ext_input_valid),
'train_percentage' : train_percentage,
'nreplications' : nreplications,
'dt' : FLAGS.dt,
'P_sxn' : P_nxn,
'condition_labels_train' : condition_labels_train,
'condition_labels_valid' : condition_labels_valid,
'conversion_factor': 1.0 / rnn_a['conversion_factor']}
# just one dataset here
datasets = {}
dataset_name = 'dataset_N' + str(N)
datasets[dataset_name] = data
# write out the dataset
write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets)
print ('Saved to ', os.path.join(FLAGS.save_dir,
FLAGS.datafile_name + '_' + dataset_name))
| 5,832 | 38.412162 | 98 | py |
models | models-master/research/lfads/synth_data/synthetic_data_utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
from utils import write_datasets
import matplotlib
import matplotlib.pyplot as plt
import scipy.signal
def generate_rnn(rng, N, g, tau, dt, max_firing_rate):
"""Create a (vanilla) RNN with a bunch of hyper parameters for generating
chaotic data.
Args:
rng: numpy random number generator
N: number of hidden units
g: scaling of recurrent weight matrix in g W, with W ~ N(0,1/N)
tau: time scale of individual unit dynamics
dt: time step for equation updates
    max_firing_rate: how to rescale the -1,1 firing rates
Returns:
the dictionary of these parameters, plus some others.
"""
rnn = {}
rnn['N'] = N
rnn['W'] = rng.randn(N,N)/np.sqrt(N)
rnn['Bin'] = rng.randn(N)/np.sqrt(1.0)
rnn['Bin2'] = rng.randn(N)/np.sqrt(1.0)
rnn['b'] = np.zeros(N)
rnn['g'] = g
rnn['tau'] = tau
rnn['dt'] = dt
rnn['max_firing_rate'] = max_firing_rate
mfr = rnn['max_firing_rate'] # spikes / sec
nbins_per_sec = 1.0/rnn['dt'] # bins / sec
# Used for plotting in LFADS
rnn['conversion_factor'] = mfr / nbins_per_sec # spikes / bin
return rnn
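# The sketch below is illustrative only and not part of the original module;
# the sizes and seed are hypothetical and simply show how generate_rnn,
# generate_data and spikify_data (defined later in this file) fit together.
def _example_generate_and_spikify(seed=0, nexamples=4):
  """Generate a small chaotic-RNN dataset and spikify it (illustrative)."""
  example_rng = np.random.RandomState(seed)
  rnn = generate_rnn(example_rng, N=50, g=1.5, tau=0.025, dt=0.010,
                     max_firing_rate=30.0)
  x0s = example_rng.randn(50, nexamples)  # one initial condition per column
  rates, _, _ = generate_data(rnn, T=1.0, E=nexamples, x0s=x0s)
  spikes = spikify_data(rates, example_rng, dt=rnn['dt'],
                        max_firing_rate=rnn['max_firing_rate'])
  return rates, spikes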
def generate_data(rnn, T, E, x0s=None, P_sxn=None, input_magnitude=0.0,
input_times=None):
""" Generates data from an randomly initialized RNN.
Args:
rnn: the rnn
    T: Time in seconds to run (divided by rnn['dt'] to get steps, rounded down).
E: total number of examples
    x0s: an N x E matrix of initial conditions, one column per example
    P_sxn: an S x N projection matrix from the N RNN units to S observed
      channels
    input_magnitude: magnitude of the optional input pulse
    input_times: optional length-E list of pulse times, one per example
Returns:
A list of length E of NxT tensors of the network being run.
"""
N = rnn['N']
def run_rnn(rnn, x0, ntime_steps, input_time=None):
rs = np.zeros([N,ntime_steps])
x_tm1 = x0
r_tm1 = np.tanh(x0)
tau = rnn['tau']
dt = rnn['dt']
alpha = (1.0-dt/tau)
W = dt/tau*rnn['W']*rnn['g']
Bin = dt/tau*rnn['Bin']
Bin2 = dt/tau*rnn['Bin2']
b = dt/tau*rnn['b']
us = np.zeros([1, ntime_steps])
for t in range(ntime_steps):
x_t = alpha*x_tm1 + np.dot(W,r_tm1) + b
if input_time is not None and t == input_time:
us[0,t] = input_magnitude
x_t += Bin * us[0,t] # DCS is this what was used?
r_t = np.tanh(x_t)
x_tm1 = x_t
r_tm1 = r_t
rs[:,t] = r_t
return rs, us
if P_sxn is None:
P_sxn = np.eye(N)
ntime_steps = int(T / rnn['dt'])
data_e = []
inputs_e = []
for e in range(E):
input_time = input_times[e] if input_times is not None else None
r_nxt, u_uxt = run_rnn(rnn, x0s[:,e], ntime_steps, input_time)
r_sxt = np.dot(P_sxn, r_nxt)
inputs_e.append(u_uxt)
data_e.append(r_sxt)
S = P_sxn.shape[0]
data_e = normalize_rates(data_e, E, S)
return data_e, x0s, inputs_e
def normalize_rates(data_e, E, S):
# Normalization, made more complex because of the P matrices.
# Normalize by min and max in each channel. This normalization will
# cause offset differences between identical rnn runs, but different
# t hits.
for e in range(E):
r_sxt = data_e[e]
for i in range(S):
rmin = np.min(r_sxt[i,:])
rmax = np.max(r_sxt[i,:])
assert rmax - rmin != 0, 'Something wrong'
r_sxt[i,:] = (r_sxt[i,:] - rmin)/(rmax-rmin)
data_e[e] = r_sxt
return data_e
def spikify_data(data_e, rng, dt=1.0, max_firing_rate=100):
""" Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
spikified_e: a list of length b of the data represented as spikes,
sampled from the underlying poisson process.
"""
E = len(data_e)
spikes_e = []
for e in range(E):
data = data_e[e]
N,T = data.shape
data_s = np.zeros([N,T]).astype(np.int)
for n in range(N):
f = data[n,:]
s = rng.poisson(f*max_firing_rate*dt, size=T)
data_s[n,:] = s
spikes_e.append(data_s)
return spikes_e
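# Illustrative note (not part of the original module): the Poisson mean per
# bin is f * max_firing_rate * dt, so with the hypothetical values
# max_firing_rate=30.0 and dt=0.01 a normalized rate of f=1.0 corresponds to
# an expected 0.3 spikes per 10 ms bin (30 spikes per second).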
def gaussify_data(data_e, rng, dt=1.0, max_firing_rate=100):
""" Apply gaussian noise to a continuous dataset whose values are between
0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
gauss_e: a list of length b of the data with noise.
"""
E = len(data_e)
mfr = max_firing_rate
gauss_e = []
for e in range(E):
data = data_e[e]
N,T = data.shape
    # Use the provided rng so the noise is reproducible.
    noisy_data = data * mfr + rng.randn(N,T) * (5.0*mfr) * np.sqrt(dt)
gauss_e.append(noisy_data)
return gauss_e
def get_train_n_valid_inds(num_trials, train_fraction, nreplications):
"""Split the numbers between 0 and num_trials-1 into two portions for
training and validation, based on the train fraction.
Args:
num_trials: the number of trials
train_fraction: (e.g. .80)
nreplications: the number of spiking trials per initial condition
Returns:
a 2-tuple of two lists: the training indices and validation indices
"""
train_inds = []
valid_inds = []
for i in range(num_trials):
# This line divides up the trials so that within one initial condition,
# the randomness of spikifying the condition is shared among both
# training and validation data splits.
if (i % nreplications)+1 > train_fraction * nreplications:
valid_inds.append(i)
else:
train_inds.append(i)
return train_inds, valid_inds
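# Illustrative example (hypothetical numbers, not part of the original
# module): with num_trials=10, train_fraction=0.8 and nreplications=5,
# trials 0-3 and 5-8 go to training while trials 4 and 9 go to validation,
# so every initial condition contributes replications to both splits.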
def split_list_by_inds(data, inds1, inds2):
"""Take the data, a list, and split it up based on the indices in inds1 and
inds2.
Args:
data: the list of data to split
inds1, the first list of indices
inds2, the second list of indices
Returns: a 2-tuple of two lists.
"""
if data is None or len(data) == 0:
return [], []
else:
dout1 = [data[i] for i in inds1]
dout2 = [data[i] for i in inds2]
return dout1, dout2
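# Illustrative example (not part of the original module):
#   split_list_by_inds(['a', 'b', 'c', 'd'], [0, 2], [1, 3])
# returns (['a', 'c'], ['b', 'd']).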
def nparray_and_transpose(data_a_b_c):
"""Convert the list of items in data to a numpy array, and transpose it
Args:
    data_a_b_c: a nested, nested list of length a, with sublist length
      b, and sub-sublist length c.
Returns:
a numpy 3-tensor with dimensions a x c x b
"""
data_axbxc = np.array([datum_b_c for datum_b_c in data_a_b_c])
data_axcxb = np.transpose(data_axbxc, axes=[0,2,1])
return data_axcxb
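# Illustrative example (not part of the original module): a list of E arrays,
# each of shape N x T (neurons by time), becomes a single array of shape
# E x T x N, the example x time x dimension layout the LFADS data files use.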
def add_alignment_projections(datasets, npcs, ntime=None, nsamples=None):
"""Create a matrix that aligns the datasets a bit, under
the assumption that each dataset is observing the same underlying dynamical
system.
Args:
datasets: The dictionary of dataset structures.
npcs: The number of pcs for each, basically like lfads factors.
nsamples (optional): Number of samples to take for each dataset.
ntime (optional): Number of time steps to take in each sample.
Returns:
The dataset structures, with the field alignment_matrix_cxf added.
This is # channels x npcs dimension
"""
nchannels_all = 0
channel_idxs = {}
conditions_all = {}
nconditions_all = 0
for name, dataset in datasets.items():
cidxs = np.where(dataset['P_sxn'])[1] # non-zero entries in columns
channel_idxs[name] = [cidxs[0], cidxs[-1]+1]
nchannels_all += cidxs[-1]+1 - cidxs[0]
conditions_all[name] = np.unique(dataset['condition_labels_train'])
all_conditions_list = \
np.unique(np.ndarray.flatten(np.array(conditions_all.values())))
nconditions_all = all_conditions_list.shape[0]
if ntime is None:
ntime = dataset['train_data'].shape[1]
if nsamples is None:
nsamples = dataset['train_data'].shape[0]
# In the data workup in the paper, Chethan did intra condition
# averaging, so let's do that here.
avg_data_all = {}
for name, conditions in conditions_all.items():
dataset = datasets[name]
avg_data_all[name] = {}
for cname in conditions:
td_idxs = np.argwhere(np.array(dataset['condition_labels_train'])==cname)
data = np.squeeze(dataset['train_data'][td_idxs,:,:], axis=1)
avg_data = np.mean(data, axis=0)
avg_data_all[name][cname] = avg_data
# Visualize this in the morning.
all_data_nxtc = np.zeros([nchannels_all, ntime * nconditions_all])
for name, dataset in datasets.items():
cidx_s = channel_idxs[name][0]
cidx_f = channel_idxs[name][1]
for cname in conditions_all[name]:
cidxs = np.argwhere(all_conditions_list == cname)
if cidxs.shape[0] > 0:
cidx = cidxs[0][0]
all_tidxs = np.arange(0, ntime+1) + cidx*ntime
all_data_nxtc[cidx_s:cidx_f, all_tidxs[0]:all_tidxs[-1]] = \
avg_data_all[name][cname].T
# A bit of filtering. We don't care about spectral properties, or
# filtering artifacts, simply correlate time steps a bit.
filt_len = 6
bc_filt = np.ones([filt_len])/float(filt_len)
for c in range(nchannels_all):
all_data_nxtc[c,:] = scipy.signal.filtfilt(bc_filt, [1.0], all_data_nxtc[c,:])
# Compute the PCs.
all_data_mean_nx1 = np.mean(all_data_nxtc, axis=1, keepdims=True)
all_data_zm_nxtc = all_data_nxtc - all_data_mean_nx1
corr_mat_nxn = np.dot(all_data_zm_nxtc, all_data_zm_nxtc.T)
evals_n, evecs_nxn = np.linalg.eigh(corr_mat_nxn)
sidxs = np.flipud(np.argsort(evals_n)) # sort such that 0th is highest
evals_n = evals_n[sidxs]
evecs_nxn = evecs_nxn[:,sidxs]
# Project all the channels data onto the low-D PCA basis, where
# low-d is the npcs parameter.
all_data_pca_pxtc = np.dot(evecs_nxn[:, 0:npcs].T, all_data_zm_nxtc)
# Now for each dataset, we regress the channel data onto the top
# pcs, and this will be our alignment matrix for that dataset.
# |B - A*W|^2
for name, dataset in datasets.items():
cidx_s = channel_idxs[name][0]
cidx_f = channel_idxs[name][1]
all_data_zm_chxtc = all_data_zm_nxtc[cidx_s:cidx_f,:] # ch for channel
W_chxp, _, _, _ = \
np.linalg.lstsq(all_data_zm_chxtc.T, all_data_pca_pxtc.T)
dataset['alignment_matrix_cxf'] = W_chxp
alignment_bias_cx1 = all_data_mean_nx1[cidx_s:cidx_f]
dataset['alignment_bias_c'] = np.squeeze(alignment_bias_cx1, axis=1)
do_debug_plot = False
if do_debug_plot:
pc_vecs = evecs_nxn[:,0:npcs]
ntoplot = 400
plt.figure()
plt.plot(np.log10(evals_n), '-x')
plt.figure()
plt.subplot(311)
plt.imshow(all_data_pca_pxtc)
plt.colorbar()
plt.subplot(312)
plt.imshow(np.dot(W_chxp.T, all_data_zm_chxtc))
plt.colorbar()
plt.subplot(313)
plt.imshow(np.dot(all_data_zm_chxtc.T, W_chxp).T - all_data_pca_pxtc)
plt.colorbar()
import pdb
pdb.set_trace()
return datasets
| 11,355 | 31.538682 | 82 | py |
models | models-master/research/lfads/synth_data/generate_itb_data.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
from six.moves import xrange
import tensorflow as tf
from utils import write_datasets
from synthetic_data_utils import normalize_rates
from synthetic_data_utils import get_train_n_valid_inds, nparray_and_transpose
from synthetic_data_utils import spikify_data, split_list_by_inds
DATA_DIR = "rnn_synth_data_v1.0"
flags = tf.app.flags
flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/",
"Directory for saving data.")
flags.DEFINE_string("datafile_name", "itb_rnn",
"Name of data file for input case.")
flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.")
flags.DEFINE_float("T", 1.0, "Time in seconds to generate.")
flags.DEFINE_integer("C", 800, "Number of conditions")
flags.DEFINE_integer("N", 50, "Number of units for the RNN")
flags.DEFINE_float("train_percentage", 4.0/5.0,
"Percentage of train vs validation trials")
flags.DEFINE_integer("nreplications", 5,
"Number of spikifications of the same underlying rates.")
flags.DEFINE_float("tau", 0.025, "Time constant of RNN")
flags.DEFINE_float("dt", 0.010, "Time bin")
flags.DEFINE_float("max_firing_rate", 30.0,
"Map 1.0 of RNN to a spikes per second")
flags.DEFINE_float("u_std", 0.25,
"Std dev of input to integration to bound model")
flags.DEFINE_string("checkpoint_path", "SAMPLE_CHECKPOINT",
"""Path to directory with checkpoints of model
trained on integration to bound task. Currently this
is a placeholder which tells the code to grab the
checkpoint that is provided with the code
(in /trained_itb/..). If you have your own checkpoint
you would like to restore, you would point it to
that path.""")
FLAGS = flags.FLAGS
class IntegrationToBoundModel:
def __init__(self, N):
scale = 0.8 / float(N**0.5)
self.N = N
self.Wh_nxn = tf.Variable(tf.random_normal([N, N], stddev=scale))
self.b_1xn = tf.Variable(tf.zeros([1, N]))
self.Bu_1xn = tf.Variable(tf.zeros([1, N]))
self.Wro_nxo = tf.Variable(tf.random_normal([N, 1], stddev=scale))
self.bro_o = tf.Variable(tf.zeros([1]))
def call(self, h_tm1_bxn, u_bx1):
act_t_bxn = tf.matmul(h_tm1_bxn, self.Wh_nxn) + self.b_1xn + u_bx1 * self.Bu_1xn
h_t_bxn = tf.nn.tanh(act_t_bxn)
z_t = tf.nn.xw_plus_b(h_t_bxn, self.Wro_nxo, self.bro_o)
return z_t, h_t_bxn
def get_data_batch(batch_size, T, rng, u_std):
u_bxt = rng.randn(batch_size, T) * u_std
running_sum_b = np.zeros([batch_size])
labels_bxt = np.zeros([batch_size, T])
for t in xrange(T):
running_sum_b += u_bxt[:, t]
labels_bxt[:, t] += running_sum_b
labels_bxt = np.clip(labels_bxt, -1, 1)
return u_bxt, labels_bxt
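# Illustrative example (hypothetical numbers, not part of the original
# script): for a single trial with inputs u = [0.6, 0.7, -0.2], the running
# sum is [0.6, 1.3, 1.1], so the clipped labels are [0.6, 1.0, 1.0] -- the
# integrator saturates at the +1 bound.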
rng = np.random.RandomState(seed=FLAGS.synth_data_seed)
u_rng = np.random.RandomState(seed=FLAGS.synth_data_seed+1)
T = FLAGS.T
C = FLAGS.C
N = FLAGS.N # must be same N as in trained model (provided example is N = 50)
nreplications = FLAGS.nreplications
E = nreplications * C # total number of trials
train_percentage = FLAGS.train_percentage
ntimesteps = int(T / FLAGS.dt)
batch_size = 1 # gives one example per ntrial
model = IntegrationToBoundModel(N)
inputs_ph_t = [tf.placeholder(tf.float32,
shape=[None, 1]) for _ in range(ntimesteps)]
state = tf.zeros([batch_size, N])
saver = tf.train.Saver()
P_nxn = rng.randn(N,N) / np.sqrt(N) # random projections
# unroll RNN for T timesteps
outputs_t = []
states_t = []
for inp in inputs_ph_t:
output, state = model.call(state, inp)
outputs_t.append(output)
states_t.append(state)
with tf.Session() as sess:
# restore the latest model ckpt
if FLAGS.checkpoint_path == "SAMPLE_CHECKPOINT":
dir_path = os.path.dirname(os.path.realpath(__file__))
model_checkpoint_path = os.path.join(dir_path, "trained_itb/model-65000")
else:
model_checkpoint_path = FLAGS.checkpoint_path
try:
saver.restore(sess, model_checkpoint_path)
print ('Model restored from', model_checkpoint_path)
except:
assert False, ("No checkpoints to restore from, is the path %s correct?"
%model_checkpoint_path)
# generate data for trials
data_e = []
u_e = []
outs_e = []
for c in range(C):
u_1xt, outs_1xt = get_data_batch(batch_size, ntimesteps, u_rng, FLAGS.u_std)
feed_dict = {}
for t in xrange(ntimesteps):
feed_dict[inputs_ph_t[t]] = np.reshape(u_1xt[:,t], (batch_size,-1))
states_t_bxn, outputs_t_bxn = sess.run([states_t, outputs_t],
feed_dict=feed_dict)
states_nxt = np.transpose(np.squeeze(np.asarray(states_t_bxn)))
outputs_t_bxn = np.squeeze(np.asarray(outputs_t_bxn))
r_sxt = np.dot(P_nxn, states_nxt)
for s in xrange(nreplications):
data_e.append(r_sxt)
u_e.append(u_1xt)
outs_e.append(outputs_t_bxn)
truth_data_e = normalize_rates(data_e, E, N)
spiking_data_e = spikify_data(truth_data_e, rng, dt=FLAGS.dt,
max_firing_rate=FLAGS.max_firing_rate)
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
nreplications)
data_train_truth, data_valid_truth = split_list_by_inds(truth_data_e,
train_inds,
valid_inds)
data_train_spiking, data_valid_spiking = split_list_by_inds(spiking_data_e,
train_inds,
valid_inds)
data_train_truth = nparray_and_transpose(data_train_truth)
data_valid_truth = nparray_and_transpose(data_valid_truth)
data_train_spiking = nparray_and_transpose(data_train_spiking)
data_valid_spiking = nparray_and_transpose(data_valid_spiking)
# save down the inputs used to generate this data
train_inputs_u, valid_inputs_u = split_list_by_inds(u_e,
train_inds,
valid_inds)
train_inputs_u = nparray_and_transpose(train_inputs_u)
valid_inputs_u = nparray_and_transpose(valid_inputs_u)
# save down the network outputs (may be useful later)
train_outputs_u, valid_outputs_u = split_list_by_inds(outs_e,
train_inds,
valid_inds)
train_outputs_u = np.array(train_outputs_u)
valid_outputs_u = np.array(valid_outputs_u)
data = { 'train_truth': data_train_truth,
'valid_truth': data_valid_truth,
'train_data' : data_train_spiking,
'valid_data' : data_valid_spiking,
'train_percentage' : train_percentage,
'nreplications' : nreplications,
'dt' : FLAGS.dt,
'u_std' : FLAGS.u_std,
'max_firing_rate': FLAGS.max_firing_rate,
'train_inputs_u': train_inputs_u,
'valid_inputs_u': valid_inputs_u,
'train_outputs_u': train_outputs_u,
'valid_outputs_u': valid_outputs_u,
'conversion_factor' : FLAGS.max_firing_rate/(1.0/FLAGS.dt) }
# just one dataset here
datasets = {}
dataset_name = 'dataset_N' + str(N)
datasets[dataset_name] = data
# write out the dataset
write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets)
print ('Saved to ', os.path.join(FLAGS.save_dir,
FLAGS.datafile_name + '_' + dataset_name))
| 8,337 | 38.704762 | 84 | py |
models | models-master/research/lfads/synth_data/generate_chaotic_rnn_data.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
import tensorflow as tf # used for flags here
from utils import write_datasets
from synthetic_data_utils import add_alignment_projections, generate_data
from synthetic_data_utils import generate_rnn, get_train_n_valid_inds
from synthetic_data_utils import nparray_and_transpose
from synthetic_data_utils import spikify_data, gaussify_data, split_list_by_inds
import matplotlib
import matplotlib.pyplot as plt
import scipy.signal
matplotlib.rcParams['image.interpolation'] = 'nearest'
DATA_DIR = "rnn_synth_data_v1.0"
flags = tf.app.flags
flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/",
"Directory for saving data.")
flags.DEFINE_string("datafile_name", "thits_data",
"Name of data file for input case.")
flags.DEFINE_string("noise_type", "poisson", "Noise type for data.")
flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.")
flags.DEFINE_float("T", 1.0, "Time in seconds to generate.")
flags.DEFINE_integer("C", 100, "Number of conditions")
flags.DEFINE_integer("N", 50, "Number of units for the RNN")
flags.DEFINE_integer("S", 50, "Number of sampled units from RNN")
flags.DEFINE_integer("npcs", 10, "Number of PCS for multi-session case.")
flags.DEFINE_float("train_percentage", 4.0/5.0,
"Percentage of train vs validation trials")
flags.DEFINE_integer("nreplications", 40,
"Number of noise replications of the same underlying rates.")
flags.DEFINE_float("g", 1.5, "Complexity of dynamics")
flags.DEFINE_float("x0_std", 1.0,
"Volume from which to pull initial conditions (affects diversity of dynamics.")
flags.DEFINE_float("tau", 0.025, "Time constant of RNN")
flags.DEFINE_float("dt", 0.010, "Time bin")
flags.DEFINE_float("input_magnitude", 20.0,
"For the input case, what is the value of the input?")
flags.DEFINE_float("max_firing_rate", 30.0, "Map 1.0 of RNN to a spikes per second")
FLAGS = flags.FLAGS
# Note that with N small, (as it is 25 above), the finite size effects
# will have pretty dramatic effects on the dynamics of the random RNN.
# If you want more complex dynamics, you'll have to run the script a
# lot, or increase N (or g).
# Getting hard vs. easy data can be a little stochastic, so we set the seed.
# Pull out some commonly used parameters.
# These are user parameters (configuration)
rng = np.random.RandomState(seed=FLAGS.synth_data_seed)
T = FLAGS.T
C = FLAGS.C
N = FLAGS.N
S = FLAGS.S
input_magnitude = FLAGS.input_magnitude
nreplications = FLAGS.nreplications
E = nreplications * C # total number of trials
# S is the number of measurements in each datasets, w/ each
# dataset having a different set of observations.
ndatasets = N // S  # integer division; ok if rounded down
train_percentage = FLAGS.train_percentage
ntime_steps = int(T / FLAGS.dt)
# End of user parameters
rnn = generate_rnn(rng, N, FLAGS.g, FLAGS.tau, FLAGS.dt, FLAGS.max_firing_rate)
# Check to make sure the RNN is the one we used in the paper.
if N == 50:
assert abs(rnn['W'][0,0] - 0.06239899) < 1e-8, 'Error in random seed?'
rem_check = nreplications * train_percentage
assert abs(rem_check - int(rem_check)) < 1e-8, \
'Train percentage * nreplications should be integral number.'
# Initial condition generation, and condition label generation. This
# happens outside of the dataset loop, so that all datasets have the
# same conditions, which is similar to a neurophys setup.
condition_number = 0
x0s = []
condition_labels = []
for c in range(C):
x0 = FLAGS.x0_std * rng.randn(N, 1)
x0s.append(np.tile(x0, nreplications)) # replicate x0 nreplications times
# replicate the condition label nreplications times
for ns in range(nreplications):
condition_labels.append(condition_number)
condition_number += 1
x0s = np.concatenate(x0s, axis=1)
# Containers for storing data across datasets.
datasets = {}
for n in range(ndatasets):
print(n+1, " of ", ndatasets)
  # First generate all firing rates. In the next loop, generate all
  # replications; this allows the random state for rate generation to be
  # independent of n_replications.
dataset_name = 'dataset_N' + str(N) + '_S' + str(S)
if S < N:
dataset_name += '_n' + str(n+1)
# Sample neuron subsets. The assumption is the PC axes of the RNN
# are not unit aligned, so sampling units is adequate to sample all
# the high-variance PCs.
P_sxn = np.eye(S,N)
for m in range(n):
P_sxn = np.roll(P_sxn, S, axis=1)
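  # Illustrative note (not part of the original script): P_sxn starts as
  # np.eye(S, N) and each np.roll shifts the identity block right by S
  # columns, so dataset n observes RNN units n*S through (n+1)*S - 1, a
  # different subset of units for each dataset.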
if input_magnitude > 0.0:
# time of "hits" randomly chosen between [1/4 and 3/4] of total time
input_times = rng.choice(int(ntime_steps/2), size=[E]) + int(ntime_steps/4)
else:
input_times = None
rates, x0s, inputs = \
generate_data(rnn, T=T, E=E, x0s=x0s, P_sxn=P_sxn,
input_magnitude=input_magnitude,
input_times=input_times)
if FLAGS.noise_type == "poisson":
noisy_data = spikify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
elif FLAGS.noise_type == "gaussian":
noisy_data = gaussify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
else:
raise ValueError("Only noise types supported are poisson or gaussian")
# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
nreplications)
# Split the data, inputs, labels and times into train vs. validation.
rates_train, rates_valid = \
split_list_by_inds(rates, train_inds, valid_inds)
noisy_data_train, noisy_data_valid = \
split_list_by_inds(noisy_data, train_inds, valid_inds)
input_train, inputs_valid = \
split_list_by_inds(inputs, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = \
split_list_by_inds(condition_labels, train_inds, valid_inds)
input_times_train, input_times_valid = \
split_list_by_inds(input_times, train_inds, valid_inds)
# Turn rates, noisy_data, and input into numpy arrays.
rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
noisy_data_train = nparray_and_transpose(noisy_data_train)
noisy_data_valid = nparray_and_transpose(noisy_data_valid)
input_train = nparray_and_transpose(input_train)
inputs_valid = nparray_and_transpose(inputs_valid)
  # Note that while we put these 'truth' rates and inputs into this
  # structure, the only data used by LFADS is the noisy data, e.g. the
  # spike trains. The rest is kept either for printing or posterity.
data = {'train_truth': rates_train,
'valid_truth': rates_valid,
'input_train_truth' : input_train,
'input_valid_truth' : inputs_valid,
'train_data' : noisy_data_train,
'valid_data' : noisy_data_valid,
'train_percentage' : train_percentage,
'nreplications' : nreplications,
'dt' : rnn['dt'],
'input_magnitude' : input_magnitude,
'input_times_train' : input_times_train,
'input_times_valid' : input_times_valid,
'P_sxn' : P_sxn,
'condition_labels_train' : condition_labels_train,
'condition_labels_valid' : condition_labels_valid,
'conversion_factor': 1.0 / rnn['conversion_factor']}
datasets[dataset_name] = data
if S < N:
# Note that this isn't necessary for this synthetic example, but
# it's useful to see how the input factor matrices were initialized
# for actual neurophysiology data.
datasets = add_alignment_projections(datasets, npcs=FLAGS.npcs)
# Write out the datasets.
write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets)
| 8,412 | 40.855721 | 98 | py |
models | models-master/research/cognitive_planning/label_map_util.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
import logging
import tensorflow as tf
from google.protobuf import text_format
import string_int_label_map_pb2
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
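# The following is an illustrative usage sketch added for clarity; it is not
# part of the original module's API. All values are made up.
def _example_create_category_index():
  """Shows the expected input/output of create_category_index (sketch only)."""
  categories = [{'id': 1, 'name': 'dog'}, {'id': 2, 'name': 'cat'}]
  index = create_category_index(categories)
  # index == {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
  return index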
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item])
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Loads label map proto and returns categories list compatible with eval.
This function loads a label map and returns a list of dicts, each of which
has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
  We only allow a class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info('Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
categories.append({'id': item.id, 'name': name})
return categories
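# Illustrative usage sketch added for clarity; not part of the original API.
# It shows the default path taken when label_map is None.
def _example_convert_label_map_to_categories():
  """Shows the placeholder categories generated when label_map is None."""
  categories = convert_label_map_to_categories(None, max_num_classes=2)
  # categories == [{'id': 1, 'name': 'category_1'},
  #                {'id': 2, 'name': 'category_2'}]
  return categories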
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
return label_map
def get_label_map_dict(label_map_path, use_display_name=False):
"""Reads a label map and returns a dictionary of label names to id.
Args:
label_map_path: path to label_map.
use_display_name: whether to use the label map items' display names as keys.
Returns:
A dictionary mapping label names to id.
"""
label_map = load_labelmap(label_map_path)
label_map_dict = {}
for item in label_map.item:
if use_display_name:
label_map_dict[item.display_name] = item.id
else:
label_map_dict[item.name] = item.id
return label_map_dict
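# Illustrative example added for clarity; not part of the original module.
# For a label map text file (the path below is hypothetical) containing
#   item { id: 1 name: 'dog' }
#   item { id: 2 name: 'cat' }
# get_label_map_dict('/path/to/label_map.pbtxt') would return
# {'dog': 1, 'cat': 2}.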
def create_category_index_from_labelmap(label_map_path):
"""Reads a label map and returns a category index.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
Returns:
A category index, which is a dictionary that maps integer ids to dicts
containing categories, e.g.
{1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
"""
label_map = load_labelmap(label_map_path)
max_num_classes = max(item.id for item in label_map.item)
categories = convert_label_map_to_categories(label_map, max_num_classes)
return create_category_index(categories)
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {'id': 1, 'name': 'object'}}
| 6,044 | 32.214286 | 80 | py |
models | models-master/research/cognitive_planning/string_int_label_map_pb2.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/string_int_label_map.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/string_int_label_map.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n2object_detection/protos/string_int_label_map.proto\x12\x17object_detection.protos\"G\n\x15StringIntLabelMapItem\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\"Q\n\x11StringIntLabelMap\x12<\n\x04item\x18\x01 \x03(\x0b\x32..object_detection.protos.StringIntLabelMapItem')
)
_STRINGINTLABELMAPITEM = _descriptor.Descriptor(
name='StringIntLabelMapItem',
full_name='object_detection.protos.StringIntLabelMapItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='object_detection.protos.StringIntLabelMapItem.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='object_detection.protos.StringIntLabelMapItem.id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_name', full_name='object_detection.protos.StringIntLabelMapItem.display_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=79,
serialized_end=150,
)
_STRINGINTLABELMAP = _descriptor.Descriptor(
name='StringIntLabelMap',
full_name='object_detection.protos.StringIntLabelMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='object_detection.protos.StringIntLabelMap.item', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=152,
serialized_end=233,
)
_STRINGINTLABELMAP.fields_by_name['item'].message_type = _STRINGINTLABELMAPITEM
DESCRIPTOR.message_types_by_name['StringIntLabelMapItem'] = _STRINGINTLABELMAPITEM
DESCRIPTOR.message_types_by_name['StringIntLabelMap'] = _STRINGINTLABELMAP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StringIntLabelMapItem = _reflection.GeneratedProtocolMessageType('StringIntLabelMapItem', (_message.Message,), dict(
DESCRIPTOR = _STRINGINTLABELMAPITEM,
__module__ = 'object_detection.protos.string_int_label_map_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMapItem)
))
_sym_db.RegisterMessage(StringIntLabelMapItem)
StringIntLabelMap = _reflection.GeneratedProtocolMessageType('StringIntLabelMap', (_message.Message,), dict(
DESCRIPTOR = _STRINGINTLABELMAP,
__module__ = 'object_detection.protos.string_int_label_map_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMap)
))
_sym_db.RegisterMessage(StringIntLabelMap)
# @@protoc_insertion_point(module_scope)
| 5,076 | 35.52518 | 357 | py |
models | models-master/research/cognitive_planning/visualization_utils.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image and perform some visualization on it.
Most of the functions do not return a value; instead, they modify the image
itself.
"""
import collections
import functools
# Set headless-friendly backend.
import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
import tensorflow as tf
import standard_fields as fields
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
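# Illustrative usage sketch added for clarity; not part of the original API.
# The image size, box coordinates, and color below are made up.
def _example_draw_bounding_box_on_image_array():
  """Draws one normalized-coordinate box, in place, on a blank image."""
  image = np.zeros((200, 300, 3), dtype=np.uint8)
  draw_bounding_box_on_image_array(
      image, ymin=0.25, xmin=0.25, ymax=0.75, xmax=0.75,
      color='LimeGreen', thickness=2, display_str_list=['example: 90%'])
  return image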
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image, boxes, classes, scores, category_index=category_index, **kwargs)
def _visualize_boxes_and_masks(image, boxes, classes, scores, masks,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
**kwargs)
def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
keypoints=keypoints,
**kwargs)
def _visualize_boxes_and_masks_and_keypoints(
image, boxes, classes, scores, masks, keypoints, category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
**kwargs)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
instance_masks=None,
keypoints=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
images = images[:, :, :, 0:3]
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4
}
if instance_masks is not None and keypoints is None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores, instance_masks]
elif instance_masks is None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores, keypoints]
elif instance_masks is not None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores, instance_masks, keypoints]
else:
visualize_boxes_fn = functools.partial(
_visualize_boxes,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores]
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections,
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
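# Illustrative shape sketch added for clarity; not part of the original
# module. For a batch of 2 images of size 480x640 with up to 100 detections
# each, a call inside the graph might look like
#   annotated = draw_bounding_boxes_on_image_tensors(
#       images,   # uint8   [2, 480, 640, 3]
#       boxes,    # float32 [2, 100, 4]
#       classes,  # int     [2, 100]
#       scores,   # float32 [2, 100]
#       category_index)
# and `annotated` is a uint8 [2, 480, 640, 3] tensor with boxes drawn on top.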
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
A [1, H, 2 * W, C] uint8 tensor. The subimage on the left corresponds to
detections, while the subimage on the right corresponds to groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(eval_dict[detection_fields.detection_masks], axis=0),
tf.uint8)
keypoints = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints], axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks], axis=0),
tf.uint8)
images_with_detections = draw_bounding_boxes_on_image_tensors(
eval_dict[input_data_fields.original_image],
tf.expand_dims(eval_dict[detection_fields.detection_boxes], axis=0),
tf.expand_dims(eval_dict[detection_fields.detection_classes], axis=0),
tf.expand_dims(eval_dict[detection_fields.detection_scores], axis=0),
category_index,
instance_masks=instance_masks,
keypoints=keypoints,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
eval_dict[input_data_fields.original_image],
tf.expand_dims(eval_dict[input_data_fields.groundtruth_boxes], axis=0),
tf.expand_dims(eval_dict[input_data_fields.groundtruth_classes], axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes],
dtype=tf.float32),
axis=0),
category_index,
instance_masks=groundtruth_instance_masks,
keypoints=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
return tf.concat([images_with_detections, images_with_groundtruth], axis=2)
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
      values of either 0 or 1.
color: color to draw the keypoints with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
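# Illustrative usage sketch added for clarity; not part of the original API.
def _example_draw_mask_on_image_array():
  """Overlays a binary mask on the lower-right quadrant of a blank image."""
  image = np.zeros((100, 100, 3), dtype=np.uint8)
  mask = np.zeros((100, 100), dtype=np.uint8)
  mask[50:, 50:] = 1
  draw_mask_on_image_array(image, mask, color='Cyan', alpha=0.5)
  return image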
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None
use_normalized_coordinates: whether boxes is to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100*scores[i]))
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates)
return image
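# Illustrative usage sketch added for clarity; not part of the original API.
# The single-entry label map and detection values below are made up.
def _example_visualize_boxes_and_labels_on_image_array():
  """Typical single-image usage with normalized boxes."""
  image = np.zeros((300, 300, 3), dtype=np.uint8)
  boxes = np.array([[0.1, 0.1, 0.6, 0.6]], dtype=np.float32)
  classes = np.array([1], dtype=np.int32)
  scores = np.array([0.9], dtype=np.float32)
  category_index = {1: {'id': 1, 'name': 'object'}}
  visualize_boxes_and_labels_on_image_array(
      image, boxes, classes, scores, category_index,
      use_normalized_coordinates=True, line_thickness=2)
  return image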
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(
fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
| 29,599 | 39.326975 | 80 | py |
models | models-master/research/cognitive_planning/standard_fields.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
BoxListFields: standard field used by BoxList
TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""
class InputDataFields(object):
"""Names for the input tensors.
Holds the standard data field names to use for identifying input tensors. This
should be used by the decoder to identify keys for the returned tensor_dict
containing input tensors. And it should be used by the model to identify the
tensors it needs.
Attributes:
image: image.
image_additional_channels: additional channels.
original_image: image in the original input size.
key: unique key corresponding to image.
source_id: source of the original image.
filename: original filename of the dataset (without common path).
groundtruth_image_classes: image-level class labels.
groundtruth_boxes: coordinates of the ground truth boxes in the image.
groundtruth_classes: box-level class labels.
groundtruth_label_types: box-level label types (e.g. explicit negative).
groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
is the groundtruth a single object or a crowd.
groundtruth_area: area of a groundtruth segment.
groundtruth_difficult: is a `difficult` object
groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the
same class, forming a connected group, where instances are heavily
occluding each other.
proposal_boxes: coordinates of object proposal boxes.
proposal_objectness: objectness score of each proposal.
groundtruth_instance_masks: ground truth instance masks.
groundtruth_instance_boundaries: ground truth instance boundaries.
groundtruth_instance_classes: instance mask-level class labels.
groundtruth_keypoints: ground truth keypoints.
groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
groundtruth_label_scores: groundtruth label scores.
groundtruth_weights: groundtruth weight factor for bounding boxes.
num_groundtruth_boxes: number of groundtruth boxes.
true_image_shapes: true shapes of images in the resized images, as resized
images can be padded with zeros.
multiclass_scores: the label score per class for each box.
"""
image = 'image'
image_additional_channels = 'image_additional_channels'
original_image = 'original_image'
key = 'key'
source_id = 'source_id'
filename = 'filename'
groundtruth_image_classes = 'groundtruth_image_classes'
groundtruth_boxes = 'groundtruth_boxes'
groundtruth_classes = 'groundtruth_classes'
groundtruth_label_types = 'groundtruth_label_types'
groundtruth_is_crowd = 'groundtruth_is_crowd'
groundtruth_area = 'groundtruth_area'
groundtruth_difficult = 'groundtruth_difficult'
groundtruth_group_of = 'groundtruth_group_of'
proposal_boxes = 'proposal_boxes'
proposal_objectness = 'proposal_objectness'
groundtruth_instance_masks = 'groundtruth_instance_masks'
groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
groundtruth_instance_classes = 'groundtruth_instance_classes'
groundtruth_keypoints = 'groundtruth_keypoints'
groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
groundtruth_label_scores = 'groundtruth_label_scores'
groundtruth_weights = 'groundtruth_weights'
num_groundtruth_boxes = 'num_groundtruth_boxes'
true_image_shape = 'true_image_shape'
multiclass_scores = 'multiclass_scores'
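# Illustrative example added for clarity; not part of the original module.
# These constants are used to key tensor dictionaries so that producers and
# consumers agree on field names, e.g.
#   tensor_dict = {
#       InputDataFields.image: image_tensor,
#       InputDataFields.groundtruth_boxes: boxes_tensor,
#       InputDataFields.groundtruth_classes: classes_tensor,
#   }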
class DetectionResultFields(object):
"""Naming conventions for storing the output of the detector.
Attributes:
source_id: source of the original image.
key: unique key corresponding to image.
detection_boxes: coordinates of the detection boxes in the image.
detection_scores: detection scores for the detection boxes in the image.
detection_classes: detection-level class labels.
detection_masks: contains a segmentation mask for each detection box.
detection_boundaries: contains an object boundary for each detection box.
detection_keypoints: contains detection keypoints for each detection box.
num_detections: number of detections in the batch.
"""
source_id = 'source_id'
key = 'key'
detection_boxes = 'detection_boxes'
detection_scores = 'detection_scores'
detection_classes = 'detection_classes'
detection_masks = 'detection_masks'
detection_boundaries = 'detection_boundaries'
detection_keypoints = 'detection_keypoints'
num_detections = 'num_detections'
class BoxListFields(object):
"""Naming conventions for BoxLists.
Attributes:
boxes: bounding box coordinates.
classes: classes per bounding box.
scores: scores per bounding box.
weights: sample weights per bounding box.
objectness: objectness score per bounding box.
masks: masks per bounding box.
boundaries: boundaries per bounding box.
keypoints: keypoints per bounding box.
keypoint_heatmaps: keypoint heatmaps per bounding box.
is_crowd: is_crowd annotation per bounding box.
"""
boxes = 'boxes'
classes = 'classes'
scores = 'scores'
weights = 'weights'
objectness = 'objectness'
masks = 'masks'
boundaries = 'boundaries'
keypoints = 'keypoints'
keypoint_heatmaps = 'keypoint_heatmaps'
is_crowd = 'is_crowd'
class TfExampleFields(object):
"""TF-example proto feature names for object detection.
Holds the standard feature names to load from an Example proto for object
detection.
Attributes:
image_encoded: JPEG encoded string
image_format: image format, e.g. "JPEG"
filename: filename
channels: number of channels of image
colorspace: colorspace, e.g. "RGB"
height: height of image in pixels, e.g. 462
width: width of image in pixels, e.g. 581
source_id: original source of the image
image_class_text: image-level label in text format
image_class_label: image-level label in numerical format
object_class_text: labels in text format, e.g. ["person", "cat"]
object_class_label: labels in numbers, e.g. [16, 8]
object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
object_view: viewpoint of object, e.g. ["frontal", "left"]
object_truncated: is object truncated, e.g. [true, false]
object_occluded: is object occluded, e.g. [true, false]
object_difficult: is object difficult, e.g. [true, false]
object_group_of: is object a single object or a group of objects
object_depiction: is object a depiction
object_is_crowd: [DEPRECATED, use object_group_of instead]
is the object a single object or a crowd
object_segment_area: the area of the segment.
object_weight: a weight factor for the object's bounding box.
instance_masks: instance segmentation masks.
instance_boundaries: instance boundaries.
instance_classes: Classes for each instance segmentation mask.
detection_class_label: class label in numbers.
detection_bbox_ymin: ymin coordinates of a detection box.
detection_bbox_xmin: xmin coordinates of a detection box.
detection_bbox_ymax: ymax coordinates of a detection box.
detection_bbox_xmax: xmax coordinates of a detection box.
detection_score: detection score for the class label and box.
"""
image_encoded = 'image/encoded'
image_format = 'image/format' # format is reserved keyword
filename = 'image/filename'
channels = 'image/channels'
colorspace = 'image/colorspace'
height = 'image/height'
width = 'image/width'
source_id = 'image/source_id'
image_class_text = 'image/class/text'
image_class_label = 'image/class/label'
object_class_text = 'image/object/class/text'
object_class_label = 'image/object/class/label'
object_bbox_ymin = 'image/object/bbox/ymin'
object_bbox_xmin = 'image/object/bbox/xmin'
object_bbox_ymax = 'image/object/bbox/ymax'
object_bbox_xmax = 'image/object/bbox/xmax'
object_view = 'image/object/view'
object_truncated = 'image/object/truncated'
object_occluded = 'image/object/occluded'
object_difficult = 'image/object/difficult'
object_group_of = 'image/object/group_of'
object_depiction = 'image/object/depiction'
object_is_crowd = 'image/object/is_crowd'
object_segment_area = 'image/object/segment/area'
object_weight = 'image/object/weight'
instance_masks = 'image/segmentation/object'
instance_boundaries = 'image/boundaries/object'
instance_classes = 'image/segmentation/object/class'
detection_class_label = 'image/detection/label'
detection_bbox_ymin = 'image/detection/bbox/ymin'
detection_bbox_xmin = 'image/detection/bbox/xmin'
detection_bbox_ymax = 'image/detection/bbox/ymax'
detection_bbox_xmax = 'image/detection/bbox/xmax'
detection_score = 'image/detection/score'
| 9,818 | 42.64 | 80 | py |
models | models-master/research/cognitive_planning/viz_active_vision_dataset_main.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initializes at random location and visualizes the optimal path.
Different modes of execution:
1) benchmark: It generates benchmark_iter sample trajectories to random goals
and plots the histogram of path lengths. It can also be used to see how fast
it runs.
2) vis: It visualizes the generated paths by image, semantic segmentation, and
so on.
3) human: Allows the user to navigate through the environment using keyboard
input.
python viz_active_vision_dataset_main -- \
--mode=benchmark --benchmark_iter=1000 --gin_config=envs/configs/active_vision_config.gin
python viz_active_vision_dataset_main -- \
--mode=vis \
--gin_config=envs/configs/active_vision_config.gin
python viz_active_vision_dataset_main -- \
--mode=human \
--gin_config=envs/configs/active_vision_config.gin
python viz_active_vision_dataset_main.py --mode=eval --eval_folder=/usr/local/google/home/$USER/checkin_log_det/evals/ --output_folder=/usr/local/google/home/$USER/test_imgs/ --gin_config=envs/configs/active_vision_config.gin
"""
import matplotlib
# pylint: disable=g-import-not-at-top
# Need Tk for interactive plots.
matplotlib.use('TkAgg')
import tensorflow as tf
from matplotlib import pyplot as plt
import numpy as np
import os
from pyglib import app
from pyglib import flags
import gin
import cv2
from envs import active_vision_dataset_env
from envs import task_env
VIS_MODE = 'vis'
HUMAN_MODE = 'human'
BENCHMARK_MODE = 'benchmark'
GRAPH_MODE = 'graph'
EVAL_MODE = 'eval'
flags.DEFINE_enum('mode', VIS_MODE,
[VIS_MODE, HUMAN_MODE, BENCHMARK_MODE, GRAPH_MODE, EVAL_MODE],
'mode of the execution')
flags.DEFINE_integer('benchmark_iter', 1000,
'number of iterations for benchmarking')
flags.DEFINE_string('eval_folder', '', 'the path to the eval folder')
flags.DEFINE_string('output_folder', '',
'the path to which the images and gifs are written')
flags.DEFINE_multi_string('gin_config', [],
'List of paths to a gin config files for the env.')
flags.DEFINE_multi_string('gin_params', [],
'Newline separated list of Gin parameter bindings.')
mt = task_env.ModalityTypes
FLAGS = flags.FLAGS
def benchmark(env, targets):
"""Benchmarks the speed of sequence generation by env.
Args:
env: environment.
targets: list of target classes.
"""
episode_lengths = {}
all_init_configs = {}
all_actions = dict([(a, 0.) for a in env.actions])
for i in range(FLAGS.benchmark_iter):
path, actions, _, _ = env.random_step_sequence()
selected_actions = np.argmax(actions, axis=-1)
new_actions = dict([(a, 0.) for a in env.actions])
for a in selected_actions:
new_actions[env.actions[a]] += 1. / selected_actions.shape[0]
for a in new_actions:
all_actions[a] += new_actions[a] / FLAGS.benchmark_iter
start_image_id, world, goal = env.get_init_config(path)
print world
if world not in all_init_configs:
all_init_configs[world] = set()
all_init_configs[world].add((start_image_id, goal, len(actions)))
if env.goal_index not in episode_lengths:
episode_lengths[env.goal_index] = []
episode_lengths[env.goal_index].append(len(actions))
for i, cls in enumerate(episode_lengths):
plt.subplot(231 + i)
plt.hist(episode_lengths[cls])
plt.title(targets[cls])
plt.show()
def human(env, targets):
"""Lets user play around the env manually."""
string_key_map = {
'a': 'left',
'd': 'right',
'w': 'forward',
's': 'backward',
'j': 'rotate_ccw',
'l': 'rotate_cw',
'n': 'stop'
}
integer_key_map = {
'a': env.actions.index('left'),
'd': env.actions.index('right'),
'w': env.actions.index('forward'),
's': env.actions.index('backward'),
'j': env.actions.index('rotate_ccw'),
'l': env.actions.index('rotate_cw'),
'n': env.actions.index('stop')
}
for k in integer_key_map:
integer_key_map[k] = np.int32(integer_key_map[k])
plt.ion()
for _ in range(20):
obs = env.reset()
steps = -1
action = None
while True:
print 'distance = ', obs[task_env.ModalityTypes.DISTANCE]
steps += 1
depth_value = obs[task_env.ModalityTypes.DEPTH][:, :, 0]
depth_mask = obs[task_env.ModalityTypes.DEPTH][:, :, 1]
seg_mask = np.squeeze(obs[task_env.ModalityTypes.SEMANTIC_SEGMENTATION])
det_mask = np.argmax(
obs[task_env.ModalityTypes.OBJECT_DETECTION], axis=-1)
img = obs[task_env.ModalityTypes.IMAGE]
plt.subplot(231)
plt.title('steps = {}'.format(steps))
plt.imshow(img.astype(np.uint8))
plt.subplot(232)
plt.imshow(depth_value)
plt.title('depth value')
plt.subplot(233)
plt.imshow(depth_mask)
plt.title('depth mask')
plt.subplot(234)
plt.imshow(seg_mask)
plt.title('seg')
plt.subplot(235)
plt.imshow(det_mask)
plt.title('det')
plt.subplot(236)
plt.title('goal={}'.format(targets[env.goal_index]))
plt.draw()
while True:
s = raw_input('key = ')
if np.random.rand() > 0.5:
key_map = string_key_map
else:
key_map = integer_key_map
if s in key_map:
action = key_map[s]
break
else:
print 'invalid action'
print 'action = {}'.format(action)
if action == 'stop':
print 'dist to goal: {}'.format(len(env.path_to_goal()) - 2)
break
obs, reward, done, info = env.step(action)
print 'reward = {}, done = {}, success = {}'.format(
reward, done, info['success'])
def visualize_random_step_sequence(env):
"""Visualizes random sequence of steps."""
plt.ion()
for _ in range(20):
path, actions, _, step_outputs = env.random_step_sequence(max_len=30)
print 'path = {}'.format(path)
for action, step_output in zip(actions, step_outputs):
obs, _, done, _ = step_output
depth_value = obs[task_env.ModalityTypes.DEPTH][:, :, 0]
depth_mask = obs[task_env.ModalityTypes.DEPTH][:, :, 1]
seg_mask = np.squeeze(obs[task_env.ModalityTypes.SEMANTIC_SEGMENTATION])
det_mask = np.argmax(
obs[task_env.ModalityTypes.OBJECT_DETECTION], axis=-1)
img = obs[task_env.ModalityTypes.IMAGE]
plt.subplot(231)
plt.imshow(img.astype(np.uint8))
plt.subplot(232)
plt.imshow(depth_value)
plt.title('depth value')
plt.subplot(233)
plt.imshow(depth_mask)
plt.title('depth mask')
plt.subplot(234)
plt.imshow(seg_mask)
plt.title('seg')
plt.subplot(235)
plt.imshow(det_mask)
plt.title('det')
plt.subplot(236)
print 'action = {}'.format(action)
print 'done = {}'.format(done)
plt.draw()
if raw_input('press \'n\' to go to the next random sequence. Otherwise, '
'press any key to continue...') == 'n':
break
def visualize(env, input_folder, output_root_folder):
"""visualizes images for sequence of steps from the evals folder."""
def which_env(file_name):
img_name = file_name.split('_')[0][2:5]
env_dict = {'161': 'Home_016_1', '131': 'Home_013_1', '111': 'Home_011_1'}
if img_name in env_dict:
return env_dict[img_name]
else:
raise ValueError('could not resolve env: {} {}'.format(
img_name, file_name))
def which_goal(file_name):
return file_name[file_name.find('_')+1:]
output_images_folder = os.path.join(output_root_folder, 'images')
output_gifs_folder = os.path.join(output_root_folder, 'gifs')
if not tf.gfile.IsDirectory(output_images_folder):
tf.gfile.MakeDirs(output_images_folder)
if not tf.gfile.IsDirectory(output_gifs_folder):
tf.gfile.MakeDirs(output_gifs_folder)
npy_files = [
os.path.join(input_folder, name)
for name in tf.gfile.ListDirectory(input_folder)
if name.find('npy') >= 0
]
for i, npy_file in enumerate(npy_files):
print 'saving images {}/{}'.format(i, len(npy_files))
pure_name = npy_file[npy_file.rfind('/') + 1:-4]
output_folder = os.path.join(output_images_folder, pure_name)
if not tf.gfile.IsDirectory(output_folder):
tf.gfile.MakeDirs(output_folder)
print '*******'
print pure_name[0:pure_name.find('_')]
env.reset_for_eval(which_env(pure_name),
which_goal(pure_name),
pure_name[0:pure_name.find('_')],
)
with tf.gfile.Open(npy_file) as h:
states = np.load(h).item()['states']
images = [
env.observation(state)[mt.IMAGE] for state in states
]
for j, img in enumerate(images):
cv2.imwrite(os.path.join(output_folder, '{0:03d}'.format(j) + '.jpg'),
img[:, :, ::-1])
print 'converting to gif'
os.system(
'convert -set delay 20 -colors 256 -dispose 1 {}/*.jpg {}.gif'.format(
output_folder,
os.path.join(output_gifs_folder, pure_name + '.gif')
)
)
def evaluate_folder(env, folder_path):
"""Evaluates the performance from the evals folder."""
targets = ['fridge', 'dining_table', 'microwave', 'tv', 'couch']
def compute_acc(npy_file):
with tf.gfile.Open(npy_file) as h:
data = np.load(h).item()
if npy_file.find('dining_table') >= 0:
category = 'dining_table'
else:
category = npy_file[npy_file.rfind('_') + 1:-4]
return category, data['distance'][-1] - 2
def evaluate_iteration(folder):
"""Evaluates the data from the folder of certain eval iteration."""
print folder
npy_files = [
os.path.join(folder, name)
for name in tf.gfile.ListDirectory(folder)
if name.find('npy') >= 0
]
eval_stats = {c: [] for c in targets}
for npy_file in npy_files:
try:
category, dist = compute_acc(npy_file)
except: # pylint: disable=bare-except
continue
eval_stats[category].append(float(dist <= 5))
for c in eval_stats:
if not eval_stats[c]:
print 'incomplete eval {}: empty class {}'.format(folder_path, c)
return None
eval_stats[c] = np.mean(eval_stats[c])
eval_stats['mean'] = np.mean(eval_stats.values())
return eval_stats
checkpoint_folders = [
folder_path + x
for x in tf.gfile.ListDirectory(folder_path)
if tf.gfile.IsDirectory(folder_path + x)
]
print '{} folders found'.format(len(checkpoint_folders))
print '------------------------'
all_iters = []
all_accs = []
for i, folder in enumerate(checkpoint_folders):
print 'processing {}/{}'.format(i, len(checkpoint_folders))
eval_stats = evaluate_iteration(folder)
if eval_stats is None:
continue
else:
iter_no = int(folder[folder.rfind('/') + 1:])
print 'result ', iter_no, eval_stats['mean']
all_accs.append(eval_stats['mean'])
all_iters.append(iter_no)
all_accs = np.asarray(all_accs)
all_iters = np.asarray(all_iters)
idx = np.argmax(all_accs)
print 'best result at iteration {} was {}'.format(all_iters[idx],
all_accs[idx])
order = np.argsort(all_iters)
all_iters = all_iters[order]
all_accs = all_accs[order]
#plt.plot(all_iters, all_accs)
#plt.show()
#print 'done plotting'
best_iteration_folder = os.path.join(folder_path, str(all_iters[idx]))
print 'generating gifs and images for {}'.format(best_iteration_folder)
visualize(env, best_iteration_folder, FLAGS.output_folder)
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_params)
print('********')
print(FLAGS.mode)
print(FLAGS.gin_config)
print(FLAGS.gin_params)
env = active_vision_dataset_env.ActiveVisionDatasetEnv(modality_types=[
task_env.ModalityTypes.IMAGE,
task_env.ModalityTypes.SEMANTIC_SEGMENTATION,
task_env.ModalityTypes.OBJECT_DETECTION, task_env.ModalityTypes.DEPTH,
task_env.ModalityTypes.DISTANCE
])
if FLAGS.mode == BENCHMARK_MODE:
benchmark(env, env.possible_targets)
elif FLAGS.mode == GRAPH_MODE:
for loc in env.worlds:
env.check_scene_graph(loc, 'fridge')
elif FLAGS.mode == HUMAN_MODE:
human(env, env.possible_targets)
elif FLAGS.mode == VIS_MODE:
visualize_random_step_sequence(env)
elif FLAGS.mode == EVAL_MODE:
evaluate_folder(env, FLAGS.eval_folder)
if __name__ == '__main__':
app.run(main)
| 13,173 | 33.668421 | 225 | py |
models | models-master/research/cognitive_planning/policies.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for the policy of the agents use for navigation."""
import abc
import tensorflow as tf
from absl import logging
import embedders
from envs import task_env
slim = tf.contrib.slim
def _print_debug_ios(history, goal, output):
"""Prints sizes of history, goal and outputs."""
if history is not None:
shape = history.get_shape().as_list()
# logging.info('history embedding shape ')
# logging.info(shape)
if len(shape) != 3:
raise ValueError('history Tensor must have rank=3')
if goal is not None:
logging.info('goal embedding shape ')
logging.info(goal.get_shape().as_list())
if output is not None:
logging.info('targets shape ')
logging.info(output.get_shape().as_list())
class Policy(object):
"""Represents the policy of the agent for navigation tasks.
Instantiates a policy that takes embedders for each modality and builds a
model to infer the actions.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, embedders_dict, action_size):
"""Instantiates the policy.
Args:
embedders_dict: Dictionary of embedders for different modalities. Keys
should be identical to keys of observation modality.
action_size: Number of possible actions.
"""
self._embedders = embedders_dict
self._action_size = action_size
@abc.abstractmethod
def build(self, observations, prev_state):
"""Builds the model that represents the policy of the agent.
Args:
observations: Dictionary of observations from different modalities. Keys
are the name of the modalities.
prev_state: The tensor of the previous state of the model. Should be set
to None if the policy is stateless
Returns:
Tuple of (action, state) where action is the action logits and state is
the state of the model after taking new observation.
"""
raise NotImplementedError(
'Needs implementation as part of Policy interface')
class LSTMPolicy(Policy):
"""Represents the implementation of the LSTM based policy.
  The architecture of the model is as follows. It embeds all the observations
  using the embedders and concatenates the embeddings of all the modalities.
  The concatenated features are fed through two fully connected layers. The
  LSTM takes the output of the fully connected layers, together with the
  previous action and the success of the previous action. The value for each
  action is predicted from the LSTM output.
Although the class name has the word LSTM in it, it also supports a mode that
builds the network without LSTM just for comparison purposes.
"""
def __init__(self,
modality_names,
embedders_dict,
action_size,
params,
max_episode_length,
feedforward_mode=False):
"""Instantiates the LSTM policy.
Args:
modality_names: List of modality names. Makes sure the ordering in
concatenation remains the same as modality_names list. Each modality
needs to be in the embedders_dict.
embedders_dict: Dictionary of embedders for different modalities. Keys
should be identical to keys of observation modality. Values should be
instance of Embedder class. All the observations except PREV_ACTION
requires embedder.
action_size: Number of possible actions.
      params: an instance of tf.HParams containing the hyperparameters for the
        policy network.
max_episode_length: integer, specifying the maximum length of each
episode.
feedforward_mode: If True, it does not add LSTM to the model. It should
only be set True for comparison between LSTM and feedforward models.
"""
super(LSTMPolicy, self).__init__(embedders_dict, action_size)
self._modality_names = modality_names
self._lstm_state_size = params.lstm_state_size
self._fc_channels = params.fc_channels
self._weight_decay = params.weight_decay
self._target_embedding_size = params.target_embedding_size
self._max_episode_length = max_episode_length
self._feedforward_mode = feedforward_mode
def _build_lstm(self, encoded_inputs, prev_state, episode_length,
prev_action=None):
"""Builds an LSTM on top of the encoded inputs.
If prev_action is not None then it concatenates them to the input of LSTM.
Args:
encoded_inputs: The embedding of the observations and goal.
prev_state: previous state of LSTM.
episode_length: The tensor that contains the length of the sequence for
each element of the batch.
      prev_action: tensor of the previously chosen action plus an additional
        bit indicating whether the previous action was successful or not.
Returns:
a tuple of (lstm output, lstm state).
"""
# Adding prev action and success in addition to the embeddings of the
# modalities.
if prev_action is not None:
encoded_inputs = tf.concat([encoded_inputs, prev_action], axis=-1)
with tf.variable_scope('LSTM'):
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self._lstm_state_size)
if prev_state is None:
# If prev state is set to None, a state of all zeros will be
# passed as a previous value for the cell. Should be used for the
# first step of each episode.
tf_prev_state = lstm_cell.zero_state(
encoded_inputs.get_shape().as_list()[0], dtype=tf.float32)
else:
tf_prev_state = tf.nn.rnn_cell.LSTMStateTuple(prev_state[0],
prev_state[1])
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
cell=lstm_cell,
inputs=encoded_inputs,
sequence_length=episode_length,
initial_state=tf_prev_state,
dtype=tf.float32,
)
lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])
return lstm_outputs, lstm_state
def build(
self,
observations,
prev_state,
):
"""Builds the model that represents the policy of the agent.
Args:
observations: Dictionary of observations from different modalities. Keys
are the name of the modalities. Observation should have the following
key-values.
observations['goal']: One-hot tensor that indicates the semantic
category of the goal. The shape should be
(batch_size x max_sequence_length x goals).
observations[task_env.ModalityTypes.PREV_ACTION]: has action_size + 1
elements where the first action_size numbers are the one hot vector
of the previous action and the last element indicates whether the
previous action was successful or not. If
task_env.ModalityTypes.PREV_ACTION is not in the observation, it
will not be used in the policy.
prev_state: Previous state of the model. It should be a tuple of (c,h)
where c and h are the previous cell value and hidden state of the lstm.
Each element of tuple has shape of (batch_size x lstm_cell_size).
If it is set to None, then it initializes the state of the lstm with all
zeros.
Returns:
Tuple of (action, state) where action is the action logits and state is
the state of the model after taking new observation.
Raises:
ValueError: If any of the modality names is not in observations or
embedders_dict.
ValueError: If 'goal' is not in the observations.
"""
for modality_name in self._modality_names:
if modality_name not in observations:
raise ValueError('modality name does not exist in observations: {} not '
'in {}'.format(modality_name, observations.keys()))
if modality_name not in self._embedders:
if modality_name == task_env.ModalityTypes.PREV_ACTION:
continue
raise ValueError('modality name does not have corresponding embedder'
' {} not in {}'.format(modality_name,
self._embedders.keys()))
if task_env.ModalityTypes.GOAL not in observations:
raise ValueError('goal should be provided in the observations')
goal = observations[task_env.ModalityTypes.GOAL]
prev_action = None
if task_env.ModalityTypes.PREV_ACTION in observations:
prev_action = observations[task_env.ModalityTypes.PREV_ACTION]
with tf.variable_scope('policy'):
with slim.arg_scope(
[slim.fully_connected],
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(self._weight_decay)):
all_inputs = []
# Concatenating the embedding of each modality by applying the embedders
# to corresponding observations.
def embed(name):
with tf.variable_scope('embed_{}'.format(name)):
# logging.info('Policy uses embedding %s', name)
return self._embedders[name].build(observations[name])
        all_inputs = [
            embed(x) for x in self._modality_names
            if x != task_env.ModalityTypes.PREV_ACTION
        ]
# Computing goal embedding.
shape = goal.get_shape().as_list()
with tf.variable_scope('embed_goal'):
encoded_goal = tf.reshape(goal, [shape[0] * shape[1], -1])
encoded_goal = slim.fully_connected(encoded_goal,
self._target_embedding_size)
encoded_goal = tf.reshape(encoded_goal, [shape[0], shape[1], -1])
all_inputs.append(encoded_goal)
# Concatenating all the modalities and goal.
all_inputs = tf.concat(all_inputs, axis=-1, name='concat_embeddings')
shape = all_inputs.get_shape().as_list()
all_inputs = tf.reshape(all_inputs, [shape[0] * shape[1], shape[2]])
# Applying fully connected layers.
encoded_inputs = slim.fully_connected(all_inputs, self._fc_channels)
encoded_inputs = slim.fully_connected(encoded_inputs, self._fc_channels)
if not self._feedforward_mode:
encoded_inputs = tf.reshape(encoded_inputs,
[shape[0], shape[1], self._fc_channels])
lstm_outputs, lstm_state = self._build_lstm(
encoded_inputs=encoded_inputs,
prev_state=prev_state,
episode_length=tf.ones((shape[0],), dtype=tf.float32) *
self._max_episode_length,
prev_action=prev_action,
)
else:
        # If feedforward_mode=True, bypass the whole LSTM computation.
lstm_outputs = encoded_inputs
lstm_outputs = slim.fully_connected(lstm_outputs, self._fc_channels)
action_values = slim.fully_connected(
lstm_outputs, self._action_size, activation_fn=None)
action_values = tf.reshape(action_values, [shape[0], shape[1], -1])
if not self._feedforward_mode:
return action_values, lstm_state
else:
return action_values, None
class TaskPolicy(Policy):
"""A covenience abstract class providing functionality to deal with Tasks."""
def __init__(self,
task_config,
model_hparams=None,
embedder_hparams=None,
train_hparams=None):
"""Constructs a policy which knows how to work with tasks (see tasks.py).
    It allows reading task history, goal and outputs consistently with the
    task config.
Args:
      task_config: an object of type tasks.UnrolledTaskIOConfig (see tasks.py)
      model_hparams: a tf.HParams object containing parameters pertaining to
        the model (these are implementation specific)
      embedder_hparams: a tf.HParams object containing parameters pertaining to
        the history and goal embedders (these are implementation specific)
      train_hparams: a tf.HParams object containing parameters pertaining to
        training (these are implementation specific)
"""
super(TaskPolicy, self).__init__(None, None)
self._model_hparams = model_hparams
self._embedder_hparams = embedder_hparams
self._train_hparams = train_hparams
self._task_config = task_config
self._extra_train_ops = []
@property
def extra_train_ops(self):
"""Training ops in addition to the loss, e.g. batch norm updates.
Returns:
A list of tf ops.
"""
return self._extra_train_ops
def _embed_task_ios(self, streams):
"""Embeds a list of heterogenous streams.
These streams correspond to task history, goal and output. The number of
streams is equal to the total number of history, plus one for the goal if
present, plus one for the output. If the number of history is k, then the
first k streams are the history.
The used embedders depend on the input (or goal) types. If an input is an
image, then a ResNet embedder is used, otherwise
MLPEmbedder (see embedders.py).
Args:
streams: a list of Tensors.
Returns:
      Three float Tensors history, goal, output. If there is no history, or no
      goal, then the corresponding returned values are None. The shape of the
embedded history is batch_size x sequence_length x sum of all embedding
dimensions for all history. The shape of the goal is embedding dimension.
"""
# EMBED history.
index = 0
inps = []
scopes = []
for c in self._task_config.inputs:
if c == task_env.ModalityTypes.IMAGE:
scope_name = 'image_embedder/image'
reuse = scope_name in scopes
scopes.append(scope_name)
with tf.variable_scope(scope_name, reuse=reuse):
resnet_embedder = embedders.ResNet(self._embedder_hparams.image)
image_embeddings = resnet_embedder.build(streams[index])
# Uncover batch norm ops.
if self._embedder_hparams.image.is_train:
self._extra_train_ops += resnet_embedder.extra_train_ops
inps.append(image_embeddings)
index += 1
else:
scope_name = 'input_embedder/vector'
reuse = scope_name in scopes
scopes.append(scope_name)
with tf.variable_scope(scope_name, reuse=reuse):
input_vector_embedder = embedders.MLPEmbedder(
layers=self._embedder_hparams.vector)
vector_embedder = input_vector_embedder.build(streams[index])
inps.append(vector_embedder)
index += 1
history = tf.concat(inps, axis=2) if inps else None
# EMBED goal.
goal = None
if self._task_config.query is not None:
scope_name = 'image_embedder/query'
reuse = scope_name in scopes
scopes.append(scope_name)
with tf.variable_scope(scope_name, reuse=reuse):
resnet_goal_embedder = embedders.ResNet(self._embedder_hparams.goal)
goal = resnet_goal_embedder.build(streams[index])
if self._embedder_hparams.goal.is_train:
self._extra_train_ops += resnet_goal_embedder.extra_train_ops
index += 1
# Embed true targets if needed (tbd).
true_target = streams[index]
return history, goal, true_target
@abc.abstractmethod
def build(self, feeds, prev_state):
pass
class ReactivePolicy(TaskPolicy):
"""A policy which ignores history.
It processes only the current observation (last element in history) and the
goal to output a prediction.
"""
def __init__(self, *args, **kwargs):
super(ReactivePolicy, self).__init__(*args, **kwargs)
# The current implementation ignores the prev_state as it is purely reactive.
# It returns None for the current state.
def build(self, feeds, prev_state):
history, goal, _ = self._embed_task_ios(feeds)
_print_debug_ios(history, goal, None)
with tf.variable_scope('output_decoder'):
# Concatenate the embeddings of the current observation and the goal.
reactive_input = tf.concat([tf.squeeze(history[:, -1, :]), goal], axis=1)
oconfig = self._task_config.output.shape
assert len(oconfig) == 1
decoder = embedders.MLPEmbedder(
layers=self._embedder_hparams.predictions.layer_sizes + oconfig)
predictions = decoder.build(reactive_input)
return predictions, None
class RNNPolicy(TaskPolicy):
"""A policy which takes into account the full history via RNN.
The implementation might and will change.
The history, together with the goal, is processed using a stacked LSTM. The
output of the last LSTM step is used to produce a prediction. Currently, only
a single step output is supported.
"""
def __init__(self, lstm_hparams, *args, **kwargs):
super(RNNPolicy, self).__init__(*args, **kwargs)
self._lstm_hparams = lstm_hparams
# The prev_state is ignored as for now the full history is specified as first
# element of the feeds. It might turn out to be beneficial to keep the state
# as part of the policy object.
def build(self, feeds, state):
history, goal, _ = self._embed_task_ios(feeds)
_print_debug_ios(history, goal, None)
params = self._lstm_hparams
cell = lambda: tf.contrib.rnn.BasicLSTMCell(params.cell_size)
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[cell() for _ in range(params.num_layers)])
# history is of shape batch_size x seq_len x embedding_dimension
batch_size, seq_len, _ = tuple(history.get_shape().as_list())
if state is None:
state = stacked_lstm.zero_state(batch_size, tf.float32)
for t in range(seq_len):
if params.concat_goal_everywhere:
lstm_input = tf.concat([tf.squeeze(history[:, t, :]), goal], axis=1)
else:
lstm_input = tf.squeeze(history[:, t, :])
output, state = stacked_lstm(lstm_input, state)
with tf.variable_scope('output_decoder'):
oconfig = self._task_config.output.shape
assert len(oconfig) == 1
features = tf.concat([output, goal], axis=1)
assert len(output.get_shape().as_list()) == 2
assert len(goal.get_shape().as_list()) == 2
decoder = embedders.MLPEmbedder(
layers=self._embedder_hparams.predictions.layer_sizes + oconfig)
# Prediction is done off the last step lstm output and the goal.
predictions = decoder.build(features)
return predictions, state
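# Illustrative sketch (not used by the library): the TaskPolicy subclasses
# above expect `embedder_hparams` to expose `image`, `vector`, `goal` and
# `predictions` attributes, as read in _embed_task_ios() and build(). The
# namedtuples below only illustrate that structure; the field values are
# placeholders, not recommended settings, and the real hparams may carry more
# fields consumed inside embedders.py.
def _example_embedder_hparams():
  import collections
  resnet_params = collections.namedtuple('ResNetParams', ['is_train'])
  mlp_params = collections.namedtuple('MLPParams', ['layer_sizes'])
  embedder_hparams = collections.namedtuple(
      'EmbedderHParams', ['image', 'vector', 'goal', 'predictions'])
  return embedder_hparams(
      image=resnet_params(is_train=False),
      vector=[128, 128],  # Layer sizes passed to embedders.MLPEmbedder.
      goal=resnet_params(is_train=False),
      predictions=mlp_params(layer_sizes=[64]),
  )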
| 18,995 | 38.991579 | 80 | py |
models | models-master/research/cognitive_planning/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/cognitive_planning/tasks.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of tasks.
This interface is intended to implement a wide variety of navigation
tasks. See go/navigation_tasks for a list.
"""
import abc
import collections
import math
import threading
import networkx as nx
import numpy as np
import tensorflow as tf
#from pyglib import logging
#import gin
from envs import task_env
from envs import util as envs_util
# Utility functions.
def _pad_or_clip_array(np_arr, arr_len, is_front_clip=True, output_mask=False):
"""Make np_arr array to have length arr_len.
If the array is shorter than arr_len, then it is padded from the front with
zeros. If it is longer, then it is clipped either from the back or from the
front. Only the first dimension is modified.
Args:
np_arr: numpy array.
arr_len: integer scalar.
is_front_clip: a boolean. If true then clipping is done in the front,
otherwise in the back.
output_mask: If True, outputs a numpy array of rank 1 which represents
a mask of which values have been added (0 - added, 1 - actual output).
Returns:
    A numpy array and the size of padding (as a python int32). This size is
    negative if the array is clipped.
"""
shape = list(np_arr.shape)
pad_size = arr_len - shape[0]
padded_or_clipped = None
if pad_size < 0:
if is_front_clip:
padded_or_clipped = np_arr[-pad_size:, :]
else:
padded_or_clipped = np_arr[:arr_len, :]
elif pad_size > 0:
padding = np.zeros([pad_size] + shape[1:], dtype=np_arr.dtype)
padded_or_clipped = np.concatenate([np_arr, padding], axis=0)
else:
padded_or_clipped = np_arr
if output_mask:
mask = np.ones((arr_len,), dtype=np.int)
if pad_size > 0:
mask[-pad_size:] = 0
return padded_or_clipped, pad_size, mask
else:
return padded_or_clipped, pad_size
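# Illustrative sketch (not used by the library): shows how _pad_or_clip_array
# pads a short array at the back with zeros and clips a long one from the
# front. The shapes below are arbitrary and chosen only for illustration.
def _pad_or_clip_array_example():
  short_arr = np.ones((2, 3))
  # pylint: disable=unbalanced-tuple-unpacking
  padded, pad_size, mask = _pad_or_clip_array(short_arr, 4, output_mask=True)
  # padded.shape == (4, 3), pad_size == 2, mask == [1, 1, 0, 0].
  long_arr = np.ones((6, 3))
  clipped, clip_size = _pad_or_clip_array(long_arr, 4, is_front_clip=True)
  # clipped.shape == (4, 3), clip_size == -2 (negative when clipping).
  return padded, pad_size, mask, clipped, clip_size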
def classification_loss(truth, predicted, weights=None, is_one_hot=True):
"""A cross entropy loss.
Computes the mean of cross entropy losses for all pairs of true labels and
predictions. It wraps around a tf implementation of the cross entropy loss
with additional reformating of the inputs. If the truth and predicted are
n-rank Tensors with n > 2, then these are reshaped to 2-rank Tensors. It
allows for truth to be specified as one hot vector or class indices. Finally,
a weight can be specified for each element in truth and predicted.
Args:
truth: an n-rank or (n-1)-rank Tensor containing labels. If is_one_hot is
True, then n-rank Tensor is expected, otherwise (n-1) rank one.
predicted: an n-rank float Tensor containing prediction probabilities.
weights: an (n-1)-rank float Tensor of weights
is_one_hot: a boolean.
Returns:
A TF float scalar.
"""
num_labels = predicted.get_shape().as_list()[-1]
if not is_one_hot:
truth = tf.reshape(truth, [-1])
truth = tf.one_hot(
truth, depth=num_labels, on_value=1.0, off_value=0.0, axis=-1)
else:
truth = tf.reshape(truth, [-1, num_labels])
predicted = tf.reshape(predicted, [-1, num_labels])
losses = tf.nn.softmax_cross_entropy_with_logits(
labels=truth, logits=predicted)
if weights is not None:
losses = tf.boolean_mask(losses,
tf.cast(tf.reshape(weights, [-1]), dtype=tf.bool))
return tf.reduce_mean(losses)
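# Illustrative sketch (not used by the library): builds the classification loss
# for a toy batch of one-hot targets. All shapes and values are arbitrary and
# only meant to show the expected tensor layout.
def _classification_loss_example():
  truth = tf.constant(np.eye(4, dtype=np.float32))  # 4 one-hot labels.
  logits = tf.constant(np.random.randn(4, 4).astype(np.float32))
  weights = tf.constant([1.0, 1.0, 0.0, 1.0])  # Mask out the third pair.
  return classification_loss(truth, logits, weights=weights, is_one_hot=True)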
class UnrolledTaskIOConfig(object):
"""Configuration of task inputs and outputs.
A task can have multiple inputs, which define the context, and a task query
which defines what is to be executed in this context. The desired execution
is encoded in an output. The config defines the shapes of the inputs, the
query and the outputs.
"""
def __init__(self, inputs, output, query=None):
"""Constructs a Task input/output config.
Args:
      inputs: an OrderedDict mapping modality types to tuples. Each tuple
        represents the configuration of an input, with the first element being
        the type (a tf.DType) and the second element the shape.
output: a tuple representing the configuration of the output.
query: a tuple representing the configuration of the query. If no query,
then None.
"""
# A configuration of a single input, output or query. Consists of the type,
# which can be one of the three specified above, and a shape. The shape must
# be consistent with the type, e.g. if type == 'image', then shape is a 3
# valued list.
io_config = collections.namedtuple('IOConfig', ['type', 'shape'])
def assert_config(config):
if not isinstance(config, tuple):
raise ValueError('config must be a tuple. Received {}'.format(
type(config)))
if len(config) != 2:
raise ValueError('config must have 2 elements, has %d' % len(config))
if not isinstance(config[0], tf.DType):
raise ValueError('First element of config must be a tf.DType.')
if not isinstance(config[1], list):
raise ValueError('Second element of config must be a list.')
assert isinstance(inputs, collections.OrderedDict)
for modality_type in inputs:
assert_config(inputs[modality_type])
self._inputs = collections.OrderedDict(
[(k, io_config(*value)) for k, value in inputs.iteritems()])
if query is not None:
assert_config(query)
self._query = io_config(*query)
else:
self._query = None
assert_config(output)
self._output = io_config(*output)
@property
def inputs(self):
return self._inputs
@property
def output(self):
return self._output
@property
def query(self):
return self._query
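# Illustrative sketch (not used by the library): constructs a minimal config
# with a single image input stream, an image query and a sequence of one-hot
# outputs. All shapes are arbitrary placeholders.
def _example_task_io_config():
  inputs = collections.OrderedDict(
      [(task_env.ModalityTypes.IMAGE, (tf.float32, [8, 64, 64, 3]))])
  query = (tf.float32, [64, 64, 3])
  output = (tf.float32, [8, 4])
  return UnrolledTaskIOConfig(inputs=inputs, output=output, query=query)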
class UnrolledTask(object):
"""An interface for a Task which can be unrolled during training.
  Each example is called an episode and consists of inputs and a target output,
  where the output can be considered as the desired unrolled sequence of
  actions for the inputs. For the specified tasks, these action sequences are
  to be unambiguously definable.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, config):
assert isinstance(config, UnrolledTaskIOConfig)
self._config = config
# A dict of bookkeeping variables.
self.info = {}
# Tensorflow input is multithreaded and this lock is needed to prevent
# race condition in the environment. Without the lock, non-thread safe
# environments crash.
self._lock = threading.Lock()
@property
def config(self):
return self._config
@abc.abstractmethod
def episode(self):
"""Returns data needed to train and test a single episode.
Each episode consists of inputs, which define the context of the task, a
query which defines the task, and a target output, which defines a
sequence of actions to be executed for this query. This sequence should not
    require feedback, i.e. can be predicted purely from input and query.
Returns:
inputs, query, output, where inputs is a list of numpy arrays and query
and output are numpy arrays. These arrays must be of shape and type as
specified in the task configuration.
"""
pass
def reset(self, observation):
"""Called after the environment is reset."""
pass
def episode_batch(self, batch_size):
"""Returns a batch of episodes.
Args:
batch_size: size of batch.
Returns:
(inputs, query, output, masks) where inputs is list of numpy arrays and
query, output, and mask are numpy arrays. These arrays must be of shape
and type as specified in the task configuration with one additional
preceding dimension corresponding to the batch.
Raises:
ValueError: if self.episode() returns illegal values.
"""
batched_inputs = collections.OrderedDict(
[[mtype, []] for mtype in self.config.inputs])
batched_queries = []
batched_outputs = []
batched_masks = []
for _ in range(int(batch_size)):
with self._lock:
        # The episode function needs to be thread-safe. Since the current
        # env implementations are not thread safe, we need to lock the
        # operations here.
inputs, query, outputs = self.episode()
if not isinstance(outputs, tuple):
raise ValueError('Outputs return value must be tuple.')
if len(outputs) != 2:
raise ValueError('Output tuple must be of size 2.')
if inputs is not None:
for modality_type in batched_inputs:
batched_inputs[modality_type].append(
np.expand_dims(inputs[modality_type], axis=0))
if query is not None:
batched_queries.append(np.expand_dims(query, axis=0))
batched_outputs.append(np.expand_dims(outputs[0], axis=0))
if outputs[1] is not None:
batched_masks.append(np.expand_dims(outputs[1], axis=0))
batched_inputs = {
k: np.concatenate(i, axis=0) for k, i in batched_inputs.iteritems()
}
if batched_queries:
batched_queries = np.concatenate(batched_queries, axis=0)
batched_outputs = np.concatenate(batched_outputs, axis=0)
if batched_masks:
batched_masks = np.concatenate(batched_masks, axis=0).astype(np.float32)
else:
# When the array is empty, the default np.dtype is float64 which causes
# py_func to crash in the tests.
batched_masks = np.array([], dtype=np.float32)
batched_inputs = [batched_inputs[k] for k in self._config.inputs]
return batched_inputs, batched_queries, batched_outputs, batched_masks
def tf_episode_batch(self, batch_size):
"""A batch of episodes as TF Tensors.
Same as episode_batch with the difference that the return values are TF
Tensors.
Args:
batch_size: a python float for the batch size.
Returns:
inputs, query, output, mask where inputs is a dictionary of tf.Tensor
where the keys are the modality types specified in the config.inputs.
query, output, and mask are TF Tensors. These tensors must
be of shape and type as specified in the task configuration with one
additional preceding dimension corresponding to the batch. Both mask and
output have the same shape as output.
"""
# Define TF outputs.
touts = []
shapes = []
for _, i in self._config.inputs.iteritems():
touts.append(i.type)
shapes.append(i.shape)
if self._config.query is not None:
touts.append(self._config.query.type)
shapes.append(self._config.query.shape)
# Shapes and types for batched_outputs.
touts.append(self._config.output.type)
shapes.append(self._config.output.shape)
# Shapes and types for batched_masks.
touts.append(self._config.output.type)
shapes.append(self._config.output.shape[0:1])
def episode_batch_func():
if self.config.query is None:
inp, _, output, masks = self.episode_batch(int(batch_size))
return tuple(inp) + (output, masks)
else:
inp, query, output, masks = self.episode_batch(int(batch_size))
return tuple(inp) + (query, output, masks)
tf_episode_batch = tf.py_func(episode_batch_func, [], touts,
stateful=True, name='taskdata')
for episode, shape in zip(tf_episode_batch, shapes):
episode.set_shape([batch_size] + shape)
tf_episode_batch_dict = collections.OrderedDict([
(mtype, episode)
for mtype, episode in zip(self.config.inputs.keys(), tf_episode_batch)
])
cur_index = len(self.config.inputs.keys())
tf_query = None
if self.config.query is not None:
tf_query = tf_episode_batch[cur_index]
cur_index += 1
tf_outputs = tf_episode_batch[cur_index]
tf_masks = tf_episode_batch[cur_index + 1]
return tf_episode_batch_dict, tf_query, tf_outputs, tf_masks
@abc.abstractmethod
def target_loss(self, true_targets, targets, weights=None):
"""A loss for training a task model.
This loss measures the discrepancy between the task outputs, the true and
predicted ones.
Args:
true_targets: tf.Tensor of shape and type as defined in the task config
containing the true outputs.
targets: tf.Tensor of shape and type as defined in the task config
containing the predicted outputs.
weights: a bool tf.Tensor of shape as targets. Only true values are
considered when formulating the loss.
"""
pass
def reward(self, obs, done, info):
"""Returns a reward.
    The task has to compute a reward based on the state of the environment. The
reward computation, though, is task specific. The task is to use the
environment interface, as defined in task_env.py, to compute the reward. If
this interface does not expose enough information, it is to be updated.
Args:
obs: Observation from environment's step function.
done: Done flag from environment's step function.
info: Info dict from environment's step function.
Returns:
obs: Observation.
reward: Floating point value.
done: Done flag.
info: Info dict.
"""
# Default implementation does not do anything.
return obs, 0.0, done, info
class RandomExplorationBasedTask(UnrolledTask):
"""A Task which starts with a random exploration of the environment."""
def __init__(self,
env,
seed,
add_query_noise=False,
query_noise_var=0.0,
*args,
**kwargs): # pylint: disable=keyword-arg-before-vararg
"""Initializes a Task using a random exploration runs.
Args:
env: an instance of type TaskEnv and gym.Env.
seed: a random seed.
add_query_noise: boolean, if True then whatever queries are generated,
they are randomly perturbed. The semantics of the queries depends on the
concrete task implementation.
query_noise_var: float, the variance of Gaussian noise used for query
perturbation. Used iff add_query_noise==True.
*args: see super class.
**kwargs: see super class.
"""
super(RandomExplorationBasedTask, self).__init__(*args, **kwargs)
assert isinstance(env, task_env.TaskEnv)
self._env = env
self._env.set_task(self)
self._rng = np.random.RandomState(seed)
self._add_query_noise = add_query_noise
self._query_noise_var = query_noise_var
# GoToStaticXTask can also take empty config but for the rest of the classes
# the number of modality types is 1.
if len(self.config.inputs.keys()) > 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type or less.')
def _exploration(self):
"""Generates a random exploration run.
The function uses the environment to generate a run.
Returns:
A tuple of numpy arrays. The i-th array contains observation of type and
shape as specified in config.inputs[i].
A list of states along the exploration path.
A list of vertex indices corresponding to the path of the exploration.
"""
in_seq_len = self._config.inputs.values()[0].shape[0]
path, _, states, step_outputs = self._env.random_step_sequence(
min_len=in_seq_len)
obs = {modality_type: [] for modality_type in self._config.inputs}
for o in step_outputs:
step_obs, _, done, _ = o
# It is expected that each value of step_obs is a dict of observations,
# whose dimensions are consistent with the config.inputs sizes.
for modality_type in self._config.inputs:
assert modality_type in step_obs, '{}'.format(type(step_obs))
o = step_obs[modality_type]
i = self._config.inputs[modality_type]
assert len(o.shape) == len(i.shape) - 1
for dim_o, dim_i in zip(o.shape, i.shape[1:]):
assert dim_o == dim_i, '{} != {}'.format(dim_o, dim_i)
obs[modality_type].append(o)
if done:
break
if not obs:
return obs, states, path
max_path_len = int(
round(in_seq_len * float(len(path)) / float(len(obs.values()[0]))))
path = path[-max_path_len:]
states = states[-in_seq_len:]
# The above obs is a list of tuples of np,array. Re-format them as tuple of
# np.array, each array containing all observations from all steps.
def regroup(obs, i):
"""Regroups observations.
Args:
obs: a list of tuples of same size. The k-th tuple contains all the
observations from k-th step. Each observation is a numpy array.
i: the index of the observation in each tuple to be grouped.
Returns:
A numpy array of shape config.inputs[i] which contains all i-th
observations from all steps. These are concatenated along the first
dimension. In addition, if the number of observations is different from
the one specified in config.inputs[i].shape[0], then the array is either
padded from front or clipped.
"""
grouped_obs = np.concatenate(
[np.expand_dims(o, axis=0) for o in obs[i]], axis=0)
in_seq_len = self._config.inputs[i].shape[0]
# pylint: disable=unbalanced-tuple-unpacking
grouped_obs, _ = _pad_or_clip_array(
grouped_obs, in_seq_len, is_front_clip=True)
return grouped_obs
all_obs = {i: regroup(obs, i) for i in self._config.inputs}
return all_obs, states, path
def _obs_to_state(self, path, states):
"""Computes mapping between path nodes and states."""
# Generate a numpy array of locations corresponding to the path vertices.
path_coordinates = map(self._env.vertex_to_pose, path)
path_coordinates = np.concatenate(
[np.reshape(p, [1, 2]) for p in path_coordinates])
# The observations are taken along a smoothed trajectory following the path.
    # We compute a mapping between the observations and the map vertices.
path_to_obs = collections.defaultdict(list)
obs_to_state = []
for i, s in enumerate(states):
location = np.reshape(s[0:2], [1, 2])
index = np.argmin(
np.reshape(
np.sum(np.power(path_coordinates - location, 2), axis=1), [-1]))
index = path[index]
path_to_obs[index].append(i)
obs_to_state.append(index)
return path_to_obs, obs_to_state
def _perturb_state(self, state, noise_var):
"""Perturbes the state.
The location are purturbed using a Gaussian noise with variance
noise_var. The orientation is uniformly sampled.
Args:
state: a numpy array containing an env state (x, y locations).
noise_var: float
Returns:
The perturbed state.
"""
def normal(v, std):
if std > 0:
n = self._rng.normal(0.0, std)
n = min(n, 2.0 * std)
n = max(n, -2.0 * std)
return v + n
else:
return v
state = state.copy()
state[0] = normal(state[0], noise_var)
state[1] = normal(state[1], noise_var)
if state.size > 2:
state[2] = self._rng.uniform(-math.pi, math.pi)
return state
def _sample_obs(self,
indices,
observations,
observation_states,
path_to_obs,
max_obs_index=None,
use_exploration_obs=True):
"""Samples one observation which corresponds to vertex_index in path.
In addition, the sampled observation must have index in observations less
than max_obs_index. If these two conditions cannot be satisfied the
function returns None.
Args:
indices: a list of integers.
observations: a list of numpy arrays containing all the observations.
observation_states: a list of numpy arrays, each array representing the
state of the observation.
path_to_obs: a dict of path indices to lists of observation indices.
max_obs_index: an integer.
use_exploration_obs: if True, then the observation is sampled among the
specified observations, otherwise it is obtained from the environment.
Returns:
A tuple of:
-- A numpy array of size width x height x 3 representing the sampled
observation.
        -- The index of the sampled observation among the input observations.
-- The state at which the observation is captured.
Raises:
ValueError: if the observation and observation_states lists are of
different lengths.
"""
if len(observations) != len(observation_states):
raise ValueError('observation and observation_states lists must have '
'equal lengths')
if not indices:
return None, None, None
vertex_index = self._rng.choice(indices)
if use_exploration_obs:
obs_indices = path_to_obs[vertex_index]
if max_obs_index is not None:
obs_indices = [i for i in obs_indices if i < max_obs_index]
if obs_indices:
index = self._rng.choice(obs_indices)
if self._add_query_noise:
xytheta = self._perturb_state(observation_states[index],
self._query_noise_var)
return self._env.observation(xytheta), index, xytheta
else:
return observations[index], index, observation_states[index]
else:
return None, None, None
else:
xy = self._env.vertex_to_pose(vertex_index)
xytheta = np.array([xy[0], xy[1], 0.0])
xytheta = self._perturb_state(xytheta, self._query_noise_var)
return self._env.observation(xytheta), None, xytheta
class AreNearbyTask(RandomExplorationBasedTask):
"""A task of identifying whether a query is nearby current location or not.
The query is guaranteed to be in proximity of an already visited location,
i.e. close to one of the observations. For each observation we have one
query, which is either close or not to this observation.
"""
def __init__(
self,
max_distance=0,
*args,
**kwargs): # pylint: disable=keyword-arg-before-vararg
super(AreNearbyTask, self).__init__(*args, **kwargs)
self._max_distance = max_distance
if len(self.config.inputs.keys()) != 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type')
def episode(self):
"""Episode data.
Returns:
observations: a tuple with one element. This element is a numpy array of
size in_seq_len x observation_size x observation_size x 3 containing
in_seq_len images.
query: a numpy array of size
in_seq_len x observation_size X observation_size x 3 containing a query
image.
      A tuple of size two. First element is an in_seq_len x 3 numpy array of
        one hot labels. The i-th row encodes whether the i-th query image is a
        not-nearby visited location (label 0), a nearby visited location
        (label 1), or a not yet visited location (label 2) relative to the
        i-th observation.
The second element in the tuple is a mask, a numpy array of size
in_seq_len x 1 and values 1.0 or 0.0 denoting whether the query is
valid or not (it can happen that the query is not valid, e.g. there are
not enough observations to have a meaningful queries).
"""
observations, states, path = self._exploration()
assert len(observations.values()[0]) == len(states)
# The observations are taken along a smoothed trajectory following the path.
    # We compute a mapping between the observations and the map vertices.
path_to_obs, obs_to_path = self._obs_to_state(path, states)
# Go over all observations, and sample a query. With probability 0.5 this
# query is a nearby observation (defined as belonging to the same vertex
# in path).
g = self._env.graph
queries = []
labels = []
validity_masks = []
query_index_in_observations = []
for i, curr_o in enumerate(observations.values()[0]):
p = obs_to_path[i]
low = max(0, i - self._max_distance)
# A list of lists of vertex indices. Each list in this group corresponds
# to one possible label.
index_groups = [[], [], []]
# Nearby visited indices, label 1.
nearby_visited = [
ii for ii in path[low:i + 1] + g[p].keys() if ii in obs_to_path[:i]
]
      # Keep only nearby vertices for which observations exist.
      nearby_visited = [ii for ii in nearby_visited if ii in path_to_obs]
# NOT Nearby visited indices, label 0.
not_nearby_visited = [ii for ii in path[:low] if ii not in g[p].keys()]
      # Keep only far-away vertices for which observations exist.
      not_nearby_visited = [
          ii for ii in not_nearby_visited if ii in path_to_obs
      ]
# NOT visited indices, label 2.
not_visited = [
ii for ii in range(g.number_of_nodes()) if ii not in path[:i + 1]
]
index_groups = [not_nearby_visited, nearby_visited, not_visited]
# Consider only labels for which there are indices.
allowed_labels = [ii for ii, group in enumerate(index_groups) if group]
label = self._rng.choice(allowed_labels)
indices = list(set(index_groups[label]))
max_obs_index = None if label == 2 else i
use_exploration_obs = False if label == 2 else True
o, obs_index, _ = self._sample_obs(
indices=indices,
observations=observations.values()[0],
observation_states=states,
path_to_obs=path_to_obs,
max_obs_index=max_obs_index,
use_exploration_obs=use_exploration_obs)
query_index_in_observations.append(obs_index)
# If we cannot sample a valid query, we mark it as not valid in mask.
if o is None:
label = 0.0
o = curr_o
validity_masks.append(0)
else:
validity_masks.append(1)
queries.append(o.values()[0])
labels.append(label)
query = np.concatenate([np.expand_dims(q, axis=0) for q in queries], axis=0)
def one_hot(label, num_labels=3):
a = np.zeros((num_labels,), dtype=np.float)
a[int(label)] = 1.0
return a
outputs = np.stack([one_hot(l) for l in labels], axis=0)
validity_mask = np.reshape(
np.array(validity_masks, dtype=np.int32), [-1, 1])
self.info['query_index_in_observations'] = query_index_in_observations
self.info['observation_states'] = states
return observations, query, (outputs, validity_mask)
def target_loss(self, truth, predicted, weights=None):
pass
class NeighboringQueriesTask(RandomExplorationBasedTask):
"""A task of identifying whether two queries are closeby or not.
The proximity between queries is defined by the length of the shorest path
between them.
"""
def __init__(
self,
max_distance=1,
*args,
**kwargs): # pylint: disable=keyword-arg-before-vararg
"""Initializes a NeighboringQueriesTask.
Args:
max_distance: integer, the maximum distance in terms of number of vertices
between the two queries, so that they are considered neighboring.
*args: for super class.
**kwargs: for super class.
"""
super(NeighboringQueriesTask, self).__init__(*args, **kwargs)
self._max_distance = max_distance
if len(self.config.inputs.keys()) != 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type')
def episode(self):
"""Episode data.
Returns:
observations: a tuple with one element. This element is a numpy array of
size in_seq_len x observation_size x observation_size x 3 containing
in_seq_len images.
query: a numpy array of size
2 x observation_size X observation_size x 3 containing a pair of query
images.
A tuple of size two. First element is a numpy array of size 2 containing
      a one hot vector of whether the two observations are neighboring. Second
element is a boolean numpy value denoting whether this is a valid
episode.
"""
observations, states, path = self._exploration()
assert len(observations.values()[0]) == len(states)
path_to_obs, _ = self._obs_to_state(path, states)
# Restrict path to ones for which observations have been generated.
path = [p for p in path if p in path_to_obs]
# Sample first query.
query1_index = self._rng.choice(path)
# Sample label.
label = self._rng.randint(2)
# Sample second query.
# If label == 1, then second query must be nearby, otherwise not.
closest_indices = nx.single_source_shortest_path(
self._env.graph, query1_index, self._max_distance).keys()
if label == 0:
      # Path indices which are not close to the first query.
      indices = [p for p in path if p not in closest_indices]
    else:
      # Close indices which are on the path.
indices = [p for p in closest_indices if p in path]
query2_index = self._rng.choice(indices)
# Generate an observation.
query1, query1_index, _ = self._sample_obs(
[query1_index],
observations.values()[0],
states,
path_to_obs,
max_obs_index=None,
use_exploration_obs=True)
query2, query2_index, _ = self._sample_obs(
[query2_index],
observations.values()[0],
states,
path_to_obs,
max_obs_index=None,
use_exploration_obs=True)
queries = np.concatenate(
[np.expand_dims(q, axis=0) for q in [query1, query2]])
labels = np.array([0, 0])
labels[label] = 1
is_valid = np.array([1])
self.info['observation_states'] = states
self.info['query_indices_in_observations'] = [query1_index, query2_index]
return observations, queries, (labels, is_valid)
def target_loss(self, truth, predicted, weights=None):
pass
#@gin.configurable
class GotoStaticXTask(RandomExplorationBasedTask):
"""Task go to a static X.
If continuous reward is used only one goal is allowed so that the reward can
  be computed as a delta-distance to that goal.
"""
def __init__(self,
step_reward=0.0,
goal_reward=1.0,
hit_wall_reward=-1.0,
done_at_target=False,
use_continuous_reward=False,
*args,
**kwargs): # pylint: disable=keyword-arg-before-vararg
super(GotoStaticXTask, self).__init__(*args, **kwargs)
if len(self.config.inputs.keys()) > 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type or less.')
self._step_reward = step_reward
self._goal_reward = goal_reward
self._hit_wall_reward = hit_wall_reward
self._done_at_target = done_at_target
self._use_continuous_reward = use_continuous_reward
self._previous_path_length = None
def episode(self):
observations, _, path = self._exploration()
if len(path) < 2:
raise ValueError('The exploration path has only one node.')
g = self._env.graph
start = path[-1]
while True:
goal = self._rng.choice(path[:-1])
if goal != start:
break
goal_path = nx.shortest_path(g, start, goal)
init_orientation = self._rng.uniform(0, np.pi, (1,))
trajectory = np.array(
[list(self._env.vertex_to_pose(p)) for p in goal_path])
init_xy = np.reshape(trajectory[0, :], [-1])
init_state = np.concatenate([init_xy, init_orientation], 0)
trajectory = trajectory[1:, :]
deltas = envs_util.trajectory_to_deltas(trajectory, init_state)
output_seq_len = self._config.output.shape[0]
arr = _pad_or_clip_array(deltas, output_seq_len, output_mask=True)
# pylint: disable=unbalanced-tuple-unpacking
thetas, _, thetas_mask = arr
query = self._env.observation(self._env.vertex_to_pose(goal)).values()[0]
return observations, query, (thetas, thetas_mask)
def reward(self, obs, done, info):
if 'wall_collision' in info and info['wall_collision']:
return obs, self._hit_wall_reward, done, info
reward = 0.0
current_vertex = self._env.pose_to_vertex(self._env.state)
if current_vertex in self._env.targets():
if self._done_at_target:
done = True
else:
obs = self._env.reset()
reward = self._goal_reward
else:
if self._use_continuous_reward:
if len(self._env.targets()) != 1:
raise ValueError(
'FindX task with continuous reward is assuming only one target.')
goal_vertex = self._env.targets()[0]
path_length = self._compute_path_length(goal_vertex)
reward = self._previous_path_length - path_length
self._previous_path_length = path_length
else:
reward = self._step_reward
return obs, reward, done, info
def _compute_path_length(self, goal_vertex):
current_vertex = self._env.pose_to_vertex(self._env.state)
path = nx.shortest_path(self._env.graph, current_vertex, goal_vertex)
assert len(path) >= 2
curr_xy = np.array(self._env.state[:2])
next_xy = np.array(self._env.vertex_to_pose(path[1]))
last_step_distance = np.linalg.norm(next_xy - curr_xy)
return (len(path) - 2) * self._env.cell_size_px + last_step_distance
def reset(self, observation):
if self._use_continuous_reward:
if len(self._env.targets()) != 1:
raise ValueError(
'FindX task with continuous reward is assuming only one target.')
goal_vertex = self._env.targets()[0]
self._previous_path_length = self._compute_path_length(goal_vertex)
def target_loss(self, truth, predicted, weights=None):
"""Action classification loss.
Args:
truth: a batch_size x sequence length x number of labels float
Tensor containing a one hot vector for each label in each batch and
time.
predicted: a batch_size x sequence length x number of labels float
Tensor containing a predicted distribution over all actions.
weights: a batch_size x sequence_length float Tensor of bool
denoting which actions are valid.
Returns:
An average cross entropy over all batches and elements in sequence.
"""
return classification_loss(
truth=truth, predicted=predicted, weights=weights, is_one_hot=True)
class RelativeLocationTask(RandomExplorationBasedTask):
"""A task of estimating the relative location of a query w.r.t current.
It is to be used for debugging. It is designed such that the output is a
single value, out of a discrete set of values, so that it can be phrased as
a classification problem.
"""
def __init__(self, num_labels, *args, **kwargs):
"""Initializes a relative location task.
Args:
num_labels: integer, number of orientations to bin the relative
orientation into.
*args: see super class.
**kwargs: see super class.
"""
super(RelativeLocationTask, self).__init__(*args, **kwargs)
self._num_labels = num_labels
if len(self.config.inputs.keys()) != 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type')
def episode(self):
observations, states, path = self._exploration()
# Select a random element from history.
path_to_obs, _ = self._obs_to_state(path, states)
use_exploration_obs = not self._add_query_noise
query, _, query_state = self._sample_obs(
path[:-1],
observations.values()[0],
states,
path_to_obs,
max_obs_index=None,
use_exploration_obs=use_exploration_obs)
x, y, theta = tuple(states[-1])
q_x, q_y, _ = tuple(query_state)
t_x, t_y = q_x - x, q_y - y
(rt_x, rt_y) = (np.sin(theta) * t_x - np.cos(theta) * t_y,
np.cos(theta) * t_x + np.sin(theta) * t_y)
# Bins are [a(i), a(i+1)] for a(i) = -pi + 0.5 * bin_size + i * bin_size.
shift = np.pi * (1 - 1.0 / (2.0 * self._num_labels))
orientation = np.arctan2(rt_y, rt_x) + shift
if orientation < 0:
orientation += 2 * np.pi
label = int(np.floor(self._num_labels * orientation / (2 * np.pi)))
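    # Example (illustrative): with self._num_labels == 4, the relative
    # direction of the query in the agent's frame is quantized into 4 bins of
    # width pi/2 each, and `label` is the index of the bin.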
out_shape = self._config.output.shape
if len(out_shape) != 1:
raise ValueError('Output shape should be of rank 1.')
if out_shape[0] != self._num_labels:
raise ValueError('Output shape must be of size %d' % self._num_labels)
output = np.zeros(out_shape, dtype=np.float32)
output[label] = 1
return observations, query, (output, None)
def target_loss(self, truth, predicted, weights=None):
return classification_loss(
truth=truth, predicted=predicted, weights=weights, is_one_hot=True)
class LocationClassificationTask(UnrolledTask):
"""A task of classifying a location as one of several classes.
The task does not have an input, but just a query and an output. The query
is an observation of the current location, e.g. an image taken from the
current state. The output is a label classifying this location in one of
predefined set of locations (or landmarks).
The current implementation classifies locations as intersections based on the
  number and directions of bifurcations. It is expected that a location can
  have at most 4 different directions, aligned with the axes. As each of these
  four directions might be present or not, the number of possible intersections
  is 2^4 = 16.
"""
def __init__(self, env, seed, *args, **kwargs):
super(LocationClassificationTask, self).__init__(*args, **kwargs)
self._env = env
self._rng = np.random.RandomState(seed)
# A location property which can be set. If not set, a random one is
# generated.
self._location = None
if len(self.config.inputs.keys()) > 1:
raise NotImplementedError('current implementation supports input '
'with only one modality type or less.')
@property
def location(self):
return self._location
@location.setter
def location(self, location):
self._location = location
def episode(self):
    # Get a location. If not set, sample one at a vertex with a random
    # orientation.
location = self._location
if location is None:
num_nodes = self._env.graph.number_of_nodes()
vertex = int(math.floor(self._rng.uniform(0, num_nodes)))
xy = self._env.vertex_to_pose(vertex)
theta = self._rng.uniform(0, 2 * math.pi)
location = np.concatenate(
[np.reshape(xy, [-1]), np.array([theta])], axis=0)
else:
vertex = self._env.pose_to_vertex(location)
theta = location[2]
neighbors = self._env.graph.neighbors(vertex)
xy_s = [self._env.vertex_to_pose(n) for n in neighbors]
def rotate(xy, theta):
"""Rotates a vector around the origin by angle theta.
Args:
xy: a numpy darray of shape (2, ) of floats containing the x and y
coordinates of a vector.
theta: a python float containing the rotation angle in radians.
Returns:
A numpy darray of floats of shape (2,) containing the x and y
coordinates rotated xy.
"""
rotated_x = np.cos(theta) * xy[0] - np.sin(theta) * xy[1]
rotated_y = np.sin(theta) * xy[0] + np.cos(theta) * xy[1]
return np.array([rotated_x, rotated_y])
    # Rotate all intersection bifurcations by the orientation of the agent as
    # the intersection label is defined in an agent centered fashion.
xy_s = [
rotate(xy - location[0:2], -location[2] - math.pi / 4) for xy in xy_s
]
th_s = [np.arctan2(xy[1], xy[0]) for xy in xy_s]
out_shape = self._config.output.shape
if len(out_shape) != 1:
raise ValueError('Output shape should be of rank 1.')
num_labels = out_shape[0]
if num_labels != 16:
raise ValueError('Currently only 16 labels are supported '
'(there are 16 different 4 way intersection types).')
th_s = set([int(math.floor(4 * (th / (2 * np.pi) + 0.5))) for th in th_s])
one_hot_label = np.zeros((num_labels,), dtype=np.float32)
label = 0
for th in th_s:
label += pow(2, th)
one_hot_label[int(label)] = 1.0
query = self._env.observation(location).values()[0]
return [], query, (one_hot_label, None)
def reward(self, obs, done, info):
raise ValueError('Do not call.')
def target_loss(self, truth, predicted, weights=None):
return classification_loss(
truth=truth, predicted=predicted, weights=weights, is_one_hot=True)
class GotoStaticXNoExplorationTask(UnrolledTask):
"""An interface for findX tasks without exploration.
  The agent is initialized at a random location in a random world with a random
  goal, and the objective is for the agent to move toward the goal. This class
  generates episodes for such a task. Each episode generates a sequence of
  observations x and target outputs y. x is the observations and is an
  OrderedDict with keys provided from config.inputs.keys() and the shapes
  provided in config.inputs. The output is a numpy array with the shape
  specified in config.output. The shape of the array is
  (sequence_length x action_size) where action_size is the number of actions
  that can be done in the environment. Note that config.output.shape should be
  set according to the number of actions that can be done in the env.
  Target outputs y are the groundtruth values of each action, computed from the
  environment graph. The target output for each action is proportional to the
  progress that each action makes. A target value of 1 means that the action
  takes the agent one step closer, -1 means the action takes the agent one step
  farther. A value of -2 means that the action should not take place at all.
  This can be because the action leads to a collision or because it would
  terminate the episode prematurely.
"""
def __init__(self, env, *args, **kwargs):
super(GotoStaticXNoExplorationTask, self).__init__(*args, **kwargs)
if self._config.query is not None:
raise ValueError('query should be None.')
if len(self._config.output.shape) != 2:
raise ValueError('output should only have two dimensions:'
'(sequence_length x number_of_actions)')
for input_config in self._config.inputs.values():
if input_config.shape[0] != self._config.output.shape[0]:
        raise ValueError('the first dimension of the input and output should '
                         'be the same.')
if len(self._config.output.shape) != 2:
raise ValueError('output shape should be '
'(sequence_length x number_of_actions)')
self._env = env
def _compute_shortest_path_length(self, vertex, target_vertices):
"""Computes length of the shortest path from vertex to any target vertexes.
Args:
vertex: integer, index of the vertex in the environment graph.
target_vertices: list of the target vertexes
Returns:
integer, minimum distance from the vertex to any of the target_vertices.
Raises:
ValueError: if there is no path between the vertex and at least one of
the target_vertices.
"""
try:
return np.min([
len(nx.shortest_path(self._env.graph, vertex, t))
for t in target_vertices
])
except:
#logging.error('there is no path between vertex %d and at least one of '
# 'the targets %r', vertex, target_vertices)
raise
def _compute_gt_value(self, vertex, target_vertices):
"""Computes groundtruth value of all the actions at the vertex.
The value of each action is the difference each action makes in the length
    of the shortest path to the goal. If an action takes the agent one step
    closer to the goal, the value is 1. If it takes the agent one step away
    from the goal, the value is -1. If it leads to collision or if the agent
    uses the stop action before reaching the goal, the value is -2. To avoid
    scale issues the gt_values are multiplied by 0.5.
Args:
vertex: integer, the index of current vertex.
target_vertices: list of the integer indexes of the target views.
Returns:
numpy array with shape (action_size,) and each element is the groundtruth
value of each action based on the progress each action makes.
"""
action_size = self._config.output.shape[1]
output_value = np.ones((action_size), dtype=np.float32) * -2
my_distance = self._compute_shortest_path_length(vertex, target_vertices)
for adj in self._env.graph[vertex]:
adj_distance = self._compute_shortest_path_length(adj, target_vertices)
if adj_distance is None:
continue
action_index = self._env.action(
self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj))
assert action_index is not None, ('{} is not adjacent to {}. There might '
'be a problem in environment graph '
'connectivity because there is no '
'direct edge between the given '
'vertices').format(
self._env.vertex_to_pose(vertex),
self._env.vertex_to_pose(adj))
output_value[action_index] = my_distance - adj_distance
return output_value * 0.5
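  # Illustrative example (hypothetical numbers, not taken from the dataset): if
  # the shortest path from the current vertex to the goal has length 4, a
  # neighbor at distance 3 gets value 0.5 * (4 - 3) = 0.5, a neighbor at
  # distance 5 gets 0.5 * (4 - 5) = -0.5, and any action that does not
  # correspond to an edge in the graph keeps its initial value of 0.5 * -2 = -1.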
def episode(self):
"""Returns data needed to train and test a single episode.
Returns:
      (inputs, None, output) where inputs is a dictionary of modality types to
        numpy arrays. The second element is the query; since the goal is
        assumed to be given as part of the observation, it is None for this
        task. The output is a tuple of the ground truth action values, with
        the shape of (sequence_length x action_size) coming from
        config.output.shape, and a numpy array with the shape of
        (sequence_length,) that is 1 if the corresponding element of the
        input and output should be used in the training optimization.
Raises:
      ValueError: If the output values for env.random_step_sequence are not
valid.
ValueError: If the shape of observations coming from the env is not
consistent with the config.
ValueError: If there is a modality type specified in the config but the
environment does not return that.
"""
# Sequence length is the first dimension of any of the input tensors.
sequence_length = self._config.inputs.values()[0].shape[0]
modality_types = self._config.inputs.keys()
path, _, _, step_outputs = self._env.random_step_sequence(
max_len=sequence_length)
target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()]
if len(path) != len(step_outputs):
raise ValueError('path, and step_outputs should have equal length'
' {}!={}'.format(len(path), len(step_outputs)))
    # Building up observations. observations will be an OrderedDict of
# modality types. The values are numpy arrays that follow the given shape
# in the input config for each modality type.
observations = collections.OrderedDict([k, []] for k in modality_types)
for step_output in step_outputs:
obs_dict = step_output[0]
# Only going over the modality types that are specified in the input
# config.
for modality_type in modality_types:
if modality_type not in obs_dict:
raise ValueError('modality type is not returned from the environment.'
'{} not in {}'.format(modality_type,
obs_dict.keys()))
obs = obs_dict[modality_type]
if np.any(
obs.shape != tuple(self._config.inputs[modality_type].shape[1:])):
raise ValueError(
              'The observations should have the same size as specified in '
'config for modality type {}. {} != {}'.format(
modality_type, obs.shape,
self._config.inputs[modality_type].shape[1:]))
observations[modality_type].append(obs)
gt_value = [self._compute_gt_value(v, target_vertices) for v in path]
# pylint: disable=unbalanced-tuple-unpacking
gt_value, _, value_mask = _pad_or_clip_array(
np.array(gt_value),
sequence_length,
is_front_clip=False,
output_mask=True,
)
for modality_type, obs in observations.iteritems():
observations[modality_type], _, mask = _pad_or_clip_array(
np.array(obs), sequence_length, is_front_clip=False, output_mask=True)
assert np.all(mask == value_mask)
return observations, None, (gt_value, value_mask)
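  # Shape convention (a sketch, derived from the _pad_or_clip_array calls
  # above): for a config with sequence_length T and action_size A, gt_value has
  # shape (T, A) and value_mask has shape (T,); the mask is 0 for the padded
  # tail whenever the sampled path is shorter than T.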
def reset(self, observation):
"""Called after the environment is reset."""
pass
def target_loss(self, true_targets, targets, weights=None):
"""A loss for training a task model.
This loss measures the discrepancy between the task outputs, the true and
predicted ones.
Args:
true_targets: tf.Tensor of tf.float32 with the shape of
(batch_size x sequence_length x action_size).
targets: tf.Tensor of tf.float32 with the shape of
(batch_size x sequence_length x action_size).
weights: tf.Tensor of tf.bool with the shape of
(batch_size x sequence_length).
Raises:
ValueError: if the shapes of the input tensors are not consistent.
Returns:
L2 loss between the predicted action values and true action values.
"""
targets_shape = targets.get_shape().as_list()
true_targets_shape = true_targets.get_shape().as_list()
if len(targets_shape) != 3 or len(true_targets_shape) != 3:
raise ValueError('invalid shape for targets or true_targets_shape')
if np.any(targets_shape != true_targets_shape):
      raise ValueError('the shape of targets and true_targets are not the same '
'{} != {}'.format(targets_shape, true_targets_shape))
if weights is not None:
# Filtering targets and true_targets using weights.
weights_shape = weights.get_shape().as_list()
if np.any(weights_shape != targets_shape[0:2]):
        raise ValueError('The first two elements of weights shape should match '
'target. {} != {}'.format(weights_shape,
targets_shape))
true_targets = tf.boolean_mask(true_targets, weights)
targets = tf.boolean_mask(targets, weights)
return tf.losses.mean_squared_error(tf.reshape(targets, [-1]),
tf.reshape(true_targets, [-1]))
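  # For instance (hypothetical shapes): with targets of shape (8, 20, 7) and a
  # boolean weights tensor of shape (8, 20), tf.boolean_mask keeps only the
  # action-value rows of the valid timesteps, so padded steps do not contribute
  # to the mean squared error.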
def reward(self, obs, done, info):
raise NotImplementedError('reward is not implemented for this task')
################################################################################
class NewTask(UnrolledTask):
def __init__(self, env, *args, **kwargs):
super(NewTask, self).__init__(*args, **kwargs)
self._env = env
  def _compute_shortest_path_length(self, vertex, target_vertices):
    """Computes length of the shortest path from vertex to any target vertex.
    Args:
      vertex: integer, index of the vertex in the environment graph.
      target_vertices: list of the target vertices.
Returns:
integer, minimum distance from the vertex to any of the target_vertices.
Raises:
ValueError: if there is no path between the vertex and at least one of
the target_vertices.
"""
try:
return np.min([
len(nx.shortest_path(self._env.graph, vertex, t))
for t in target_vertices
])
except:
logging.error('there is no path between vertex %d and at least one of '
'the targets %r', vertex, target_vertices)
raise
def _compute_gt_value(self, vertex, target_vertices):
"""Computes groundtruth value of all the actions at the vertex.
    The value of each action is the difference the action makes in the length
    of the shortest path to the goal. If an action takes the agent one step
    closer to the goal the value is 1. If it takes the agent one step away
    from the goal the value is -1. If it leads to a collision, or if the agent
    uses the stop action before reaching the goal, the value is -2. To avoid
    scale issues the gt_values are multiplied by 0.5.
Args:
vertex: integer, the index of current vertex.
target_vertices: list of the integer indexes of the target views.
Returns:
numpy array with shape (action_size,) and each element is the groundtruth
value of each action based on the progress each action makes.
"""
action_size = self._config.output.shape[1]
output_value = np.ones((action_size), dtype=np.float32) * -2
    # Own _compute_shortest_path_length implementation - returns a float.
my_distance = self._compute_shortest_path_length(vertex, target_vertices)
for adj in self._env.graph[vertex]:
adj_distance = self._compute_shortest_path_length(adj, target_vertices)
if adj_distance is None:
continue
action_index = self._env.action(
self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj))
assert action_index is not None, ('{} is not adjacent to {}. There might '
'be a problem in environment graph '
'connectivity because there is no '
'direct edge between the given '
'vertices').format(
self._env.vertex_to_pose(vertex),
self._env.vertex_to_pose(adj))
output_value[action_index] = my_distance - adj_distance
return output_value * 0.5
def episode(self):
"""Returns data needed to train and test a single episode.
Returns:
      (inputs, None, output) where inputs is a dictionary of modality types to
        numpy arrays. The second element is the query; since the goal is
        assumed to be given as part of the observation, it is None for this
        task. The output is a tuple of the ground truth action values, with
        the shape of (sequence_length x action_size) coming from
        config.output.shape, and a numpy array with the shape of
        (sequence_length,) that is 1 if the corresponding element of the
        input and output should be used in the training optimization.
Raises:
      ValueError: If the output values for env.random_step_sequence are not
valid.
ValueError: If the shape of observations coming from the env is not
consistent with the config.
ValueError: If there is a modality type specified in the config but the
environment does not return that.
"""
# Sequence length is the first dimension of any of the input tensors.
sequence_length = self._config.inputs.values()[0].shape[0]
modality_types = self._config.inputs.keys()
path, _, _, step_outputs = self._env.random_step_sequence(
max_len=sequence_length)
target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()]
if len(path) != len(step_outputs):
raise ValueError('path, and step_outputs should have equal length'
' {}!={}'.format(len(path), len(step_outputs)))
    # Building up observations. observations will be an OrderedDict of
# modality types. The values are numpy arrays that follow the given shape
# in the input config for each modality type.
observations = collections.OrderedDict([k, []] for k in modality_types)
for step_output in step_outputs:
obs_dict = step_output[0]
# Only going over the modality types that are specified in the input
# config.
for modality_type in modality_types:
if modality_type not in obs_dict:
raise ValueError('modality type is not returned from the environment.'
'{} not in {}'.format(modality_type,
obs_dict.keys()))
obs = obs_dict[modality_type]
if np.any(
obs.shape != tuple(self._config.inputs[modality_type].shape[1:])):
raise ValueError(
              'The observations should have the same size as specified in '
'config for modality type {}. {} != {}'.format(
modality_type, obs.shape,
self._config.inputs[modality_type].shape[1:]))
observations[modality_type].append(obs)
gt_value = [self._compute_gt_value(v, target_vertices) for v in path]
# pylint: disable=unbalanced-tuple-unpacking
gt_value, _, value_mask = _pad_or_clip_array(
np.array(gt_value),
sequence_length,
is_front_clip=False,
output_mask=True,
)
for modality_type, obs in observations.iteritems():
observations[modality_type], _, mask = _pad_or_clip_array(
np.array(obs), sequence_length, is_front_clip=False, output_mask=True)
assert np.all(mask == value_mask)
return observations, None, (gt_value, value_mask)
def reset(self, observation):
"""Called after the environment is reset."""
pass
def target_loss(self, true_targets, targets, weights=None):
"""A loss for training a task model.
This loss measures the discrepancy between the task outputs, the true and
predicted ones.
Args:
true_targets: tf.Tensor of tf.float32 with the shape of
(batch_size x sequence_length x action_size).
targets: tf.Tensor of tf.float32 with the shape of
(batch_size x sequence_length x action_size).
weights: tf.Tensor of tf.bool with the shape of
(batch_size x sequence_length).
Raises:
ValueError: if the shapes of the input tensors are not consistent.
Returns:
L2 loss between the predicted action values and true action values.
"""
targets_shape = targets.get_shape().as_list()
true_targets_shape = true_targets.get_shape().as_list()
if len(targets_shape) != 3 or len(true_targets_shape) != 3:
raise ValueError('invalid shape for targets or true_targets_shape')
if np.any(targets_shape != true_targets_shape):
      raise ValueError('the shape of targets and true_targets are not the same '
'{} != {}'.format(targets_shape, true_targets_shape))
if weights is not None:
# Filtering targets and true_targets using weights.
weights_shape = weights.get_shape().as_list()
if np.any(weights_shape != targets_shape[0:2]):
        raise ValueError('The first two elements of weights shape should match '
'target. {} != {}'.format(weights_shape,
targets_shape))
true_targets = tf.boolean_mask(true_targets, weights)
targets = tf.boolean_mask(targets, weights)
return tf.losses.mean_squared_error(tf.reshape(targets, [-1]),
tf.reshape(true_targets, [-1]))
def reward(self, obs, done, info):
raise NotImplementedError('reward is not implemented for this task')
| 59,475 | 38.440318 | 80 | py |
models | models-master/research/cognitive_planning/train_supervised_active_vision.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
# pyformat: disable
"""Train and eval for supervised navigation training.
For training:
python train_supervised_active_vision.py \
--mode='train' \
--logdir=$logdir/checkin_log_det/ \
--modality_types='det' \
--batch_size=8 \
--train_iters=200000 \
--lstm_cell_size=2048 \
--policy_fc_size=2048 \
--sequence_length=20 \
--max_eval_episode_length=100 \
--test_iters=194 \
--gin_config=envs/configs/active_vision_config.gin \
--gin_params='ActiveVisionDatasetEnv.dataset_root="$datadir"' \
--logtostderr
For testing:
  python train_supervised_active_vision.py \
--mode='eval' \
--logdir=$logdir/checkin_log_det/ \
--modality_types='det' \
--batch_size=8 \
--train_iters=200000 \
--lstm_cell_size=2048 \
--policy_fc_size=2048 \
--sequence_length=20 \
--max_eval_episode_length=100 \
--test_iters=194 \
--gin_config=envs/configs/active_vision_config.gin \
--gin_params='ActiveVisionDatasetEnv.dataset_root="$datadir"' \
--logtostderr
"""
import collections
import os
import time
from absl import app
from absl import flags
from absl import logging
import networkx as nx
import numpy as np
import tensorflow as tf
import gin
import embedders
import policies
import tasks
from envs import active_vision_dataset_env
from envs import task_env
slim = tf.contrib.slim
flags.DEFINE_string('logdir', '',
'Path to a directory to write summaries and checkpoints')
# Parameters controlling the training setup. In general one would not need to
# modify them.
flags.DEFINE_string('master', 'local',
'BNS name of the TensorFlow master, or local.')
flags.DEFINE_integer('task_id', 0,
'Task id of the replica running the training.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of tasks in the ps job. If 0 no ps job is used.')
flags.DEFINE_integer('decay_steps', 1000,
'Number of steps for exponential decay.')
flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.')
flags.DEFINE_integer('batch_size', 8, 'Batch size.')
flags.DEFINE_integer('sequence_length', 20, 'sequence length')
flags.DEFINE_integer('train_iters', 200000, 'number of training iterations.')
flags.DEFINE_integer('save_summaries_secs', 300,
'number of seconds between saving summaries')
flags.DEFINE_integer('save_interval_secs', 300,
                     'number of seconds between saving variables')
flags.DEFINE_integer('log_every_n_steps', 20, 'number of steps between logging')
flags.DEFINE_string('modality_types', '',
'modality names in _ separated format')
flags.DEFINE_string('conv_window_sizes', '8_4_3',
                    'conv window sizes separated by _')
flags.DEFINE_string('conv_strides', '4_2_1', '')
flags.DEFINE_string('conv_channels', '8_16_16', '')
flags.DEFINE_integer('embedding_fc_size', 128,
'size of embedding for each modality')
flags.DEFINE_integer('obs_resolution', 64,
'resolution of the input observations')
flags.DEFINE_integer('lstm_cell_size', 2048, 'size of lstm cell size')
flags.DEFINE_integer('policy_fc_size', 2048,
'size of fully connected layers for policy part')
flags.DEFINE_float('weight_decay', 0.0002, 'weight decay')
flags.DEFINE_integer('goal_category_count', 5, 'number of goal categories')
flags.DEFINE_integer('action_size', 7, 'number of possible actions')
flags.DEFINE_integer('max_eval_episode_length', 100,
'maximum sequence length for evaluation.')
flags.DEFINE_enum('mode', 'train', ['train', 'eval'],
'indicates whether it is in training or evaluation')
flags.DEFINE_integer('test_iters', 194,
'number of iterations that the eval needs to be run')
flags.DEFINE_multi_string('gin_config', [],
'List of paths to a gin config files for the env.')
flags.DEFINE_multi_string('gin_params', [],
'Newline separated list of Gin parameter bindings.')
flags.DEFINE_string(
    'resnet50_path', './resnet_v2_50_checkpoint/resnet_v2_50.ckpt',
    'path to resnet50 checkpoint')
flags.DEFINE_bool('freeze_resnet_weights', True, '')
flags.DEFINE_string(
'eval_init_points_file_name', '',
    'Name of the file that contains the initial locations and '
    'worlds for each evaluation point')
FLAGS = flags.FLAGS
TRAIN_WORLDS = [
'Home_001_1', 'Home_001_2', 'Home_002_1', 'Home_003_1', 'Home_003_2',
'Home_004_1', 'Home_004_2', 'Home_005_1', 'Home_005_2', 'Home_006_1',
'Home_010_1'
]
TEST_WORLDS = ['Home_011_1', 'Home_013_1', 'Home_016_1']
def create_modality_types():
"""Parses the modality_types and returns a list of task_env.ModalityType."""
if not FLAGS.modality_types:
raise ValueError('there needs to be at least one modality type')
modality_types = FLAGS.modality_types.split('_')
for x in modality_types:
if x not in ['image', 'sseg', 'det', 'depth']:
raise ValueError('invalid modality type: {}'.format(x))
conversion_dict = {
'image': task_env.ModalityTypes.IMAGE,
'sseg': task_env.ModalityTypes.SEMANTIC_SEGMENTATION,
'depth': task_env.ModalityTypes.DEPTH,
'det': task_env.ModalityTypes.OBJECT_DETECTION,
}
return [conversion_dict[k] for k in modality_types]
def create_task_io_config(
modality_types,
goal_category_count,
action_size,
sequence_length,
):
"""Generates task io config."""
shape_prefix = [sequence_length, FLAGS.obs_resolution, FLAGS.obs_resolution]
shapes = {
task_env.ModalityTypes.IMAGE: [sequence_length, 224, 224, 3],
task_env.ModalityTypes.DEPTH: shape_prefix + [
2,
],
task_env.ModalityTypes.SEMANTIC_SEGMENTATION: shape_prefix + [
1,
],
task_env.ModalityTypes.OBJECT_DETECTION: shape_prefix + [
90,
]
}
types = {k: tf.float32 for k in shapes}
types[task_env.ModalityTypes.IMAGE] = tf.uint8
inputs = collections.OrderedDict(
[[mtype, (types[mtype], shapes[mtype])] for mtype in modality_types])
inputs[task_env.ModalityTypes.GOAL] = (tf.float32,
[sequence_length, goal_category_count])
inputs[task_env.ModalityTypes.PREV_ACTION] = (tf.float32, [
sequence_length, action_size + 1
])
  print(inputs)
return tasks.UnrolledTaskIOConfig(
inputs=inputs,
output=(tf.float32, [sequence_length, action_size]),
query=None)
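# Example of the resulting config (values follow the default flags and are only
# illustrative): with --modality_types='det', --obs_resolution=64,
# --sequence_length=20, --goal_category_count=5 and --action_size=7, the inputs
# OrderedDict maps OBJECT_DETECTION to (tf.float32, [20, 64, 64, 90]), GOAL to
# (tf.float32, [20, 5]) and PREV_ACTION to (tf.float32, [20, 8]), and the
# output spec is (tf.float32, [20, 7]).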
def map_to_embedder(modality_type):
"""Maps modality_type to its corresponding embedder."""
if modality_type == task_env.ModalityTypes.PREV_ACTION:
return None
if modality_type == task_env.ModalityTypes.GOAL:
return embedders.IdentityEmbedder()
if modality_type == task_env.ModalityTypes.IMAGE:
return embedders.ResNet50Embedder()
conv_window_sizes = [int(x) for x in FLAGS.conv_window_sizes.split('_')]
conv_channels = [int(x) for x in FLAGS.conv_channels.split('_')]
conv_strides = [int(x) for x in FLAGS.conv_strides.split('_')]
params = tf.contrib.training.HParams(
to_one_hot=modality_type == task_env.ModalityTypes.SEMANTIC_SEGMENTATION,
one_hot_length=10,
conv_sizes=conv_window_sizes,
conv_strides=conv_strides,
conv_channels=conv_channels,
embedding_size=FLAGS.embedding_fc_size,
weight_decay_rate=FLAGS.weight_decay,
)
return embedders.SmallNetworkEmbedder(params)
def create_train_and_init_ops(policy, task):
"""Creates training ops given the arguments.
Args:
policy: the policy for the task.
task: the task instance.
Returns:
train_op: the op that needs to be runned at each step.
summaries_op: the summary op that is executed.
init_fn: the op that initializes the variables if there is no previous
checkpoint. If Resnet50 is not used in the model it is None, otherwise
it reads the weights from FLAGS.resnet50_path and sets the init_fn
to the op that initializes the ResNet50 with the pre-trained weights.
"""
assert isinstance(task, tasks.GotoStaticXNoExplorationTask)
assert isinstance(policy, policies.Policy)
inputs, _, gt_outputs, masks = task.tf_episode_batch(FLAGS.batch_size)
outputs, _ = policy.build(inputs, None)
loss = task.target_loss(gt_outputs, outputs, masks)
init_fn = None
# If resnet is added to the graph, init_fn should initialize resnet weights
# if there is no previous checkpoint.
variables_assign_dict = {}
vars_list = []
for v in slim.get_model_variables():
if v.name.find('resnet') >= 0:
if not FLAGS.freeze_resnet_weights:
vars_list.append(v)
variables_assign_dict[v.name[v.name.find('resnet'):-2]] = v
else:
vars_list.append(v)
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
FLAGS.learning_rate,
global_step,
decay_steps=FLAGS.decay_steps,
decay_rate=0.98,
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = slim.learning.create_train_op(
loss,
optimizer,
global_step=global_step,
variables_to_train=vars_list,
)
if variables_assign_dict:
init_fn = slim.assign_from_checkpoint_fn(
FLAGS.resnet50_path,
variables_assign_dict,
ignore_missing_vars=False)
scalar_summaries = {}
scalar_summaries['LR'] = learning_rate
scalar_summaries['loss'] = loss
for name, summary in scalar_summaries.iteritems():
tf.summary.scalar(name, summary)
return train_op, init_fn
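# Note (summary of the variable handling above): when --freeze_resnet_weights
# is set, the resnet variables are still restored from FLAGS.resnet50_path via
# init_fn, but they are excluded from vars_list and therefore never updated by
# the optimizer.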
def create_eval_ops(policy, config, possible_targets):
"""Creates the necessary ops for evaluation."""
inputs_feed = collections.OrderedDict([[
mtype,
tf.placeholder(config.inputs[mtype].type,
[1] + config.inputs[mtype].shape)
] for mtype in config.inputs])
inputs_feed[task_env.ModalityTypes.PREV_ACTION] = tf.placeholder(
tf.float32, [1, 1] + [
config.output.shape[-1] + 1,
])
prev_state_feed = [
tf.placeholder(
tf.float32, [1, FLAGS.lstm_cell_size], name='prev_state_{}'.format(i))
for i in range(2)
]
policy_outputs = policy.build(inputs_feed, prev_state_feed)
summary_feed = {}
for c in possible_targets + ['mean']:
summary_feed[c] = tf.placeholder(
tf.float32, [], name='eval_in_range_{}_input'.format(c))
tf.summary.scalar('eval_in_range_{}'.format(c), summary_feed[c])
return inputs_feed, prev_state_feed, policy_outputs, (tf.summary.merge_all(),
summary_feed)
def unroll_policy_for_eval(
sess,
env,
inputs_feed,
prev_state_feed,
policy_outputs,
number_of_steps,
output_folder,
):
"""unrolls the policy for testing.
Args:
sess: tf.Session
env: The environment.
inputs_feed: dictionary of placeholder for the input modalities.
prev_state_feed: placeholder for the input to the prev_state of the model.
policy_outputs: tensor that contains outputs of the policy.
number_of_steps: maximum number of unrolling steps.
output_folder: output_folder where the function writes a dictionary of
detailed information about the path. The dictionary keys are 'states' and
'distance'. The value for 'states' is the list of states that the agent
goes along the path. The value for 'distance' contains the length of
shortest path to the goal at each step.
Returns:
states: list of states along the path.
distance: list of distances along the path.
"""
prev_state = [
np.zeros((1, FLAGS.lstm_cell_size), dtype=np.float32) for _ in range(2)
]
prev_action = np.zeros((1, 1, FLAGS.action_size + 1), dtype=np.float32)
obs = env.reset()
distances_to_goal = []
states = []
unique_id = '{}_{}'.format(env.cur_image_id(), env.goal_string)
for _ in range(number_of_steps):
distances_to_goal.append(
np.min([
len(
nx.shortest_path(env.graph, env.pose_to_vertex(env.state()),
env.pose_to_vertex(target_view)))
for target_view in env.targets()
]))
states.append(env.state())
feed_dict = {inputs_feed[mtype]: [[obs[mtype]]] for mtype in inputs_feed}
feed_dict[prev_state_feed[0]] = prev_state[0]
feed_dict[prev_state_feed[1]] = prev_state[1]
action_values, prev_state = sess.run(policy_outputs, feed_dict=feed_dict)
chosen_action = np.argmax(action_values[0])
obs, _, done, info = env.step(np.int32(chosen_action))
prev_action[0][0][chosen_action] = 1.
prev_action[0][0][-1] = float(info['success'])
    # If the agent chooses the stop action or the number of steps exceeds
    # env._episode_length.
if done:
break
  # logging.info('distance = %d, id = %s, #steps = %d', distances_to_goal[-1],
  #              unique_id, len(states))
output_path = os.path.join(output_folder, unique_id + '.npy')
with tf.gfile.Open(output_path, 'w') as f:
    print('saving path information to {}'.format(output_path))
np.save(f, {'states': states, 'distance': distances_to_goal})
return states, distances_to_goal
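# The saved .npy file can later be inspected with something like the following
# (illustrative snippet, mirroring the np.save call above):
#   with tf.gfile.Open(output_path) as f:
#     data = np.load(f).item()
#   print(data['states'], data['distance'])
# where 'distance' holds the shortest-path length to the goal at every step of
# the unrolled episode.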
def init(sequence_length, eval_init_points_file_name, worlds):
"""Initializes the common operations between train and test."""
modality_types = create_modality_types()
logging.info('modality types: %r', modality_types)
# negative reward_goal_range prevents the env from terminating early when the
# agent is close to the goal. The policy should keep the agent until the end
  # of the 100 steps, either by choosing the stop action or by oscillating
  # around the target.
env = active_vision_dataset_env.ActiveVisionDatasetEnv(
modality_types=modality_types +
[task_env.ModalityTypes.GOAL, task_env.ModalityTypes.PREV_ACTION],
reward_goal_range=-1,
eval_init_points_file_name=eval_init_points_file_name,
worlds=worlds,
output_size=FLAGS.obs_resolution,
)
config = create_task_io_config(
modality_types=modality_types,
goal_category_count=FLAGS.goal_category_count,
action_size=FLAGS.action_size,
sequence_length=sequence_length,
)
task = tasks.GotoStaticXNoExplorationTask(env=env, config=config)
embedders_dict = {mtype: map_to_embedder(mtype) for mtype in config.inputs}
policy_params = tf.contrib.training.HParams(
lstm_state_size=FLAGS.lstm_cell_size,
fc_channels=FLAGS.policy_fc_size,
weight_decay=FLAGS.weight_decay,
target_embedding_size=FLAGS.embedding_fc_size,
)
policy = policies.LSTMPolicy(
modality_names=config.inputs.keys(),
embedders_dict=embedders_dict,
action_size=FLAGS.action_size,
params=policy_params,
max_episode_length=sequence_length)
return env, config, task, policy
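# Example usage (mirroring train() below; test() instead uses a sequence
# length of 1 and TEST_WORLDS):
#   env, config, task, policy = init(FLAGS.sequence_length, None, TRAIN_WORLDS)
# which builds the environment, the task io config, the supervised task and
# the LSTM policy that are then wired together for training or evaluation.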
def test():
"""Contains all the operations for testing policies."""
env, config, _, policy = init(1, 'all_init_configs', TEST_WORLDS)
inputs_feed, prev_state_feed, policy_outputs, summary_op = create_eval_ops(
policy, config, env.possible_targets)
sv = tf.train.Supervisor(logdir=FLAGS.logdir)
prev_checkpoint = None
with sv.managed_session(
start_standard_services=False,
config=tf.ConfigProto(allow_soft_placement=True)) as sess:
while not sv.should_stop():
while True:
new_checkpoint = tf.train.latest_checkpoint(FLAGS.logdir)
        print('new_checkpoint {}'.format(new_checkpoint))
if not new_checkpoint:
time.sleep(1)
continue
if prev_checkpoint is None:
prev_checkpoint = new_checkpoint
break
if prev_checkpoint != new_checkpoint:
prev_checkpoint = new_checkpoint
break
else: # if prev_checkpoint == new_checkpoint, we have to wait more.
time.sleep(1)
checkpoint_step = int(new_checkpoint[new_checkpoint.rfind('-') + 1:])
sv.saver.restore(sess, new_checkpoint)
      print('--------------------')
      print('evaluating checkpoint {}'.format(new_checkpoint))
folder_path = os.path.join(FLAGS.logdir, 'evals', str(checkpoint_step))
if not tf.gfile.Exists(folder_path):
tf.gfile.MakeDirs(folder_path)
eval_stats = {c: [] for c in env.possible_targets}
for test_iter in range(FLAGS.test_iters):
        print('evaluating {} of {}'.format(test_iter, FLAGS.test_iters))
_, distance_to_goal = unroll_policy_for_eval(
sess,
env,
inputs_feed,
prev_state_feed,
policy_outputs,
FLAGS.max_eval_episode_length,
folder_path,
)
        print('goal = {}'.format(env.goal_string))
eval_stats[env.goal_string].append(float(distance_to_goal[-1] <= 7))
eval_stats = {k: np.mean(v) for k, v in eval_stats.iteritems()}
eval_stats['mean'] = np.mean(eval_stats.values())
      print(eval_stats)
feed_dict = {summary_op[1][c]: eval_stats[c] for c in eval_stats}
summary_str = sess.run(summary_op[0], feed_dict=feed_dict)
writer = sv.summary_writer
writer.add_summary(summary_str, checkpoint_step)
writer.flush()
def train():
_, _, task, policy = init(FLAGS.sequence_length, None, TRAIN_WORLDS)
print(FLAGS.save_summaries_secs)
print(FLAGS.save_interval_secs)
print(FLAGS.logdir)
with tf.device(
tf.train.replica_device_setter(ps_tasks=FLAGS.ps_tasks, merge_devices=True)):
train_op, init_fn = create_train_and_init_ops(policy=policy, task=task)
print(FLAGS.logdir)
slim.learning.train(
train_op=train_op,
init_fn=init_fn,
logdir=FLAGS.logdir,
is_chief=FLAGS.task_id == 0,
number_of_steps=FLAGS.train_iters,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
session_config=tf.ConfigProto(allow_soft_placement=True),
)
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_params)
if FLAGS.mode == 'train':
train()
else:
test()
if __name__ == '__main__':
app.run(main)
| 18,750 | 36.204365 | 86 | py |
models | models-master/research/cognitive_planning/embedders.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for different embedders for modalities."""
import abc
import numpy as np
import tensorflow as tf
import preprocessing
from tensorflow.contrib.slim.nets import resnet_v2
slim = tf.contrib.slim
class Embedder(object):
"""Represents the embedder for different modalities.
Modalities can be semantic segmentation, depth channel, object detection and
  so on, each of which may require a specific embedder.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def build(self, observation):
"""Builds the model to embed the observation modality.
Args:
observation: tensor that contains the raw observation from modality.
Returns:
Embedding tensor for the given observation tensor.
"""
raise NotImplementedError(
'Needs to be implemented as part of Embedder Interface')
class DetectionBoxEmbedder(Embedder):
"""Represents the model that encodes the detection boxes from images."""
def __init__(self, rnn_state_size, scope=None):
self._rnn_state_size = rnn_state_size
self._scope = scope
def build(self, observations):
"""Builds the model to embed object detection observations.
Args:
observations: a tuple of (dets, det_num).
dets is a tensor of BxTxLxE that has the detection boxes in all the
images of the batch. B is the batch size, T is the maximum length of
episode, L is the maximum number of detections per image in the batch
and E is the size of each detection embedding.
det_num is a tensor of BxT that contains the number of detected boxes
each image of each sequence in the batch.
Returns:
For each image in the batch, returns the accumulative embedding of all the
detection boxes in that image.
"""
with tf.variable_scope(self._scope, default_name=''):
shape = observations[0].shape
dets = tf.reshape(observations[0], [-1, shape[-2], shape[-1]])
det_num = tf.reshape(observations[1], [-1])
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self._rnn_state_size)
batch_size = tf.shape(dets)[0]
lstm_outputs, _ = tf.nn.dynamic_rnn(
cell=lstm_cell,
inputs=dets,
sequence_length=det_num,
initial_state=lstm_cell.zero_state(batch_size, dtype=tf.float32),
dtype=tf.float32)
# Gathering the last state of each sequence in the batch.
batch_range = tf.range(batch_size)
indices = tf.stack([batch_range, det_num - 1], axis=1)
last_lstm_outputs = tf.gather_nd(lstm_outputs, indices)
last_lstm_outputs = tf.reshape(last_lstm_outputs,
[-1, shape[1], self._rnn_state_size])
return last_lstm_outputs
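  # Shape walk-through (illustrative): with observations[0] of shape
  # (B, T, L, E) and observations[1] of shape (B, T), the LSTM is run over
  # B * T sequences of at most L detections each, and the gathered last states
  # are reshaped back to (B, T, rnn_state_size) as the per-image embedding.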
class ResNet(Embedder):
"""Residual net embedder for image data."""
def __init__(self, params, *args, **kwargs):
super(ResNet, self).__init__(*args, **kwargs)
self._params = params
self._extra_train_ops = []
def build(self, images):
shape = images.get_shape().as_list()
if len(shape) == 5:
images = tf.reshape(images,
[shape[0] * shape[1], shape[2], shape[3], shape[4]])
embedding = self._build_model(images)
if len(shape) == 5:
embedding = tf.reshape(embedding, [shape[0], shape[1], -1])
return embedding
@property
def extra_train_ops(self):
return self._extra_train_ops
def _build_model(self, images):
"""Builds the model."""
# Convert images to floats and normalize them.
images = tf.to_float(images)
bs = images.get_shape().as_list()[0]
images = [
tf.image.per_image_standardization(tf.squeeze(i))
for i in tf.split(images, bs)
]
images = tf.concat([tf.expand_dims(i, axis=0) for i in images], axis=0)
with tf.variable_scope('init'):
x = self._conv('init_conv', images, 3, 3, 16, self._stride_arr(1))
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
if self._params.use_bottleneck:
res_func = self._bottleneck_residual
filters = [16, 64, 128, 256]
else:
res_func = self._residual
filters = [16, 16, 32, 128]
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
for i in xrange(1, self._params.num_residual_units):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
for i in xrange(1, self._params.num_residual_units):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
for i in xrange(1, self._params.num_residual_units):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, self._params.relu_leakiness)
with tf.variable_scope('pool_logit'):
x = self._global_avg_pooling(x)
return x
def _stride_arr(self, stride):
return [1, stride, stride, 1]
def _batch_norm(self, name, x):
"""batch norm implementation."""
with tf.variable_scope(name):
params_shape = [x.shape[-1]]
beta = tf.get_variable(
'beta',
params_shape,
tf.float32,
initializer=tf.constant_initializer(0.0, tf.float32))
gamma = tf.get_variable(
'gamma',
params_shape,
tf.float32,
initializer=tf.constant_initializer(1.0, tf.float32))
if self._params.is_train:
mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments')
moving_mean = tf.get_variable(
'moving_mean',
params_shape,
tf.float32,
initializer=tf.constant_initializer(0.0, tf.float32),
trainable=False)
moving_variance = tf.get_variable(
'moving_variance',
params_shape,
tf.float32,
initializer=tf.constant_initializer(1.0, tf.float32),
trainable=False)
self._extra_train_ops.append(
tf.assign_moving_average(moving_mean, mean, 0.9))
self._extra_train_ops.append(
tf.assign_moving_average(moving_variance, variance, 0.9))
else:
mean = tf.get_variable(
'moving_mean',
params_shape,
tf.float32,
initializer=tf.constant_initializer(0.0, tf.float32),
trainable=False)
variance = tf.get_variable(
'moving_variance',
params_shape,
tf.float32,
initializer=tf.constant_initializer(1.0, tf.float32),
trainable=False)
tf.summary.histogram(mean.op.name, mean)
tf.summary.histogram(variance.op.name, variance)
      # epsilon used to be 1e-5. Maybe 0.001 solves NaN problem in deeper net.
y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
y.set_shape(x.shape)
return y
def _residual(self,
x,
in_filter,
out_filter,
stride,
activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, self._params.relu_leakiness)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, self._params.relu_leakiness)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, self._params.relu_leakiness)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0], [(out_filter - in_filter) // 2,
(out_filter - in_filter) // 2]])
x += orig_x
return x
def _bottleneck_residual(self,
x,
in_filter,
out_filter,
stride,
activate_before_residual=False):
"""A residual convolutional layer with a bottleneck.
The layer is a composite of three convolutional layers with a ReLU non-
    linearity and batch normalization after each linear convolution. The depth
    of the second and third layers is out_filter / 4 (hence it is a bottleneck).
Args:
x: a float 4 rank Tensor representing the input to the layer.
in_filter: a python integer representing depth of the input.
out_filter: a python integer representing depth of the output.
stride: a python integer denoting the stride of the layer applied before
the first convolution.
activate_before_residual: a python boolean. If True, then a ReLU is
applied as a first operation on the input x before everything else.
Returns:
A 4 rank Tensor with batch_size = batch size of input, width and height =
width / stride and height / stride of the input and depth = out_filter.
"""
if activate_before_residual:
with tf.variable_scope('common_bn_relu'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, self._params.relu_leakiness)
orig_x = x
else:
with tf.variable_scope('residual_bn_relu'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, self._params.relu_leakiness)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 1, in_filter, out_filter / 4, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, self._params.relu_leakiness)
x = self._conv('conv2', x, 3, out_filter / 4, out_filter / 4,
[1, 1, 1, 1])
with tf.variable_scope('sub3'):
x = self._batch_norm('bn3', x)
x = self._relu(x, self._params.relu_leakiness)
x = self._conv('conv3', x, 1, out_filter / 4, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride)
x += orig_x
return x
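  # For example (hypothetical sizes): with in_filter=64, out_filter=256 and a
  # stride of [1, 2, 2, 1], the three convolutions map the channels as
  # 64 -> 64 -> 64 -> 256, with the spatial downsampling and the 1x1 shortcut
  # projection both applied with the given stride.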
def _decay(self):
costs = []
for var in tf.trainable_variables():
if var.op.name.find(r'DW') > 0:
costs.append(tf.nn.l2_loss(var))
    return tf.multiply(self._params.weight_decay_rate, tf.add_n(costs))
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32,
initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
x = tf.reshape(x, [self._params.batch_size, -1])
w = tf.get_variable(
'DW', [x.get_shape()[1], out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable(
'biases', [out_dim], initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pooling(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class MLPEmbedder(Embedder):
"""Embedder of vectorial data.
The net is a multi-layer perceptron, with ReLU nonlinearities in all layers
except the last one.
"""
def __init__(self, layers, *args, **kwargs):
"""Constructs MLPEmbedder.
Args:
layers: a list of python integers representing layer sizes.
*args: arguments for super constructor.
**kwargs: keyed arguments for super constructor.
"""
super(MLPEmbedder, self).__init__(*args, **kwargs)
self._layers = layers
def build(self, features):
shape = features.get_shape().as_list()
if len(shape) == 3:
features = tf.reshape(features, [shape[0] * shape[1], shape[2]])
x = features
for i, dim in enumerate(self._layers):
with tf.variable_scope('layer_%i' % i):
x = self._fully_connected(x, dim)
if i < len(self._layers) - 1:
x = self._relu(x)
if len(shape) == 3:
x = tf.reshape(x, shape[:-1] + [self._layers[-1]])
return x
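  # Example (hypothetical sizes): MLPEmbedder(layers=[256, 64]) applied to a
  # (batch, time, feature) tensor flattens it to (batch * time, feature), runs
  # two fully connected layers with a ReLU only after the first one, and
  # reshapes the result back to (batch, time, 64).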
def _fully_connected(self, x, out_dim):
w = tf.get_variable(
'DW', [x.get_shape()[1], out_dim],
initializer=tf.variance_scaling_initializer(distribution='uniform'))
b = tf.get_variable(
'biases', [out_dim], initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _relu(self, x, leakiness=0.0):
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
class SmallNetworkEmbedder(Embedder):
"""Embedder for image like observations.
The network is comprised of multiple conv layers and a fully connected layer
at the end. The number of conv layers and the parameters are configured from
params.
"""
def __init__(self, params, *args, **kwargs):
"""Constructs the small network.
Args:
params: params should be tf.hparams type. params need to have a list of
conv_sizes, conv_strides, conv_channels. The length of these lists
should be equal to each other and to the number of conv layers in the
network. Plus, it also needs to have boolean variable named to_one_hot
which indicates whether the input should be converted to one hot or not.
The size of the fully connected layer is specified by
params.embedding_size.
*args: The rest of the parameters.
      **kwargs: the rest of the parameters.
Raises:
ValueError: If the length of params.conv_strides, params.conv_sizes, and
params.conv_channels are not equal.
"""
super(SmallNetworkEmbedder, self).__init__(*args, **kwargs)
self._params = params
if len(self._params.conv_sizes) != len(self._params.conv_strides):
raise ValueError(
'Conv sizes and strides should have the same length: {} != {}'.format(
len(self._params.conv_sizes), len(self._params.conv_strides)))
if len(self._params.conv_sizes) != len(self._params.conv_channels):
raise ValueError(
'Conv sizes and channels should have the same length: {} != {}'.
format(len(self._params.conv_sizes), len(self._params.conv_channels)))
  def build(self, images):
    """Builds the embedder with the given specification.
Args:
images: a tensor that contains the input images which has the shape of
NxTxHxWxC where N is the batch size, T is the maximum length of the
sequence, H and W are the height and width of the images and C is the
number of channels.
Returns:
A tensor that is the embedding of the images.
"""
shape = images.get_shape().as_list()
images = tf.reshape(images,
[shape[0] * shape[1], shape[2], shape[3], shape[4]])
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(self._params.weight_decay_rate),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME'):
# convert the image to one hot if needed.
if self._params.to_one_hot:
net = tf.one_hot(
tf.squeeze(tf.to_int32(images), axis=[-1]),
self._params.one_hot_length)
else:
net = images
p = self._params
# Adding conv layers with the specified configurations.
for conv_id, kernel_stride_channel in enumerate(
zip(p.conv_sizes, p.conv_strides, p.conv_channels)):
kernel_size, stride, channels = kernel_stride_channel
net = slim.conv2d(
net,
channels, [kernel_size, kernel_size],
stride,
scope='conv_{}'.format(conv_id + 1))
net = slim.flatten(net)
net = slim.fully_connected(net, self._params.embedding_size, scope='fc')
output = tf.reshape(net, [shape[0], shape[1], -1])
return output
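  # A sketch of the expected params (field names follow the constructor checks
  # above and map_to_embedder() in train_supervised_active_vision.py):
  #   params = tf.contrib.training.HParams(
  #       to_one_hot=False, one_hot_length=10,
  #       conv_sizes=[8, 4, 3], conv_strides=[4, 2, 1],
  #       conv_channels=[8, 16, 16],
  #       embedding_size=128, weight_decay_rate=0.0002)
  #   embedder = SmallNetworkEmbedder(params)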
class ResNet50Embedder(Embedder):
"""Uses ResNet50 to embed input images."""
def build(self, images):
"""Builds a ResNet50 embedder for the input images.
It assumes that the range of the pixel values in the images tensor is
[0,255] and should be castable to tf.uint8.
Args:
images: a tensor that contains the input images which has the shape of
NxTxHxWx3 where N is the batch size, T is the maximum length of the
sequence, H and W are the height and width of the images and C is the
number of channels.
Returns:
The embedding of the input image with the shape of NxTxL where L is the
embedding size of the output.
Raises:
ValueError: if the shape of the input does not agree with the expected
shape explained in the Args section.
"""
shape = images.get_shape().as_list()
if len(shape) != 5:
raise ValueError(
'The tensor shape should have 5 elements, {} is provided'.format(
len(shape)))
if shape[4] != 3:
raise ValueError('Three channels are expected for the input image')
images = tf.cast(images, tf.uint8)
images = tf.reshape(images,
[shape[0] * shape[1], shape[2], shape[3], shape[4]])
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
def preprocess_fn(x):
x = tf.expand_dims(x, 0)
x = tf.image.resize_bilinear(x, [299, 299],
align_corners=False)
return(tf.squeeze(x, [0]))
images = tf.map_fn(preprocess_fn, images, dtype=tf.float32)
net, _ = resnet_v2.resnet_v2_50(
images, is_training=False, global_pool=True)
output = tf.reshape(net, [shape[0], shape[1], -1])
return output
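  # Note: the inputs are resized to 299x299 before being fed to resnet_v2_50,
  # and the globally pooled features are reshaped to (N, T, feature_size); for
  # the standard resnet_v2_50 this feature_size is 2048 (stated here for
  # orientation only, the code does not enforce it).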
class IdentityEmbedder(Embedder):
"""This embedder just returns the input as the output.
Used for modalitites that the embedding of the modality is the same as the
modality itself. For example, it can be used for one_hot goal.
"""
def build(self, images):
return images
| 19,820 | 35.169708 | 80 | py |
models | models-master/research/cognitive_planning/envs/task_env.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An interface representing the topology of an environment.
Allows for high level planning and high level instruction generation for
navigation tasks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import enum
import gym
import gin
@gin.config.constants_from_enum
class ModalityTypes(enum.Enum):
"""Types of the modalities that can be used."""
IMAGE = 0
SEMANTIC_SEGMENTATION = 1
OBJECT_DETECTION = 2
DEPTH = 3
GOAL = 4
PREV_ACTION = 5
PREV_SUCCESS = 6
STATE = 7
DISTANCE = 8
CAN_STEP = 9
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
class TaskEnvInterface(object):
"""Interface for an environment topology.
An environment can implement this interface if there is a topological graph
underlying this environment. All paths below are defined as paths in this
graph. Using path_to_actions function one can translate a topological path
to a geometric path in the environment.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def random_step_sequence(self, min_len=None, max_len=None):
"""Generates a random sequence of actions and executes them.
Args:
min_len: integer, minimum length of a step sequence.
max_len: integer, if it is set to non-None, the method returns only
the first n steps of a random sequence. If the environment is
computationally heavy this argument should be set to speed up the
training and avoid unnecessary computations by the environment.
Returns:
A path, defined as a list of vertex indices, a list of actions, a list of
states, and a list of step() return tuples.
"""
raise NotImplementedError(
'Needs implementation as part of EnvTopology interface.')
@abc.abstractmethod
def targets(self):
"""A list of targets in the environment.
Returns:
A list of target locations.
"""
raise NotImplementedError(
'Needs implementation as part of EnvTopology interface.')
@abc.abstractproperty
def state(self):
"""Returns the position for the current location of agent."""
raise NotImplementedError(
'Needs implementation as part of EnvTopology interface.')
@abc.abstractproperty
def graph(self):
"""Returns a graph representing the environment topology.
Returns:
nx.Graph object.
"""
raise NotImplementedError(
'Needs implementation as part of EnvTopology interface.')
@abc.abstractmethod
def vertex_to_pose(self, vertex_index):
"""Maps a vertex index to a pose in the environment.
Pose of the camera can be represented by (x,y,theta) or (x,y,z,theta).
Args:
vertex_index: index of a vertex in the topology graph.
Returns:
A np.array of floats of size 3 or 4 representing the pose of the vertex.
"""
raise NotImplementedError(
'Needs implementation as part of EnvTopology interface.')
@abc.abstractmethod
def pose_to_vertex(self, pose):
"""Maps a coordinate in the maze to the closest vertex in topology graph.
Args:
pose: np.array of floats containing a the pose of the view.
Returns:
index of a vertex.
"""
raise NotImplementedError(
'Needs implementation as part of EnvTopology interface.')
@abc.abstractmethod
def observation(self, state):
"""Returns observation at location xy and orientation theta.
Args:
state: a np.array of floats containing coordinates of a location and
orientation.
Returns:
Dictionary of observations in the case of multiple observations.
The keys are the modality names and the values are the np.array of float
of observations for corresponding modality.
"""
raise NotImplementedError(
'Needs implementation as part of EnvTopology interface.')
def action(self, init_state, final_state):
"""Computes the transition action from state1 to state2.
    If the environment is discrete and the views are not adjacent in the
    environment, i.e. it is not possible to move from the first view to the
    second view with one action, it should return None. In the continuous
    case, it will be the continuous difference of the first and second views.
Args:
init_state: numpy array, the initial view of the agent.
final_state: numpy array, the final view of the agent.
"""
raise NotImplementedError(
'Needs implementation as part of EnvTopology interface.')
@gin.configurable
class TaskEnv(gym.Env, TaskEnvInterface):
"""An environment which uses a Task to compute reward.
  The environment implements a gym interface, as well as EnvTopology. The
former makes sure it can be used within an RL training, while the latter
makes sure it can be used by a Task.
This environment requires _step_no_reward to be implemented, which steps
through it but does not return reward. Instead, the reward calculation is
delegated to the Task object, which in return can access needed properties
of the environment. These properties are exposed via the EnvTopology
interface.
"""
def __init__(self, task=None):
self._task = task
def set_task(self, task):
self._task = task
@abc.abstractmethod
def _step_no_reward(self, action):
"""Same as _step without returning reward.
Args:
action: see _step.
Returns:
state, done, info as defined in _step.
"""
raise NotImplementedError('Implement step.')
@abc.abstractmethod
def _reset_env(self):
"""Resets the environment. Returns initial observation."""
raise NotImplementedError('Implement _reset. Must call super!')
def step(self, action):
obs, done, info = self._step_no_reward(action)
reward = 0.0
if self._task is not None:
obs, reward, done, info = self._task.reward(obs, done, info)
return obs, reward, done, info
def reset(self):
"""Resets the environment. Gym API."""
obs = self._reset_env()
if self._task is not None:
self._task.reset(obs)
return obs
| 6,846 | 30.26484 | 80 | py |
models | models-master/research/cognitive_planning/envs/active_vision_dataset_env.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gym environment for the ActiveVision Dataset.
The dataset is captured with a robot moving around and taking picture in
multiple directions. The actions are moving in four directions, and rotate
clockwise or counter clockwise. The observations are the output of vision
pipelines such as object detectors. The goal is to find objects of interest
in each environment. For more details, refer:
http://cs.unc.edu/~ammirato/active_vision_dataset_website/.
"""
import tensorflow as tf
import collections
import copy
import json
import os
from StringIO import StringIO
import time
import gym
from gym.envs.registration import register
import gym.spaces
import networkx as nx
import numpy as np
import scipy.io as sio
from absl import logging
import gin
import cv2
import label_map_util
import visualization_utils as vis_util
from envs import task_env
register(
id='active-vision-env-v0',
entry_point=
'cognitive_planning.envs.active_vision_dataset_env:ActiveVisionDatasetEnv', # pylint: disable=line-too-long
)
_MAX_DEPTH_VALUE = 12102
SUPPORTED_ACTIONS = [
'right', 'rotate_cw', 'rotate_ccw', 'forward', 'left', 'backward', 'stop'
]
SUPPORTED_MODALITIES = [
task_env.ModalityTypes.SEMANTIC_SEGMENTATION,
task_env.ModalityTypes.DEPTH,
task_env.ModalityTypes.OBJECT_DETECTION,
task_env.ModalityTypes.IMAGE,
task_env.ModalityTypes.GOAL,
task_env.ModalityTypes.PREV_ACTION,
task_env.ModalityTypes.DISTANCE,
]
# Data structure for storing the information related to the graph of the world.
_Graph = collections.namedtuple('_Graph', [
'graph', 'id_to_index', 'index_to_id', 'target_indexes', 'distance_to_goal'
])
def _init_category_index(label_map_path):
"""Creates category index from class indexes to name of the classes.
Args:
label_map_path: path to the mapping.
Returns:
A map for mapping int keys to string categories.
"""
label_map = label_map_util.load_labelmap(label_map_path)
  num_classes = max(x.id for x in label_map.item)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
def _draw_detections(image_np, detections, category_index):
"""Draws detections on to the image.
Args:
image_np: Image in the form of uint8 numpy array.
detections: a dictionary that contains the detection outputs.
category_index: contains the mapping between indexes and the category names.
Returns:
    Does not return anything but draws the boxes on the image in place.
"""
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
detections['detection_boxes'],
detections['detection_classes'],
detections['detection_scores'],
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=1000,
min_score_thresh=.0,
agnostic_mode=False)
def generate_detection_image(detections,
image_size,
category_map,
num_classes,
is_binary=True):
"""Generates one_hot vector of the image using the detection boxes.
Args:
detections: 2D object detections from the image. It's a dictionary that
contains detection_boxes, detection_classes, and detection_scores with
dimensions of nx4, nx1, nx1 where n is the number of detections.
image_size: The resolution of the output image.
category_map: dictionary that maps label names to index.
num_classes: Number of classes.
is_binary: If true, it sets the corresponding channels to 0 and 1.
Otherwise, sets the score in the corresponding channel.
Returns:
Returns image_size x image_size x num_classes image for the detection boxes.
"""
res = np.zeros((image_size, image_size, num_classes), dtype=np.float32)
boxes = detections['detection_boxes']
labels = detections['detection_classes']
scores = detections['detection_scores']
for box, label, score in zip(boxes, labels, scores):
transformed_boxes = [int(round(t)) for t in box * image_size]
y1, x1, y2, x2 = transformed_boxes
# Detector returns fixed number of detections. Boxes with area of zero
# are equivalent of boxes that don't correspond to any detection box.
# So, we need to skip the boxes with area 0.
if (y2 - y1) * (x2 - x1) == 0:
continue
assert category_map[label] < num_classes, 'label = {}'.format(label)
value = score
if is_binary:
value = 1
res[y1:y2, x1:x2, category_map[label]] = value
return res
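# Illustrative example (made-up numbers): a detection box [0.25, 0.5, 0.5, 1.0]
# with label l and score 0.9 on a 64x64 output marks
# res[16:32, 32:64, category_map[l]] with 1 when is_binary is True, and with
# 0.9 otherwise; boxes whose rounded area is zero are skipped.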
def _get_detection_path(root, detection_folder_name, world):
return os.path.join(root, 'Meta', detection_folder_name, world + '.npy')
def _get_image_folder(root, world):
return os.path.join(root, world, 'jpg_rgb')
def _get_json_path(root, world):
return os.path.join(root, world, 'annotations.json')
def _get_image_path(root, world, image_id):
return os.path.join(_get_image_folder(root, world), image_id + '.jpg')
def _get_image_list(path, worlds):
"""Builds a dictionary for all the worlds.
Args:
path: the path to the dataset on cns.
worlds: list of the worlds.
Returns:
dictionary where the key is the world names and the values
are the image_ids of that world.
"""
world_id_dict = {}
for loc in worlds:
files = [t[:-4] for t in tf.gfile.ListDir(_get_image_folder(path, loc))]
world_id_dict[loc] = files
return world_id_dict
def read_all_poses(dataset_root, world):
"""Reads all the poses for each world.
Args:
dataset_root: the path to the root of the dataset.
world: string, name of the world.
Returns:
Dictionary of poses for all the images in each world. The key is the image
    id of each view and the values are tuples of (x, z, R, scale), where x and
    z are the first and third coordinates of translation, R is the 3x3 rotation
    matrix, and scale is a float scalar by which x and z need to be multiplied
    in order to get the real-world coordinates.
Raises:
    ValueError: if the number of images does not match the number of poses
      read.
"""
path = os.path.join(dataset_root, world, 'image_structs.mat')
with tf.gfile.Open(path) as f:
data = sio.loadmat(f)
xyz = data['image_structs']['world_pos']
image_names = data['image_structs']['image_name'][0]
rot = data['image_structs']['R'][0]
scale = data['scale'][0][0]
n = xyz.shape[1]
x = [xyz[0][i][0][0] for i in range(n)]
z = [xyz[0][i][2][0] for i in range(n)]
names = [name[0][:-4] for name in image_names]
if len(names) != len(x):
raise ValueError('number of image names are not equal to the number of '
'poses {} != {}'.format(len(names), len(x)))
output = {}
for i in range(n):
if rot[i].shape[0] != 0:
assert rot[i].shape[0] == 3
assert rot[i].shape[1] == 3
output[names[i]] = (x[i], z[i], rot[i], scale)
else:
output[names[i]] = (x[i], z[i], None, scale)
return output
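# Illustrative use of read_all_poses (the dataset path, world name, and image
# id below are assumed placeholders):
#   poses = read_all_poses('/path/to/ActiveVisionDataset', 'Home_001_1')
#   x, z, rot, scale = poses['some_image_id']
#   world_x, world_z = x * scale, z * scale  # Real-world coordinates.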
def read_cached_data(should_load_images, dataset_root, segmentation_file_name,
targets_file_name, output_size):
"""Reads all the necessary cached data.
Args:
should_load_images: whether to load the images or not.
dataset_root: path to the root of the dataset.
segmentation_file_name: The name of the file that contains semantic
segmentation annotations.
    targets_file_name: Name of the file that contains targets annotated for
each world.
output_size: Size of the output images. This is used for pre-processing the
loaded images.
Returns:
Dictionary of all the cached data.
"""
load_start = time.time()
result_data = {}
annotated_target_path = os.path.join(dataset_root, 'Meta',
targets_file_name + '.npy')
logging.info('loading targets: %s', annotated_target_path)
with tf.gfile.Open(annotated_target_path) as f:
result_data['targets'] = np.load(f).item()
depth_image_path = os.path.join(dataset_root, 'Meta/depth_imgs.npy')
logging.info('loading depth: %s', depth_image_path)
with tf.gfile.Open(depth_image_path) as f:
depth_data = np.load(f).item()
logging.info('processing depth')
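  # Normalize depth by _MAX_DEPTH_VALUE, resize to the output resolution, and
  # stack a binary validity mask (depth > 0) as a second channel.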
for home_id in depth_data:
images = depth_data[home_id]
for image_id in images:
depth = images[image_id]
depth = cv2.resize(
depth / _MAX_DEPTH_VALUE, (output_size, output_size),
interpolation=cv2.INTER_NEAREST)
depth_mask = (depth > 0).astype(np.float32)
depth = np.dstack((depth, depth_mask))
images[image_id] = depth
result_data[task_env.ModalityTypes.DEPTH] = depth_data
sseg_path = os.path.join(dataset_root, 'Meta',
segmentation_file_name + '.npy')
logging.info('loading sseg: %s', sseg_path)
with tf.gfile.Open(sseg_path) as f:
sseg_data = np.load(f).item()
logging.info('processing sseg')
for home_id in sseg_data:
images = sseg_data[home_id]
for image_id in images:
sseg = images[image_id]
sseg = cv2.resize(
sseg, (output_size, output_size), interpolation=cv2.INTER_NEAREST)
images[image_id] = np.expand_dims(sseg, axis=-1).astype(np.float32)
result_data[task_env.ModalityTypes.SEMANTIC_SEGMENTATION] = sseg_data
if should_load_images:
image_path = os.path.join(dataset_root, 'Meta/imgs.npy')
logging.info('loading imgs: %s', image_path)
with tf.gfile.Open(image_path) as f:
image_data = np.load(f).item()
result_data[task_env.ModalityTypes.IMAGE] = image_data
with tf.gfile.Open(os.path.join(dataset_root, 'Meta/world_id_dict.npy')) as f:
result_data['world_id_dict'] = np.load(f).item()
logging.info('logging done in %f seconds', time.time() - load_start)
return result_data
@gin.configurable
def get_spec_dtype_map():
return {gym.spaces.Box: np.float32}
@gin.configurable
class ActiveVisionDatasetEnv(task_env.TaskEnv):
"""Simulates the environment from ActiveVisionDataset."""
cached_data = None
def __init__(
self,
episode_length,
modality_types,
confidence_threshold,
output_size,
worlds,
targets,
compute_distance,
should_draw_detections,
dataset_root,
labelmap_path,
reward_collision,
reward_goal_range,
num_detection_classes,
segmentation_file_name,
detection_folder_name,
actions,
targets_file_name,
eval_init_points_file_name=None,
shaped_reward=False,
):
"""Instantiates the environment for ActiveVision Dataset.
Args:
episode_length: the length of each episode.
modality_types: a list of the strings where each entry indicates the name
of the modalities to be loaded. Valid entries are "sseg", "det",
"depth", "image", "distance", and "prev_action". "distance" should be
used for computing metrics in tf agents.
confidence_threshold: Consider detections more than confidence_threshold
for potential targets.
output_size: Resolution of the output image.
worlds: List of the name of the worlds.
targets: List of the target names. Each entry is a string label of the
target category (e.g. 'fridge', 'microwave', so on).
compute_distance: If True, outputs the distance of the view to the goal.
should_draw_detections (bool): If True, the image returned for the
      observation will contain the bounding boxes.
dataset_root: the path to the root folder of the dataset.
labelmap_path: path to the dictionary that converts label strings to
indexes.
reward_collision: the reward the agents get after hitting an obstacle.
It should be a non-positive number.
reward_goal_range: the number of steps from goal, such that the agent is
considered to have reached the goal. If the agent's distance is less
        than the specified goal range, the episode also finishes by setting
done = True.
num_detection_classes: number of classes that detector outputs.
segmentation_file_name: the name of the file that contains the semantic
information. The file should be in the dataset_root/Meta/ folder.
detection_folder_name: Name of the folder that contains the detections
for each world. The folder should be under dataset_root/Meta/ folder.
actions: The list of the action names. Valid entries are listed in
SUPPORTED_ACTIONS.
targets_file_name: the name of the file that contains the annotated
targets. The file should be in the dataset_root/Meta/Folder
eval_init_points_file_name: The name of the file that contains the initial
points for evaluating the performance of the agent. If set to None,
episodes start at random locations. Should be only set for evaluation.
shaped_reward: Whether to add delta goal distance to the reward each step.
Raises:
      ValueError: If one of the targets is not available in the annotated
targets or the modality names are not from the domain specified above.
ValueError: If one of the actions is not in SUPPORTED_ACTIONS.
ValueError: If the reward_collision is a positive number.
ValueError: If there is no action other than stop provided.
"""
if reward_collision > 0:
raise ValueError('"reward" for collision should be non positive')
if reward_goal_range < 0:
      logging.warning('environment does not terminate the episode even when '
                      'the agent gets close to the goal')
if not modality_types:
raise ValueError('modality names can not be empty')
for name in modality_types:
if name not in SUPPORTED_MODALITIES:
raise ValueError('invalid modality type: {}'.format(name))
actions_other_than_stop_found = False
for a in actions:
if a != 'stop':
actions_other_than_stop_found = True
if a not in SUPPORTED_ACTIONS:
        raise ValueError('invalid action: {}'.format(a))
if not actions_other_than_stop_found:
raise ValueError('environment needs to have actions other than stop.')
super(ActiveVisionDatasetEnv, self).__init__()
self._episode_length = episode_length
self._modality_types = set(modality_types)
self._confidence_threshold = confidence_threshold
self._output_size = output_size
self._dataset_root = dataset_root
self._worlds = worlds
self._targets = targets
self._all_graph = {}
for world in self._worlds:
with tf.gfile.Open(_get_json_path(self._dataset_root, world), 'r') as f:
file_content = f.read()
file_content = file_content.replace('.jpg', '')
io = StringIO(file_content)
self._all_graph[world] = json.load(io)
self._cur_world = ''
self._cur_image_id = ''
self._cur_graph = None # Loaded by _update_graph
self._steps_taken = 0
self._last_action_success = True
self._category_index = _init_category_index(labelmap_path)
self._category_map = dict(
[(c, i) for i, c in enumerate(self._category_index)])
self._detection_cache = {}
if not ActiveVisionDatasetEnv.cached_data:
ActiveVisionDatasetEnv.cached_data = read_cached_data(
True, self._dataset_root, segmentation_file_name, targets_file_name,
self._output_size)
cached_data = ActiveVisionDatasetEnv.cached_data
self._world_id_dict = cached_data['world_id_dict']
self._depth_images = cached_data[task_env.ModalityTypes.DEPTH]
self._semantic_segmentations = cached_data[
task_env.ModalityTypes.SEMANTIC_SEGMENTATION]
self._annotated_targets = cached_data['targets']
self._cached_imgs = cached_data[task_env.ModalityTypes.IMAGE]
self._graph_cache = {}
self._compute_distance = compute_distance
self._should_draw_detections = should_draw_detections
self._reward_collision = reward_collision
self._reward_goal_range = reward_goal_range
self._num_detection_classes = num_detection_classes
self._actions = actions
self._detection_folder_name = detection_folder_name
self._shaped_reward = shaped_reward
self._eval_init_points = None
if eval_init_points_file_name is not None:
self._eval_init_index = 0
init_points_path = os.path.join(self._dataset_root, 'Meta',
eval_init_points_file_name + '.npy')
with tf.gfile.Open(init_points_path) as points_file:
data = np.load(points_file).item()
self._eval_init_points = []
for world in self._worlds:
for goal in self._targets:
if world in self._annotated_targets[goal]:
for image_id in data[world]:
self._eval_init_points.append((world, image_id[0], goal))
logging.info('loaded %d eval init points', len(self._eval_init_points))
self.action_space = gym.spaces.Discrete(len(self._actions))
obs_shapes = {}
if task_env.ModalityTypes.SEMANTIC_SEGMENTATION in self._modality_types:
obs_shapes[task_env.ModalityTypes.SEMANTIC_SEGMENTATION] = gym.spaces.Box(
low=0, high=255, shape=(self._output_size, self._output_size, 1))
if task_env.ModalityTypes.OBJECT_DETECTION in self._modality_types:
obs_shapes[task_env.ModalityTypes.OBJECT_DETECTION] = gym.spaces.Box(
low=0,
high=255,
shape=(self._output_size, self._output_size,
self._num_detection_classes))
if task_env.ModalityTypes.DEPTH in self._modality_types:
obs_shapes[task_env.ModalityTypes.DEPTH] = gym.spaces.Box(
low=0,
high=_MAX_DEPTH_VALUE,
shape=(self._output_size, self._output_size, 2))
if task_env.ModalityTypes.IMAGE in self._modality_types:
obs_shapes[task_env.ModalityTypes.IMAGE] = gym.spaces.Box(
low=0, high=255, shape=(self._output_size, self._output_size, 3))
if task_env.ModalityTypes.GOAL in self._modality_types:
obs_shapes[task_env.ModalityTypes.GOAL] = gym.spaces.Box(
low=0, high=1., shape=(len(self._targets),))
if task_env.ModalityTypes.PREV_ACTION in self._modality_types:
obs_shapes[task_env.ModalityTypes.PREV_ACTION] = gym.spaces.Box(
low=0, high=1., shape=(len(self._actions) + 1,))
if task_env.ModalityTypes.DISTANCE in self._modality_types:
obs_shapes[task_env.ModalityTypes.DISTANCE] = gym.spaces.Box(
low=0, high=255, shape=(1,))
self.observation_space = gym.spaces.Dict(obs_shapes)
self._prev_action = np.zeros((len(self._actions) + 1), dtype=np.float32)
# Loading all the poses.
all_poses = {}
for world in self._worlds:
all_poses[world] = read_all_poses(self._dataset_root, world)
self._cached_poses = all_poses
self._vertex_to_pose = {}
self._pose_to_vertex = {}
@property
def actions(self):
"""Returns list of actions for the env."""
return self._actions
def _next_image(self, image_id, action):
"""Given the action, returns the name of the image that agent ends up in.
Args:
image_id: The image id of the current view.
action: valid actions are ['right', 'rotate_cw', 'rotate_ccw',
'forward', 'left']. Each rotation is 30 degrees.
Returns:
The image name for the next location of the agent. If the action results
in collision or it is not possible for the agent to execute that action,
returns empty string.
"""
assert action in self._actions, 'invalid action : {}'.format(action)
assert self._cur_world in self._all_graph, 'invalid world {}'.format(
self._cur_world)
assert image_id in self._all_graph[
self._cur_world], 'image_id {} is not in {}'.format(
image_id, self._cur_world)
return self._all_graph[self._cur_world][image_id][action]
def _largest_detection_for_image(self, image_id, detections_dict):
"""Assigns area of the largest box for the view with given image id.
Args:
image_id: Image id of the view.
detections_dict: Detections for the view.
"""
for cls, box, score in zip(detections_dict['detection_classes'],
detections_dict['detection_boxes'],
detections_dict['detection_scores']):
if cls not in self._targets:
continue
if score < self._confidence_threshold:
continue
ymin, xmin, ymax, xmax = box
area = (ymax - ymin) * (xmax - xmin)
if abs(area) < 1e-5:
continue
if image_id not in self._detection_area:
self._detection_area[image_id] = area
else:
self._detection_area[image_id] = max(self._detection_area[image_id],
area)
def _compute_goal_indexes(self):
"""Computes the goal indexes for the environment.
Returns:
The indexes of the goals that are closest to target categories. A vertex
      is a goal vertex if the desired objects are detected in the image and the
      target categories are not seen by moving forward from that vertex.
"""
for image_id in self._world_id_dict[self._cur_world]:
detections_dict = self._detection_table[image_id]
self._largest_detection_for_image(image_id, detections_dict)
goal_indexes = []
for image_id in self._world_id_dict[self._cur_world]:
if image_id not in self._detection_area:
continue
      # Skip views whose largest detection box is not large enough.
if self._detection_area[image_id] < 0.01:
continue
ok = True
next_image_id = self._next_image(image_id, 'forward')
if next_image_id:
if next_image_id in self._detection_area:
ok = False
if ok:
goal_indexes.append(self._cur_graph.id_to_index[image_id])
return goal_indexes
def to_image_id(self, vid):
"""Converts vertex id to the image id.
Args:
vid: vertex id of the view.
Returns:
image id of the input vertex id.
"""
return self._cur_graph.index_to_id[vid]
def to_vertex(self, image_id):
return self._cur_graph.id_to_index[image_id]
def observation(self, view_pose):
"""Returns the observation at the given the vertex.
Args:
view_pose: pose of the view of interest.
Returns:
Observation at the given view point.
Raises:
ValueError: if the given view pose is not similar to any of the poses in
the current world.
"""
vertex = self.pose_to_vertex(view_pose)
if vertex is None:
      raise ValueError('The given pose is not close enough to any of the poses'
' in the environment.')
image_id = self._cur_graph.index_to_id[vertex]
output = collections.OrderedDict()
if task_env.ModalityTypes.SEMANTIC_SEGMENTATION in self._modality_types:
output[task_env.ModalityTypes.
SEMANTIC_SEGMENTATION] = self._semantic_segmentations[
self._cur_world][image_id]
detection = None
need_det = (
task_env.ModalityTypes.OBJECT_DETECTION in self._modality_types or
(task_env.ModalityTypes.IMAGE in self._modality_types and
self._should_draw_detections))
if need_det:
detection = self._detection_table[image_id]
detection_image = generate_detection_image(
detection,
self._output_size,
self._category_map,
num_classes=self._num_detection_classes)
if task_env.ModalityTypes.OBJECT_DETECTION in self._modality_types:
output[task_env.ModalityTypes.OBJECT_DETECTION] = detection_image
if task_env.ModalityTypes.DEPTH in self._modality_types:
output[task_env.ModalityTypes.DEPTH] = self._depth_images[
self._cur_world][image_id]
if task_env.ModalityTypes.IMAGE in self._modality_types:
output_img = self._cached_imgs[self._cur_world][image_id]
if self._should_draw_detections:
output_img = output_img.copy()
_draw_detections(output_img, detection, self._category_index)
output[task_env.ModalityTypes.IMAGE] = output_img
if task_env.ModalityTypes.GOAL in self._modality_types:
goal = np.zeros((len(self._targets),), dtype=np.float32)
goal[self._targets.index(self._cur_goal)] = 1.
output[task_env.ModalityTypes.GOAL] = goal
if task_env.ModalityTypes.PREV_ACTION in self._modality_types:
output[task_env.ModalityTypes.PREV_ACTION] = self._prev_action
if task_env.ModalityTypes.DISTANCE in self._modality_types:
output[task_env.ModalityTypes.DISTANCE] = np.asarray(
[self.gt_value(self._cur_goal, vertex)], dtype=np.float32)
return output
def _step_no_reward(self, action):
"""Performs a step in the environment with given action.
Args:
action: Action that is used to step in the environment. Action can be
string or integer. If the type is integer then it uses the ith element
from self._actions list. Otherwise, uses the string value as the action.
Returns:
observation, done, info
      observation: dictionary that contains all the observations specified in
modality_types.
observation[task_env.ModalityTypes.OBJECT_DETECTION]: contains the
detection of the current view.
observation[task_env.ModalityTypes.IMAGE]: contains the
image of the current view. Note that if using the images for training,
should_load_images should be set to false.
observation[task_env.ModalityTypes.SEMANTIC_SEGMENTATION]: contains the
semantic segmentation of the current view.
observation[task_env.ModalityTypes.DEPTH]: If selected, returns the
depth map for the current view.
observation[task_env.ModalityTypes.PREV_ACTION]: If selected, returns
a numpy of (action_size + 1,). The first action_size elements indicate
the action and the last element indicates whether the previous action
was successful or not.
done: True after episode_length steps have been taken, False otherwise.
info: Empty dictionary.
Raises:
ValueError: for invalid actions.
"""
# Primarily used for gym interface.
if not isinstance(action, str):
if not self.action_space.contains(action):
        raise ValueError('Not a valid action: {}'.format(action))
action = self._actions[action]
if action not in self._actions:
      raise ValueError('Not a valid action: {}'.format(action))
action_index = self._actions.index(action)
if action == 'stop':
next_image_id = self._cur_image_id
done = True
success = True
else:
next_image_id = self._next_image(self._cur_image_id, action)
self._steps_taken += 1
done = False
success = True
if not next_image_id:
success = False
else:
self._cur_image_id = next_image_id
if self._steps_taken >= self._episode_length:
done = True
cur_vertex = self._cur_graph.id_to_index[self._cur_image_id]
observation = self.observation(self.vertex_to_pose(cur_vertex))
# Concatenation of one-hot prev action + a binary number for success of
# previous actions.
self._prev_action = np.zeros((len(self._actions) + 1,), dtype=np.float32)
self._prev_action[action_index] = 1.
self._prev_action[-1] = float(success)
distance_to_goal = self.gt_value(self._cur_goal, cur_vertex)
if success:
if distance_to_goal <= self._reward_goal_range:
done = True
return observation, done, {'success': success}
@property
def graph(self):
return self._cur_graph.graph
def state(self):
return self.vertex_to_pose(self.to_vertex(self._cur_image_id))
def gt_value(self, goal, v):
"""Computes the distance to the goal from vertex v.
Args:
goal: name of the goal.
v: vertex id.
Returns:
      Minimum number of steps to the given goal.
"""
assert goal in self._cur_graph.distance_to_goal, 'goal: {}'.format(goal)
assert v in self._cur_graph.distance_to_goal[goal]
res = self._cur_graph.distance_to_goal[goal][v]
return res
def _update_graph(self):
"""Creates the graph for each environment and updates the _cur_graph."""
if self._cur_world not in self._graph_cache:
graph = nx.DiGraph()
id_to_index = {}
index_to_id = {}
image_list = self._world_id_dict[self._cur_world]
for i, image_id in enumerate(image_list):
id_to_index[image_id] = i
index_to_id[i] = image_id
graph.add_node(i)
for image_id in image_list:
for action in self._actions:
if action == 'stop':
continue
next_image = self._all_graph[self._cur_world][image_id][action]
if next_image:
graph.add_edge(
id_to_index[image_id], id_to_index[next_image], action=action)
target_indexes = {}
number_of_nodes_without_targets = graph.number_of_nodes()
distance_to_goal = {}
for goal in self._targets:
if self._cur_world not in self._annotated_targets[goal]:
continue
goal_indexes = [
id_to_index[i]
for i in self._annotated_targets[goal][self._cur_world]
if i
]
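        # Add a "super source" vertex for this goal and connect it (both ways)
        # to every annotated goal view; the distance from any vertex v to the
        # goal is then len(shortest_path(v, super_source)) - 2.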
super_source_index = graph.number_of_nodes()
target_indexes[goal] = super_source_index
graph.add_node(super_source_index)
index_to_id[super_source_index] = goal
id_to_index[goal] = super_source_index
for v in goal_indexes:
graph.add_edge(v, super_source_index, action='stop')
graph.add_edge(super_source_index, v, action='stop')
distance_to_goal[goal] = {}
for v in range(number_of_nodes_without_targets):
distance_to_goal[goal][v] = len(
nx.shortest_path(graph, v, super_source_index)) - 2
self._graph_cache[self._cur_world] = _Graph(
graph, id_to_index, index_to_id, target_indexes, distance_to_goal)
self._cur_graph = self._graph_cache[self._cur_world]
def reset_for_eval(self, new_world, new_goal, new_image_id):
"""Resets to the given goal and image_id."""
    return self._reset_env(
        new_world=new_world, new_goal=new_goal, new_image_id=new_image_id)
def get_init_config(self, path):
"""Exposes the initial state of the agent for the given path.
Args:
      path: the sequence of vertexes along which the agent moves.
Returns:
image_id of the first view, world, and the goal.
"""
return self._cur_graph.index_to_id[path[0]], self._cur_world, self._cur_goal
def _reset_env(
self,
new_world=None,
new_goal=None,
new_image_id=None,
):
"""Resets the agent in a random world and random id.
Args:
new_world: If not None, sets the new world to new_world.
new_goal: If not None, sets the new goal to new_goal.
new_image_id: If not None, sets the first image id to new_image_id.
Returns:
observation: dictionary of the observations. Content of the observation
is similar to that of the step function.
Raises:
ValueError: if it can't find a world and annotated goal.
"""
self._steps_taken = 0
# The first prev_action is special all zero vector + success=1.
self._prev_action = np.zeros((len(self._actions) + 1,), dtype=np.float32)
self._prev_action[len(self._actions)] = 1.
if self._eval_init_points is not None:
if self._eval_init_index >= len(self._eval_init_points):
self._eval_init_index = 0
a = self._eval_init_points[self._eval_init_index]
self._cur_world, self._cur_image_id, self._cur_goal = a
self._eval_init_index += 1
elif not new_world:
attempts = 100
found = False
while attempts >= 0:
attempts -= 1
self._cur_goal = np.random.choice(self._targets)
available_worlds = list(
set(self._annotated_targets[self._cur_goal].keys()).intersection(
set(self._worlds)))
if available_worlds:
found = True
break
if not found:
raise ValueError('could not find a world that has a target annotated')
self._cur_world = np.random.choice(available_worlds)
else:
self._cur_world = new_world
self._cur_goal = new_goal
if new_world not in self._annotated_targets[new_goal]:
return None
self._cur_goal_index = self._targets.index(self._cur_goal)
if new_image_id:
self._cur_image_id = new_image_id
else:
self._cur_image_id = np.random.choice(
self._world_id_dict[self._cur_world])
if self._cur_world not in self._detection_cache:
with tf.gfile.Open(
_get_detection_path(self._dataset_root, self._detection_folder_name,
self._cur_world)) as f:
# Each file contains a dictionary with image ids as keys and detection
# dicts as values.
self._detection_cache[self._cur_world] = np.load(f).item()
self._detection_table = self._detection_cache[self._cur_world]
self._detection_area = {}
self._update_graph()
if self._cur_world not in self._vertex_to_pose:
      # Add a fake pose for the super node of each target category.
self._vertex_to_pose[self._cur_world] = {
index: (-index,) for index in self._cur_graph.target_indexes.values()
}
      # Calling vertex_to_pose for each vertex results in filling out the
# dictionaries that contain pose related data.
for image_id in self._world_id_dict[self._cur_world]:
self.vertex_to_pose(self.to_vertex(image_id))
# Filling out pose_to_vertex from vertex_to_pose.
self._pose_to_vertex[self._cur_world] = {
tuple(v): k
          for k, v in self._vertex_to_pose[self._cur_world].items()
}
cur_vertex = self._cur_graph.id_to_index[self._cur_image_id]
observation = self.observation(self.vertex_to_pose(cur_vertex))
return observation
def cur_vertex(self):
return self._cur_graph.id_to_index[self._cur_image_id]
def cur_image_id(self):
return self._cur_image_id
def path_to_goal(self, image_id=None):
"""Returns the path from image_id to the self._cur_goal.
Args:
image_id: If set to None, computes the path from the current view.
Otherwise, sets the current view to the given image_id.
Returns:
The path to the goal.
Raises:
Exception if there's no path from the view to the goal.
"""
if image_id is None:
image_id = self._cur_image_id
super_source = self._cur_graph.target_indexes[self._cur_goal]
try:
path = nx.shortest_path(self._cur_graph.graph,
self._cur_graph.id_to_index[image_id],
super_source)
except:
      print('path not found, world = {}, image_id = {}'.format(
          self._cur_world, self._cur_image_id))
raise
return path[:-1]
def targets(self):
return [self.vertex_to_pose(self._cur_graph.target_indexes[self._cur_goal])]
def vertex_to_pose(self, v):
"""Returns pose of the view for a given vertex.
Args:
v: integer, vertex index.
Returns:
      (x, z, dir_x, dir_z) where x and z are the translation and dir_x, dir_z
      are a vector giving direction of the view.
"""
if v in self._vertex_to_pose[self._cur_world]:
return np.copy(self._vertex_to_pose[self._cur_world][v])
x, z, rot, scale = self._cached_poses[self._cur_world][self.to_image_id(
v)]
if rot is None: # if rotation is not provided for the given vertex.
self._vertex_to_pose[self._cur_world][v] = np.asarray(
[x * scale, z * scale, v])
return np.copy(self._vertex_to_pose[self._cur_world][v])
# Multiply rotation matrix by [0,0,1] to get a vector of length 1 in the
# direction of the ray.
direction = np.zeros((3, 1), dtype=np.float32)
direction[2][0] = 1
direction = np.matmul(np.transpose(rot), direction)
direction = [direction[0][0], direction[2][0]]
self._vertex_to_pose[self._cur_world][v] = np.asarray(
[x * scale, z * scale, direction[0], direction[1]])
return np.copy(self._vertex_to_pose[self._cur_world][v])
def pose_to_vertex(self, pose):
"""Returns the vertex id for the given pose."""
if tuple(pose) not in self._pose_to_vertex[self._cur_world]:
raise ValueError(
'The given pose is not present in the dictionary: {}'.format(
tuple(pose)))
return self._pose_to_vertex[self._cur_world][tuple(pose)]
def check_scene_graph(self, world, goal):
"""Checks the connectivity of the scene graph.
    Goes over all the views and computes the shortest path to the goal. If it
    crashes, the graph is not connected. Otherwise, the env graph is fine.
Args:
world: the string name of the world.
goal: the string label for the goal.
Returns:
Nothing.
"""
obs = self._reset_env(new_world=world, new_goal=goal)
if not obs:
      print('{} is not available in {}'.format(goal, world))
return True
for image_id in self._world_id_dict[self._cur_world]:
      print('check image_id = {}'.format(image_id))
self._cur_image_id = image_id
path = self.path_to_goal()
actions = []
for i in range(len(path) - 2):
actions.append(self.action(path[i], path[i + 1]))
actions.append('stop')
@property
def goal_one_hot(self):
res = np.zeros((len(self._targets),), dtype=np.float32)
res[self._cur_goal_index] = 1.
return res
@property
def goal_index(self):
return self._cur_goal_index
@property
def goal_string(self):
return self._cur_goal
@property
def worlds(self):
return self._worlds
@property
def possible_targets(self):
return self._targets
def action(self, from_pose, to_pose):
"""Returns the action that takes source vertex to destination vertex.
Args:
from_pose: pose of the source.
to_pose: pose of the destination.
Returns:
Returns the index of the action.
Raises:
      ValueError: If it is not possible to go from the first vertex to the
        second vertex with one action.
"""
from_index = self.pose_to_vertex(from_pose)
to_index = self.pose_to_vertex(to_pose)
if to_index not in self.graph[from_index]:
from_image_id = self.to_image_id(from_index)
to_image_id = self.to_image_id(to_index)
raise ValueError('{},{} is not connected to {},{}'.format(
from_index, from_image_id, to_index, to_image_id))
return self._actions.index(self.graph[from_index][to_index]['action'])
def random_step_sequence(self, min_len=None, max_len=None):
"""Generates random step sequence that takes agent to the goal.
Args:
min_len: integer, minimum length of a step sequence. Not yet implemented.
      max_len: integer, must be set; the maximum number of observations and the
        maximum path length.
Returns:
Tuple of (path, actions, states, step_outputs).
path: a random path from a random starting point and random environment.
actions: actions of the returned path.
states: viewpoints of all the states in between.
step_outputs: list of step() return tuples.
Raises:
      ValueError: if max_len is None or less than 1, or if min_len is set (not
        yet implemented).
"""
if max_len is None:
raise ValueError('max_len can not be set as None')
if max_len < 1:
      raise ValueError('max_len must be greater than or equal to 1.')
if min_len is not None:
raise ValueError('min_len is not yet implemented.')
path = []
actions = []
states = []
step_outputs = []
obs = self.reset()
last_obs_tuple = [obs, 0, False, {}]
for _ in xrange(max_len):
action = np.random.choice(self._actions)
# We don't want to sample stop action because stop does not add new
# information.
while action == 'stop':
action = np.random.choice(self._actions)
path.append(self.to_vertex(self._cur_image_id))
onehot = np.zeros((len(self._actions),), dtype=np.float32)
onehot[self._actions.index(action)] = 1.
actions.append(onehot)
states.append(self.vertex_to_pose(path[-1]))
step_outputs.append(copy.deepcopy(last_obs_tuple))
last_obs_tuple = self.step(action)
return path, actions, states, step_outputs
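# Illustrative construction of the environment (all argument values below are
# assumptions for the example; in practice the constructor is usually
# configured through gin):
#   env = ActiveVisionDatasetEnv(
#       episode_length=100,
#       modality_types=[task_env.ModalityTypes.IMAGE,
#                       task_env.ModalityTypes.GOAL,
#                       task_env.ModalityTypes.PREV_ACTION],
#       confidence_threshold=0.5,
#       output_size=64,
#       worlds=['Home_001_1'],
#       targets=['fridge'],
#       compute_distance=False,
#       should_draw_detections=False,
#       dataset_root='/path/to/ActiveVisionDataset',
#       labelmap_path='/path/to/label_map.pbtxt',
#       reward_collision=-0.1,
#       reward_goal_range=2,
#       num_detection_classes=90,
#       segmentation_file_name='sseg',
#       detection_folder_name='detections',
#       actions=['right', 'rotate_cw', 'rotate_ccw', 'forward', 'left', 'stop'],
#       targets_file_name='annotated_targets')
#   obs = env.reset()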
| 41,226 | 36.547359 | 112 | py |
models | models-master/research/cognitive_planning/envs/util.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module with utility functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def trajectory_to_deltas(trajectory, state):
"""Computes a sequence of deltas of a state to traverse a trajectory in 2D.
The initial state of the agent contains its pose -- location in 2D and
orientation. When the computed deltas are incrementally added to it, it
traverses the specified trajectory while keeping its orientation parallel to
the trajectory.
Args:
trajectory: a np.array of floats of shape n x 2. The n-th row contains the
n-th point.
state: a 3 element np.array of floats containing agent's location and
orientation in radians.
Returns:
A np.array of floats of size n x 3.
"""
state = np.reshape(state, [-1])
init_xy = state[0:2]
init_theta = state[2]
delta_xy = trajectory - np.concatenate(
[np.reshape(init_xy, [1, 2]), trajectory[:-1, :]], axis=0)
thetas = np.reshape(np.arctan2(delta_xy[:, 1], delta_xy[:, 0]), [-1, 1])
thetas = np.concatenate([np.reshape(init_theta, [1, 1]), thetas], axis=0)
delta_thetas = thetas[1:] - thetas[:-1]
deltas = np.concatenate([delta_xy, delta_thetas], axis=1)
return deltas
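# Illustrative use of trajectory_to_deltas (values are assumed):
#   trajectory = np.array([[1.0, 0.0], [1.0, 1.0]])
#   state = np.array([0.0, 0.0, 0.0])  # x, y, theta (radians).
#   deltas = trajectory_to_deltas(trajectory, state)  # shape (2, 3)
#   # Cumulatively adding deltas to the pose traverses the trajectory while
#   # keeping the orientation parallel to it.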
| 1,957 | 33.964286 | 80 | py |
models | models-master/research/cognitive_planning/envs/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/cognitive_planning/preprocessing/inception_preprocessing.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
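# Illustrative use, mirroring how this helper is applied later in this module
# (the 224x224 target size is an assumption for the example):
#   image = apply_with_random_selector(
#       image,
#       lambda x, method: tf.image.resize_images(x, [224, 224], method),
#       num_cases=4)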
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather then adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
    must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image, height, width, bbox,
fast_mode=True,
scope=None,
add_image_summaries=True):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not effect the label.
Additionally it would create image_summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
    [0, 1], otherwise it is converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
if add_image_summaries:
tf.summary.image('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
if add_image_summaries:
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [height, width], method),
num_cases=num_resize_cases)
if add_image_summaries:
tf.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 1 or 4 ways to do it.
num_distort_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=num_distort_cases)
if add_image_summaries:
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
distorted_image = tf.subtract(distorted_image, 0.5)
distorted_image = tf.multiply(distorted_image, 2.0)
return distorted_image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
    [0, 1], otherwise it is converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def preprocess_image(image, height, width,
is_training=False,
bbox=None,
fast_mode=True,
add_image_summaries=True):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image. If dtype is
    tf.float32 then the range should be [0, 1], otherwise it is converted
to tf.float32 assuming that the range is [0, MAX], where MAX is largest
positive representable number for int(8/16/32) data type (see
`tf.image.convert_image_dtype` for details).
height: integer, image expected height.
width: integer, image expected width.
is_training: Boolean. If true it would transform an image for train,
otherwise it would transform it for evaluation.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
return preprocess_for_train(image, height, width, bbox, fast_mode,
add_image_summaries=add_image_summaries)
else:
return preprocess_for_eval(image, height, width)
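# Illustrative end-to-end use (the file path and 299x299 output size are
# assumptions for the example):
#   raw = tf.read_file('/path/to/image.jpg')
#   image = tf.image.decode_jpeg(raw, channels=3)
#   processed = preprocess_image(image, height=299, width=299,
#                                is_training=False)
#   # `processed` is a float32 tensor of shape [299, 299, 3] scaled to [-1, 1].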
| 14,113 | 43.244514 | 80 | py |