repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses, 1 value) |
---|---|---|---|---|---|---|
models | models-master/research/rebar/logger.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logger for REBAR"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Logger:
def __init__(self):
pass
def log(self, key, value):
pass
def flush(self):
pass
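# Note: the Logger above is intentionally a no-op stub; rebar_train.py calls
# log(key, value) with dict-valued key/value and then flush(). A minimal
# file-backed sketch (hypothetical, not part of the original release) could be:
#
#   class FileLogger(Logger):
#     def __init__(self, path):
#       self._f = open(path, 'a')
#     def log(self, key, value):
#       self._f.write('%s\t%s\n' % (key, value))
#     def flush(self):
#       self._f.flush()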
| 925 | 28.870968 | 80 | py |
models | models-master/research/rebar/config.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
DATA_DIR = 'data'
MNIST_BINARIZED = 'mnist_salakhutdinov_07-19-2017.pkl'
MNIST_FLOAT = 'mnist_train_xs_07-19-2017.npy'
OMNIGLOT = 'omniglot_07-19-2017.mat'
| 976 | 36.576923 | 80 | py |
models | models-master/research/rebar/datasets.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of datasets for REBAR."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import os
import scipy.io
import numpy as np
import cPickle as pickle
import tensorflow as tf
import config
gfile = tf.gfile
def load_data(hparams):
# Load data
if hparams.task in ['sbn', 'sp']:
reader = read_MNIST
elif hparams.task == 'omni':
reader = read_omniglot
x_train, x_valid, x_test = reader(binarize=not hparams.dynamic_b)
return x_train, x_valid, x_test
def read_MNIST(binarize=False):
"""Reads in MNIST images.
Args:
binarize: whether to use the fixed binarization
Returns:
x_train: 50k training images
x_valid: 10k validation images
x_test: 10k test images
"""
with gfile.FastGFile(os.path.join(config.DATA_DIR, config.MNIST_BINARIZED), 'r') as f:
(x_train, _), (x_valid, _), (x_test, _) = pickle.load(f)
if not binarize:
with gfile.FastGFile(os.path.join(config.DATA_DIR, config.MNIST_FLOAT), 'r') as f:
x_train = np.load(f).reshape(-1, 784)
return x_train, x_valid, x_test
def read_omniglot(binarize=False):
"""Reads in Omniglot images.
Args:
binarize: whether to use the fixed binarization
Returns:
x_train: training images
x_valid: validation images
x_test: test images
"""
n_validation=1345
def reshape_data(data):
return data.reshape((-1, 28, 28)).reshape((-1, 28*28), order='fortran')
omni_raw = scipy.io.loadmat(os.path.join(config.DATA_DIR, config.OMNIGLOT))
train_data = reshape_data(omni_raw['data'].T.astype('float32'))
test_data = reshape_data(omni_raw['testdata'].T.astype('float32'))
# Binarize the data with a fixed seed
if binarize:
np.random.seed(5)
train_data = (np.random.rand(*train_data.shape) < train_data).astype(float)
test_data = (np.random.rand(*test_data.shape) < test_data).astype(float)
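# (Illustrative note: each pixel intensity in [0, 1] is treated as a Bernoulli
# probability, so comparing it against a fresh uniform sample yields a {0, 1}
# image; fixing the NumPy seed above makes this binarization reproducible.)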
shuffle_seed = 123
permutation = np.random.RandomState(seed=shuffle_seed).permutation(train_data.shape[0])
train_data = train_data[permutation]
x_train = train_data[:-n_validation]
x_valid = train_data[-n_validation:]
x_test = test_data
return x_train, x_valid, x_test
| 2,890 | 27.623762 | 89 | py |
models | models-master/research/rebar/rebar.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
import numpy as np
try:
from scipy.special import logsumexp  # SciPy >= 0.19
except ImportError:
from scipy.misc import logsumexp  # older SciPy
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import init_ops
import utils as U
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
FLAGS = tf.flags.FLAGS
Q_COLLECTION = "q_collection"
P_COLLECTION = "p_collection"
class SBN(object): # REINFORCE
def __init__(self,
hparams,
activation_func=tf.nn.sigmoid,
mean_xs = None,
eval_mode=False):
self.eval_mode = eval_mode
self.hparams = hparams
self.mean_xs = mean_xs
self.train_bias= -np.log(1./np.clip(mean_xs, 0.001, 0.999)-1.).astype(np.float32)
self.activation_func = activation_func
self.n_samples = tf.placeholder('int32')
self.x = tf.placeholder('float', [None, self.hparams.n_input])
self._x = tf.tile(self.x, [self.n_samples, 1])
self.batch_size = tf.shape(self._x)[0]
self.uniform_samples = dict()
self.uniform_samples_v = dict()
self.prior = tf.Variable(tf.zeros([self.hparams.n_hidden],
dtype=tf.float32),
name='p_prior',
collections=[tf.GraphKeys.GLOBAL_VARIABLES, P_COLLECTION])
self.run_recognition_network = False
self.run_generator_network = False
# Initialize temperature
self.pre_temperature_variable = tf.Variable(
np.log(self.hparams.temperature),
trainable=False,
dtype=tf.float32)
self.temperature_variable = tf.exp(self.pre_temperature_variable)
self.global_step = tf.Variable(0, trainable=False)
self.baseline_loss = []
self.ema = tf.train.ExponentialMovingAverage(decay=0.999)
self.maintain_ema_ops = []
self.optimizer_class = tf.train.AdamOptimizer(
learning_rate=1*self.hparams.learning_rate,
beta2=self.hparams.beta2)
self._generate_randomness()
self._create_network()
def initialize(self, sess):
self.sess = sess
def _create_eta(self, shape=[], collection='CV'):
return 2 * tf.sigmoid(tf.Variable(tf.zeros(shape), trainable=False,
collections=[collection, tf.GraphKeys.GLOBAL_VARIABLES, Q_COLLECTION]))
def _create_baseline(self, n_output=1, n_hidden=100,
is_zero_init=False,
collection='BASELINE'):
# center input
h = self._x
if self.mean_xs is not None:
h -= self.mean_xs
if is_zero_init:
initializer = init_ops.zeros_initializer()
else:
initializer = slim.variance_scaling_initializer()
with slim.arg_scope([slim.fully_connected],
variables_collections=[collection, Q_COLLECTION],
trainable=False,
weights_initializer=initializer):
h = slim.fully_connected(h, n_hidden, activation_fn=tf.nn.tanh)
baseline = slim.fully_connected(h, n_output, activation_fn=None)
if n_output == 1:
baseline = tf.reshape(baseline, [-1]) # very important to reshape
return baseline
def _create_transformation(self, input, n_output, reuse, scope_prefix):
"""Create the deterministic transformation between stochastic layers.
If self.hparams.nonlinear:
2 x tanh layers
Else:
1 x linear layer
"""
if self.hparams.nonlinear:
h = slim.fully_connected(input,
self.hparams.n_hidden,
reuse=reuse,
activation_fn=tf.nn.tanh,
scope='%s_nonlinear_1' % scope_prefix)
h = slim.fully_connected(h,
self.hparams.n_hidden,
reuse=reuse,
activation_fn=tf.nn.tanh,
scope='%s_nonlinear_2' % scope_prefix)
h = slim.fully_connected(h,
n_output,
reuse=reuse,
activation_fn=None,
scope='%s' % scope_prefix)
else:
h = slim.fully_connected(input,
n_output,
reuse=reuse,
activation_fn=None,
scope='%s' % scope_prefix)
return h
def _recognition_network(self, sampler=None, log_likelihood_func=None):
"""x values -> samples from Q and return log Q(h|x)."""
samples = {}
reuse = None if not self.run_recognition_network else True
# Set defaults
if sampler is None:
sampler = self._random_sample
if log_likelihood_func is None:
log_likelihood_func = lambda sample, log_params: (
U.binary_log_likelihood(sample['activation'], log_params))
logQ = []
if self.hparams.task in ['sbn', 'omni']:
# Initialize the edge case
samples[-1] = {'activation': self._x}
if self.mean_xs is not None:
samples[-1]['activation'] -= self.mean_xs # center the input
samples[-1]['activation'] = (samples[-1]['activation'] + 1)/2.0
with slim.arg_scope([slim.fully_connected],
weights_initializer=slim.variance_scaling_initializer(),
variables_collections=[Q_COLLECTION]):
for i in xrange(self.hparams.n_layer):
# Set up the input to the layer
input = 2.0*samples[i-1]['activation'] - 1.0
# Create the conditional distribution (output is the logits)
h = self._create_transformation(input,
n_output=self.hparams.n_hidden,
reuse=reuse,
scope_prefix='q_%d' % i)
samples[i] = sampler(h, self.uniform_samples[i], i)
logQ.append(log_likelihood_func(samples[i], h))
self.run_recognition_network = True
return logQ, samples
elif self.hparams.task == 'sp':
# Initialize the edge case
samples[-1] = {'activation': tf.split(self._x,
num_or_size_splits=2,
axis=1)[0]} # top half of digit
if self.mean_xs is not None:
samples[-1]['activation'] -= np.split(self.mean_xs, 2, 0)[0] # center the input
samples[-1]['activation'] = (samples[-1]['activation'] + 1)/2.0
with slim.arg_scope([slim.fully_connected],
weights_initializer=slim.variance_scaling_initializer(),
variables_collections=[Q_COLLECTION]):
for i in xrange(self.hparams.n_layer):
# Set up the input to the layer
input = 2.0*samples[i-1]['activation'] - 1.0
# Create the conditional distribution (output is the logits)
h = self._create_transformation(input,
n_output=self.hparams.n_hidden,
reuse=reuse,
scope_prefix='q_%d' % i)
samples[i] = sampler(h, self.uniform_samples[i], i)
logQ.append(log_likelihood_func(samples[i], h))
self.run_recognition_network = True
return logQ, samples
def _generator_network(self, samples, logQ, log_likelihood_func=None):
'''Returns learning signal and function.
This is the implementation for SBNs for the ELBO.
Args:
samples: dictionary of sampled latent variables
logQ: list of log q(h_i) terms
log_likelihood_func: function used to compute log probs for the latent
variables
Returns:
learning_signal: the "reward" function
function_term: part of the function that depends on the parameters
and needs to have the gradient taken through
'''
reuse=None if not self.run_generator_network else True
if self.hparams.task in ['sbn', 'omni']:
if log_likelihood_func is None:
log_likelihood_func = lambda sample, log_params: (
U.binary_log_likelihood(sample['activation'], log_params))
logPPrior = log_likelihood_func(
samples[self.hparams.n_layer-1],
tf.expand_dims(self.prior, 0))
with slim.arg_scope([slim.fully_connected],
weights_initializer=slim.variance_scaling_initializer(),
variables_collections=[P_COLLECTION]):
for i in reversed(xrange(self.hparams.n_layer)):
if i == 0:
n_output = self.hparams.n_input
else:
n_output = self.hparams.n_hidden
input = 2.0*samples[i]['activation']-1.0
h = self._create_transformation(input,
n_output,
reuse=reuse,
scope_prefix='p_%d' % i)
if i == 0:
# Assume output is binary
logP = U.binary_log_likelihood(self._x, h + self.train_bias)
else:
logPPrior += log_likelihood_func(samples[i-1], h)
self.run_generator_network = True
return logP + logPPrior - tf.add_n(logQ), logP + logPPrior
elif self.hparams.task == 'sp':
with slim.arg_scope([slim.fully_connected],
weights_initializer=slim.variance_scaling_initializer(),
variables_collections=[P_COLLECTION]):
n_output = int(self.hparams.n_input/2)
i = self.hparams.n_layer - 1 # use the last layer
input = 2.0*samples[i]['activation']-1.0
h = self._create_transformation(input,
n_output,
reuse=reuse,
scope_prefix='p_%d' % i)
# Predict on the lower half of the image
logP = U.binary_log_likelihood(tf.split(self._x,
num_or_size_splits=2,
axis=1)[1],
h + np.split(self.train_bias, 2, 0)[1])
self.run_generator_network = True
return logP, logP
def _create_loss(self):
# Hard loss
logQHard, samples = self._recognition_network()
reinforce_learning_signal, reinforce_model_grad = self._generator_network(samples, logQHard)
logQHard = tf.add_n(logQHard)
# REINFORCE
learning_signal = tf.stop_gradient(U.center(reinforce_learning_signal))
self.optimizerLoss = -(learning_signal*logQHard +
reinforce_model_grad)
self.lHat = list(map(tf.reduce_mean, [
reinforce_learning_signal,
U.rms(learning_signal),
]))
return reinforce_learning_signal
def _reshape(self, t):
return tf.transpose(tf.reshape(t,
[self.n_samples, -1]))
def compute_tensor_variance(self, t):
"""Compute the mean per component variance.
Use a moving average to estimate the required moments.
"""
t_sq = tf.reduce_mean(tf.square(t))
self.maintain_ema_ops.append(self.ema.apply([t, t_sq]))
# mean per component variance
variance_estimator = (self.ema.average(t_sq) -
tf.reduce_mean(
tf.square(self.ema.average(t))))
return variance_estimator
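# (This is the usual identity Var[t] ~= E[t^2] - (E[t])^2 applied per component
# and then averaged, with both moments tracked by the exponential moving
# average; e.g. an EMA second moment of 5.0 and an EMA mean of 2.0 give an
# estimated variance of 5.0 - 4.0 = 1.0.)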
def _create_train_op(self, grads_and_vars, extra_grads_and_vars=[]):
'''
Args:
grads_and_vars: gradients to apply and compute running average variance
extra_grads_and_vars: gradients to apply (not used to compute average variance)
'''
# Variance summaries
first_moment = U.vectorize(grads_and_vars, skip_none=True)
second_moment = tf.square(first_moment)
self.maintain_ema_ops.append(self.ema.apply([first_moment, second_moment]))
# Add baseline losses
if len(self.baseline_loss) > 0:
mean_baseline_loss = tf.reduce_mean(tf.add_n(self.baseline_loss))
extra_grads_and_vars += self.optimizer_class.compute_gradients(
mean_baseline_loss,
var_list=tf.get_collection('BASELINE'))
# Ensure that all required tensors are computed before updates are executed
extra_optimizer = tf.train.AdamOptimizer(
learning_rate=10*self.hparams.learning_rate,
beta2=self.hparams.beta2)
with tf.control_dependencies(
[tf.group(*[g for g, _ in (grads_and_vars + extra_grads_and_vars) if g is not None])]):
# Filter out the P_COLLECTION variables if we're in eval mode
if self.eval_mode:
grads_and_vars = [(g, v) for g, v in grads_and_vars
if v not in tf.get_collection(P_COLLECTION)]
train_op = self.optimizer_class.apply_gradients(grads_and_vars,
global_step=self.global_step)
if len(extra_grads_and_vars) > 0:
extra_train_op = extra_optimizer.apply_gradients(extra_grads_and_vars)
else:
extra_train_op = tf.no_op()
self.optimizer = tf.group(train_op, extra_train_op, *self.maintain_ema_ops)
# per parameter variance
variance_estimator = (self.ema.average(second_moment) -
tf.square(self.ema.average(first_moment)))
self.grad_variance = tf.reduce_mean(variance_estimator)
def _create_network(self):
logF = self._create_loss()
self.optimizerLoss = tf.reduce_mean(self.optimizerLoss)
# Setup optimizer
grads_and_vars = self.optimizer_class.compute_gradients(self.optimizerLoss)
self._create_train_op(grads_and_vars)
# Create IWAE lower bound for evaluation
self.logF = self._reshape(logF)
self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -
tf.log(tf.to_float(self.n_samples)))
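# (Illustrative note: logF is reshaped to [batch, n_samples], so the IWAE bound
# above is log(1/K sum_k exp(logF_k)) per example; e.g. with n_samples=5 it is
# the logSumExp over the 5 samples minus log(5), and with n_samples=1 it
# reduces to the ordinary ELBO.)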
def partial_fit(self, X, n_samples=1):
if hasattr(self, 'grad_variances'):
grad_variance_field_to_return = self.grad_variances
else:
grad_variance_field_to_return = self.grad_variance
_, res, grad_variance, step, temperature = self.sess.run(
(self.optimizer, self.lHat, grad_variance_field_to_return, self.global_step, self.temperature_variable),
feed_dict={self.x: X, self.n_samples: n_samples})
return res, grad_variance, step, temperature
def partial_grad(self, X, n_samples=1):
control_variate_grads, step = self.sess.run(
(self.control_variate_grads, self.global_step),
feed_dict={self.x: X, self.n_samples: n_samples})
return control_variate_grads, step
def partial_eval(self, X, n_samples=5):
if n_samples < 1000:
res, iwae = self.sess.run(
(self.lHat, self.iwae),
feed_dict={self.x: X, self.n_samples: n_samples})
res = [iwae] + res
else: # special case to handle OOM
assert n_samples % 100 == 0, "When using large # of samples, it must be divisble by 100"
res = []
for i in xrange(int(n_samples/100)):
logF, = self.sess.run(
(self.logF,),
feed_dict={self.x: X, self.n_samples: 100})
res.append(logsumexp(logF, axis=1))
res = [np.mean(logsumexp(res, axis=0) - np.log(n_samples))]
return res
# Random samplers
def _mean_sample(self, log_alpha, _, layer):
"""Returns mean of random variables parameterized by log_alpha."""
mu = tf.nn.sigmoid(log_alpha)
return {
'preactivation': mu,
'activation': mu,
'log_param': log_alpha,
}
def _generate_randomness(self):
for i in xrange(self.hparams.n_layer):
self.uniform_samples[i] = tf.stop_gradient(tf.random_uniform(
[self.batch_size, self.hparams.n_hidden]))
def _u_to_v(self, log_alpha, u, eps = 1e-8):
"""Convert u to tied randomness in v."""
u_prime = tf.nn.sigmoid(-log_alpha) # g(u') = 0
v_1 = (u - u_prime) / tf.clip_by_value(1 - u_prime, eps, 1)
v_1 = tf.clip_by_value(v_1, 0, 1)
v_1 = tf.stop_gradient(v_1)
v_1 = v_1*(1 - u_prime) + u_prime
v_0 = u / tf.clip_by_value(u_prime, eps, 1)
v_0 = tf.clip_by_value(v_0, 0, 1)
v_0 = tf.stop_gradient(v_0)
v_0 = v_0 * u_prime
v = tf.where(u > u_prime, v_1, v_0)
v = tf.check_numerics(v, 'v sampling is not numerically stable.')
v = v + tf.stop_gradient(-v + u) # v and u are the same up to numerical errors
return v
def _random_sample(self, log_alpha, u, layer):
"""Returns sampled random variables parameterized by log_alpha."""
# Generate tied randomness for later
if layer not in self.uniform_samples_v:
self.uniform_samples_v[layer] = self._u_to_v(log_alpha, u)
# Sample random variable underlying softmax/argmax
x = log_alpha + U.safe_log_prob(u) - U.safe_log_prob(1 - u)
samples = tf.stop_gradient(tf.to_float(x > 0))
return {
'preactivation': x,
'activation': samples,
'log_param': log_alpha,
}
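# Sketch (illustrative only, not used by the model): thresholding the logistic
# perturbation x = log_alpha + log(u) - log(1 - u) at zero draws a
# Bernoulli(sigmoid(log_alpha)) sample, since P(x > 0) = sigmoid(log_alpha).
# The helper below is hypothetical and mirrors _random_sample without TensorFlow.
def _example_hard_sample_numpy(log_alpha, u):
x = log_alpha + np.log(u) - np.log(1.0 - u)
return (x > 0).astype(np.float32)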
def _random_sample_soft(self, log_alpha, u, layer, temperature=None):
"""Returns sampled random variables parameterized by log_alpha."""
if temperature is None:
temperature = self.hparams.temperature
# Sample random variable underlying softmax/argmax
x = log_alpha + U.safe_log_prob(u) - U.safe_log_prob(1 - u)
x /= tf.expand_dims(temperature, -1)
if self.hparams.muprop_relaxation:
y = tf.nn.sigmoid(x + log_alpha * tf.expand_dims(temperature/(temperature + 1), -1))
else:
y = tf.nn.sigmoid(x)
return {
'preactivation': x,
'activation': y,
'log_param': log_alpha
}
def _random_sample_soft_v(self, log_alpha, _, layer, temperature=None):
"""Returns sampled random variables parameterized by log_alpha."""
v = self.uniform_samples_v[layer]
return self._random_sample_soft(log_alpha, v, layer, temperature)
def get_gumbel_gradient(self):
logQ, softSamples = self._recognition_network(sampler=self._random_sample_soft)
# _generator_network takes the list of logQ terms and returns the relaxed
# ELBO (logP + logPPrior - sum(logQ)) as its first value.
softELBO, _ = self._generator_network(softSamples, logQ)
gumbel_gradient = (self.optimizer_class.
compute_gradients(softELBO))
debug = {
'softELBO': softELBO,
}
return gumbel_gradient, debug
# samplers used for quadratic version
def _random_sample_switch(self, log_alpha, u, layer, switch_layer, temperature=None):
"""Run partial discrete, then continuous path.
Args:
switch_layer: this layer and beyond will be continuous
"""
if layer < switch_layer:
return self._random_sample(log_alpha, u, layer)
else:
return self._random_sample_soft(log_alpha, u, layer, temperature)
def _random_sample_switch_v(self, log_alpha, u, layer, switch_layer, temperature=None):
"""Run partial discrete, then continuous path.
Args:
switch_layer: this layer and beyond will be continuous
"""
if layer < switch_layer:
return self._random_sample(log_alpha, u, layer)
else:
return self._random_sample_soft_v(log_alpha, u, layer, temperature)
# #####
# Gradient computation
# #####
def get_nvil_gradient(self):
"""Compute the NVIL gradient."""
# Hard loss
logQHard, samples = self._recognition_network()
ELBO, reinforce_model_grad = self._generator_network(samples, logQHard)
logQHard = tf.add_n(logQHard)
# Add baselines (no variance normalization)
learning_signal = tf.stop_gradient(ELBO) - self._create_baseline()
# Set up losses
self.baseline_loss.append(tf.square(learning_signal))
optimizerLoss = -(tf.stop_gradient(learning_signal)*logQHard +
reinforce_model_grad)
optimizerLoss = tf.reduce_mean(optimizerLoss)
nvil_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
debug = {
'ELBO': ELBO,
'RMS of centered learning signal': U.rms(learning_signal),
}
return nvil_gradient, debug
def get_simple_muprop_gradient(self):
""" Computes the simple muprop gradient.
This muprop control variate does not include the linear term.
"""
# Hard loss
logQHard, hardSamples = self._recognition_network()
hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)
# Soft loss
logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
muELBO, _ = self._generator_network(muSamples, logQ)
scaling_baseline = self._create_eta(collection='BASELINE')
learning_signal = (hardELBO
- scaling_baseline * muELBO
- self._create_baseline())
self.baseline_loss.append(tf.square(learning_signal))
optimizerLoss = -(tf.stop_gradient(learning_signal) * tf.add_n(logQHard)
+ reinforce_model_grad)
optimizerLoss = tf.reduce_mean(optimizerLoss)
simple_muprop_gradient = (self.optimizer_class.
compute_gradients(optimizerLoss))
debug = {
'ELBO': hardELBO,
'muELBO': muELBO,
'RMS': U.rms(learning_signal),
}
return simple_muprop_gradient, debug
def get_muprop_gradient(self):
"""
random sample function that actually returns mean
new forward pass that returns logQ as a list
can get x_i from samples
"""
# Hard loss
logQHard, hardSamples = self._recognition_network()
hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)
# Soft loss
logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
muELBO, _ = self._generator_network(muSamples, logQ)
# Compute gradients
muELBOGrads = tf.gradients(tf.reduce_sum(muELBO),
[ muSamples[i]['activation'] for
i in xrange(self.hparams.n_layer) ])
# Compute MuProp gradient estimates
learning_signal = hardELBO
optimizerLoss = 0.0
learning_signals = []
for i in xrange(self.hparams.n_layer):
dfDiff = tf.reduce_sum(
muELBOGrads[i] * (hardSamples[i]['activation'] -
muSamples[i]['activation']),
axis=1)
dfMu = tf.reduce_sum(
tf.stop_gradient(muELBOGrads[i]) *
tf.nn.sigmoid(hardSamples[i]['log_param']),
axis=1)
scaling_baseline_0 = self._create_eta(collection='BASELINE')
scaling_baseline_1 = self._create_eta(collection='BASELINE')
learning_signals.append(learning_signal - scaling_baseline_0 * muELBO - scaling_baseline_1 * dfDiff - self._create_baseline())
self.baseline_loss.append(tf.square(learning_signals[i]))
optimizerLoss += (
logQHard[i] * tf.stop_gradient(learning_signals[i]) +
tf.stop_gradient(scaling_baseline_1) * dfMu)
optimizerLoss += reinforce_model_grad
optimizerLoss *= -1
optimizerLoss = tf.reduce_mean(optimizerLoss)
muprop_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
debug = {
'ELBO': hardELBO,
'muELBO': muELBO,
}
debug.update(dict([
('RMS learning signal layer %d' % i, U.rms(learning_signal))
for (i, learning_signal) in enumerate(learning_signals)]))
return muprop_gradient, debug
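# (In MuProp terms: the control variate is the first-order Taylor expansion
# f(mu) + f'(mu)^T (b - mu) around the mean-field activations mu, so each
# layer's score term is weighted by f(b) - eta_0*f(mu) - eta_1*f'(mu)^T(b - mu)
# minus a learned input-dependent baseline, and the analytic correction
# eta_1*f'(mu)^T mu is added back as a deterministic, differentiable term.)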
# REBAR gradient helper functions
def _create_gumbel_control_variate(self, logQHard, temperature=None):
'''Calculate gumbel control variate.
'''
if temperature is None:
temperature = self.hparams.temperature
logQ, softSamples = self._recognition_network(sampler=functools.partial(
self._random_sample_soft, temperature=temperature))
softELBO, _ = self._generator_network(softSamples, logQ)
logQ = tf.add_n(logQ)
# Generate the softELBO_v (should be the same value but different grads)
logQ_v, softSamples_v = self._recognition_network(sampler=functools.partial(
self._random_sample_soft_v, temperature=temperature))
softELBO_v, _ = self._generator_network(softSamples_v, logQ_v)
logQ_v = tf.add_n(logQ_v)
# Compute losses
learning_signal = tf.stop_gradient(softELBO_v)
# Control variate
h = (tf.stop_gradient(learning_signal) * tf.add_n(logQHard)
- softELBO + softELBO_v)
extra = (softELBO_v, -softELBO + softELBO_v)
return h, extra
def _create_gumbel_control_variate_quadratic(self, logQHard, temperature=None):
'''Calculate gumbel control variate.
'''
if temperature is None:
temperature = self.hparams.temperature
h = 0
extra = []
for layer in xrange(self.hparams.n_layer):
logQ, softSamples = self._recognition_network(sampler=functools.partial(
self._random_sample_switch, switch_layer=layer, temperature=temperature))
softELBO, _ = self._generator_network(softSamples, logQ)
# Generate the softELBO_v (should be the same value but different grads)
logQ_v, softSamples_v = self._recognition_network(sampler=functools.partial(
self._random_sample_switch_v, switch_layer=layer, temperature=temperature))
softELBO_v, _ = self._generator_network(softSamples_v, logQ_v)
# Compute losses
learning_signal = tf.stop_gradient(softELBO_v)
# Control variate
h += (tf.stop_gradient(learning_signal) * logQHard[layer]
- softELBO + softELBO_v)
extra.append((softELBO_v, -softELBO + softELBO_v))
return h, extra
def _create_hard_elbo(self):
logQHard, hardSamples = self._recognition_network()
hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)
reinforce_learning_signal = tf.stop_gradient(hardELBO)
# Center learning signal
baseline = self._create_baseline(collection='CV')
reinforce_learning_signal = tf.stop_gradient(reinforce_learning_signal) - baseline
nvil_gradient = (tf.stop_gradient(hardELBO) - baseline) * tf.add_n(logQHard) + reinforce_model_grad
return hardELBO, nvil_gradient, logQHard
def multiply_by_eta(self, h_grads, eta):
# Modifies eta
res = []
eta_statistics = []
for (g, v) in h_grads:
if g is None:
res.append((g, v))
else:
if 'network' not in eta:
eta['network'] = self._create_eta()
res.append((g*eta['network'], v))
eta_statistics.append(eta['network'])
return res, eta_statistics
def multiply_by_eta_per_layer(self, h_grads, eta):
# Modifies eta
res = []
eta_statistics = []
for (g, v) in h_grads:
if g is None:
res.append((g, v))
else:
if v not in eta:
eta[v] = self._create_eta()
res.append((g*eta[v], v))
eta_statistics.append(eta[v])
return res, eta_statistics
def multiply_by_eta_per_unit(self, h_grads, eta):
# Modifies eta
res = []
eta_statistics = []
for (g, v) in h_grads:
if g is None:
res.append((g, v))
else:
if v not in eta:
g_shape = g.get_shape().as_list()
assert len(g_shape) <= 2, 'Gradient has too many dimensions'
if len(g_shape) == 1:
eta[v] = self._create_eta(g_shape)
else:
eta[v] = self._create_eta([1, g_shape[1]])
res.append((g*eta[v], v))
eta_statistics.extend(tf.nn.moments(tf.squeeze(eta[v]), axes=[0]))
return res, eta_statistics
def get_dynamic_rebar_gradient(self):
"""Get the dynamic rebar gradient (t, eta optimized)."""
tiled_pre_temperature = tf.tile([self.pre_temperature_variable],
[self.batch_size])
temperature = tf.exp(tiled_pre_temperature)
hardELBO, nvil_gradient, logQHard = self._create_hard_elbo()
if self.hparams.quadratic:
gumbel_cv, extra = self._create_gumbel_control_variate_quadratic(logQHard, temperature=temperature)
else:
gumbel_cv, extra = self._create_gumbel_control_variate(logQHard, temperature=temperature)
f_grads = self.optimizer_class.compute_gradients(tf.reduce_mean(-nvil_gradient))
eta = {}
h_grads, eta_statistics = self.multiply_by_eta_per_layer(
self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)),
eta)
model_grads = U.add_grads_and_vars(f_grads, h_grads)
total_grads = model_grads
# Construct the variance objective
g = U.vectorize(model_grads, set_none_to_zero=True)
self.maintain_ema_ops.append(self.ema.apply([g]))
gbar = 0 #tf.stop_gradient(self.ema.average(g))
variance_objective = tf.reduce_mean(tf.square(g - gbar))
reinf_g_t = 0
if self.hparams.quadratic:
for layer in xrange(self.hparams.n_layer):
gumbel_learning_signal, _ = extra[layer]
df_dt = tf.gradients(gumbel_learning_signal, tiled_pre_temperature)[0]
reinf_g_t_i, _ = self.multiply_by_eta_per_layer(
self.optimizer_class.compute_gradients(tf.reduce_mean(tf.stop_gradient(df_dt) * logQHard[layer])),
eta)
reinf_g_t += U.vectorize(reinf_g_t_i, set_none_to_zero=True)
reparam = tf.add_n([reparam_i for _, reparam_i in extra])
else:
gumbel_learning_signal, reparam = extra
df_dt = tf.gradients(gumbel_learning_signal, tiled_pre_temperature)[0]
reinf_g_t, _ = self.multiply_by_eta_per_layer(
self.optimizer_class.compute_gradients(tf.reduce_mean(tf.stop_gradient(df_dt) * tf.add_n(logQHard))),
eta)
reinf_g_t = U.vectorize(reinf_g_t, set_none_to_zero=True)
reparam_g, _ = self.multiply_by_eta_per_layer(
self.optimizer_class.compute_gradients(tf.reduce_mean(reparam)),
eta)
reparam_g = U.vectorize(reparam_g, set_none_to_zero=True)
reparam_g_t = tf.gradients(tf.reduce_mean(2*tf.stop_gradient(g - gbar)*reparam_g), self.pre_temperature_variable)[0]
variance_objective_grad = tf.reduce_mean(2*(g - gbar)*reinf_g_t) + reparam_g_t
debug = { 'ELBO': hardELBO,
'etas': eta_statistics,
'variance_objective': variance_objective,
}
return total_grads, debug, variance_objective, variance_objective_grad
def get_rebar_gradient(self):
"""Get the rebar gradient."""
hardELBO, nvil_gradient, logQHard = self._create_hard_elbo()
if self.hparams.quadratic:
gumbel_cv, _ = self._create_gumbel_control_variate_quadratic(logQHard)
else:
gumbel_cv, _ = self._create_gumbel_control_variate(logQHard)
f_grads = self.optimizer_class.compute_gradients(tf.reduce_mean(-nvil_gradient))
eta = {}
h_grads, eta_statistics = self.multiply_by_eta_per_layer(
self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)),
eta)
model_grads = U.add_grads_and_vars(f_grads, h_grads)
total_grads = model_grads
# Construct the variance objective
variance_objective = tf.reduce_mean(tf.square(U.vectorize(model_grads, set_none_to_zero=True)))
debug = { 'ELBO': hardELBO,
'etas': eta_statistics,
'variance_objective': variance_objective,
}
return total_grads, debug, variance_objective
###
# Create variants
###
class SBNSimpleMuProp(SBN):
def _create_loss(self):
simple_muprop_gradient, debug = self.get_simple_muprop_gradient()
self.lHat = list(map(tf.reduce_mean, [
debug['ELBO'],
debug['muELBO'],
]))
return debug['ELBO'], simple_muprop_gradient
def _create_network(self):
logF, loss_grads = self._create_loss()
self._create_train_op(loss_grads)
# Create IWAE lower bound for evaluation
self.logF = self._reshape(logF)
self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -
tf.log(tf.to_float(self.n_samples)))
class SBNMuProp(SBN):
def _create_loss(self):
muprop_gradient, debug = self.get_muprop_gradient()
self.lHat = list(map(tf.reduce_mean, [
debug['ELBO'],
debug['muELBO'],
]))
return debug['ELBO'], muprop_gradient
def _create_network(self):
logF, loss_grads = self._create_loss()
self._create_train_op(loss_grads)
# Create IWAE lower bound for evaluation
self.logF = self._reshape(logF)
self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -
tf.log(tf.to_float(self.n_samples)))
class SBNNVIL(SBN):
def _create_loss(self):
nvil_gradient, debug = self.get_nvil_gradient()
self.lHat = list(map(tf.reduce_mean, [
debug['ELBO'],
]))
return debug['ELBO'], nvil_gradient
def _create_network(self):
logF, loss_grads = self._create_loss()
self._create_train_op(loss_grads)
# Create IWAE lower bound for evaluation
self.logF = self._reshape(logF)
self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -
tf.log(tf.to_float(self.n_samples)))
class SBNRebar(SBN):
def _create_loss(self):
rebar_gradient, debug, variance_objective = self.get_rebar_gradient()
self.lHat = list(map(tf.reduce_mean, [
debug['ELBO'],
]))
self.lHat.extend(map(tf.reduce_mean, debug['etas']))
return debug['ELBO'], rebar_gradient, variance_objective
def _create_network(self):
logF, loss_grads, variance_objective = self._create_loss()
# Create additional updates for control variates and temperature
eta_grads = (self.optimizer_class.compute_gradients(variance_objective,
var_list=tf.get_collection('CV')))
self._create_train_op(loss_grads, eta_grads)
# Create IWAE lower bound for evaluation
self.logF = self._reshape(logF)
self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -
tf.log(tf.to_float(self.n_samples)))
class SBNDynamicRebar(SBN):
def _create_loss(self):
rebar_gradient, debug, variance_objective, variance_objective_grad = self.get_dynamic_rebar_gradient()
self.lHat = list(map(tf.reduce_mean, [
debug['ELBO'],
self.temperature_variable,
]))
self.lHat.extend(debug['etas'])
return debug['ELBO'], rebar_gradient, variance_objective, variance_objective_grad
def _create_network(self):
logF, loss_grads, variance_objective, variance_objective_grad = self._create_loss()
# Create additional updates for control variates and temperature
eta_grads = (self.optimizer_class.compute_gradients(variance_objective,
var_list=tf.get_collection('CV'))
+ [(variance_objective_grad, self.pre_temperature_variable)])
self._create_train_op(loss_grads, eta_grads)
# Create IWAE lower bound for evaluation
self.logF = self._reshape(logF)
self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -
tf.log(tf.to_float(self.n_samples)))
class SBNTrackGradVariances(SBN):
"""Follow NVIL, compute gradient variances for NVIL, MuProp and REBAR."""
def compute_gradient_moments(self, grads_and_vars):
first_moment = U.vectorize(grads_and_vars, set_none_to_zero=True)
second_moment = tf.square(first_moment)
self.maintain_ema_ops.append(self.ema.apply([first_moment, second_moment]))
return self.ema.average(first_moment), self.ema.average(second_moment)
def _create_loss(self):
self.losses = [
('NVIL', self.get_nvil_gradient),
('SimpleMuProp', self.get_simple_muprop_gradient),
('MuProp', self.get_muprop_gradient),
]
moments = []
for k, v in self.losses:
print(k)
gradient, debug = v()
if k == 'SimpleMuProp':
ELBO = debug['ELBO']
gradient_to_follow = gradient
moments.append(self.compute_gradient_moments(
gradient))
self.losses.append(('DynamicREBAR', self.get_dynamic_rebar_gradient))
dynamic_rebar_gradient, _, variance_objective, variance_objective_grad = self.get_dynamic_rebar_gradient()
moments.append(self.compute_gradient_moments(dynamic_rebar_gradient))
self.losses.append(('REBAR', self.get_rebar_gradient))
rebar_gradient, _, variance_objective2 = self.get_rebar_gradient()
moments.append(self.compute_gradient_moments(rebar_gradient))
mu = tf.reduce_mean(tf.stack([f for f, _ in moments]), axis=0)
self.grad_variances = []
deviations = []
for f, s in moments:
self.grad_variances.append(tf.reduce_mean(s - tf.square(mu)))
deviations.append(tf.reduce_mean(tf.square(f - mu)))
self.lHat = list(map(tf.reduce_mean, [
ELBO,
self.temperature_variable,
variance_objective_grad,
variance_objective_grad*variance_objective_grad,
]))
self.lHat.extend(deviations)
self.lHat.append(tf.log(tf.reduce_mean(mu*mu)))
# self.lHat.extend(map(tf.log, grad_variances))
return ELBO, gradient_to_follow, variance_objective + variance_objective2, variance_objective_grad
def _create_network(self):
logF, loss_grads, variance_objective, variance_objective_grad = self._create_loss()
eta_grads = (self.optimizer_class.compute_gradients(variance_objective,
var_list=tf.get_collection('CV'))
+ [(variance_objective_grad, self.pre_temperature_variable)])
self._create_train_op(loss_grads, eta_grads)
# Create IWAE lower bound for evaluation
self.logF = self._reshape(logF)
self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -
tf.log(tf.to_float(self.n_samples)))
class SBNGumbel(SBN):
def _random_sample_soft(self, log_alpha, u, layer, temperature=None):
"""Returns sampled random variables parameterized by log_alpha."""
if temperature is None:
temperature = self.hparams.temperature
# Sample random variable underlying softmax/argmax
x = log_alpha + U.safe_log_prob(u) - U.safe_log_prob(1 - u)
x /= temperature
if self.hparams.muprop_relaxation:
x += temperature/(temperature + 1)*log_alpha
y = tf.nn.sigmoid(x)
return {
'preactivation': x,
'activation': y,
'log_param': log_alpha
}
def _create_loss(self):
# Hard loss
logQHard, hardSamples = self._recognition_network()
hardELBO, _ = self._generator_network(hardSamples, logQHard)
logQ, softSamples = self._recognition_network(sampler=self._random_sample_soft)
softELBO, _ = self._generator_network(softSamples, logQ)
self.optimizerLoss = -softELBO
self.lHat = list(map(tf.reduce_mean, [
hardELBO,
softELBO,
]))
return hardELBO
default_hparams = tf.contrib.training.HParams(model='SBNGumbel',
n_hidden=200,
n_input=784,
n_layer=1,
nonlinear=False,
learning_rate=0.001,
temperature=0.5,
n_samples=1,
batch_size=24,
trial=1,
muprop_relaxation=True,
dynamic_b=False, # dynamic binarization
quadratic=True,
beta2=0.99999,
task='sbn',
)
| 39,986 | 35.252947 | 132 | py |
models | models-master/research/rebar/rebar_train.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import random
import sys
import os
import numpy as np
import tensorflow as tf
import rebar
import datasets
import logger as L
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
gfile = tf.gfile
tf.app.flags.DEFINE_string("working_dir", "/tmp/rebar",
"""Directory where to save data, write logs, etc.""")
tf.app.flags.DEFINE_string('hparams', '',
'''Comma separated list of name=value pairs.''')
tf.app.flags.DEFINE_integer('eval_freq', 20,
'''How often to run the evaluation step.''')
FLAGS = tf.flags.FLAGS
def manual_scalar_summary(name, value):
value = tf.Summary.Value(tag=name, simple_value=value)
summary_str = tf.Summary(value=[value])
return summary_str
def eval(sbn, eval_xs, n_samples=100, batch_size=5):
n = eval_xs.shape[0]
i = 0
res = []
while i < n:
batch_xs = eval_xs[i:min(i+batch_size, n)]
res.append(sbn.partial_eval(batch_xs, n_samples))
i += batch_size
res = np.mean(res, axis=0)
return res
def train(sbn, train_xs, valid_xs, test_xs, training_steps, debug=False):
hparams = sorted(sbn.hparams.values().items())
hparams = (map(str, x) for x in hparams)
hparams = ('_'.join(x) for x in hparams)
hparams_str = '.'.join(hparams)
logger = L.Logger()
# Create the experiment name from the hparams
experiment_name = ([str(sbn.hparams.n_hidden) for i in xrange(sbn.hparams.n_layer)] +
[str(sbn.hparams.n_input)])
if sbn.hparams.nonlinear:
experiment_name = '~'.join(experiment_name)
else:
experiment_name = '-'.join(experiment_name)
experiment_name = 'SBN_%s' % experiment_name
rowkey = {'experiment': experiment_name,
'model': hparams_str}
# Create summary writer
summ_dir = os.path.join(FLAGS.working_dir, hparams_str)
summary_writer = tf.summary.FileWriter(
summ_dir, flush_secs=15, max_queue=100)
sv = tf.train.Supervisor(logdir=os.path.join(
FLAGS.working_dir, hparams_str),
save_summaries_secs=0,
save_model_secs=1200,
summary_op=None,
recovery_wait_secs=30,
global_step=sbn.global_step)
with sv.managed_session() as sess:
# Dump hparams to file
with gfile.Open(os.path.join(FLAGS.working_dir,
hparams_str,
'hparams.json'),
'w') as out:
json.dump(sbn.hparams.values(), out)
sbn.initialize(sess)
batch_size = sbn.hparams.batch_size
scores = []
n = train_xs.shape[0]
index = list(range(n))
while not sv.should_stop():
lHats = []
grad_variances = []
temperatures = []
random.shuffle(index)
i = 0
while i < n:
batch_index = index[i:min(i+batch_size, n)]
batch_xs = train_xs[batch_index, :]
if sbn.hparams.dynamic_b:
# Dynamically binarize the batch data
batch_xs = (np.random.rand(*batch_xs.shape) < batch_xs).astype(float)
lHat, grad_variance, step, temperature = sbn.partial_fit(batch_xs,
sbn.hparams.n_samples)
if debug:
print(i, lHat)
if i > 100:
return
lHats.append(lHat)
grad_variances.append(grad_variance)
temperatures.append(temperature)
i += batch_size
grad_variances = np.log(np.mean(grad_variances, axis=0)).tolist()
summary_strings = []
if isinstance(grad_variances, list):
grad_variances = dict(zip([k for (k, v) in sbn.losses], map(float, grad_variances)))
rowkey['step'] = step
logger.log(rowkey, {'step': step,
'train': np.mean(lHats, axis=0)[0],
'grad_variances': grad_variances,
'temperature': np.mean(temperatures), })
grad_variances = '\n'.join(map(str, sorted(grad_variances.items())))
else:
rowkey['step'] = step
logger.log(rowkey, {'step': step,
'train': np.mean(lHats, axis=0)[0],
'grad_variance': grad_variances,
'temperature': np.mean(temperatures), })
summary_strings.append(manual_scalar_summary("log grad variance", grad_variances))
print('Step %d: %s\n%s' % (step, str(np.mean(lHats, axis=0)), str(grad_variances)))
# Every few epochs compute test and validation scores
epoch = int(step / (train_xs.shape[0] / sbn.hparams.batch_size))
if epoch % FLAGS.eval_freq == 0:
valid_res = eval(sbn, valid_xs)
test_res= eval(sbn, test_xs)
print('\nValid %d: %s' % (step, str(valid_res)))
print('Test %d: %s\n' % (step, str(test_res)))
logger.log(rowkey, {'step': step,
'valid': valid_res[0],
'test': test_res[0]})
logger.flush() # Flush infrequently
# Create summaries
summary_strings.extend([
manual_scalar_summary("Train ELBO", np.mean(lHats, axis=0)[0]),
manual_scalar_summary("Temperature", np.mean(temperatures)),
])
for summ_str in summary_strings:
summary_writer.add_summary(summ_str, global_step=step)
summary_writer.flush()
sys.stdout.flush()
scores.append(np.mean(lHats, axis=0))
if step > training_steps:
break
return scores
def main():
# Parse hyperparams
hparams = rebar.default_hparams
hparams.parse(FLAGS.hparams)
print(hparams.values())
train_xs, valid_xs, test_xs = datasets.load_data(hparams)
mean_xs = np.mean(train_xs, axis=0) # Compute mean centering on training
training_steps = 2000000
model = getattr(rebar, hparams.model)
sbn = model(hparams, mean_xs=mean_xs)
scores = train(sbn, train_xs, valid_xs, test_xs,
training_steps=training_steps, debug=False)
if __name__ == '__main__':
main()
| 6,852 | 32.925743 | 92 | py |
models | models-master/research/deep_speech/deep_speech_model.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network structure for DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Supported rnn cells.
SUPPORTED_RNNS = {
"lstm": tf.keras.layers.LSTMCell,
"rnn": tf.keras.layers.SimpleRNNCell,
"gru": tf.keras.layers.GRUCell,
}
# Parameters for batch normalization.
_BATCH_NORM_EPSILON = 1e-5
_BATCH_NORM_DECAY = 0.997
# Filters of convolution layer
_CONV_FILTERS = 32
def batch_norm(inputs, training):
"""Batch normalization layer.
Note that the momentum to use will affect validation accuracy over time.
Batch norm has different behaviors during training/evaluation. With a large
momentum, the model takes longer to get a near-accurate estimation of the
moving mean/variance over the entire training dataset, which means we need
more iterations to see good evaluation results. If the training data is evenly
distributed over the feature space, we can also try setting a smaller momentum
(such as 0.1) to get good evaluation result sooner.
Args:
inputs: input data for batch norm layer.
training: a boolean to indicate if it is in training stage.
Returns:
tensor output from batch norm layer.
"""
return tf.keras.layers.BatchNormalization(
momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON)(inputs, training=training)
def _conv_bn_layer(inputs, padding, filters, kernel_size, strides, layer_id,
training):
"""Defines 2D convolutional + batch normalization layer.
Args:
inputs: input data for convolution layer.
padding: padding to be applied before convolution layer.
filters: an integer, number of output filters in the convolution.
kernel_size: a tuple specifying the height and width of the 2D convolution
window.
strides: a tuple specifying the stride length of the convolution.
layer_id: an integer specifying the layer index.
training: a boolean to indicate which stage we are in (training/eval).
Returns:
tensor output from the current layer.
"""
# Perform symmetric padding on the time and feature dimensions.
# This step is required to avoid issues when RNN output sequence is shorter
# than the label length.
inputs = tf.pad(
inputs,
[[0, 0], [padding[0], padding[0]], [padding[1], padding[1]], [0, 0]])
inputs = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size, strides=strides,
padding="valid", use_bias=False, activation=tf.nn.relu6,
name="cnn_{}".format(layer_id))(inputs)
return batch_norm(inputs, training)
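# (Worked example, illustrative only: for the first layer, padding (20, 5),
# kernel (41, 11) and stride (2, 2) give an output time length of
# floor((T + 2*20 - 41)/2) + 1, i.e. roughly T/2; the second layer halves the
# time dimension again, so the padded spectrogram is reduced about 4x in time
# before reaching the RNN stack.)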
def _rnn_layer(inputs, rnn_cell, rnn_hidden_size, layer_id, is_batch_norm,
is_bidirectional, training):
"""Defines a batch normalization + rnn layer.
Args:
inputs: input tensors for the current layer.
rnn_cell: RNN cell instance to use.
rnn_hidden_size: an integer for the dimensionality of the rnn output space.
layer_id: an integer for the index of current layer.
is_batch_norm: a boolean specifying whether to perform batch normalization
on input states.
is_bidirectional: a boolean specifying whether the rnn layer is
bi-directional.
training: a boolean to indicate which stage we are in (training/eval).
Returns:
tensor output for the current layer.
"""
if is_batch_norm:
inputs = batch_norm(inputs, training)
if is_bidirectional:
rnn_outputs = tf.keras.layers.Bidirectional(
tf.keras.layers.RNN(rnn_cell(rnn_hidden_size),
return_sequences=True))(inputs)
else:
rnn_outputs = tf.keras.layers.RNN(
rnn_cell(rnn_hidden_size), return_sequences=True)(inputs)
return rnn_outputs
class DeepSpeech2(object):
"""Define DeepSpeech2 model."""
def __init__(self, num_rnn_layers, rnn_type, is_bidirectional,
rnn_hidden_size, num_classes, use_bias):
"""Initialize DeepSpeech2 model.
Args:
num_rnn_layers: an integer, the number of rnn layers. By default, it's 5.
rnn_type: a string, one of the supported rnn cells: gru, rnn and lstm.
is_bidirectional: a boolean to indicate if the rnn layer is bidirectional.
rnn_hidden_size: an integer for the number of hidden states in each unit.
num_classes: an integer, the number of output classes/labels.
use_bias: a boolean specifying whether to use bias in the last fc layer.
"""
self.num_rnn_layers = num_rnn_layers
self.rnn_type = rnn_type
self.is_bidirectional = is_bidirectional
self.rnn_hidden_size = rnn_hidden_size
self.num_classes = num_classes
self.use_bias = use_bias
def __call__(self, inputs, training):
# Two cnn layers.
inputs = _conv_bn_layer(
inputs, padding=(20, 5), filters=_CONV_FILTERS, kernel_size=(41, 11),
strides=(2, 2), layer_id=1, training=training)
inputs = _conv_bn_layer(
inputs, padding=(10, 5), filters=_CONV_FILTERS, kernel_size=(21, 11),
strides=(2, 1), layer_id=2, training=training)
# output of conv_layer2 with the shape of
# [batch_size (N), times (T), features (F), channels (C)].
# Convert the conv output to rnn input.
batch_size = tf.shape(inputs)[0]
feat_size = inputs.get_shape().as_list()[2]
inputs = tf.reshape(
inputs,
[batch_size, -1, feat_size * _CONV_FILTERS])
# RNN layers.
rnn_cell = SUPPORTED_RNNS[self.rnn_type]
for layer_counter in xrange(self.num_rnn_layers):
# No batch normalization on the first layer.
is_batch_norm = (layer_counter != 0)
inputs = _rnn_layer(
inputs, rnn_cell, self.rnn_hidden_size, layer_counter + 1,
is_batch_norm, self.is_bidirectional, training)
# FC layer with batch norm.
inputs = batch_norm(inputs, training)
logits = tf.keras.layers.Dense(
self.num_classes, use_bias=self.use_bias, activation="softmax")(inputs)
return logits
| 6,717 | 36.741573 | 89 | py |
models | models-master/research/deep_speech/decoder.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep speech decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from nltk.metrics import distance
import numpy as np
class DeepSpeechDecoder(object):
"""Greedy decoder implementation for Deep Speech model."""
def __init__(self, labels, blank_index=28):
"""Decoder initialization.
Args:
labels: a string specifying the speech labels for the decoder to use.
blank_index: an integer specifying index for the blank character.
Defaults to 28.
"""
# e.g. labels = "[a-z]' _"
self.labels = labels
self.blank_index = blank_index
self.int_to_char = dict([(i, c) for (i, c) in enumerate(labels)])
def convert_to_string(self, sequence):
"""Convert a sequence of indexes into corresponding string."""
return ''.join([self.int_to_char[i] for i in sequence])
def wer(self, decode, target):
"""Computes the Word Error Rate (WER).
WER is defined as the edit distance between the two provided sentences after
tokenizing to words.
Args:
decode: string of the decoded output.
target: a string for the ground truth label.
Returns:
A float number for the WER of the current decode-target pair.
"""
# Map each word to a new char.
words = set(decode.split() + target.split())
word2char = dict(zip(words, range(len(words))))
new_decode = [chr(word2char[w]) for w in decode.split()]
new_target = [chr(word2char[w]) for w in target.split()]
return distance.edit_distance(''.join(new_decode), ''.join(new_target))
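# (Illustrative example: with decode = "the cat sat" and target = "the cat sit",
# each distinct word maps to a single character, the two mapped strings differ
# in one position, and the word-level edit distance is 1; callers such as
# evaluate_model() then divide by len(target.split()) to get a rate.)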
def cer(self, decode, target):
"""Computes the Character Error Rate (CER).
CER is defined as the edit distance between the two given strings.
Args:
decode: a string of the decoded output.
target: a string for the ground truth label.
Returns:
A float number denoting the CER for the current sentence pair.
"""
return distance.edit_distance(decode, target)
def decode(self, logits):
"""Decode the best guess from logits using greedy algorithm."""
# Choose the class with maximum probability.
best = list(np.argmax(logits, axis=1))
# Merge repeated chars.
merge = [k for k, _ in itertools.groupby(best)]
# Remove the blank index in the decoded sequence.
merge_remove_blank = []
for k in merge:
if k != self.blank_index:
merge_remove_blank.append(k)
return self.convert_to_string(merge_remove_blank)
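# (Illustrative example with hypothetical indices: if the per-step argmax is
# [5, 5, 28, 5, 0, 0, 28, 3] and 28 is the blank index, merging repeats gives
# [5, 28, 5, 0, 28, 3] and dropping blanks gives [5, 5, 0, 3], which is then
# mapped back to characters via int_to_char.)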
| 3,209 | 32.4375 | 80 | py |
models | models-master/research/deep_speech/__init__.py |  | 0 | 0 | 0 | py |
models | models-master/research/deep_speech/deep_speech.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main entry to train and evaluate DeepSpeech model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# pylint: disable=g-bad-import-order
from absl import app as absl_app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order
import data.dataset as dataset
import decoder
import deep_speech_model
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
# Default vocabulary file
_VOCABULARY_FILE = os.path.join(
os.path.dirname(__file__), "data/vocabulary.txt")
# Evaluation metrics
_WER_KEY = "WER"
_CER_KEY = "CER"
def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length):
"""Computes the time_steps/ctc_input_length after convolution.
Suppose that the original feature contains two parts:
1) Real spectrogram signals, spanning input_length steps.
2) Padded part with all 0s.
The total length of those two parts is denoted as max_time_steps, which is
the padded length of the current batch. After convolution layers, the time
steps of a spectrogram feature will be decreased. As we know the percentage
of its original length within the entire length, we can compute the time steps
for the signal after conv as follows (using ctc_input_length to denote):
ctc_input_length = (input_length / max_time_steps) * output_length_of_conv.
This length is then fed into ctc loss function to compute loss.
Args:
max_time_steps: max_time_steps for the batch, after padding.
ctc_time_steps: number of timesteps after convolution.
input_length: actual length of the original spectrogram, without padding.
Returns:
the ctc_input_length after convolution layer.
"""
ctc_input_length = tf.cast(tf.multiply(
input_length, ctc_time_steps), dtype=tf.float32)
return tf.cast(tf.math.floordiv(
ctc_input_length, tf.cast(max_time_steps, dtype=tf.float32)), dtype=tf.int32)
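# (Worked example, illustrative only: if the padded batch has max_time_steps =
# 200, a clip's real input_length is 160, and the conv stack leaves
# ctc_time_steps = 50, then ctc_input_length = floor(160 * 50 / 200) = 40.)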
def evaluate_model(estimator, speech_labels, entries, input_fn_eval):
"""Evaluate the model performance using WER anc CER as metrics.
WER: Word Error Rate
CER: Character Error Rate
Args:
estimator: estimator to evaluate.
speech_labels: a string specifying all the character in the vocabulary.
entries: a list of data entries (audio_file, file_size, transcript) for the
given dataset.
input_fn_eval: data input function for evaluation.
Returns:
Evaluation result containing 'wer' and 'cer' as two metrics.
"""
# Get predictions
predictions = estimator.predict(input_fn=input_fn_eval)
# Get probabilities of each predicted class
probs = [pred["probabilities"] for pred in predictions]
num_of_examples = len(probs)
targets = [entry[2] for entry in entries] # The ground truth transcript
total_wer, total_cer = 0, 0
greedy_decoder = decoder.DeepSpeechDecoder(speech_labels)
for i in range(num_of_examples):
# Decode string.
decoded_str = greedy_decoder.decode(probs[i])
# Compute CER.
total_cer += greedy_decoder.cer(decoded_str, targets[i]) / float(
len(targets[i]))
# Compute WER.
total_wer += greedy_decoder.wer(decoded_str, targets[i]) / float(
len(targets[i].split()))
# Get mean value
total_cer /= num_of_examples
total_wer /= num_of_examples
global_step = estimator.get_variable_value(tf.compat.v1.GraphKeys.GLOBAL_STEP)
eval_results = {
_WER_KEY: total_wer,
_CER_KEY: total_cer,
tf.compat.v1.GraphKeys.GLOBAL_STEP: global_step,
}
return eval_results
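# Worked example of the per-example normalization above (hypothetical strings,
# assuming decoder.cer/wer return unnormalized edit distances, consistent with
# the division by target length): if the decoder emits "hallo world" for the
# target "hello world", the CER contribution is 1 edited character / 11
# characters and the WER contribution is 1 substituted word / 2 words. These
# ratios are then averaged over all evaluated examples.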
def model_fn(features, labels, mode, params):
"""Define model function for deep speech model.
Args:
features: a dictionary of input_data features. It includes the data
input_length, label_length and the spectrogram features.
labels: a list of labels for the input data.
mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVAL`, `PREDICT`.
params: a dict of hyper parameters to be passed to model_fn.
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
num_classes = params["num_classes"]
input_length = features["input_length"]
label_length = features["label_length"]
features = features["features"]
# Create DeepSpeech2 model.
model = deep_speech_model.DeepSpeech2(
flags_obj.rnn_hidden_layers, flags_obj.rnn_type,
flags_obj.is_bidirectional, flags_obj.rnn_hidden_size,
num_classes, flags_obj.use_bias)
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(features, training=False)
predictions = {
"classes": tf.argmax(logits, axis=2),
"probabilities": logits,
"logits": logits
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions)
# In training mode.
logits = model(features, training=True)
ctc_input_length = compute_length_after_conv(
tf.shape(features)[1], tf.shape(logits)[1], input_length)
# Compute CTC loss
loss = tf.reduce_mean(tf.keras.backend.ctc_batch_cost(
labels, logits, ctc_input_length, label_length))
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=flags_obj.learning_rate)
global_step = tf.compat.v1.train.get_or_create_global_step()
minimize_op = optimizer.minimize(loss, global_step=global_step)
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
# Create the train_op that groups both minimize_ops and update_ops
train_op = tf.group(minimize_op, update_ops)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op)
def generate_dataset(data_dir):
"""Generate a speech dataset."""
audio_conf = dataset.AudioConfig(sample_rate=flags_obj.sample_rate,
window_ms=flags_obj.window_ms,
stride_ms=flags_obj.stride_ms,
normalize=True)
train_data_conf = dataset.DatasetConfig(
audio_conf,
data_dir,
flags_obj.vocabulary_file,
flags_obj.sortagrad
)
speech_dataset = dataset.DeepSpeechDataset(train_data_conf)
return speech_dataset
def per_device_batch_size(batch_size, num_gpus):
"""For multi-gpu, batch-size must be a multiple of the number of GPUs.
  Note that the distribution strategy handles this automatically when used with
  Keras. When using the Estimator API, we need to compute the per-GPU batch
  size ourselves.
Args:
batch_size: Global batch size to be divided among devices. This should be
equal to num_gpus times the single-GPU batch_size for multi-gpu training.
num_gpus: How many GPUs are used with DistributionStrategies.
Returns:
Batch size per device.
Raises:
ValueError: if batch_size is not divisible by number of devices
"""
if num_gpus <= 1:
return batch_size
remainder = batch_size % num_gpus
if remainder:
err = ('When running with multiple GPUs, batch size '
'must be a multiple of the number of available GPUs. Found {} '
'GPUs with a batch size of {}; try --batch_size={} instead.'
).format(num_gpus, batch_size, batch_size - remainder)
raise ValueError(err)
return int(batch_size / num_gpus)
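# Worked example (hypothetical values): per_device_batch_size(128, 4) returns
# 32, while per_device_batch_size(128, 3) raises ValueError (128 % 3 == 2) and
# suggests --batch_size=126 instead.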
def run_deep_speech(_):
"""Run deep speech training and eval loop."""
tf.compat.v1.set_random_seed(flags_obj.seed)
# Data preprocessing
logging.info("Data preprocessing...")
train_speech_dataset = generate_dataset(flags_obj.train_data_dir)
eval_speech_dataset = generate_dataset(flags_obj.eval_data_dir)
# Number of label classes. Label string is "[a-z]' -"
num_classes = len(train_speech_dataset.speech_labels)
# Use distribution strategy for multi-gpu training
num_gpus = flags_core.get_num_gpus(flags_obj)
distribution_strategy = distribution_utils.get_distribution_strategy(num_gpus=num_gpus)
run_config = tf.estimator.RunConfig(
train_distribute=distribution_strategy)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=flags_obj.model_dir,
config=run_config,
params={
"num_classes": num_classes,
}
)
# Benchmark logging
run_params = {
"batch_size": flags_obj.batch_size,
"train_epochs": flags_obj.train_epochs,
"rnn_hidden_size": flags_obj.rnn_hidden_size,
"rnn_hidden_layers": flags_obj.rnn_hidden_layers,
"rnn_type": flags_obj.rnn_type,
"is_bidirectional": flags_obj.is_bidirectional,
"use_bias": flags_obj.use_bias
}
per_replica_batch_size = per_device_batch_size(flags_obj.batch_size, num_gpus)
def input_fn_train():
return dataset.input_fn(
per_replica_batch_size, train_speech_dataset)
def input_fn_eval():
return dataset.input_fn(
per_replica_batch_size, eval_speech_dataset)
total_training_cycle = (flags_obj.train_epochs //
flags_obj.epochs_between_evals)
for cycle_index in range(total_training_cycle):
logging.info("Starting a training cycle: %d/%d",
cycle_index + 1, total_training_cycle)
# Perform batch_wise dataset shuffling
train_speech_dataset.entries = dataset.batch_wise_dataset_shuffle(
train_speech_dataset.entries, cycle_index, flags_obj.sortagrad,
flags_obj.batch_size)
estimator.train(input_fn=input_fn_train)
# Evaluation
logging.info("Starting to evaluate...")
eval_results = evaluate_model(
estimator, eval_speech_dataset.speech_labels,
eval_speech_dataset.entries, input_fn_eval)
# Log the WER and CER results.
benchmark_logger.log_evaluation_result(eval_results)
logging.info(
"Iteration {}: WER = {:.2f}, CER = {:.2f}".format(
cycle_index + 1, eval_results[_WER_KEY], eval_results[_CER_KEY]))
# If some evaluation threshold is met
if model_helpers.past_stop_threshold(
flags_obj.wer_threshold, eval_results[_WER_KEY]):
break
def define_deep_speech_flags():
"""Add flags for run_deep_speech."""
# Add common flags
flags_core.define_base(
data_dir=False, # we use train_data_dir and eval_data_dir instead
export_dir=True,
train_epochs=True,
hooks=True,
num_gpu=True,
epochs_between_evals=True
)
flags_core.define_performance(
num_parallel_calls=False,
inter_op=False,
intra_op=False,
synthetic_data=False,
max_train_steps=False,
dtype=False
)
flags_core.define_benchmark()
flags.adopt_module_key_flags(flags_core)
flags_core.set_defaults(
model_dir="/tmp/deep_speech_model/",
export_dir="/tmp/deep_speech_saved_model/",
train_epochs=10,
batch_size=128,
hooks="")
# Deep speech flags
flags.DEFINE_integer(
name="seed", default=1,
help=flags_core.help_wrap("The random seed."))
flags.DEFINE_string(
name="train_data_dir",
default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
help=flags_core.help_wrap("The csv file path of train dataset."))
flags.DEFINE_string(
name="eval_data_dir",
default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
help=flags_core.help_wrap("The csv file path of evaluation dataset."))
flags.DEFINE_bool(
name="sortagrad", default=True,
help=flags_core.help_wrap(
"If true, sort examples by audio length and perform no "
"batch_wise shuffling for the first epoch."))
flags.DEFINE_integer(
name="sample_rate", default=16000,
help=flags_core.help_wrap("The sample rate for audio."))
flags.DEFINE_integer(
name="window_ms", default=20,
help=flags_core.help_wrap("The frame length for spectrogram."))
flags.DEFINE_integer(
name="stride_ms", default=10,
help=flags_core.help_wrap("The frame step."))
flags.DEFINE_string(
name="vocabulary_file", default=_VOCABULARY_FILE,
help=flags_core.help_wrap("The file path of vocabulary file."))
# RNN related flags
flags.DEFINE_integer(
name="rnn_hidden_size", default=800,
help=flags_core.help_wrap("The hidden size of RNNs."))
flags.DEFINE_integer(
name="rnn_hidden_layers", default=5,
help=flags_core.help_wrap("The number of RNN layers."))
flags.DEFINE_bool(
name="use_bias", default=True,
help=flags_core.help_wrap("Use bias in the last fully-connected layer"))
flags.DEFINE_bool(
name="is_bidirectional", default=True,
help=flags_core.help_wrap("If rnn unit is bidirectional"))
flags.DEFINE_enum(
name="rnn_type", default="gru",
enum_values=deep_speech_model.SUPPORTED_RNNS.keys(),
case_sensitive=False,
help=flags_core.help_wrap("Type of RNN cell."))
# Training related flags
flags.DEFINE_float(
name="learning_rate", default=5e-4,
help=flags_core.help_wrap("The initial learning rate."))
# Evaluation metrics threshold
flags.DEFINE_float(
name="wer_threshold", default=None,
help=flags_core.help_wrap(
"If passed, training will stop when the evaluation metric WER is "
"greater than or equal to wer_threshold. For libri speech dataset "
"the desired wer_threshold is 0.23 which is the result achieved by "
"MLPerf implementation."))
def main(_):
run_deep_speech(flags_obj)
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
define_deep_speech_flags()
flags_obj = flags.FLAGS
absl_app.run(main)
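# Minimal invocation sketch (hypothetical paths; the flags are the ones defined
# above):
#   python deep_speech.py \
#     --train_data_dir=/tmp/librispeech_data/train-clean-100/LibriSpeech/train-clean-100.csv \
#     --eval_data_dir=/tmp/librispeech_data/dev-clean/LibriSpeech/dev-clean.csv \
#     --model_dir=/tmp/deep_speech_model/ \
#     --train_epochs=10 --batch_size=128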
| 14,268 | 33.136364 | 89 | py |
models | models-master/research/deep_speech/data/download.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and preprocess LibriSpeech dataset for DeepSpeech model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import fnmatch
import os
import sys
import tarfile
import tempfile
import unicodedata
from absl import app as absl_app
from absl import flags as absl_flags
import pandas
from six.moves import urllib
from sox import Transformer
import tensorflow as tf
from absl import logging
LIBRI_SPEECH_URLS = {
"train-clean-100":
"http://www.openslr.org/resources/12/train-clean-100.tar.gz",
"train-clean-360":
"http://www.openslr.org/resources/12/train-clean-360.tar.gz",
"train-other-500":
"http://www.openslr.org/resources/12/train-other-500.tar.gz",
"dev-clean":
"http://www.openslr.org/resources/12/dev-clean.tar.gz",
"dev-other":
"http://www.openslr.org/resources/12/dev-other.tar.gz",
"test-clean":
"http://www.openslr.org/resources/12/test-clean.tar.gz",
"test-other":
"http://www.openslr.org/resources/12/test-other.tar.gz"
}
def download_and_extract(directory, url):
"""Download and extract the given split of dataset.
Args:
directory: the directory where to extract the tarball.
url: the url to download the data file.
"""
if not tf.io.gfile.exists(directory):
tf.io.gfile.makedirs(directory)
_, tar_filepath = tempfile.mkstemp(suffix=".tar.gz")
try:
logging.info("Downloading %s to %s" % (url, tar_filepath))
def _progress(count, block_size, total_size):
sys.stdout.write("\r>> Downloading {} {:.1f}%".format(
tar_filepath, 100.0 * count * block_size / total_size))
sys.stdout.flush()
urllib.request.urlretrieve(url, tar_filepath, _progress)
print()
statinfo = os.stat(tar_filepath)
logging.info(
"Successfully downloaded %s, size(bytes): %d" % (url, statinfo.st_size))
with tarfile.open(tar_filepath, "r") as tar:
tar.extractall(directory)
finally:
tf.io.gfile.remove(tar_filepath)
def convert_audio_and_split_transcript(input_dir, source_name, target_name,
output_dir, output_file):
"""Convert FLAC to WAV and split the transcript.
For audio file, convert the format from FLAC to WAV using the sox.Transformer
library.
For transcripts, each line contains the sequence id and the corresponding
transcript (separated by space):
Input data format: seq-id transcript_of_seq-id
For example:
1-2-0 transcript_of_1-2-0.flac
1-2-1 transcript_of_1-2-1.flac
...
Each sequence id has a corresponding .flac file.
Parse the transcript file and generate a new csv file which has three columns:
"wav_filename": the absolute path to a wav file.
"wav_filesize": the size of the corresponding wav file.
"transcript": the transcript for this audio segement.
Args:
input_dir: the directory which holds the input dataset.
source_name: the name of the specified dataset. e.g. test-clean
target_name: the directory name for the newly generated audio files.
e.g. test-clean-wav
output_dir: the directory to place the newly generated csv files.
output_file: the name of the newly generated csv file. e.g. test-clean.csv
"""
logging.info("Preprocessing audio and transcript for %s" % source_name)
source_dir = os.path.join(input_dir, source_name)
target_dir = os.path.join(input_dir, target_name)
if not tf.io.gfile.exists(target_dir):
tf.io.gfile.makedirs(target_dir)
files = []
tfm = Transformer()
# Convert all FLAC file into WAV format. At the same time, generate the csv
# file.
for root, _, filenames in tf.io.gfile.walk(source_dir):
for filename in fnmatch.filter(filenames, "*.trans.txt"):
trans_file = os.path.join(root, filename)
with codecs.open(trans_file, "r", "utf-8") as fin:
for line in fin:
seqid, transcript = line.split(" ", 1)
          # We do an encode-decode transformation here because the output of
          # encode() is a bytes object, which we convert back to a string.
transcript = unicodedata.normalize("NFKD", transcript).encode(
"ascii", "ignore").decode("ascii", "ignore").strip().lower()
# Convert FLAC to WAV.
flac_file = os.path.join(root, seqid + ".flac")
wav_file = os.path.join(target_dir, seqid + ".wav")
if not tf.io.gfile.exists(wav_file):
tfm.build(flac_file, wav_file)
wav_filesize = os.path.getsize(wav_file)
files.append((os.path.abspath(wav_file), wav_filesize, transcript))
# Write to CSV file which contains three columns:
# "wav_filename", "wav_filesize", "transcript".
csv_file_path = os.path.join(output_dir, output_file)
df = pandas.DataFrame(
data=files, columns=["wav_filename", "wav_filesize", "transcript"])
df.to_csv(csv_file_path, index=False, sep="\t")
logging.info("Successfully generated csv file {}".format(csv_file_path))
def download_and_process_datasets(directory, datasets):
"""Download and pre-process the specified list of LibriSpeech dataset.
Args:
directory: the directory to put all the downloaded and preprocessed data.
datasets: list of dataset names that will be downloaded and processed.
"""
logging.info("Preparing LibriSpeech dataset: {}".format(
",".join(datasets)))
for dataset in datasets:
logging.info("Preparing dataset %s", dataset)
dataset_dir = os.path.join(directory, dataset)
download_and_extract(dataset_dir, LIBRI_SPEECH_URLS[dataset])
convert_audio_and_split_transcript(
dataset_dir + "/LibriSpeech", dataset, dataset + "-wav",
dataset_dir + "/LibriSpeech", dataset + ".csv")
def define_data_download_flags():
"""Define flags for data downloading."""
absl_flags.DEFINE_string(
"data_dir", "/tmp/librispeech_data",
"Directory to download data and extract the tarball")
absl_flags.DEFINE_bool("train_only", False,
"If true, only download the training set")
absl_flags.DEFINE_bool("dev_only", False,
"If true, only download the dev set")
absl_flags.DEFINE_bool("test_only", False,
"If true, only download the test set")
def main(_):
if not tf.io.gfile.exists(FLAGS.data_dir):
tf.io.gfile.makedirs(FLAGS.data_dir)
if FLAGS.train_only:
download_and_process_datasets(
FLAGS.data_dir,
["train-clean-100", "train-clean-360", "train-other-500"])
elif FLAGS.dev_only:
download_and_process_datasets(FLAGS.data_dir, ["dev-clean", "dev-other"])
elif FLAGS.test_only:
download_and_process_datasets(FLAGS.data_dir, ["test-clean", "test-other"])
else:
# By default we download the entire dataset.
download_and_process_datasets(FLAGS.data_dir, LIBRI_SPEECH_URLS.keys())
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
define_data_download_flags()
FLAGS = absl_flags.FLAGS
absl_app.run(main)
| 7,750 | 35.909524 | 80 | py |
models | models-master/research/deep_speech/data/dataset.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate tf.data.Dataset object for deep speech training/evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
# pylint: disable=g-bad-import-order
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import soundfile
import tensorflow as tf
from absl import logging
# pylint: enable=g-bad-import-order
import data.featurizer as featurizer # pylint: disable=g-bad-import-order
class AudioConfig(object):
"""Configs for spectrogram extraction from audio."""
def __init__(self,
sample_rate,
window_ms,
stride_ms,
normalize=False):
"""Initialize the AudioConfig class.
Args:
sample_rate: an integer denoting the sample rate of the input waveform.
window_ms: an integer for the length of a spectrogram frame, in ms.
stride_ms: an integer for the frame stride, in ms.
normalize: a boolean for whether apply normalization on the audio feature.
"""
self.sample_rate = sample_rate
self.window_ms = window_ms
self.stride_ms = stride_ms
self.normalize = normalize
class DatasetConfig(object):
"""Config class for generating the DeepSpeechDataset."""
def __init__(self, audio_config, data_path, vocab_file_path, sortagrad):
"""Initialize the configs for deep speech dataset.
Args:
audio_config: AudioConfig object specifying the audio-related configs.
data_path: a string denoting the full path of a manifest file.
vocab_file_path: a string specifying the vocabulary file path.
sortagrad: a boolean, if set to true, audio sequences will be fed by
increasing length in the first training epoch, which will
expedite network convergence.
Raises:
RuntimeError: file path not exist.
"""
self.audio_config = audio_config
assert tf.io.gfile.exists(data_path)
assert tf.io.gfile.exists(vocab_file_path)
self.data_path = data_path
self.vocab_file_path = vocab_file_path
self.sortagrad = sortagrad
def _normalize_audio_feature(audio_feature):
"""Perform mean and variance normalization on the spectrogram feature.
Args:
audio_feature: a numpy array for the spectrogram feature.
Returns:
a numpy array of the normalized spectrogram.
"""
mean = np.mean(audio_feature, axis=0)
var = np.var(audio_feature, axis=0)
normalized = (audio_feature - mean) / (np.sqrt(var) + 1e-6)
return normalized
def _preprocess_audio(audio_file_path, audio_featurizer, normalize):
"""Load the audio file and compute spectrogram feature."""
data, _ = soundfile.read(audio_file_path)
feature = featurizer.compute_spectrogram_feature(
data, audio_featurizer.sample_rate, audio_featurizer.stride_ms,
audio_featurizer.window_ms)
# Feature normalization
if normalize:
feature = _normalize_audio_feature(feature)
# Adding Channel dimension for conv2D input.
feature = np.expand_dims(feature, axis=2)
return feature
def _preprocess_data(file_path):
"""Generate a list of tuples (wav_filename, wav_filesize, transcript).
Each dataset file contains three columns: "wav_filename", "wav_filesize",
and "transcript". This function parses the csv file and stores each example
  in increasing order of audio length (indicated by wav_filesize).
  As the waveforms are ordered by increasing length, audio samples in a
  mini-batch have similar lengths.
Args:
file_path: a string specifying the csv file path for a dataset.
Returns:
A list of tuples (wav_filename, wav_filesize, transcript) sorted by
file_size.
"""
logging.info("Loading data set {}".format(file_path))
with tf.io.gfile.GFile(file_path, "r") as f:
lines = f.read().splitlines()
# Skip the csv header in lines[0].
lines = lines[1:]
# The metadata file is tab separated.
lines = [line.split("\t", 2) for line in lines]
# Sort input data by the length of audio sequence.
lines.sort(key=lambda item: int(item[1]))
return [tuple(line) for line in lines]
class DeepSpeechDataset(object):
"""Dataset class for training/evaluation of DeepSpeech model."""
def __init__(self, dataset_config):
"""Initialize the DeepSpeechDataset class.
Args:
dataset_config: DatasetConfig object.
"""
self.config = dataset_config
# Instantiate audio feature extractor.
self.audio_featurizer = featurizer.AudioFeaturizer(
sample_rate=self.config.audio_config.sample_rate,
window_ms=self.config.audio_config.window_ms,
stride_ms=self.config.audio_config.stride_ms)
# Instantiate text feature extractor.
self.text_featurizer = featurizer.TextFeaturizer(
vocab_file=self.config.vocab_file_path)
self.speech_labels = self.text_featurizer.speech_labels
self.entries = _preprocess_data(self.config.data_path)
# The generated spectrogram will have 161 feature bins.
self.num_feature_bins = 161
def batch_wise_dataset_shuffle(entries, epoch_index, sortagrad, batch_size):
"""Batch-wise shuffling of the data entries.
Each data entry is in the format of (audio_file, file_size, transcript).
If epoch_index is 0 and sortagrad is true, we don't perform shuffling and
return entries in sorted file_size order. Otherwise, do batch_wise shuffling.
Args:
entries: a list of data entries.
    epoch_index: an integer denoting the current epoch index.
sortagrad: a boolean to control whether sorting the audio in the first
training epoch.
batch_size: an integer for the batch size.
Returns:
The shuffled data entries.
"""
shuffled_entries = []
if epoch_index == 0 and sortagrad:
# No need to shuffle.
shuffled_entries = entries
else:
# Shuffle entries batch-wise.
max_buckets = int(math.floor(len(entries) / batch_size))
total_buckets = [i for i in xrange(max_buckets)]
random.shuffle(total_buckets)
shuffled_entries = []
for i in total_buckets:
shuffled_entries.extend(entries[i * batch_size : (i + 1) * batch_size])
# If the last batch doesn't contain enough batch_size examples,
# just append it to the shuffled_entries.
shuffled_entries.extend(entries[max_buckets * batch_size:])
return shuffled_entries
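# Worked example (hypothetical shuffle outcome): with 10 entries, batch_size=3
# and a shuffled bucket order of [2, 0, 1], the result is
# entries[6:9] + entries[0:3] + entries[3:6] + entries[9:], so each mini-batch
# keeps audio clips of similar length while the batch order is randomized.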
def input_fn(batch_size, deep_speech_dataset, repeat=1):
"""Input function for model training and evaluation.
Args:
batch_size: an integer denoting the size of a batch.
deep_speech_dataset: DeepSpeechDataset object.
repeat: an integer for how many times to repeat the dataset.
Returns:
a tf.data.Dataset object for model to consume.
"""
# Dataset properties
data_entries = deep_speech_dataset.entries
num_feature_bins = deep_speech_dataset.num_feature_bins
audio_featurizer = deep_speech_dataset.audio_featurizer
feature_normalize = deep_speech_dataset.config.audio_config.normalize
text_featurizer = deep_speech_dataset.text_featurizer
def _gen_data():
"""Dataset generator function."""
for audio_file, _, transcript in data_entries:
features = _preprocess_audio(
audio_file, audio_featurizer, feature_normalize)
labels = featurizer.compute_label_feature(
transcript, text_featurizer.token_to_index)
input_length = [features.shape[0]]
label_length = [len(labels)]
# Yield a tuple of (features, labels) where features is a dict containing
# all info about the actual data features.
yield (
{
"features": features,
"input_length": input_length,
"label_length": label_length
},
labels)
dataset = tf.data.Dataset.from_generator(
_gen_data,
output_types=(
{
"features": tf.float32,
"input_length": tf.int32,
"label_length": tf.int32
},
tf.int32),
output_shapes=(
{
"features": tf.TensorShape([None, num_feature_bins, 1]),
"input_length": tf.TensorShape([1]),
"label_length": tf.TensorShape([1])
},
tf.TensorShape([None]))
)
# Repeat and batch the dataset
dataset = dataset.repeat(repeat)
# Padding the features to its max length dimensions.
dataset = dataset.padded_batch(
batch_size=batch_size,
padded_shapes=(
{
"features": tf.TensorShape([None, num_feature_bins, 1]),
"input_length": tf.TensorShape([1]),
"label_length": tf.TensorShape([1])
},
tf.TensorShape([None]))
)
# Prefetch to improve speed of input pipeline.
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
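# Usage sketch (hypothetical paths), mirroring how the training script wires
# this module together:
#   audio_conf = AudioConfig(sample_rate=16000, window_ms=20, stride_ms=10,
#                            normalize=True)
#   config = DatasetConfig(audio_conf,
#                          "/tmp/librispeech_data/dev-clean.csv",
#                          "data/vocabulary.txt", sortagrad=True)
#   speech_dataset = DeepSpeechDataset(config)
#   tf_dataset = input_fn(batch_size=32, deep_speech_dataset=speech_dataset)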
| 9,481 | 33.355072 | 80 | py |
models | models-master/research/deep_speech/data/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/deep_speech/data/featurizer.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility class for extracting features from the text and audio input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import numpy as np
def compute_spectrogram_feature(samples, sample_rate, stride_ms=10.0,
window_ms=20.0, max_freq=None, eps=1e-14):
"""Compute the spectrograms for the input samples(waveforms).
More about spectrogram computation, please refer to:
https://en.wikipedia.org/wiki/Short-time_Fourier_transform.
"""
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of sample rate.")
if stride_ms > window_ms:
raise ValueError("Stride size must not be greater than window size.")
stride_size = int(0.001 * sample_rate * stride_ms)
window_size = int(0.001 * sample_rate * window_ms)
# Extract strided windows
truncate_size = (len(samples) - window_size) % stride_size
samples = samples[:len(samples) - truncate_size]
nshape = (window_size, (len(samples) - window_size) // stride_size + 1)
nstrides = (samples.strides[0], samples.strides[0] * stride_size)
windows = np.lib.stride_tricks.as_strided(
samples, shape=nshape, strides=nstrides)
assert np.all(
windows[:, 1] == samples[stride_size:(stride_size + window_size)])
# Window weighting, squared Fast Fourier Transform (fft), scaling
weighting = np.hanning(window_size)[:, None]
fft = np.fft.rfft(windows * weighting, axis=0)
fft = np.absolute(fft)
fft = fft**2
scale = np.sum(weighting**2) * sample_rate
fft[1:-1, :] *= (2.0 / scale)
fft[(0, -1), :] /= scale
# Prepare fft frequency list
freqs = float(sample_rate) / window_size * np.arange(fft.shape[0])
# Compute spectrogram feature
ind = np.where(freqs <= max_freq)[0][-1] + 1
specgram = np.log(fft[:ind, :] + eps)
return np.transpose(specgram, (1, 0))
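# Illustrative sketch (not part of the original module and never called by it):
# with the default 16 kHz sample rate and a 20 ms window, window_size is 320
# samples, so the rFFT yields 320 // 2 + 1 = 161 frequency bins -- the 161
# feature bins assumed by the data pipeline. One second of audio stepped at
# 10 ms gives (16000 - 320) // 160 + 1 = 99 frames, i.e. a (99, 161) array.
def _example_spectrogram_shape():
  """Returns (99, 161), the spectrogram shape for one second of silence."""
  samples = np.zeros(16000)
  return compute_spectrogram_feature(samples, sample_rate=16000).shape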
class AudioFeaturizer(object):
"""Class to extract spectrogram features from the audio input."""
def __init__(self,
sample_rate=16000,
window_ms=20.0,
stride_ms=10.0):
"""Initialize the audio featurizer class according to the configs.
Args:
sample_rate: an integer specifying the sample rate of the input waveform.
window_ms: an integer for the length of a spectrogram frame, in ms.
stride_ms: an integer for the frame stride, in ms.
"""
self.sample_rate = sample_rate
self.window_ms = window_ms
self.stride_ms = stride_ms
def compute_label_feature(text, token_to_idx):
"""Convert string to a list of integers."""
tokens = list(text.strip().lower())
feats = [token_to_idx[token] for token in tokens]
return feats
class TextFeaturizer(object):
"""Extract text feature based on char-level granularity.
By looking up the vocabulary table, each input string (one line of transcript)
will be converted to a sequence of integer indexes.
"""
def __init__(self, vocab_file):
lines = []
with codecs.open(vocab_file, "r", "utf-8") as fin:
lines.extend(fin.readlines())
self.token_to_index = {}
self.index_to_token = {}
self.speech_labels = ""
index = 0
for line in lines:
line = line[:-1] # Strip the '\n' char.
if line.startswith("#"):
# Skip from reading comment line.
continue
self.token_to_index[line] = index
self.index_to_token[index] = line
self.speech_labels += line
index += 1
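# Illustrative example (hypothetical vocabulary): if the vocabulary file holds
# the lines "# comment", "a", "b" and " " (a single space), the featurizer maps
# token_to_index == {"a": 0, "b": 1, " ": 2}, speech_labels == "ab ", and
# compute_label_feature("ab b", token_to_index) returns [0, 1, 2, 1].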
| 4,241 | 34.647059 | 80 | py |
models | models-master/research/slim/export_inference_graph.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Saves out a GraphDef containing the architecture of the model.
To use it, run something like this, with a model name defined by slim:
bazel build tensorflow_models/research/slim:export_inference_graph
bazel-bin/tensorflow_models/research/slim/export_inference_graph \
--model_name=inception_v3 --output_file=/tmp/inception_v3_inf_graph.pb
If you then want to use the resulting model with your own or pretrained
checkpoints as part of a mobile model, you can run freeze_graph to get a graph
def with the variables inlined as constants using:
bazel build tensorflow/python/tools:freeze_graph
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=/tmp/inception_v3_inf_graph.pb \
--input_checkpoint=/tmp/checkpoints/inception_v3.ckpt \
--input_binary=true --output_graph=/tmp/frozen_inception_v3.pb \
--output_node_names=InceptionV3/Predictions/Reshape_1
The output node names will vary depending on the model, but you can inspect and
estimate them using the summarize_graph tool:
bazel build tensorflow/tools/graph_transforms:summarize_graph
bazel-bin/tensorflow/tools/graph_transforms/summarize_graph \
--in_graph=/tmp/inception_v3_inf_graph.pb
To run the resulting graph in C++, you can look at the label_image sample code:
bazel build tensorflow/examples/label_image:label_image
bazel-bin/tensorflow/examples/label_image/label_image \
--image=${HOME}/Pictures/flowers.jpg \
--input_layer=input \
--output_layer=InceptionV3/Predictions/Reshape_1 \
--graph=/tmp/frozen_inception_v3.pb \
--labels=/tmp/imagenet_slim_labels.txt \
--input_mean=0 \
--input_std=255
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.python.platform import gfile
from datasets import dataset_factory
from nets import nets_factory
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to save.')
tf.app.flags.DEFINE_boolean(
'is_training', False,
'Whether to save out a training-focused version of the model.')
tf.app.flags.DEFINE_integer(
'image_size', None,
'The image size to use, otherwise use the model default_image_size.')
tf.app.flags.DEFINE_integer(
'batch_size', None,
'Batch size for the exported model. Defaulted to "None" so batch size can '
'be specified at model runtime.')
tf.app.flags.DEFINE_string('dataset_name', 'imagenet',
'The name of the dataset to use with the model.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'output_file', '', 'Where to save the resulting file to.')
tf.app.flags.DEFINE_string(
'dataset_dir', '', 'Directory to save intermediate dataset files to')
tf.app.flags.DEFINE_bool(
'quantize', False, 'whether to use quantized graph or not.')
tf.app.flags.DEFINE_bool(
'is_video_model', False, 'whether to use 5-D inputs for video model.')
tf.app.flags.DEFINE_integer(
'num_frames', None,
'The number of frames to use. Only used if is_video_model is True.')
tf.app.flags.DEFINE_bool('write_text_graphdef', False,
'Whether to write a text version of graphdef.')
tf.app.flags.DEFINE_bool('use_grayscale', False,
'Whether to convert input images to grayscale.')
FLAGS = tf.app.flags.FLAGS
def main(_):
if not FLAGS.output_file:
raise ValueError('You must supply the path to save to with --output_file')
if FLAGS.is_video_model and not FLAGS.num_frames:
raise ValueError(
'Number of frames must be specified for video models with --num_frames')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default() as graph:
dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
FLAGS.dataset_dir)
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
is_training=FLAGS.is_training)
image_size = FLAGS.image_size or network_fn.default_image_size
num_channels = 1 if FLAGS.use_grayscale else 3
if FLAGS.is_video_model:
input_shape = [
FLAGS.batch_size, FLAGS.num_frames, image_size, image_size,
num_channels
]
else:
input_shape = [FLAGS.batch_size, image_size, image_size, num_channels]
placeholder = tf.placeholder(name='input', dtype=tf.float32,
shape=input_shape)
network_fn(placeholder)
if FLAGS.quantize:
contrib_quantize.create_eval_graph()
graph_def = graph.as_graph_def()
if FLAGS.write_text_graphdef:
tf.io.write_graph(
graph_def,
os.path.dirname(FLAGS.output_file),
os.path.basename(FLAGS.output_file),
as_text=True)
else:
with gfile.GFile(FLAGS.output_file, 'wb') as f:
f.write(graph_def.SerializeToString())
if __name__ == '__main__':
tf.app.run()
| 5,939 | 35.441718 | 80 | py |
models | models-master/research/slim/setup.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup script for slim."""
from setuptools import find_packages
from setuptools import setup
install_requires = [
'six',
'tf-slim>=1.1',
]
setup(
name='slim',
version='0.1',
install_requires=install_requires,
include_package_data=True,
packages=find_packages(),
description='tf-slim',
)
| 1,009 | 29.606061 | 80 | py |
models | models-master/research/slim/train_image_classifier.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic training script that trains a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import quantize as contrib_quantize
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from preprocessing import preprocessing_factory
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', '/tmp/tfmodel/',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_float(
'warmup_epochs', 0,
'Linearly warmup learning rate from 0 to learning_rate over this '
'many epochs.')
tf.app.flags.DEFINE_integer('num_clones', 1,
                            'Number of model clones to deploy. Note: for '
                            'historical reasons the loss from all clones is '
                            'averaged and learning rate decay happens per '
                            'clone epochs.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
    'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 600,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'rmsprop',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
tf.app.flags.DEFINE_integer(
'quantize_delay', -1,
'Number of steps to start quantized training. Set to -1 would disable '
'quantized training.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays. Note: this flag counts '
'epochs per clone but aggregates per sync replicas. So 1.0 means that '
    'each clone will go over a full epoch individually, but replicas will go '
    'over it once across all replicas.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
'The Number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
    'The decay to use for the moving average. '
'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'imagenet', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
tf.app.flags.DEFINE_bool('use_grayscale', False,
'Whether to convert input images to grayscale.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
    'Comma-separated list of scopes to filter the set of variables to train. '
    'By default, None trains all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
    'When restoring a checkpoint, ignore missing variables.')
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
    ValueError: if FLAGS.learning_rate_decay_type is not recognized.
"""
  # Note: when num_clones is > 1, this will actually have each clone go over
  # each epoch FLAGS.num_epochs_per_decay times. This is different
# behavior from sync replicas and is expected to produce different results.
steps_per_epoch = num_samples_per_epoch / FLAGS.batch_size
if FLAGS.sync_replicas:
steps_per_epoch /= FLAGS.replicas_to_aggregate
decay_steps = int(steps_per_epoch * FLAGS.num_epochs_per_decay)
if FLAGS.learning_rate_decay_type == 'exponential':
learning_rate = tf.train.exponential_decay(
FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
learning_rate = tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
learning_rate = tf.train.polynomial_decay(
FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized' %
FLAGS.learning_rate_decay_type)
if FLAGS.warmup_epochs:
warmup_lr = (
FLAGS.learning_rate * tf.cast(global_step, tf.float32) /
(steps_per_epoch * FLAGS.warmup_epochs))
learning_rate = tf.minimum(warmup_lr, learning_rate)
return learning_rate
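# Worked example (hypothetical values): with num_samples_per_epoch=100000,
# batch_size=32 and sync_replicas disabled, steps_per_epoch is 3125. With
# num_epochs_per_decay=2.0 and the default exponential schedule, the learning
# rate is multiplied by learning_rate_decay_factor (0.94) every
# decay_steps = 6250 steps. With warmup_epochs=1, the rate instead ramps
# linearly from 0 to learning_rate over the first 3125 steps, since the warmup
# value is taken as the minimum of the two schedules.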
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
return optimizer
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint exists in the train_dir. Then we'll be
# ignoring the checkpoint anyway.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
break
else:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
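# Fine-tuning sketch (hypothetical checkpoint and dataset paths; the scope
# names follow the usual slim naming for InceptionV3): restore an ImageNet
# checkpoint while re-initializing and training only the classification layers:
#   python train_image_classifier.py \
#     --train_dir=/tmp/train_logs \
#     --dataset_name=flowers --dataset_dir=/tmp/flowers \
#     --model_name=inception_v3 \
#     --checkpoint_path=/tmp/checkpoints/inception_v3.ckpt \
#     --checkpoint_exclude_scopes=InceptionV3/Logits,InceptionV3/AuxLogits \
#     --trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits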
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
#######################
# Config model_deploy #
#######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# Create global_step
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
######################
# Select the network #
######################
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=True)
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=True,
use_grayscale=FLAGS.use_grayscale)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20 * FLAGS.batch_size,
common_queue_min=10 * FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
train_image_size = FLAGS.train_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, train_image_size, train_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
labels = slim.one_hot_encoding(
labels, dataset.num_classes - FLAGS.labels_offset)
batch_queue = slim.prefetch_queue.prefetch_queue(
[images, labels], capacity=2 * deploy_config.num_clones)
####################
# Define the model #
####################
def clone_fn(batch_queue):
"""Allows data parallelism by creating multiple clones of network_fn."""
images, labels = batch_queue.dequeue()
logits, end_points = network_fn(images)
#############################
# Specify the loss function #
#############################
if 'AuxLogits' in end_points:
slim.losses.softmax_cross_entropy(
end_points['AuxLogits'], labels,
label_smoothing=FLAGS.label_smoothing, weights=0.4,
scope='aux_loss')
slim.losses.softmax_cross_entropy(
logits, labels, label_smoothing=FLAGS.label_smoothing, weights=1.0)
return end_points
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by network_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
# Add summaries for end_points.
end_points = clones[0].outputs
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram('activations/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity/' + end_point,
tf.nn.zero_fraction(x)))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
if FLAGS.quantize_delay >= 0:
contrib_quantize.create_training_graph(quant_delay=FLAGS.quantize_delay)
#########################################
# Configure the optimization procedure. #
#########################################
with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
optimizer = _configure_optimizer(learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
if FLAGS.sync_replicas:
# If sync_replicas is enabled, the averaging will be done in the chief
# queue runner.
optimizer = tf.train.SyncReplicasOptimizer(
opt=optimizer,
replicas_to_aggregate=FLAGS.replicas_to_aggregate,
total_num_replicas=FLAGS.worker_replicas,
variable_averages=variable_averages,
variables_to_average=moving_average_variables)
elif FLAGS.moving_average_decay:
# Update ops executed locally by trainer.
update_ops.append(variable_averages.apply(moving_average_variables))
# Variables to train.
variables_to_train = _get_variables_to_train()
    # Compute the total loss and the gradients across all clones.
total_loss, clones_gradients = model_deploy.optimize_clones(
clones,
optimizer,
var_list=variables_to_train)
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
slim.learning.train(
train_tensor,
logdir=FLAGS.train_dir,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
init_fn=_get_init_fn(),
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
log_every_n_steps=FLAGS.log_every_n_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
sync_optimizer=optimizer if FLAGS.sync_replicas else None)
if __name__ == '__main__':
tf.app.run()
| 21,229 | 33.917763 | 80 | py |
models | models-master/research/slim/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/slim/eval_image_classifier.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic evaluation script that evaluates a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import quantize as contrib_quantize
from datasets import dataset_factory
from nets import nets_factory
from preprocessing import preprocessing_factory
tf.app.flags.DEFINE_integer(
'batch_size', 100, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'max_num_batches', None,
    'Max number of batches to evaluate. By default, use all.')
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'checkpoint_path', '/tmp/tfmodel/',
'The directory where the model was written to or an absolute path to a '
'checkpoint file.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_string(
'dataset_name', 'imagenet', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'test', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
tf.app.flags.DEFINE_integer(
'eval_image_size', None, 'Eval image size')
tf.app.flags.DEFINE_bool(
'quantize', False, 'whether to use quantized graph or not.')
tf.app.flags.DEFINE_bool('use_grayscale', False,
'Whether to convert input images to grayscale.')
FLAGS = tf.app.flags.FLAGS
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
tf_global_step = slim.get_or_create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
####################
# Select the model #
####################
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
is_training=False)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
shuffle=False,
common_queue_capacity=2 * FLAGS.batch_size,
common_queue_min=FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=False,
use_grayscale=FLAGS.use_grayscale)
eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
####################
# Define the model #
####################
logits, _ = network_fn(images)
if FLAGS.quantize:
contrib_quantize.create_eval_graph()
if FLAGS.moving_average_decay:
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, tf_global_step)
variables_to_restore = variable_averages.variables_to_restore(
slim.get_model_variables())
variables_to_restore[tf_global_step.op.name] = tf_global_step
else:
variables_to_restore = slim.get_variables_to_restore()
predictions = tf.argmax(logits, 1)
labels = tf.squeeze(labels)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'Recall_5': slim.metrics.streaming_recall_at_k(
logits, labels, 5),
})
# Print the summaries to screen.
for name, value in names_to_values.items():
summary_name = 'eval/%s' % name
op = tf.summary.scalar(summary_name, value, collections=[])
op = tf.Print(op, [value], summary_name)
tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
# TODO(sguada) use num_epochs=1
if FLAGS.max_num_batches:
num_batches = FLAGS.max_num_batches
else:
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))
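      # For example, with the ImageNet validation split (50,000 images) and
      # the default --batch_size=100, this works out to 500 evaluation batches.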
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Evaluating %s' % checkpoint_path)
slim.evaluation.evaluate_once(
master=FLAGS.master,
checkpoint_path=checkpoint_path,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=list(names_to_updates.values()),
variables_to_restore=variables_to_restore)
if __name__ == '__main__':
tf.app.run()
| 7,062 | 33.793103 | 80 | py |
models | models-master/research/slim/export_inference_graph_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export_inference_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.platform import gfile
import export_inference_graph
class ExportInferenceGraphTest(tf.test.TestCase):
def testExportInferenceGraph(self):
tmpdir = self.get_temp_dir()
output_file = os.path.join(tmpdir, 'inception_v3.pb')
flags = tf.app.flags.FLAGS
flags.output_file = output_file
flags.model_name = 'inception_v3'
flags.dataset_dir = tmpdir
export_inference_graph.main(None)
self.assertTrue(gfile.Exists(output_file))
if __name__ == '__main__':
tf.test.main()
| 1,407 | 30.288889 | 80 | py |
models | models-master/research/slim/download_and_convert_data.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts a particular dataset.
Usage:
```shell
$ python download_and_convert_data.py \
--dataset_name=flowers \
--dataset_dir=/tmp/flowers
$ python download_and_convert_data.py \
--dataset_name=cifar10 \
--dataset_dir=/tmp/cifar10
$ python download_and_convert_data.py \
--dataset_name=mnist \
--dataset_dir=/tmp/mnist
$ python download_and_convert_data.py \
--dataset_name=visualwakewords \
--dataset_dir=/tmp/visualwakewords
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from datasets import download_and_convert_cifar10
from datasets import download_and_convert_flowers
from datasets import download_and_convert_mnist
from datasets import download_and_convert_visualwakewords
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'dataset_name',
None,
'The name of the dataset to convert, one of "flowers", "cifar10", "mnist", "visualwakewords"'
)
tf.app.flags.DEFINE_string(
'dataset_dir',
None,
'The directory where the output TFRecords and temporary files are saved.')
tf.flags.DEFINE_float(
'small_object_area_threshold', 0.005,
'For --dataset_name=visualwakewords only. Threshold of fraction of image '
'area below which small objects are filtered')
tf.flags.DEFINE_string(
'foreground_class_of_interest', 'person',
'For --dataset_name=visualwakewords only. Build a binary classifier based '
'on the presence or absence of this object in the image.')
def main(_):
if not FLAGS.dataset_name:
raise ValueError('You must supply the dataset name with --dataset_name')
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
if FLAGS.dataset_name == 'flowers':
download_and_convert_flowers.run(FLAGS.dataset_dir)
elif FLAGS.dataset_name == 'cifar10':
download_and_convert_cifar10.run(FLAGS.dataset_dir)
elif FLAGS.dataset_name == 'mnist':
download_and_convert_mnist.run(FLAGS.dataset_dir)
elif FLAGS.dataset_name == 'visualwakewords':
download_and_convert_visualwakewords.run(
FLAGS.dataset_dir, FLAGS.small_object_area_threshold,
FLAGS.foreground_class_of_interest)
else:
raise ValueError(
'dataset_name [%s] was not recognized.' % FLAGS.dataset_name)
if __name__ == '__main__':
tf.app.run()
| 3,125 | 31.905263 | 97 | py |
models | models-master/research/slim/nets/inception_resnet_v2_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception_resnet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception
class InceptionTest(tf.test.TestCase):
def testBuildLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('AuxLogits' in endpoints)
auxlogits = endpoints['AuxLogits']
self.assertTrue(
auxlogits.op.name.startswith('InceptionResnetV2/AuxLogits'))
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testBuildWithoutAuxLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_resnet_v2(inputs, num_classes,
create_aux_logits=False)
self.assertTrue('AuxLogits' not in endpoints)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testBuildNoClasses(self):
batch_size = 5
height, width = 299, 299
num_classes = None
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
net, endpoints = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('AuxLogits' not in endpoints)
self.assertTrue('Logits' not in endpoints)
self.assertTrue(
net.op.name.startswith('InceptionResnetV2/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1536])
def testBuildEndPoints(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('Logits' in end_points)
logits = end_points['Logits']
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('AuxLogits' in end_points)
aux_logits = end_points['AuxLogits']
self.assertListEqual(aux_logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_7b_1x1']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 8, 1536])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_resnet_v2_base(inputs)
self.assertTrue(net.op.name.startswith('InceptionResnetV2/Conv2d_7b_1x1'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 8, 8, 1536])
expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_6a',
'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_6a',
'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint=endpoint)
if endpoint != 'PreAuxLogits':
self.assertTrue(out_tensor.op.name.startswith(
'InceptionResnetV2/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points.keys())
def testBuildAndCheckAllEndPointsUptoPreAuxLogits(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint='PreAuxLogits')
endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],
'Conv2d_2a_3x3': [5, 147, 147, 32],
'Conv2d_2b_3x3': [5, 147, 147, 64],
'MaxPool_3a_3x3': [5, 73, 73, 64],
'Conv2d_3b_1x1': [5, 73, 73, 80],
'Conv2d_4a_3x3': [5, 71, 71, 192],
'MaxPool_5a_3x3': [5, 35, 35, 192],
'Mixed_5b': [5, 35, 35, 320],
'Mixed_6a': [5, 17, 17, 1088],
'PreAuxLogits': [5, 17, 17, 1088]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildAndCheckAllEndPointsUptoPreAuxLogitsWithAlignedFeatureMaps(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint='PreAuxLogits', align_feature_maps=True)
endpoints_shapes = {'Conv2d_1a_3x3': [5, 150, 150, 32],
'Conv2d_2a_3x3': [5, 150, 150, 32],
'Conv2d_2b_3x3': [5, 150, 150, 64],
'MaxPool_3a_3x3': [5, 75, 75, 64],
'Conv2d_3b_1x1': [5, 75, 75, 80],
'Conv2d_4a_3x3': [5, 75, 75, 192],
'MaxPool_5a_3x3': [5, 38, 38, 192],
'Mixed_5b': [5, 38, 38, 320],
'Mixed_6a': [5, 19, 19, 1088],
'PreAuxLogits': [5, 19, 19, 1088]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildAndCheckAllEndPointsUptoPreAuxLogitsWithOutputStrideEight(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint='PreAuxLogits', output_stride=8)
endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],
'Conv2d_2a_3x3': [5, 147, 147, 32],
'Conv2d_2b_3x3': [5, 147, 147, 64],
'MaxPool_3a_3x3': [5, 73, 73, 64],
'Conv2d_3b_1x1': [5, 73, 73, 80],
'Conv2d_4a_3x3': [5, 71, 71, 192],
'MaxPool_5a_3x3': [5, 35, 35, 192],
'Mixed_5b': [5, 35, 35, 320],
'Mixed_6a': [5, 33, 33, 1088],
'PreAuxLogits': [5, 33, 33, 1088]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testVariablesSetDevice(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
inception.inception_resnet_v2(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
inception.inception_resnet_v2(inputs, num_classes)
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_7b_1x1']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 1536])
def testGlobalPool(self):
batch_size = 1
height, width = 330, 400
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_7b_1x1']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 11, 1536])
def testGlobalPoolUnknownImageShape(self):
batch_size = 1
height, width = 330, 400
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (batch_size, None, None, 3))
logits, end_points = inception.inception_resnet_v2(
inputs, num_classes, create_aux_logits=False)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_7b_1x1']
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
logits_out, pre_pool_out = sess.run([logits, pre_pool],
{inputs: images.eval()})
self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
self.assertTupleEqual(pre_pool_out.shape, (batch_size, 8, 11, 1536))
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_resnet_v2(eval_inputs,
num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
      self.assertEqual(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_resnet_v2(train_inputs, num_classes)
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_resnet_v2(eval_inputs,
num_classes,
is_training=False,
reuse=True)
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
      self.assertEqual(output.shape, (eval_batch_size,))
def testNoBatchNormScaleByDefault(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
inception.inception_resnet_v2(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_resnet_v2_arg_scope(batch_norm_scale=True)):
inception.inception_resnet_v2(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name
for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
tf.test.main()
| 15,185 | 43.79646 | 80 | py |
models | models-master/research/slim/nets/mobilenet_v1_eval.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Validate mobilenet_v1 with options for quantization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import quantize as contrib_quantize
from datasets import dataset_factory
from nets import mobilenet_v1
from preprocessing import preprocessing_factory
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Session master')
flags.DEFINE_integer('batch_size', 250, 'Batch size')
flags.DEFINE_integer('num_classes', 1001, 'Number of classes to distinguish')
flags.DEFINE_integer('num_examples', 50000, 'Number of examples to evaluate')
flags.DEFINE_integer('image_size', 224, 'Input image resolution')
flags.DEFINE_float('depth_multiplier', 1.0, 'Depth multiplier for mobilenet')
flags.DEFINE_bool('quantize', False, 'Quantize training')
flags.DEFINE_string('checkpoint_dir', '', 'The directory for checkpoints')
flags.DEFINE_string('eval_dir', '', 'Directory for writing eval event logs')
flags.DEFINE_string('dataset_dir', '', 'Location of dataset')
FLAGS = flags.FLAGS
def imagenet_input(is_training):
"""Data reader for imagenet.
Reads in imagenet data and performs pre-processing on the images.
Args:
is_training: bool specifying if train or validation dataset is needed.
Returns:
A batch of images and labels.
"""
if is_training:
dataset = dataset_factory.get_dataset('imagenet', 'train',
FLAGS.dataset_dir)
else:
dataset = dataset_factory.get_dataset('imagenet', 'validation',
FLAGS.dataset_dir)
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
shuffle=is_training,
common_queue_capacity=2 * FLAGS.batch_size,
common_queue_min=FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
'mobilenet_v1', is_training=is_training)
image = image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
images, labels = tf.train.batch(
tensors=[image, label],
batch_size=FLAGS.batch_size,
num_threads=4,
capacity=5 * FLAGS.batch_size)
return images, labels
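# Shape note (worked out from the flag defaults above, not from running the
# pipeline): imagenet_input(is_training=False) returns `images` of shape
# [250, 224, 224, 3] and integer `labels` of shape [250].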
def metrics(logits, labels):
"""Specify the metrics for eval.
Args:
logits: Logits output from the graph.
labels: Ground truth labels for inputs.
Returns:
Eval Op for the graph.
"""
labels = tf.squeeze(labels)
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'Accuracy':
tf.metrics.accuracy(
tf.argmax(input=logits, axis=1), labels),
'Recall_5':
tf.metrics.recall_at_k(labels, logits, 5),
})
  for name, value in names_to_values.items():
slim.summaries.add_scalar_summary(
value, name, prefix='eval', print_summary=True)
  return list(names_to_updates.values())
def build_model():
"""Build the mobilenet_v1 model for evaluation.
Returns:
g: graph with rewrites after insertion of quantization ops and batch norm
folding.
eval_ops: eval ops for inference.
"""
g = tf.Graph()
with g.as_default():
inputs, labels = imagenet_input(is_training=False)
scope = mobilenet_v1.mobilenet_v1_arg_scope(
is_training=False, weight_decay=0.0)
with slim.arg_scope(scope):
logits, _ = mobilenet_v1.mobilenet_v1(
inputs,
is_training=False,
depth_multiplier=FLAGS.depth_multiplier,
num_classes=FLAGS.num_classes)
if FLAGS.quantize:
contrib_quantize.create_eval_graph()
eval_ops = metrics(logits, labels)
return g, eval_ops
def eval_model():
"""Evaluates mobilenet_v1."""
g, eval_ops = build_model()
with g.as_default():
num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))
slim.evaluation.evaluate_once(
FLAGS.master,
FLAGS.checkpoint_dir,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=eval_ops)
def main(unused_arg):
eval_model()
if __name__ == '__main__':
tf.app.run(main)
| 4,939 | 30.464968 | 80 | py |
models | models-master/research/slim/nets/s3dg_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for networks.s3dg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow.compat.v1 as tf
from nets import s3dg
class S3DGTest(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
num_frames = 64
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
logits, end_points = s3dg.s3dg(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
num_frames = 64
height, width = 224, 224
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
mixed_6c, end_points = s3dg.s3dg_base(inputs)
self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_6c.get_shape().as_list(),
[batch_size, 8, 7, 7, 1024])
expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',
'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(list(end_points.keys()), expected_endpoints)
def testBuildOnlyUptoFinalEndpointNoGating(self):
batch_size = 5
num_frames = 64
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
out_tensor, end_points = s3dg.s3dg_base(
inputs, final_endpoint=endpoint, gating_startat=None)
print(endpoint, out_tensor.op.name)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV1/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
num_frames = 64
height, width = 224, 224
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
_, end_points = s3dg.s3dg_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {'Conv2d_1a_7x7': [5, 32, 112, 112, 64],
'MaxPool_2a_3x3': [5, 32, 56, 56, 64],
'Conv2d_2b_1x1': [5, 32, 56, 56, 64],
'Conv2d_2c_3x3': [5, 32, 56, 56, 192],
'MaxPool_3a_3x3': [5, 32, 28, 28, 192],
'Mixed_3b': [5, 32, 28, 28, 256],
'Mixed_3c': [5, 32, 28, 28, 480],
'MaxPool_4a_3x3': [5, 16, 14, 14, 480],
'Mixed_4b': [5, 16, 14, 14, 512],
'Mixed_4c': [5, 16, 14, 14, 512],
'Mixed_4d': [5, 16, 14, 14, 512],
'Mixed_4e': [5, 16, 14, 14, 528],
'Mixed_4f': [5, 16, 14, 14, 832],
'MaxPool_5a_2x2': [5, 8, 7, 7, 832],
'Mixed_5b': [5, 8, 7, 7, 832],
'Mixed_5c': [5, 8, 7, 7, 1024]}
self.assertItemsEqual(
list(endpoints_shapes.keys()), list(end_points.keys()))
for endpoint_name, expected_shape in six.iteritems(endpoints_shapes):
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testHalfSizeImages(self):
batch_size = 5
num_frames = 64
height, width = 112, 112
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
mixed_5c, _ = s3dg.s3dg_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 8, 4, 4, 1024])
def testTenFrames(self):
batch_size = 5
num_frames = 10
height, width = 224, 224
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
mixed_5c, _ = s3dg.s3dg_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 2, 7, 7, 1024])
def testEvaluation(self):
batch_size = 2
num_frames = 64
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
logits, _ = s3dg.s3dg(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
      self.assertEqual(output.shape, (batch_size,))
if __name__ == '__main__':
tf.test.main()
| 6,283 | 40.071895 | 80 | py |
models | models-master/research/slim/nets/pix2pix_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for pix2pix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import pix2pix
class GeneratorTest(tf.test.TestCase):
def _reduced_default_blocks(self):
"""Returns the default blocks, scaled down to make test run faster."""
return [pix2pix.Block(b.num_filters // 32, b.decoder_keep_prob)
for b in pix2pix._default_generator_blocks()]
def test_output_size_nn_upsample_conv(self):
batch_size = 2
height, width = 256, 256
num_outputs = 4
images = tf.ones((batch_size, height, width, 3))
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, _ = pix2pix.pix2pix_generator(
images, num_outputs, blocks=self._reduced_default_blocks(),
upsample_method='nn_upsample_conv')
with self.test_session() as session:
session.run(tf.global_variables_initializer())
np_outputs = session.run(logits)
self.assertListEqual([batch_size, height, width, num_outputs],
list(np_outputs.shape))
def test_output_size_conv2d_transpose(self):
batch_size = 2
height, width = 256, 256
num_outputs = 4
images = tf.ones((batch_size, height, width, 3))
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, _ = pix2pix.pix2pix_generator(
images, num_outputs, blocks=self._reduced_default_blocks(),
upsample_method='conv2d_transpose')
with self.test_session() as session:
session.run(tf.global_variables_initializer())
np_outputs = session.run(logits)
self.assertListEqual([batch_size, height, width, num_outputs],
list(np_outputs.shape))
def test_block_number_dictates_number_of_layers(self):
batch_size = 2
height, width = 256, 256
num_outputs = 4
images = tf.ones((batch_size, height, width, 3))
blocks = [
pix2pix.Block(64, 0.5),
pix2pix.Block(128, 0),
]
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
_, end_points = pix2pix.pix2pix_generator(
images, num_outputs, blocks)
num_encoder_layers = 0
num_decoder_layers = 0
for end_point in end_points:
if end_point.startswith('encoder'):
num_encoder_layers += 1
elif end_point.startswith('decoder'):
num_decoder_layers += 1
self.assertEqual(num_encoder_layers, len(blocks))
self.assertEqual(num_decoder_layers, len(blocks))
class DiscriminatorTest(tf.test.TestCase):
def _layer_output_size(self, input_size, kernel_size=4, stride=2, pad=2):
return (input_size + pad * 2 - kernel_size) // stride + 1
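  # Worked example of the helper above: with the defaults (kernel_size=4,
  # stride=2, pad=2) and input_size=256, the output size is
  # (256 + 2 * 2 - 4) // 2 + 1 = 129; with stride=1 it is
  # 256 + 4 - 4 + 1 = 257.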
def test_four_layers(self):
batch_size = 2
input_size = 256
output_size = self._layer_output_size(input_size)
output_size = self._layer_output_size(output_size)
output_size = self._layer_output_size(output_size)
output_size = self._layer_output_size(output_size, stride=1)
output_size = self._layer_output_size(output_size, stride=1)
images = tf.ones((batch_size, input_size, input_size, 3))
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, end_points = pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512])
self.assertListEqual([batch_size, output_size, output_size, 1],
logits.shape.as_list())
self.assertListEqual([batch_size, output_size, output_size, 1],
end_points['predictions'].shape.as_list())
def test_four_layers_no_padding(self):
batch_size = 2
input_size = 256
output_size = self._layer_output_size(input_size, pad=0)
output_size = self._layer_output_size(output_size, pad=0)
output_size = self._layer_output_size(output_size, pad=0)
output_size = self._layer_output_size(output_size, stride=1, pad=0)
output_size = self._layer_output_size(output_size, stride=1, pad=0)
images = tf.ones((batch_size, input_size, input_size, 3))
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, end_points = pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512], padding=0)
self.assertListEqual([batch_size, output_size, output_size, 1],
logits.shape.as_list())
self.assertListEqual([batch_size, output_size, output_size, 1],
end_points['predictions'].shape.as_list())
  def test_four_layers_wrong_padding(self):
batch_size = 2
input_size = 256
images = tf.ones((batch_size, input_size, input_size, 3))
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
with self.assertRaises(TypeError):
pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512], padding=1.5)
def test_four_layers_negative_padding(self):
batch_size = 2
input_size = 256
images = tf.ones((batch_size, input_size, input_size, 3))
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
with self.assertRaises(ValueError):
pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512], padding=-1)
if __name__ == '__main__':
tf.test.main()
| 5,886 | 36.259494 | 79 | py |
models | models-master/research/slim/nets/mobilenet_v1_train.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build and train mobilenet_v1 with options for quantization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import quantize as contrib_quantize
from datasets import dataset_factory
from nets import mobilenet_v1
from preprocessing import preprocessing_factory
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Session master')
flags.DEFINE_integer('task', 0, 'Task')
flags.DEFINE_integer('ps_tasks', 0, 'Number of ps')
flags.DEFINE_integer('batch_size', 64, 'Batch size')
flags.DEFINE_integer('num_classes', 1001, 'Number of classes to distinguish')
flags.DEFINE_integer('number_of_steps', None,
'Number of training steps to perform before stopping')
flags.DEFINE_integer('image_size', 224, 'Input image resolution')
flags.DEFINE_float('depth_multiplier', 1.0, 'Depth multiplier for mobilenet')
flags.DEFINE_bool('quantize', False, 'Quantize training')
flags.DEFINE_string('fine_tune_checkpoint', '',
'Checkpoint from which to start finetuning.')
flags.DEFINE_string('checkpoint_dir', '',
'Directory for writing training checkpoints and logs')
flags.DEFINE_string('dataset_dir', '', 'Location of dataset')
flags.DEFINE_integer('log_every_n_steps', 100, 'Number of steps per log')
flags.DEFINE_integer('save_summaries_secs', 100,
'How often to save summaries, secs')
flags.DEFINE_integer('save_interval_secs', 100,
'How often to save checkpoints, secs')
FLAGS = flags.FLAGS
_LEARNING_RATE_DECAY_FACTOR = 0.94
def get_learning_rate():
if FLAGS.fine_tune_checkpoint:
    # If we are fine-tuning from a checkpoint, we need to start at a lower
    # learning rate since we are already farther along in training.
return 1e-4
else:
return 0.045
def get_quant_delay():
if FLAGS.fine_tune_checkpoint:
# We can start quantizing immediately if we are finetuning.
return 0
else:
# We need to wait for the model to train a bit before we quantize if we are
# training from scratch.
return 250000
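# Sketch of how the delay is consumed below in build_model(): when --quantize
# is set, contrib_quantize.create_training_graph(quant_delay=...) inserts
# fake-quantization ops that only become active after that many global steps,
# so a from-scratch run trains in float for the first 250000 steps.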
def imagenet_input(is_training):
"""Data reader for imagenet.
Reads in imagenet data and performs pre-processing on the images.
Args:
is_training: bool specifying if train or validation dataset is needed.
Returns:
A batch of images and labels.
"""
if is_training:
dataset = dataset_factory.get_dataset('imagenet', 'train',
FLAGS.dataset_dir)
else:
dataset = dataset_factory.get_dataset('imagenet', 'validation',
FLAGS.dataset_dir)
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
shuffle=is_training,
common_queue_capacity=2 * FLAGS.batch_size,
common_queue_min=FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
'mobilenet_v1', is_training=is_training)
image = image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
images, labels = tf.train.batch([image, label],
batch_size=FLAGS.batch_size,
num_threads=4,
capacity=5 * FLAGS.batch_size)
labels = slim.one_hot_encoding(labels, FLAGS.num_classes)
return images, labels
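# Shape note (worked out from the flag defaults above): here
# imagenet_input(is_training=True) returns `images` of shape [64, 224, 224, 3]
# and one-hot `labels` of shape [64, 1001].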
def build_model():
"""Builds graph for model to train with rewrites for quantization.
Returns:
g: Graph with fake quantization ops and batch norm folding suitable for
training quantized weights.
train_tensor: Train op for execution during training.
"""
g = tf.Graph()
with g.as_default(), tf.device(
tf.train.replica_device_setter(FLAGS.ps_tasks)):
inputs, labels = imagenet_input(is_training=True)
with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)):
logits, _ = mobilenet_v1.mobilenet_v1(
inputs,
is_training=True,
depth_multiplier=FLAGS.depth_multiplier,
num_classes=FLAGS.num_classes)
tf.losses.softmax_cross_entropy(labels, logits)
    # Call the rewriter to produce a graph with fake quant ops and folded
    # batch norms. quant_delay postpones the start of quantization for the
    # given number of steps, allowing for better model accuracy.
if FLAGS.quantize:
contrib_quantize.create_training_graph(quant_delay=get_quant_delay())
total_loss = tf.losses.get_total_loss(name='total_loss')
# Configure the learning rate using an exponential decay.
num_epochs_per_decay = 2.5
imagenet_size = 1271167
decay_steps = int(imagenet_size / FLAGS.batch_size * num_epochs_per_decay)
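    # Rough worked example with the default --batch_size=64: one epoch is
    # about 1271167 / 64 ~= 19862 steps, so the learning rate is scaled by
    # _LEARNING_RATE_DECAY_FACTOR (0.94) roughly every 2.5 * 19862 ~= 49655
    # steps.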
learning_rate = tf.train.exponential_decay(
get_learning_rate(),
tf.train.get_or_create_global_step(),
decay_steps,
_LEARNING_RATE_DECAY_FACTOR,
staircase=True)
opt = tf.train.GradientDescentOptimizer(learning_rate)
train_tensor = slim.learning.create_train_op(
total_loss,
optimizer=opt)
slim.summaries.add_scalar_summary(total_loss, 'total_loss', 'losses')
slim.summaries.add_scalar_summary(learning_rate, 'learning_rate', 'training')
return g, train_tensor
def get_checkpoint_init_fn():
"""Returns the checkpoint init_fn if the checkpoint is provided."""
if FLAGS.fine_tune_checkpoint:
variables_to_restore = slim.get_variables_to_restore()
global_step_reset = tf.assign(
tf.train.get_or_create_global_step(), 0)
# When restoring from a floating point model, the min/max values for
# quantized weights and activations are not present.
# We instruct slim to ignore variables that are missing during restoration
# by setting ignore_missing_vars=True
slim_init_fn = slim.assign_from_checkpoint_fn(
FLAGS.fine_tune_checkpoint,
variables_to_restore,
ignore_missing_vars=True)
def init_fn(sess):
slim_init_fn(sess)
# If we are restoring from a floating point model, we need to initialize
# the global step to zero for the exponential decay to result in
# reasonable learning rates.
sess.run(global_step_reset)
return init_fn
else:
return None
def train_model():
"""Trains mobilenet_v1."""
g, train_tensor = build_model()
with g.as_default():
slim.learning.train(
train_tensor,
FLAGS.checkpoint_dir,
is_chief=(FLAGS.task == 0),
master=FLAGS.master,
log_every_n_steps=FLAGS.log_every_n_steps,
graph=g,
number_of_steps=FLAGS.number_of_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
init_fn=get_checkpoint_init_fn(),
global_step=tf.train.get_global_step())
def main(unused_arg):
train_model()
if __name__ == '__main__':
tf.app.run(main)
| 7,647 | 34.738318 | 80 | py |
models | models-master/research/slim/nets/resnet_v1_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import resnet_utils
from nets import resnet_v1
tf.disable_resource_variables()
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
else:
return tf.cast(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]), [batch_size, 1, 1, channels]),
dtype=tf.float32)
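# Small sketch of the values produced above (worked out from the mesh-grid
# construction, not from running the test): create_test_input(1, 2, 3, 1)
# yields a constant tensor whose single channel is
#   [[0., 1., 2.],
#    [1., 2., 3.]]   # the value at (row, col) is row + col
# while create_test_input(None, 2, 3, 1) yields a float32 placeholder of
# shape (None, 2, 3, 1) because the batch size is unknown.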
class ResnetUtilsTest(tf.test.TestCase):
def testSubsampleThreeByThree(self):
x = tf.reshape(tf.cast(tf.range(9), dtype=tf.float32), [1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = tf.reshape(tf.cast(tf.range(16), dtype=tf.float32), [1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.cast([[14, 28, 43, 26], [28, 48, 66, 37], [43, 66, 84, 46],
[26, 37, 46, 22]],
dtype=tf.float32)
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.cast([[14, 43], [43, 84]], dtype=tf.float32)
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = tf.cast([[48, 37], [37, 22]], dtype=tf.float32)
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.cast(
[[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55],
[58, 84, 102, 120, 64], [34, 46, 55, 64, 30]],
dtype=tf.float32)
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.cast([[14, 43, 34], [43, 84, 55], [34, 55, 30]],
dtype=tf.float32)
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
return net, end_points
def testEndPointsV1(self):
"""Test the end points of a tiny v1 bottleneck network."""
blocks = [
resnet_v1.resnet_v1_block(
'block1', base_depth=1, num_units=2, stride=2),
resnet_v1.resnet_v1_block(
'block2', base_depth=2, num_units=2, stride=1),
]
inputs = create_test_input(2, 32, 16, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v1/shortcut',
'tiny/block1/unit_1/bottleneck_v1/conv1',
'tiny/block1/unit_1/bottleneck_v1/conv2',
'tiny/block1/unit_1/bottleneck_v1/conv3',
'tiny/block1/unit_2/bottleneck_v1/conv1',
'tiny/block1/unit_2/bottleneck_v1/conv2',
'tiny/block1/unit_2/bottleneck_v1/conv3',
'tiny/block2/unit_1/bottleneck_v1/shortcut',
'tiny/block2/unit_1/bottleneck_v1/conv1',
'tiny/block2/unit_1/bottleneck_v1/conv2',
'tiny/block2/unit_1/bottleneck_v1/conv3',
'tiny/block2/unit_2/bottleneck_v1/conv1',
'tiny/block2/unit_2/bottleneck_v1/conv2',
'tiny/block2/unit_2/bottleneck_v1/conv3']
self.assertItemsEqual(expected, end_points.keys())
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
def testAtrousValuesBottleneck(self):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
"""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with slim.arg_scope([slim.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs,
blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
def testStridingLastUnitVsSubsampleBlockEnd(self):
"""Compares subsampling at the block's last unit or block's end.
    Makes sure that the final output is the same when we use a stride at the
    last unit of a block vs. when we subsample activations at the end of a
    block.
"""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
# Test both odd and even input dimensions.
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with slim.arg_scope([slim.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Subsampling at the last unit of the block.
output = resnet_utils.stack_blocks_dense(
inputs, blocks, output_stride,
store_non_strided_activations=False,
outputs_collections='output')
output_end_points = slim.utils.convert_collection_to_dict(
'output')
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Subsample activations at the end of the blocks.
expected = resnet_utils.stack_blocks_dense(
inputs, blocks, output_stride,
store_non_strided_activations=True,
outputs_collections='expected')
expected_end_points = slim.utils.convert_collection_to_dict(
'expected')
sess.run(tf.global_variables_initializer())
# Make sure that the final output is the same.
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
# Make sure that intermediate block activations in
# output_end_points are subsampled versions of the corresponding
# ones in expected_end_points.
for i, block in enumerate(blocks[:-1:]):
output = output_end_points[block.scope]
expected = expected_end_points[block.scope]
atrous_activated = (output_stride is not None and
2 ** i >= output_stride)
if not atrous_activated:
expected = resnet_utils.subsample(expected, 2)
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(tf.test.TestCase):
"""Tests with complete small ResNet v1 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_small'):
"""A shallow and thin ResNet v1 for faster tests."""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=3, stride=2),
block('block2', base_depth=2, num_units=3, stride=2),
block('block3', base_depth=4, num_units=3, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
return resnet_v1.resnet_v1(inputs, blocks, num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=include_root_block,
spatial_squeeze=spatial_squeeze,
reuse=reuse,
scope=scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
self.assertTrue('global_pool' in end_points)
self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
[2, 1, 1, 32])
def testClassificationEndPointsWithNoBatchNormArgscope(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
is_training=None,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
self.assertTrue('global_pool' in end_points)
self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
[2, 1, 1, 32])
def testEndpointNames(self):
# Like ResnetUtilsTest.testEndPointsV1(), but for the public API.
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
expected = ['resnet/conv1']
for block in range(1, 5):
for unit in range(1, 4 if block < 4 else 3):
for conv in range(1, 4):
expected.append('resnet/block%d/unit_%d/bottleneck_v1/conv%d' %
(block, unit, conv))
expected.append('resnet/block%d/unit_%d/bottleneck_v1' % (block, unit))
expected.append('resnet/block%d/unit_1/bottleneck_v1/shortcut' % block)
expected.append('resnet/block%d' % block)
expected.extend(['global_pool', 'resnet/logits', 'resnet/spatial_squeeze',
'predictions'])
self.assertItemsEqual(end_points.keys(), expected)
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
include_root_block=False,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
def testDepthMultiplier(self):
resnets = [
resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101,
resnet_v1.resnet_v1_152, resnet_v1.resnet_v1_200
]
resnet_names = [
'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200'
]
for resnet, resnet_name in zip(resnets, resnet_names):
depth_multiplier = 0.25
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
scope_base = resnet_name + '_base'
_, end_points_base = resnet(
inputs,
num_classes,
global_pool=global_pool,
min_base_depth=1,
scope=scope_base)
scope_test = resnet_name + '_test'
_, end_points_test = resnet(
inputs,
num_classes,
global_pool=global_pool,
min_base_depth=1,
depth_multiplier=depth_multiplier,
scope=scope_test)
for block in ['block1', 'block2', 'block3', 'block4']:
block_name_base = scope_base + '/' + block
block_name_test = scope_test + '/' + block
self.assertTrue(block_name_base in end_points_base)
self.assertTrue(block_name_test in end_points_test)
self.assertEqual(
len(end_points_base[block_name_base].get_shape().as_list()), 4)
self.assertEqual(
len(end_points_test[block_name_test].get_shape().as_list()), 4)
self.assertListEqual(
end_points_base[block_name_base].get_shape().as_list()[:3],
end_points_test[block_name_test].get_shape().as_list()[:3])
self.assertEqual(
int(depth_multiplier *
end_points_base[block_name_base].get_shape().as_list()[3]),
end_points_test[block_name_test].get_shape().as_list()[3])
def testMinBaseDepth(self):
resnets = [
resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101,
resnet_v1.resnet_v1_152, resnet_v1.resnet_v1_200
]
resnet_names = [
'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200'
]
for resnet, resnet_name in zip(resnets, resnet_names):
min_base_depth = 5
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = resnet(
inputs,
num_classes,
global_pool=global_pool,
min_base_depth=min_base_depth,
depth_multiplier=0,
scope=resnet_name)
for block in ['block1', 'block2', 'block3', 'block4']:
block_name = resnet_name + '/' + block
self.assertTrue(block_name in end_points)
self.assertEqual(
len(end_points[block_name].get_shape().as_list()), 4)
# The output depth is 4 times base_depth.
depth_expected = min_base_depth * 4
self.assertEqual(
end_points[block_name].get_shape().as_list()[3], depth_expected)
if __name__ == '__main__':
tf.test.main()
| 27,159 | 42.248408 | 80 | py |
models | models-master/research/slim/nets/vgg_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.vgg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import vgg
class VGGATest(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_a/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
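    # Why the logit map is 2x2 here: the five pooling layers reduce a 256x256
    # input to 8x8, and fc6 is a 7x7 convolution with VALID padding, which
    # leaves 2x2 (this note assumes the standard slim VGG definition in
    # nets/vgg.py).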
def testGlobalPool(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 1, 1, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_a(inputs, num_classes)
expected_names = ['vgg_a/conv1/conv1_1',
'vgg_a/pool1',
'vgg_a/conv2/conv2_1',
'vgg_a/pool2',
'vgg_a/conv3/conv3_1',
'vgg_a/conv3/conv3_2',
'vgg_a/pool3',
'vgg_a/conv4/conv4_1',
'vgg_a/conv4/conv4_2',
'vgg_a/pool4',
'vgg_a/conv5/conv5_1',
'vgg_a/conv5/conv5_2',
'vgg_a/pool5',
'vgg_a/fc6',
'vgg_a/fc7',
'vgg_a/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testNoClasses(self):
batch_size = 5
height, width = 224, 224
num_classes = None
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = vgg.vgg_a(inputs, num_classes)
expected_names = ['vgg_a/conv1/conv1_1',
'vgg_a/pool1',
'vgg_a/conv2/conv2_1',
'vgg_a/pool2',
'vgg_a/conv3/conv3_1',
'vgg_a/conv3/conv3_2',
'vgg_a/pool3',
'vgg_a/conv4/conv4_1',
'vgg_a/conv4/conv4_2',
'vgg_a/pool4',
'vgg_a/conv5/conv5_1',
'vgg_a/conv5/conv5_2',
'vgg_a/pool5',
'vgg_a/fc6',
'vgg_a/fc7',
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
self.assertTrue(net.op.name.startswith('vgg_a/fc7'))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
vgg.vgg_a(inputs, num_classes)
expected_names = ['vgg_a/conv1/conv1_1/weights',
'vgg_a/conv1/conv1_1/biases',
'vgg_a/conv2/conv2_1/weights',
'vgg_a/conv2/conv2_1/biases',
'vgg_a/conv3/conv3_1/weights',
'vgg_a/conv3/conv3_1/biases',
'vgg_a/conv3/conv3_2/weights',
'vgg_a/conv3/conv3_2/biases',
'vgg_a/conv4/conv4_1/weights',
'vgg_a/conv4/conv4_1/biases',
'vgg_a/conv4/conv4_2/weights',
'vgg_a/conv4/conv4_2/biases',
'vgg_a/conv5/conv5_1/weights',
'vgg_a/conv5/conv5_1/biases',
'vgg_a/conv5/conv5_2/weights',
'vgg_a/conv5/conv5_2/biases',
'vgg_a/fc6/weights',
'vgg_a/fc6/biases',
'vgg_a/fc7/weights',
'vgg_a/fc7/biases',
'vgg_a/fc8/weights',
'vgg_a/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(input=logits, axis=1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random.uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_a(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
predictions = tf.argmax(input=logits, axis=1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
class VGG16Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_16/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_16/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testGlobalPool(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
self.assertEquals(logits.op.name, 'vgg_16/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 1, 1, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_16(inputs, num_classes)
expected_names = ['vgg_16/conv1/conv1_1',
'vgg_16/conv1/conv1_2',
'vgg_16/pool1',
'vgg_16/conv2/conv2_1',
'vgg_16/conv2/conv2_2',
'vgg_16/pool2',
'vgg_16/conv3/conv3_1',
'vgg_16/conv3/conv3_2',
'vgg_16/conv3/conv3_3',
'vgg_16/pool3',
'vgg_16/conv4/conv4_1',
'vgg_16/conv4/conv4_2',
'vgg_16/conv4/conv4_3',
'vgg_16/pool4',
'vgg_16/conv5/conv5_1',
'vgg_16/conv5/conv5_2',
'vgg_16/conv5/conv5_3',
'vgg_16/pool5',
'vgg_16/fc6',
'vgg_16/fc7',
'vgg_16/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testNoClasses(self):
batch_size = 5
height, width = 224, 224
num_classes = None
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = vgg.vgg_16(inputs, num_classes)
expected_names = ['vgg_16/conv1/conv1_1',
'vgg_16/conv1/conv1_2',
'vgg_16/pool1',
'vgg_16/conv2/conv2_1',
'vgg_16/conv2/conv2_2',
'vgg_16/pool2',
'vgg_16/conv3/conv3_1',
'vgg_16/conv3/conv3_2',
'vgg_16/conv3/conv3_3',
'vgg_16/pool3',
'vgg_16/conv4/conv4_1',
'vgg_16/conv4/conv4_2',
'vgg_16/conv4/conv4_3',
'vgg_16/pool4',
'vgg_16/conv5/conv5_1',
'vgg_16/conv5/conv5_2',
'vgg_16/conv5/conv5_3',
'vgg_16/pool5',
'vgg_16/fc6',
'vgg_16/fc7',
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
self.assertTrue(net.op.name.startswith('vgg_16/fc7'))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
vgg.vgg_16(inputs, num_classes)
expected_names = ['vgg_16/conv1/conv1_1/weights',
'vgg_16/conv1/conv1_1/biases',
'vgg_16/conv1/conv1_2/weights',
'vgg_16/conv1/conv1_2/biases',
'vgg_16/conv2/conv2_1/weights',
'vgg_16/conv2/conv2_1/biases',
'vgg_16/conv2/conv2_2/weights',
'vgg_16/conv2/conv2_2/biases',
'vgg_16/conv3/conv3_1/weights',
'vgg_16/conv3/conv3_1/biases',
'vgg_16/conv3/conv3_2/weights',
'vgg_16/conv3/conv3_2/biases',
'vgg_16/conv3/conv3_3/weights',
'vgg_16/conv3/conv3_3/biases',
'vgg_16/conv4/conv4_1/weights',
'vgg_16/conv4/conv4_1/biases',
'vgg_16/conv4/conv4_2/weights',
'vgg_16/conv4/conv4_2/biases',
'vgg_16/conv4/conv4_3/weights',
'vgg_16/conv4/conv4_3/biases',
'vgg_16/conv5/conv5_1/weights',
'vgg_16/conv5/conv5_1/biases',
'vgg_16/conv5/conv5_2/weights',
'vgg_16/conv5/conv5_2/biases',
'vgg_16/conv5/conv5_3/weights',
'vgg_16/conv5/conv5_3/biases',
'vgg_16/fc6/weights',
'vgg_16/fc6/biases',
'vgg_16/fc7/weights',
'vgg_16/fc7/biases',
'vgg_16/fc8/weights',
'vgg_16/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(input=logits, axis=1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random.uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_16(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
predictions = tf.argmax(input=logits, axis=1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
class VGG19Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_19/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_19/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testGlobalPool(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
self.assertEquals(logits.op.name, 'vgg_19/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 1, 1, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_19(inputs, num_classes)
expected_names = [
'vgg_19/conv1/conv1_1',
'vgg_19/conv1/conv1_2',
'vgg_19/pool1',
'vgg_19/conv2/conv2_1',
'vgg_19/conv2/conv2_2',
'vgg_19/pool2',
'vgg_19/conv3/conv3_1',
'vgg_19/conv3/conv3_2',
'vgg_19/conv3/conv3_3',
'vgg_19/conv3/conv3_4',
'vgg_19/pool3',
'vgg_19/conv4/conv4_1',
'vgg_19/conv4/conv4_2',
'vgg_19/conv4/conv4_3',
'vgg_19/conv4/conv4_4',
'vgg_19/pool4',
'vgg_19/conv5/conv5_1',
'vgg_19/conv5/conv5_2',
'vgg_19/conv5/conv5_3',
'vgg_19/conv5/conv5_4',
'vgg_19/pool5',
'vgg_19/fc6',
'vgg_19/fc7',
'vgg_19/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testNoClasses(self):
batch_size = 5
height, width = 224, 224
num_classes = None
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = vgg.vgg_19(inputs, num_classes)
expected_names = [
'vgg_19/conv1/conv1_1',
'vgg_19/conv1/conv1_2',
'vgg_19/pool1',
'vgg_19/conv2/conv2_1',
'vgg_19/conv2/conv2_2',
'vgg_19/pool2',
'vgg_19/conv3/conv3_1',
'vgg_19/conv3/conv3_2',
'vgg_19/conv3/conv3_3',
'vgg_19/conv3/conv3_4',
'vgg_19/pool3',
'vgg_19/conv4/conv4_1',
'vgg_19/conv4/conv4_2',
'vgg_19/conv4/conv4_3',
'vgg_19/conv4/conv4_4',
'vgg_19/pool4',
'vgg_19/conv5/conv5_1',
'vgg_19/conv5/conv5_2',
'vgg_19/conv5/conv5_3',
'vgg_19/conv5/conv5_4',
'vgg_19/pool5',
'vgg_19/fc6',
'vgg_19/fc7',
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
self.assertTrue(net.op.name.startswith('vgg_19/fc7'))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
vgg.vgg_19(inputs, num_classes)
expected_names = [
'vgg_19/conv1/conv1_1/weights',
'vgg_19/conv1/conv1_1/biases',
'vgg_19/conv1/conv1_2/weights',
'vgg_19/conv1/conv1_2/biases',
'vgg_19/conv2/conv2_1/weights',
'vgg_19/conv2/conv2_1/biases',
'vgg_19/conv2/conv2_2/weights',
'vgg_19/conv2/conv2_2/biases',
'vgg_19/conv3/conv3_1/weights',
'vgg_19/conv3/conv3_1/biases',
'vgg_19/conv3/conv3_2/weights',
'vgg_19/conv3/conv3_2/biases',
'vgg_19/conv3/conv3_3/weights',
'vgg_19/conv3/conv3_3/biases',
'vgg_19/conv3/conv3_4/weights',
'vgg_19/conv3/conv3_4/biases',
'vgg_19/conv4/conv4_1/weights',
'vgg_19/conv4/conv4_1/biases',
'vgg_19/conv4/conv4_2/weights',
'vgg_19/conv4/conv4_2/biases',
'vgg_19/conv4/conv4_3/weights',
'vgg_19/conv4/conv4_3/biases',
'vgg_19/conv4/conv4_4/weights',
'vgg_19/conv4/conv4_4/biases',
'vgg_19/conv5/conv5_1/weights',
'vgg_19/conv5/conv5_1/biases',
'vgg_19/conv5/conv5_2/weights',
'vgg_19/conv5/conv5_2/biases',
'vgg_19/conv5/conv5_3/weights',
'vgg_19/conv5/conv5_3/biases',
'vgg_19/conv5/conv5_4/weights',
'vgg_19/conv5/conv5_4/biases',
'vgg_19/fc6/weights',
'vgg_19/fc6/biases',
'vgg_19/fc7/weights',
'vgg_19/fc7/biases',
'vgg_19/fc8/weights',
'vgg_19/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(input=logits, axis=1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random.uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_19(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
predictions = tf.argmax(input=logits, axis=1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
| 23,270 | 38.915952 | 80 | py |
models | models-master/research/slim/nets/resnet_v1.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
from tf_slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
class NoOpScope(object):
"""No-op context manager."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
@slim.add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None,
use_bounded_activations=False):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
use_bounded_activations: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=tf.nn.relu6 if use_bounded_activations else None,
scope='shortcut')
residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
activation_fn=None, scope='conv3')
if use_bounded_activations:
# Use clip_by_value to simulate bandpass activation.
residual = tf.clip_by_value(residual, -6.0, 6.0)
output = tf.nn.relu6(shortcut + residual)
else:
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections,
sc.name,
output)
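# Rough shape sketch for one bottleneck unit (values are only an example, taken
# from the first unit of a ResNet-50 'block1' on a 224x224 NHWC input):
#   inputs [N, 56, 56, 64]
#     shortcut: 1x1 conv projection (depth != depth_in) -> [N, 56, 56, 256]
#     residual: 1x1 conv                 -> [N, 56, 56, 64]
#               3x3 conv (unit's stride) -> [N, 56, 56, 64]
#               1x1 conv, no activation  -> [N, 56, 56, 256]
#   output = relu(shortcut + residual)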
def resnet_v1(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
store_non_strided_activations=False,
reuse=None,
scope=None):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks.
If 0 or None, we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode. If this is set
to None, the callers can specify slim.batch_norm's is_training parameter
from an outer slim.arg_scope.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it.
    spatial_squeeze: if True, logits is of shape [B, C], if False, logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller than 300x300
pixels, in which case the output logit layer does not contain spatial
information and can be removed.
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
      dense prediction problems but increases the computation and memory cost at
      the last unit of each block by 4x.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is 0 or None,
then net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is a non-zero integer, net contains the
pre-softmax activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(
scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with (slim.arg_scope([slim.batch_norm], is_training=is_training)
if is_training is not None else NoOpScope()):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,
store_non_strided_activations)
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], name='pool5', keepdims=True)
end_points['global_pool'] = net
if num_classes:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
end_points[sc.name + '/logits'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
end_points[sc.name + '/spatial_squeeze'] = net
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
resnet_v1.default_image_size = 224
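# Worked example for the dense-prediction sizing formula in the docstring above:
# with a [321, 321] input and output_stride=8, the last block's feature maps are
#   (321 - 1) / 8 + 1 = 41
# pixels on a side, matching the 41x41 endpoint shapes exercised in
# resnet_v1_test.py.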
def resnet_v1_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v1 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v1 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
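# For illustration, resnet_v1_block('block1', base_depth=64, num_units=3,
# stride=2) expands to a Block whose unit list is
#   [{'depth': 256, 'depth_bottleneck': 64, 'stride': 1},
#    {'depth': 256, 'depth_bottleneck': 64, 'stride': 1},
#    {'depth': 256, 'depth_bottleneck': 64, 'stride': 2}]
# i.e. every unit keeps stride 1 except the last, which carries the block stride.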
def resnet_v1_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
store_non_strided_activations=False,
min_base_depth=8,
depth_multiplier=1,
reuse=None,
scope='resnet_v1_50'):
"""ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
depth_func = lambda d: max(int(d * depth_multiplier), min_base_depth)
blocks = [
resnet_v1_block('block1', base_depth=depth_func(64), num_units=3,
stride=2),
resnet_v1_block('block2', base_depth=depth_func(128), num_units=4,
stride=2),
resnet_v1_block('block3', base_depth=depth_func(256), num_units=6,
stride=2),
resnet_v1_block('block4', base_depth=depth_func(512), num_units=3,
stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_50.default_image_size = resnet_v1.default_image_size
def resnet_v1_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
store_non_strided_activations=False,
min_base_depth=8,
depth_multiplier=1,
reuse=None,
scope='resnet_v1_101'):
"""ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
depth_func = lambda d: max(int(d * depth_multiplier), min_base_depth)
blocks = [
resnet_v1_block('block1', base_depth=depth_func(64), num_units=3,
stride=2),
resnet_v1_block('block2', base_depth=depth_func(128), num_units=4,
stride=2),
resnet_v1_block('block3', base_depth=depth_func(256), num_units=23,
stride=2),
resnet_v1_block('block4', base_depth=depth_func(512), num_units=3,
stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_101.default_image_size = resnet_v1.default_image_size
def resnet_v1_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
store_non_strided_activations=False,
spatial_squeeze=True,
min_base_depth=8,
depth_multiplier=1,
reuse=None,
scope='resnet_v1_152'):
"""ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
depth_func = lambda d: max(int(d * depth_multiplier), min_base_depth)
blocks = [
resnet_v1_block('block1', base_depth=depth_func(64), num_units=3,
stride=2),
resnet_v1_block('block2', base_depth=depth_func(128), num_units=8,
stride=2),
resnet_v1_block('block3', base_depth=depth_func(256), num_units=36,
stride=2),
resnet_v1_block('block4', base_depth=depth_func(512), num_units=3,
stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_152.default_image_size = resnet_v1.default_image_size
def resnet_v1_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
store_non_strided_activations=False,
spatial_squeeze=True,
min_base_depth=8,
depth_multiplier=1,
reuse=None,
scope='resnet_v1_200'):
"""ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
depth_func = lambda d: max(int(d * depth_multiplier), min_base_depth)
blocks = [
resnet_v1_block('block1', base_depth=depth_func(64), num_units=3,
stride=2),
resnet_v1_block('block2', base_depth=depth_func(128), num_units=24,
stride=2),
resnet_v1_block('block3', base_depth=depth_func(256), num_units=36,
stride=2),
resnet_v1_block('block4', base_depth=depth_func(512), num_units=3,
stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_200.default_image_size = resnet_v1.default_image_size
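# The four variants above differ only in how many bottleneck units each block
# stacks: ResNet-50 uses (3, 4, 6, 3), ResNet-101 (3, 4, 23, 3), ResNet-152
# (3, 8, 36, 3) and ResNet-200 (3, 24, 36, 3); everything else is shared.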
| 18,020 | 43.3867 | 80 | py |
models | models-master/research/slim/nets/i3d_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for networks.i3d."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow.compat.v1 as tf
from nets import i3d
class I3DTest(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
num_frames = 64
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
logits, end_points = i3d.i3d(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
num_frames = 64
height, width = 224, 224
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
    mixed_5c, end_points = i3d.i3d_base(inputs)
    self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 8, 7, 7, 1024])
expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',
'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(list(end_points.keys()), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
num_frames = 64
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
out_tensor, end_points = i3d.i3d_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV1/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
num_frames = 64
height, width = 224, 224
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
_, end_points = i3d.i3d_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {'Conv2d_1a_7x7': [5, 32, 112, 112, 64],
'MaxPool_2a_3x3': [5, 32, 56, 56, 64],
'Conv2d_2b_1x1': [5, 32, 56, 56, 64],
'Conv2d_2c_3x3': [5, 32, 56, 56, 192],
'MaxPool_3a_3x3': [5, 32, 28, 28, 192],
'Mixed_3b': [5, 32, 28, 28, 256],
'Mixed_3c': [5, 32, 28, 28, 480],
'MaxPool_4a_3x3': [5, 16, 14, 14, 480],
'Mixed_4b': [5, 16, 14, 14, 512],
'Mixed_4c': [5, 16, 14, 14, 512],
'Mixed_4d': [5, 16, 14, 14, 512],
'Mixed_4e': [5, 16, 14, 14, 528],
'Mixed_4f': [5, 16, 14, 14, 832],
'MaxPool_5a_2x2': [5, 8, 7, 7, 832],
'Mixed_5b': [5, 8, 7, 7, 832],
'Mixed_5c': [5, 8, 7, 7, 1024]}
self.assertItemsEqual(
list(endpoints_shapes.keys()), list(end_points.keys()))
for endpoint_name, expected_shape in six.iteritems(endpoints_shapes):
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testHalfSizeImages(self):
batch_size = 5
num_frames = 64
height, width = 112, 112
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
mixed_5c, _ = i3d.i3d_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 8, 4, 4, 1024])
def testTenFrames(self):
batch_size = 5
num_frames = 10
height, width = 224, 224
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
mixed_5c, _ = i3d.i3d_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 2, 7, 7, 1024])
def testEvaluation(self):
batch_size = 2
num_frames = 64
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
logits, _ = i3d.i3d(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
if __name__ == '__main__':
tf.test.main()
| 6,189 | 39.723684 | 80 | py |
models | models-master/research/slim/nets/s3dg.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for Gated Separable 3D network (S3D-G).
The network architecture is proposed by:
Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy,
Rethinking Spatiotemporal Feature Learning For Video Understanding.
https://arxiv.org/abs/1712.04851.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import i3d_utils
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
conv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal
inception_block_v1_3d = i3d_utils.inception_block_v1_3d
arg_scope = slim.arg_scope
def s3dg_arg_scope(weight_decay=1e-7,
batch_norm_decay=0.999,
batch_norm_epsilon=0.001):
"""Defines default arg_scope for S3D-G.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
sc: An arg_scope to use for the models.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
# Turns off fused batch norm.
'fused': False,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': ['moving_vars'],
'moving_variance': ['moving_vars'],
}
}
with arg_scope([slim.conv3d, conv3d_spatiotemporal],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([conv3d_spatiotemporal], separable=True) as sc:
return sc
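# A minimal usage sketch for the arg_scope above (the input shape and class
# count are only assumptions for illustration):
#   inputs = tf.random.uniform([2, 64, 224, 224, 3])
#   with slim.arg_scope(s3dg_arg_scope()):
#     logits, end_points = s3dg(inputs, num_classes=1000)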
def self_gating(input_tensor, scope, data_format='NDHWC'):
"""Feature gating as used in S3D-G.
Transforms the input features by aggregating features from all
spatial and temporal locations, and applying gating conditioned
on the aggregated features. More details can be found at:
https://arxiv.org/abs/1712.04851
Args:
input_tensor: A 5-D float tensor of size [batch_size, num_frames,
height, width, channels].
scope: scope for `variable_scope`.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
Returns:
A tensor with the same shape as input_tensor.
"""
index_c = data_format.index('C')
index_d = data_format.index('D')
index_h = data_format.index('H')
index_w = data_format.index('W')
input_shape = input_tensor.get_shape().as_list()
t = input_shape[index_d]
w = input_shape[index_w]
h = input_shape[index_h]
num_channels = input_shape[index_c]
spatiotemporal_average = slim.avg_pool3d(
input_tensor, [t, w, h],
stride=1,
data_format=data_format,
scope=scope + '/self_gating/avg_pool3d')
weights = slim.conv3d(
spatiotemporal_average,
num_channels, [1, 1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=None,
data_format=data_format,
weights_initializer=trunc_normal(0.01),
scope=scope + '/self_gating/transformer_W')
tile_multiples = [1, t, w, h]
tile_multiples.insert(index_c, 1)
weights = tf.tile(weights, tile_multiples)
weights = tf.nn.sigmoid(weights)
return tf.multiply(weights, input_tensor)
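# Sketch of the gating computation above, assuming the default 'NDHWC' layout
# and an input x of shape [batch, T, H, W, C]:
#   pooled  = average of x over all T, H, W positions   # [batch, 1, 1, 1, C]
#   weights = sigmoid(1x1x1 conv applied to pooled)     # per-channel gates,
#                                                       # tiled back to x's shape
#   output  = weights * x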
def s3dg_base(inputs,
first_temporal_kernel_size=3,
temporal_conv_startat='Conv2d_2c_3x3',
gating_startat='Conv2d_2c_3x3',
final_endpoint='Mixed_5c',
min_depth=16,
depth_multiplier=1.0,
data_format='NDHWC',
scope='InceptionV1'):
"""Defines the I3D/S3DG base architecture.
Note that we use the names as defined in Inception V1 to facilitate checkpoint
conversion from an image-trained Inception V1 checkpoint to I3D checkpoint.
Args:
inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
channels].
first_temporal_kernel_size: Specifies the temporal kernel size for the first
conv3d filter. A larger value slows down the model but provides little
accuracy improvement. The default is 7 in the original I3D and S3D-G but 3
gives better performance. Must be set to one of 1, 3, 5 or 7.
temporal_conv_startat: Specifies the first conv block to use 3D or separable
3D convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is
used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the
first valid block to use separable 3D convs. If provided block name is
not present, all valid blocks will use separable 3D convs. Note that
'Conv2d_1a_7x7' cannot be made into a separable 3D conv, but can be made
into a 2D or 3D conv using the `first_temporal_kernel_size` option.
gating_startat: Specifies the first conv block to use self gating.
'Conv2d_2c_3x3' is the first valid block to use self gating. If provided
      block name is not present, all valid blocks will use self gating.
final_endpoint: Specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: Optional variable_scope.
Returns:
A dictionary from components of the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values, or
if depth_multiplier <= 0.
"""
assert data_format in ['NDHWC', 'NCDHW']
end_points = {}
t = 1
# For inverted pyramid models, we start with gating switched off.
use_gating = False
self_gating_fn = None
def gating_fn(inputs, scope):
return self_gating(inputs, scope, data_format=data_format)
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with arg_scope([slim.conv3d], weights_initializer=trunc_normal(0.01)):
with arg_scope([slim.conv3d, slim.max_pool3d, conv3d_spatiotemporal],
stride=1,
data_format=data_format,
padding='SAME'):
# batch_size x 32 x 112 x 112 x 64
end_point = 'Conv2d_1a_7x7'
if first_temporal_kernel_size not in [1, 3, 5, 7]:
raise ValueError(
'first_temporal_kernel_size can only be 1, 3, 5 or 7.')
# Separable conv is slow when used at first conv layer.
net = conv3d_spatiotemporal(
inputs,
depth(64), [first_temporal_kernel_size, 7, 7],
stride=2,
separable=False,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool3d(net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = slim.conv3d(net, depth(64), [1, 1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 192
end_point = 'Conv2d_2c_3x3'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = conv3d_spatiotemporal(net, depth(192), [t, 3, 3], scope=end_point)
if use_gating:
net = self_gating(net, scope=end_point, data_format=data_format)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 28 x 28 x 192
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool3d(net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 28 x 28 x 256
end_point = 'Mixed_3b'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(64),
num_outputs_1_0a=depth(96),
num_outputs_1_0b=depth(128),
num_outputs_2_0a=depth(16),
num_outputs_2_0b=depth(32),
num_outputs_3_0b=depth(32),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_3c'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(128),
num_outputs_1_0a=depth(128),
num_outputs_1_0b=depth(192),
num_outputs_2_0a=depth(32),
num_outputs_2_0b=depth(96),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_4a_3x3'
net = slim.max_pool3d(net, [3, 3, 3], stride=[2, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 512
end_point = 'Mixed_4b'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(192),
num_outputs_1_0a=depth(96),
num_outputs_1_0b=depth(208),
num_outputs_2_0a=depth(16),
num_outputs_2_0b=depth(48),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 512
end_point = 'Mixed_4c'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(160),
num_outputs_1_0a=depth(112),
num_outputs_1_0b=depth(224),
num_outputs_2_0a=depth(24),
num_outputs_2_0b=depth(64),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 512
end_point = 'Mixed_4d'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(128),
num_outputs_1_0a=depth(128),
num_outputs_1_0b=depth(256),
num_outputs_2_0a=depth(24),
num_outputs_2_0b=depth(64),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 528
end_point = 'Mixed_4e'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(112),
num_outputs_1_0a=depth(144),
num_outputs_1_0b=depth(288),
num_outputs_2_0a=depth(32),
num_outputs_2_0b=depth(64),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 832
end_point = 'Mixed_4f'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(256),
num_outputs_1_0a=depth(160),
num_outputs_1_0b=depth(320),
num_outputs_2_0a=depth(32),
num_outputs_2_0b=depth(128),
num_outputs_3_0b=depth(128),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_5a_2x2'
net = slim.max_pool3d(net, [2, 2, 2], stride=[2, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 8 x 7 x 7 x 832
end_point = 'Mixed_5b'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(256),
num_outputs_1_0a=depth(160),
num_outputs_1_0b=depth(320),
num_outputs_2_0a=depth(32),
num_outputs_2_0b=depth(128),
num_outputs_3_0b=depth(128),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 8 x 7 x 7 x 1024
end_point = 'Mixed_5c'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(384),
num_outputs_1_0a=depth(192),
num_outputs_1_0b=depth(384),
num_outputs_2_0a=depth(48),
num_outputs_2_0b=depth(128),
num_outputs_3_0b=depth(128),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def s3dg(inputs,
num_classes=1000,
first_temporal_kernel_size=3,
temporal_conv_startat='Conv2d_2c_3x3',
gating_startat='Conv2d_2c_3x3',
final_endpoint='Mixed_5c',
min_depth=16,
depth_multiplier=1.0,
dropout_keep_prob=0.8,
is_training=True,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
data_format='NDHWC',
scope='InceptionV1'):
"""Defines the S3D-G architecture.
The default image size used to train this network is 224x224.
Args:
inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
channels].
num_classes: number of predicted classes.
first_temporal_kernel_size: Specifies the temporal kernel size for the first
conv3d filter. A larger value slows down the model but provides little
accuracy improvement. Must be set to one of 1, 3, 5 or 7.
temporal_conv_startat: Specifies the first conv block to use separable 3D
convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is
used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the
first valid block to use separable 3D convs. If provided block name is
not present, all valid blocks will use separable 3D convs.
gating_startat: Specifies the first conv block to use self gating.
'Conv2d_2c_3x3' is the first valid block to use self gating. If provided
      block name is not present, all valid blocks will use self gating.
final_endpoint: Specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
dropout_keep_prob: the percentage of activation values that are retained.
    is_training: whether the model is being trained or not.
prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if false, logits is of
      shape [B, 1, 1, C], where B is batch_size and C is the number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
"""
assert data_format in ['NDHWC', 'NCDHW']
# Final pooling and prediction
with tf.variable_scope(
scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
net, end_points = s3dg_base(
inputs,
first_temporal_kernel_size=first_temporal_kernel_size,
temporal_conv_startat=temporal_conv_startat,
gating_startat=gating_startat,
final_endpoint=final_endpoint,
min_depth=min_depth,
depth_multiplier=depth_multiplier,
data_format=data_format,
scope=scope)
with tf.variable_scope('Logits'):
if data_format.startswith('NC'):
net = tf.transpose(a=net, perm=[0, 2, 3, 4, 1])
kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])
net = slim.avg_pool3d(
net,
kernel_size,
stride=1,
data_format='NDHWC',
scope='AvgPool_0a_7x7')
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv3d(
net,
num_classes, [1, 1, 1],
activation_fn=None,
normalizer_fn=None,
data_format='NDHWC',
scope='Conv2d_0c_1x1')
# Temporal average pooling.
logits = tf.reduce_mean(input_tensor=logits, axis=1)
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
s3dg.default_image_size = 224
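# Hedged usage sketch (illustrative, not part of the original file): builds the
# S3D-G classifier on a random clip batch. The 64-frame 224x224 input and the
# 400-class head are assumed example values, not requirements of this module.
def _s3dg_usage_example():
  """Builds S3D-G on a random video batch and returns logits and end_points."""
  with tf.Graph().as_default():
    # A batch of two 64-frame RGB clips at the default 224x224 resolution.
    videos = tf.random.uniform([2, 64, 224, 224, 3])
    # s3dg_arg_scope is defined in this module (see also nets_factory).
    with slim.arg_scope(s3dg_arg_scope()):
      logits, end_points = s3dg(videos, num_classes=400, is_training=False)
    # logits has shape [2, 400]; end_points maps block names to activations.
    return logits, end_points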
| 23,312 | 38.247475 | 80 | py |
models | models-master/research/slim/nets/cifarnet.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a variant of the CIFAR-10 model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
stddev=stddev)
def cifarnet(images, num_classes=10, is_training=False,
dropout_keep_prob=0.5,
prediction_fn=slim.softmax,
scope='CifarNet'):
"""Creates a variant of the CifarNet model.
Note that since the output is a set of 'logits', the values fall in the
interval of (-infinity, infinity). Consequently, to convert the outputs to a
  probability distribution over the classes, one will need to convert them
using the softmax function:
        logits, _ = cifarnet.cifarnet(images, is_training=False)
probabilities = tf.nn.softmax(logits)
predictions = tf.argmax(logits, 1)
Args:
images: A batch of `Tensors` of size [batch_size, height, width, channels].
num_classes: the number of classes in the dataset. If 0 or None, the logits
layer is omitted and the input features to the logits layer are returned
instead.
is_training: specifies whether or not we're currently training the model.
This variable will determine the behaviour of the dropout layer.
dropout_keep_prob: the percentage of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
scope: Optional variable_scope.
Returns:
net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the input to the logits layer if num_classes
is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
"""
end_points = {}
with tf.variable_scope(scope, 'CifarNet', [images]):
net = slim.conv2d(images, 64, [5, 5], scope='conv1')
end_points['conv1'] = net
net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
end_points['pool1'] = net
net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')
net = slim.conv2d(net, 64, [5, 5], scope='conv2')
end_points['conv2'] = net
net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')
net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
end_points['pool2'] = net
net = slim.flatten(net)
end_points['Flatten'] = net
net = slim.fully_connected(net, 384, scope='fc3')
end_points['fc3'] = net
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout3')
net = slim.fully_connected(net, 192, scope='fc4')
end_points['fc4'] = net
if not num_classes:
return net, end_points
logits = slim.fully_connected(
net,
num_classes,
biases_initializer=tf.zeros_initializer(),
weights_initializer=trunc_normal(1 / 192.0),
weights_regularizer=None,
activation_fn=None,
scope='logits')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
cifarnet.default_image_size = 32
def cifarnet_arg_scope(weight_decay=0.004):
"""Defines the default cifarnet argument scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
Returns:
    An `arg_scope` to use for the cifarnet model.
"""
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(
stddev=5e-2),
activation_fn=tf.nn.relu):
with slim.arg_scope(
[slim.fully_connected],
biases_initializer=tf.constant_initializer(0.1),
weights_initializer=trunc_normal(0.04),
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu) as sc:
return sc
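# Hedged usage sketch (illustrative, not part of the original file): builds
# CifarNet under its default arg scope on a random CIFAR-sized batch.
def _cifarnet_usage_example():
  """Builds CifarNet and returns its logits and end_points."""
  with tf.Graph().as_default():
    # Eight random images at the default 32x32 resolution.
    images = tf.random.uniform([8, 32, 32, 3])
    with slim.arg_scope(cifarnet_arg_scope()):
      logits, end_points = cifarnet(images, num_classes=10, is_training=True)
    # logits has shape [8, 10]; end_points['Predictions'] holds the softmax.
    return logits, end_points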
| 4,629 | 36.95082 | 80 | py |
models | models-master/research/slim/nets/resnet_v2_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import resnet_utils
from nets import resnet_v2
tf.disable_resource_variables()
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
else:
return tf.cast(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]), [batch_size, 1, 1, channels]),
dtype=tf.float32)
class ResnetUtilsTest(tf.test.TestCase):
def testSubsampleThreeByThree(self):
x = tf.reshape(tf.cast(tf.range(9), dtype=tf.float32), [1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = tf.reshape(tf.cast(tf.range(16), dtype=tf.float32), [1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.cast([[14, 28, 43, 26], [28, 48, 66, 37], [43, 66, 84, 46],
[26, 37, 46, 22]],
dtype=tf.float32)
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.cast([[14, 43], [43, 84]], dtype=tf.float32)
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = tf.cast([[48, 37], [37, 22]], dtype=tf.float32)
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.cast(
[[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55],
[58, 84, 102, 120, 64], [34, 46, 55, 64, 30]],
dtype=tf.float32)
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.cast([[14, 43, 34], [43, 84, 55], [34, 55, 30]],
dtype=tf.float32)
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
return net, end_points
def testEndPointsV2(self):
"""Test the end points of a tiny v2 bottleneck network."""
blocks = [
resnet_v2.resnet_v2_block(
'block1', base_depth=1, num_units=2, stride=2),
resnet_v2.resnet_v2_block(
'block2', base_depth=2, num_units=2, stride=1),
]
inputs = create_test_input(2, 32, 16, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v2/shortcut',
'tiny/block1/unit_1/bottleneck_v2/conv1',
'tiny/block1/unit_1/bottleneck_v2/conv2',
'tiny/block1/unit_1/bottleneck_v2/conv3',
'tiny/block1/unit_2/bottleneck_v2/conv1',
'tiny/block1/unit_2/bottleneck_v2/conv2',
'tiny/block1/unit_2/bottleneck_v2/conv3',
'tiny/block2/unit_1/bottleneck_v2/shortcut',
'tiny/block2/unit_1/bottleneck_v2/conv1',
'tiny/block2/unit_1/bottleneck_v2/conv2',
'tiny/block2/unit_1/bottleneck_v2/conv3',
'tiny/block2/unit_2/bottleneck_v2/conv1',
'tiny/block2/unit_2/bottleneck_v2/conv2',
'tiny/block2/unit_2/bottleneck_v2/conv3']
self.assertItemsEqual(expected, end_points.keys())
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
def testAtrousValuesBottleneck(self):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
"""
block = resnet_v2.resnet_v2_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with slim.arg_scope([slim.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs,
blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(tf.test.TestCase):
"""Tests with complete small ResNet v2 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_small'):
"""A shallow and thin ResNet v2 for faster tests."""
block = resnet_v2.resnet_v2_block
blocks = [
block('block1', base_depth=1, num_units=3, stride=2),
block('block2', base_depth=2, num_units=3, stride=2),
block('block3', base_depth=4, num_units=3, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
return resnet_v2.resnet_v2(inputs, blocks, num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=include_root_block,
spatial_squeeze=spatial_squeeze,
reuse=reuse,
scope=scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
self.assertTrue('global_pool' in end_points)
self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
[2, 1, 1, 32])
def testEndpointNames(self):
# Like ResnetUtilsTest.testEndPointsV2(), but for the public API.
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
expected = ['resnet/conv1']
for block in range(1, 5):
for unit in range(1, 4 if block < 4 else 3):
for conv in range(1, 4):
expected.append('resnet/block%d/unit_%d/bottleneck_v2/conv%d' %
(block, unit, conv))
expected.append('resnet/block%d/unit_%d/bottleneck_v2' % (block, unit))
expected.append('resnet/block%d/unit_1/bottleneck_v2/shortcut' % block)
expected.append('resnet/block%d' % block)
expected.extend(['global_pool', 'resnet/logits', 'resnet/spatial_squeeze',
'predictions'])
self.assertItemsEqual(end_points.keys(), expected)
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
include_root_block=False,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs, None,
is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None,
is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None,
global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
tf.test.main()
| 20,270 | 41.947034 | 80 | py |
models | models-master/research/slim/nets/inception.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Brings all inception models under one namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from nets.inception_resnet_v2 import inception_resnet_v2
from nets.inception_resnet_v2 import inception_resnet_v2_arg_scope
from nets.inception_resnet_v2 import inception_resnet_v2_base
from nets.inception_v1 import inception_v1
from nets.inception_v1 import inception_v1_arg_scope
from nets.inception_v1 import inception_v1_base
from nets.inception_v2 import inception_v2
from nets.inception_v2 import inception_v2_arg_scope
from nets.inception_v2 import inception_v2_base
from nets.inception_v3 import inception_v3
from nets.inception_v3 import inception_v3_arg_scope
from nets.inception_v3 import inception_v3_base
from nets.inception_v4 import inception_v4
from nets.inception_v4 import inception_v4_arg_scope
from nets.inception_v4 import inception_v4_base
# pylint: enable=unused-import
| 1,676 | 43.131579 | 80 | py |
models | models-master/research/slim/nets/pix2pix.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Implementation of the Image-to-Image Translation model.
This network represents a port of the following work:
Image-to-Image Translation with Conditional Adversarial Networks
Phillip Isola, Jun-Yan Zhu, Tinghui Zhou and Alexei A. Efros
Arxiv, 2017
https://phillipi.github.io/pix2pix/
A reference implementation written in Lua can be found at:
https://github.com/phillipi/pix2pix/blob/master/models.lua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
def pix2pix_arg_scope():
"""Returns a default argument scope for isola_net.
Returns:
An arg scope.
"""
# These parameters come from the online port, which don't necessarily match
# those in the paper.
# TODO(nsilberman): confirm these values with Philip.
instance_norm_params = {
'center': True,
'scale': True,
'epsilon': 0.00001,
}
with slim.arg_scope(
[slim.conv2d, slim.conv2d_transpose],
normalizer_fn=slim.instance_norm,
normalizer_params=instance_norm_params,
weights_initializer=tf.random_normal_initializer(0, 0.02)) as sc:
return sc
def upsample(net, num_outputs, kernel_size, method='nn_upsample_conv'):
"""Upsamples the given inputs.
Args:
net: A `Tensor` of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
kernel_size: A list of 2 scalars or a 1x2 `Tensor` indicating the scale,
relative to the inputs, of the output dimensions. For example, if kernel
size is [2, 3], then the output height and width will be twice and three
times the input size.
method: The upsampling method.
Returns:
    A `Tensor` which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
"""
net_shape = tf.shape(input=net)
height = net_shape[1]
width = net_shape[2]
if method == 'nn_upsample_conv':
net = tf.image.resize(
net, [kernel_size[0] * height, kernel_size[1] * width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
net = slim.conv2d(net, num_outputs, [4, 4], activation_fn=None)
elif method == 'conv2d_transpose':
net = slim.conv2d_transpose(
net, num_outputs, [4, 4], stride=kernel_size, activation_fn=None)
else:
raise ValueError('Unknown method: [%s]' % method)
return net
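# Hedged sketch (illustrative, not part of the original file): doubles a
# feature map's spatial size with each of the two supported upsample methods.
def _upsample_usage_example():
  """Returns a random feature map upsampled 2x by each supported method."""
  features = tf.random.uniform([1, 64, 64, 32])
  nn_up = upsample(features, num_outputs=16, kernel_size=[2, 2],
                   method='nn_upsample_conv')  # -> [1, 128, 128, 16]
  deconv_up = upsample(features, num_outputs=16, kernel_size=[2, 2],
                       method='conv2d_transpose')  # Same output shape.
  return nn_up, deconv_up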
class Block(
collections.namedtuple('Block', ['num_filters', 'decoder_keep_prob'])):
"""Represents a single block of encoder and decoder processing.
The Image-to-Image translation paper works a bit differently than the original
U-Net model. In particular, each block represents a single operation in the
encoder which is concatenated with the corresponding decoder representation.
A dropout layer follows the concatenation and convolution of the concatenated
features.
"""
pass
def _default_generator_blocks():
"""Returns the default generator block definitions.
Returns:
A list of generator blocks.
"""
return [
Block(64, 0.5),
Block(128, 0.5),
Block(256, 0.5),
Block(512, 0),
Block(512, 0),
Block(512, 0),
Block(512, 0),
]
def pix2pix_generator(net,
num_outputs,
blocks=None,
upsample_method='nn_upsample_conv',
is_training=False): # pylint: disable=unused-argument
"""Defines the network architecture.
Args:
net: A `Tensor` of size [batch, height, width, channels]. Note that the
generator currently requires square inputs (e.g. height=width).
num_outputs: The number of (per-pixel) outputs.
blocks: A list of generator blocks or `None` to use the default generator
definition.
upsample_method: The method of upsampling images, one of 'nn_upsample_conv'
      or 'conv2d_transpose'.
is_training: Whether or not we're in training or testing mode.
Returns:
A `Tensor` representing the model output and a dictionary of model end
points.
Raises:
ValueError: if the input heights do not match their widths.
"""
end_points = {}
blocks = blocks or _default_generator_blocks()
input_size = net.get_shape().as_list()
input_size[3] = num_outputs
upsample_fn = functools.partial(upsample, method=upsample_method)
encoder_activations = []
###########
# Encoder #
###########
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.conv2d],
kernel_size=[4, 4],
stride=2,
activation_fn=tf.nn.leaky_relu):
for block_id, block in enumerate(blocks):
# No normalizer for the first encoder layers as per 'Image-to-Image',
# Section 5.1.1
if block_id == 0:
# First layer doesn't use normalizer_fn
net = slim.conv2d(net, block.num_filters, normalizer_fn=None)
elif block_id < len(blocks) - 1:
net = slim.conv2d(net, block.num_filters)
else:
# Last layer doesn't use activation_fn nor normalizer_fn
net = slim.conv2d(
net, block.num_filters, activation_fn=None, normalizer_fn=None)
encoder_activations.append(net)
end_points['encoder%d' % block_id] = net
###########
# Decoder #
###########
reversed_blocks = list(blocks)
reversed_blocks.reverse()
with tf.variable_scope('decoder'):
# Dropout is used at both train and test time as per 'Image-to-Image',
# Section 2.1 (last paragraph).
with slim.arg_scope([slim.dropout], is_training=True):
for block_id, block in enumerate(reversed_blocks):
if block_id > 0:
net = tf.concat([net, encoder_activations[-block_id - 1]], axis=3)
# The Relu comes BEFORE the upsample op:
net = tf.nn.relu(net)
net = upsample_fn(net, block.num_filters, [2, 2])
if block.decoder_keep_prob > 0:
net = slim.dropout(net, keep_prob=block.decoder_keep_prob)
end_points['decoder%d' % block_id] = net
with tf.variable_scope('output'):
# Explicitly set the normalizer_fn to None to override any default value
# that may come from an arg_scope, such as pix2pix_arg_scope.
logits = slim.conv2d(
net, num_outputs, [4, 4], activation_fn=None, normalizer_fn=None)
logits = tf.reshape(logits, input_size)
end_points['logits'] = logits
end_points['predictions'] = tf.tanh(logits)
return logits, end_points
def pix2pix_discriminator(net, num_filters, padding=2, pad_mode='REFLECT',
activation_fn=tf.nn.leaky_relu, is_training=False):
"""Creates the Image2Image Translation Discriminator.
Args:
net: A `Tensor` of size [batch_size, height, width, channels] representing
the input.
num_filters: A list of the filters in the discriminator. The length of the
list determines the number of layers in the discriminator.
padding: Amount of reflection padding applied before each convolution.
pad_mode: mode for tf.pad, one of "CONSTANT", "REFLECT", or "SYMMETRIC".
activation_fn: activation fn for slim.conv2d.
is_training: Whether or not the model is training or testing.
Returns:
A logits `Tensor` of size [batch_size, N, N, 1] where N is the number of
'patches' we're attempting to discriminate and a dictionary of model end
points.
"""
del is_training
end_points = {}
num_layers = len(num_filters)
def padded(net, scope):
if padding:
with tf.variable_scope(scope):
spatial_pad = tf.constant(
[[0, 0], [padding, padding], [padding, padding], [0, 0]],
dtype=tf.int32)
return tf.pad(tensor=net, paddings=spatial_pad, mode=pad_mode)
else:
return net
with slim.arg_scope([slim.conv2d],
kernel_size=[4, 4],
stride=2,
padding='valid',
activation_fn=activation_fn):
# No normalization on the input layer.
net = slim.conv2d(
padded(net, 'conv0'), num_filters[0], normalizer_fn=None, scope='conv0')
end_points['conv0'] = net
for i in range(1, num_layers - 1):
net = slim.conv2d(
padded(net, 'conv%d' % i), num_filters[i], scope='conv%d' % i)
end_points['conv%d' % i] = net
# Stride 1 on the last layer.
net = slim.conv2d(
padded(net, 'conv%d' % (num_layers - 1)),
num_filters[-1],
stride=1,
scope='conv%d' % (num_layers - 1))
end_points['conv%d' % (num_layers - 1)] = net
# 1-dim logits, stride 1, no activation, no normalization.
logits = slim.conv2d(
padded(net, 'conv%d' % num_layers),
1,
stride=1,
activation_fn=None,
normalizer_fn=None,
scope='conv%d' % num_layers)
end_points['logits'] = logits
end_points['predictions'] = tf.sigmoid(logits)
return logits, end_points
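# Hedged usage sketch (illustrative, not part of the original file): wires the
# generator and the conditional patch discriminator together. The 256x256
# resolution and the [64, 128, 256, 512] discriminator widths are assumed
# example values mirroring the paper, not requirements of this module.
def _pix2pix_usage_example():
  """Builds a generator/discriminator pair and returns their logits."""
  with tf.Graph().as_default():
    source = tf.random.uniform([1, 256, 256, 3])
    with slim.arg_scope(pix2pix_arg_scope()):
      gen_logits, _ = pix2pix_generator(source, num_outputs=3)
      # The discriminator scores the source image concatenated with the
      # tanh-squashed generated image, i.e. the conditional GAN setup.
      disc_logits, _ = pix2pix_discriminator(
          tf.concat([source, tf.tanh(gen_logits)], axis=3),
          num_filters=[64, 128, 256, 512])
    # gen_logits: [1, 256, 256, 3]; disc_logits: [1, N, N, 1] patch scores.
    return gen_logits, disc_logits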
| 9,709 | 32.027211 | 80 | py |
models | models-master/research/slim/nets/nets_factory.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tf_slim as slim
from nets import alexnet
from nets import cifarnet
from nets import i3d
from nets import inception
from nets import lenet
from nets import mobilenet_v1
from nets import overfeat
from nets import resnet_v1
from nets import resnet_v2
from nets import s3dg
from nets import vgg
from nets.mobilenet import mobilenet_v2
from nets.mobilenet import mobilenet_v3
from nets.nasnet import nasnet
from nets.nasnet import pnasnet
networks_map = {
'alexnet_v2': alexnet.alexnet_v2,
'cifarnet': cifarnet.cifarnet,
'overfeat': overfeat.overfeat,
'vgg_a': vgg.vgg_a,
'vgg_16': vgg.vgg_16,
'vgg_19': vgg.vgg_19,
'inception_v1': inception.inception_v1,
'inception_v2': inception.inception_v2,
'inception_v3': inception.inception_v3,
'inception_v4': inception.inception_v4,
'inception_resnet_v2': inception.inception_resnet_v2,
'i3d': i3d.i3d,
's3dg': s3dg.s3dg,
'lenet': lenet.lenet,
'resnet_v1_50': resnet_v1.resnet_v1_50,
'resnet_v1_101': resnet_v1.resnet_v1_101,
'resnet_v1_152': resnet_v1.resnet_v1_152,
'resnet_v1_200': resnet_v1.resnet_v1_200,
'resnet_v2_50': resnet_v2.resnet_v2_50,
'resnet_v2_101': resnet_v2.resnet_v2_101,
'resnet_v2_152': resnet_v2.resnet_v2_152,
'resnet_v2_200': resnet_v2.resnet_v2_200,
'mobilenet_v1': mobilenet_v1.mobilenet_v1,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
'mobilenet_v2': mobilenet_v2.mobilenet,
'mobilenet_v2_140': mobilenet_v2.mobilenet_v2_140,
'mobilenet_v2_035': mobilenet_v2.mobilenet_v2_035,
'mobilenet_v3_small': mobilenet_v3.small,
'mobilenet_v3_large': mobilenet_v3.large,
'mobilenet_v3_small_minimalistic': mobilenet_v3.small_minimalistic,
'mobilenet_v3_large_minimalistic': mobilenet_v3.large_minimalistic,
'mobilenet_edgetpu': mobilenet_v3.edge_tpu,
'mobilenet_edgetpu_075': mobilenet_v3.edge_tpu_075,
'nasnet_cifar': nasnet.build_nasnet_cifar,
'nasnet_mobile': nasnet.build_nasnet_mobile,
'nasnet_large': nasnet.build_nasnet_large,
'pnasnet_large': pnasnet.build_pnasnet_large,
'pnasnet_mobile': pnasnet.build_pnasnet_mobile,
}
arg_scopes_map = {
'alexnet_v2': alexnet.alexnet_v2_arg_scope,
'cifarnet': cifarnet.cifarnet_arg_scope,
'overfeat': overfeat.overfeat_arg_scope,
'vgg_a': vgg.vgg_arg_scope,
'vgg_16': vgg.vgg_arg_scope,
'vgg_19': vgg.vgg_arg_scope,
'inception_v1': inception.inception_v3_arg_scope,
'inception_v2': inception.inception_v3_arg_scope,
'inception_v3': inception.inception_v3_arg_scope,
'inception_v4': inception.inception_v4_arg_scope,
'inception_resnet_v2': inception.inception_resnet_v2_arg_scope,
'i3d': i3d.i3d_arg_scope,
's3dg': s3dg.s3dg_arg_scope,
'lenet': lenet.lenet_arg_scope,
'resnet_v1_50': resnet_v1.resnet_arg_scope,
'resnet_v1_101': resnet_v1.resnet_arg_scope,
'resnet_v1_152': resnet_v1.resnet_arg_scope,
'resnet_v1_200': resnet_v1.resnet_arg_scope,
'resnet_v2_50': resnet_v2.resnet_arg_scope,
'resnet_v2_101': resnet_v2.resnet_arg_scope,
'resnet_v2_152': resnet_v2.resnet_arg_scope,
'resnet_v2_200': resnet_v2.resnet_arg_scope,
'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v2': mobilenet_v2.training_scope,
'mobilenet_v2_035': mobilenet_v2.training_scope,
'mobilenet_v2_140': mobilenet_v2.training_scope,
'mobilenet_v3_small': mobilenet_v3.training_scope,
'mobilenet_v3_large': mobilenet_v3.training_scope,
'mobilenet_v3_small_minimalistic': mobilenet_v3.training_scope,
'mobilenet_v3_large_minimalistic': mobilenet_v3.training_scope,
'mobilenet_edgetpu': mobilenet_v3.training_scope,
'mobilenet_edgetpu_075': mobilenet_v3.training_scope,
'nasnet_cifar': nasnet.nasnet_cifar_arg_scope,
'nasnet_mobile': nasnet.nasnet_mobile_arg_scope,
'nasnet_large': nasnet.nasnet_large_arg_scope,
'pnasnet_large': pnasnet.pnasnet_large_arg_scope,
'pnasnet_mobile': pnasnet.pnasnet_mobile_arg_scope,
}
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
"""Returns a network_fn such as `logits, end_points = network_fn(images)`.
Args:
name: The name of the network.
num_classes: The number of classes to use for classification. If 0 or None,
the logits layer is omitted and its input features are returned instead.
weight_decay: The l2 coefficient for the model weights.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
network_fn: A function that applies the model to a batch of images. It has
the following signature:
net, end_points = network_fn(images)
The `images` input is a tensor of shape [batch_size, height, width, 3 or
1] with height = width = network_fn.default_image_size. (The
permissibility and treatment of other sizes depends on the network_fn.)
The returned `end_points` are a dictionary of intermediate activations.
The returned `net` is the topmost layer, depending on `num_classes`:
If `num_classes` was a non-zero integer, `net` is a logits tensor
of shape [batch_size, num_classes].
If `num_classes` was 0 or `None`, `net` is a tensor with the input
to the logits layer of shape [batch_size, 1, 1, num_features] or
[batch_size, num_features]. Dropout has not been applied to this
      (even if the network's original classifier does); it remains for
the caller to do this or not.
Raises:
ValueError: If network `name` is not recognized.
"""
if name not in networks_map:
raise ValueError('Name of network unknown %s' % name)
func = networks_map[name]
@functools.wraps(func)
def network_fn(images, **kwargs):
arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
with slim.arg_scope(arg_scope):
return func(images, num_classes=num_classes, is_training=is_training,
**kwargs)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return network_fn
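# Hedged usage sketch (illustrative, not part of the original file): resolves a
# model by name and applies it to a random image batch. 'inception_v3' and the
# 1001-class head are example choices; any key of `networks_map` works.
def _get_network_fn_usage_example():
  """Builds a factory-produced network and returns its outputs."""
  # Local import: the factory module itself only needs slim and the nets.
  import tensorflow.compat.v1 as tf
  with tf.Graph().as_default():
    network_fn = get_network_fn(
        'inception_v3', num_classes=1001, weight_decay=4e-5, is_training=False)
    size = network_fn.default_image_size  # 299 for inception_v3.
    images = tf.random.uniform([4, size, size, 3])
    logits, end_points = network_fn(images)
    # logits has shape [4, 1001]; end_points holds intermediate activations.
    return logits, end_points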
| 7,253 | 41.421053 | 80 | py |
models | models-master/research/slim/nets/post_training_quantization.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export quantized tflite model from a trained checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from nets import nets_factory
from preprocessing import preprocessing_factory
flags.DEFINE_string("model_name", None,
"The name of the architecture to quantize.")
flags.DEFINE_string("checkpoint_path", None, "Path to the training checkpoint.")
flags.DEFINE_string("dataset_name", "imagenet2012",
"Name of the dataset to use for quantization calibration.")
flags.DEFINE_string("dataset_dir", None, "Dataset location.")
flags.DEFINE_string(
"dataset_split", "train",
"The dataset split (train, validation etc.) to use for calibration.")
flags.DEFINE_string("output_tflite", None, "Path to output tflite file.")
flags.DEFINE_boolean(
"use_model_specific_preprocessing", False,
"When true, uses the preprocessing corresponding to the model as specified "
"in preprocessing factory.")
flags.DEFINE_boolean("enable_ema", True,
"Load exponential moving average version of variables.")
flags.DEFINE_integer(
"num_steps", 1000,
"Number of post-training quantization calibration steps to run.")
flags.DEFINE_integer("image_size", 224, "Size of the input image.")
flags.DEFINE_integer("num_classes", 1001,
"Number of output classes for the model.")
FLAGS = flags.FLAGS
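# Hedged example invocation (illustrative; the model name and paths below are
# placeholders, not values shipped with this script):
#   python post_training_quantization.py \
#     --model_name=mobilenet_v1 \
#     --checkpoint_path=/tmp/checkpoints/mobilenet_v1.ckpt \
#     --dataset_dir=/tmp/tensorflow_datasets \
#     --output_tflite=/tmp/mobilenet_v1_int8.tflite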
# Mean and standard deviation used for normalizing the image tensor.
_MEAN_RGB = 127.5
_STD_RGB = 127.5
def _preprocess_for_quantization(image_data, image_size, crop_padding=32):
"""Crops to center of image with padding then scales, normalizes image_size.
Args:
image_data: A 3D Tensor representing the RGB image data. Image can be of
arbitrary height and width.
image_size: image height/width dimension.
crop_padding: the padding size to use when centering the crop.
Returns:
A decoded and cropped image Tensor. Image is normalized to [-1,1].
"""
shape = tf.shape(image_data)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
(image_size * 1.0 / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
image = tf.image.crop_to_bounding_box(
image_data,
offset_height=offset_height,
offset_width=offset_width,
target_height=padded_center_crop_size,
target_width=padded_center_crop_size)
image = tf.image.resize([image], [image_size, image_size],
method=tf.image.ResizeMethod.BICUBIC)[0]
image = tf.cast(image, tf.float32)
image -= tf.constant(_MEAN_RGB)
image /= tf.constant(_STD_RGB)
return image
def restore_model(sess, checkpoint_path, enable_ema=True):
"""Restore variables from the checkpoint into the provided session.
Args:
sess: A tensorflow session where the checkpoint will be loaded.
checkpoint_path: Path to the trained checkpoint.
enable_ema: (optional) Whether to load the exponential moving average (ema)
version of the tensorflow variables. Defaults to True.
"""
if enable_ema:
ema = tf.train.ExponentialMovingAverage(decay=0.0)
ema_vars = tf.trainable_variables() + tf.get_collection("moving_vars")
for v in tf.global_variables():
if "moving_mean" in v.name or "moving_variance" in v.name:
ema_vars.append(v)
ema_vars = list(set(ema_vars))
var_dict = ema.variables_to_restore(ema_vars)
else:
var_dict = None
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(var_dict, max_to_keep=1)
saver.restore(sess, checkpoint_path)
def _representative_dataset_gen():
"""Gets a python generator of numpy arrays for the given dataset."""
image_size = FLAGS.image_size
dataset = tfds.builder(FLAGS.dataset_name, data_dir=FLAGS.dataset_dir)
dataset.download_and_prepare()
data = dataset.as_dataset()[FLAGS.dataset_split]
iterator = tf.data.make_one_shot_iterator(data)
if FLAGS.use_model_specific_preprocessing:
preprocess_fn = functools.partial(
preprocessing_factory.get_preprocessing(name=FLAGS.model_name),
output_height=image_size,
output_width=image_size)
else:
preprocess_fn = functools.partial(
_preprocess_for_quantization, image_size=image_size)
features = iterator.get_next()
image = features["image"]
image = preprocess_fn(image)
image = tf.reshape(image, [1, image_size, image_size, 3])
for _ in range(FLAGS.num_steps):
yield [image.eval()]
def main(_):
with tf.Graph().as_default(), tf.Session() as sess:
network_fn = nets_factory.get_network_fn(
FLAGS.model_name, num_classes=FLAGS.num_classes, is_training=False)
image_size = FLAGS.image_size
images = tf.placeholder(
tf.float32, shape=(1, image_size, image_size, 3), name="images")
logits, _ = network_fn(images)
output_tensor = tf.nn.softmax(logits)
restore_model(sess, FLAGS.checkpoint_path, enable_ema=FLAGS.enable_ema)
converter = tf.lite.TFLiteConverter.from_session(sess, [images],
[output_tensor])
converter.representative_dataset = tf.lite.RepresentativeDataset(
_representative_dataset_gen)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_buffer = converter.convert()
with tf.gfile.GFile(FLAGS.output_tflite, "wb") as output_tflite:
output_tflite.write(tflite_buffer)
print("tflite model written to %s" % FLAGS.output_tflite)
if __name__ == "__main__":
flags.mark_flag_as_required("model_name")
flags.mark_flag_as_required("checkpoint_path")
flags.mark_flag_as_required("dataset_dir")
flags.mark_flag_as_required("output_tflite")
app.run(main)
| 6,926 | 37.06044 | 80 | py |
models | models-master/research/slim/nets/vgg.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains model definitions for versions of the Oxford VGG network.
These model definitions were introduced in the following technical report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
Usage:
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_a(inputs)
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_16(inputs)
@@vgg_a
@@vgg_16
@@vgg_19
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
def vgg_arg_scope(weight_decay=0.0005):
"""Defines the VGG arg scope.
Args:
weight_decay: The l2 regularization coefficient.
Returns:
An arg_scope.
"""
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
return arg_sc
def vgg_a(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
reuse=None,
scope='vgg_a',
fc_conv_padding='VALID',
global_pool=False):
"""Oxford Net VGG 11-Layers version A Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output.
Otherwise, the output prediction map will be (input / 32) - 6 in case of
'VALID' padding.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original VGG architecture.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'vgg_a', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
vgg_a.default_image_size = 224
def vgg_16(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
reuse=None,
scope='vgg_16',
fc_conv_padding='VALID',
global_pool=False):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output.
Otherwise, the output prediction map will be (input / 32) - 6 in case of
'VALID' padding.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original VGG architecture.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(
scope, 'vgg_16', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into an end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
vgg_16.default_image_size = 224
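# Illustrative usage sketch (added for clarity; not part of the original
# TF-Slim vgg.py). The helper name, the 224x224 input resolution and the
# 1000-way classifier are assumptions chosen for the example.
def _example_vgg_16_classification():
  """Builds a VGG-16 classification graph and returns logits/end_points."""
  images = tf.placeholder(tf.float32, [None, 224, 224, 3])
  with slim.arg_scope(vgg_arg_scope(weight_decay=0.0005)):
    # With 224x224 inputs the spatial dimensions collapse to 1x1 at fc8, so
    # the default spatial_squeeze yields logits of shape [None, 1000].
    logits, end_points = vgg_16(images, num_classes=1000, is_training=False)
  return logits, end_points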
def vgg_19(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
reuse=None,
scope='vgg_19',
fc_conv_padding='VALID',
global_pool=False):
"""Oxford Net VGG 19-Layers version E Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output.
Otherwise, the output prediction map will be (input / 32) - 6 in case of
'VALID' padding.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original VGG architecture.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the non-dropped-out input to the logits layer (if num_classes is 0 or
None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(
scope, 'vgg_19', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into an end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
vgg_19.default_image_size = 224
# Alias
vgg_d = vgg_16
vgg_e = vgg_19
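# Illustrative fully-convolutional usage sketch (added; not part of the
# original file). The 512x512 input and the 21 classes are assumptions; with
# fc_conv_padding='SAME' and spatial_squeeze=False the network produces a
# dense prediction map downsampled by a factor of 32, as described in the
# docstrings above.
def _example_vgg_16_fully_convolutional():
  images = tf.placeholder(tf.float32, [1, 512, 512, 3])
  with slim.arg_scope(vgg_arg_scope()):
    # 512 / 32 = 16, so the prediction map has shape [1, 16, 16, 21].
    predictions, _ = vgg_16(images, num_classes=21, is_training=False,
                            spatial_squeeze=False, fc_conv_padding='SAME')
  return predictions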
| 14,608 | 45.231013 | 80 | py |
models | models-master/research/slim/nets/cyclegan.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the CycleGAN generator and discriminator networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.python.framework import tensor_util
def cyclegan_arg_scope(instance_norm_center=True,
instance_norm_scale=True,
instance_norm_epsilon=0.001,
weights_init_stddev=0.02,
weight_decay=0.0):
"""Returns a default argument scope for all generators and discriminators.
Args:
instance_norm_center: Whether instance normalization applies centering.
instance_norm_scale: Whether instance normalization applies scaling.
instance_norm_epsilon: Small float added to the variance in the instance
normalization to avoid dividing by zero.
weights_init_stddev: Standard deviation of the random values to initialize
the convolution kernels with.
weight_decay: Magnitude of weight decay applied to all convolution kernel
variables of the generator.
Returns:
An arg-scope.
"""
instance_norm_params = {
'center': instance_norm_center,
'scale': instance_norm_scale,
'epsilon': instance_norm_epsilon,
}
weights_regularizer = None
if weight_decay and weight_decay > 0.0:
weights_regularizer = slim.l2_regularizer(weight_decay)
with slim.arg_scope(
[slim.conv2d],
normalizer_fn=slim.instance_norm,
normalizer_params=instance_norm_params,
weights_initializer=tf.random_normal_initializer(
0, weights_init_stddev),
weights_regularizer=weights_regularizer) as sc:
return sc
def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
pad_mode='REFLECT', align_corners=False):
"""Upsamples the given inputs.
Args:
net: A Tensor of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
      relative to the inputs, of the output dimensions. For example, if the
      stride is [2, 3], then the output height and width will be twice and
      three times the input size.
method: The upsampling method: 'nn_upsample_conv', 'bilinear_upsample_conv',
or 'conv2d_transpose'.
pad_mode: mode for tf.pad, one of "CONSTANT", "REFLECT", or "SYMMETRIC".
align_corners: option for method, 'bilinear_upsample_conv'. If true, the
centers of the 4 corner pixels of the input and output tensors are
aligned, preserving the values at the corner pixels.
Returns:
A Tensor which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
"""
with tf.variable_scope('upconv'):
net_shape = tf.shape(input=net)
height = net_shape[1]
width = net_shape[2]
# Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a 3x3
# 'valid' convolution produce an output with the same dimension as the
# input.
spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
if method == 'nn_upsample_conv':
net = tf.image.resize(
net, [stride[0] * height, stride[1] * width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
net = tf.pad(tensor=net, paddings=spatial_pad_1, mode=pad_mode)
net = slim.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
elif method == 'bilinear_upsample_conv':
net = tf.image.resize_bilinear(
net, [stride[0] * height, stride[1] * width],
align_corners=align_corners)
net = tf.pad(tensor=net, paddings=spatial_pad_1, mode=pad_mode)
net = slim.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
elif method == 'conv2d_transpose':
# This corrects 1 pixel offset for images with even width and height.
# conv2d is left aligned and conv2d_transpose is right aligned for even
# sized images (while doing 'SAME' padding).
# Note: This doesn't reflect actual model in paper.
net = slim.conv2d_transpose(
net, num_outputs, kernel_size=[3, 3], stride=stride, padding='valid')
net = net[:, 1:, 1:, :]
else:
raise ValueError('Unknown method: [%s]' % method)
return net
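# Illustrative sketch (added; not part of the original file): doubling the
# spatial resolution of a feature map with the nearest-neighbor variant. The
# input shape and filter count are assumptions chosen for the example.
def _example_cyclegan_upsample():
  features = tf.placeholder(tf.float32, [1, 64, 64, 128])
  # Returns a [1, 128, 128, 64] tensor: spatial dims are scaled by `stride`
  # and the channel count is set by `num_outputs`.
  return cyclegan_upsample(features, num_outputs=64, stride=[2, 2],
                           method='nn_upsample_conv')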
def _dynamic_or_static_shape(tensor):
shape = tf.shape(input=tensor)
static_shape = tensor_util.constant_value(shape)
return static_shape if static_shape is not None else shape
def cyclegan_generator_resnet(images,
arg_scope_fn=cyclegan_arg_scope,
num_resnet_blocks=6,
num_filters=64,
upsample_fn=cyclegan_upsample,
kernel_size=3,
tanh_linear_slope=0.0,
is_training=False):
"""Defines the cyclegan resnet network architecture.
As closely as possible following
https://github.com/junyanz/CycleGAN/blob/master/models/architectures.lua#L232
FYI: This network requires input height and width to be divisible by 4 in
order to generate an output with shape equal to input shape. Assertions will
catch this if input dimensions are known at graph construction time, but
there's no protection if unknown at graph construction time (you'll see an
error).
Args:
images: Input image tensor of shape [batch_size, h, w, 3].
arg_scope_fn: Function to create the global arg_scope for the network.
num_resnet_blocks: Number of ResNet blocks in the middle of the generator.
num_filters: Number of filters of the first hidden layer.
upsample_fn: Upsampling function for the decoder part of the generator.
kernel_size: Size w or list/tuple [h, w] of the filter kernels for all inner
layers.
tanh_linear_slope: Slope of the linear function to add to the tanh over the
logits.
is_training: Whether the network is created in training mode or inference
only mode. Not actually needed, just for compliance with other generator
network functions.
Returns:
A `Tensor` representing the model output and a dictionary of model end
points.
Raises:
ValueError: If the input height or width is known at graph construction time
and not a multiple of 4.
"""
  # Neither dropout nor batch norm -> don't need is_training
del is_training
end_points = {}
input_size = images.shape.as_list()
height, width = input_size[1], input_size[2]
if height and height % 4 != 0:
raise ValueError('The input height must be a multiple of 4.')
if width and width % 4 != 0:
raise ValueError('The input width must be a multiple of 4.')
num_outputs = input_size[3]
if not isinstance(kernel_size, (list, tuple)):
kernel_size = [kernel_size, kernel_size]
kernel_height = kernel_size[0]
kernel_width = kernel_size[1]
pad_top = (kernel_height - 1) // 2
pad_bottom = kernel_height // 2
pad_left = (kernel_width - 1) // 2
pad_right = kernel_width // 2
paddings = np.array(
[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
dtype=np.int32)
spatial_pad_3 = np.array([[0, 0], [3, 3], [3, 3], [0, 0]])
with slim.arg_scope(arg_scope_fn()):
###########
# Encoder #
###########
with tf.variable_scope('input'):
# 7x7 input stage
net = tf.pad(tensor=images, paddings=spatial_pad_3, mode='REFLECT')
net = slim.conv2d(net, num_filters, kernel_size=[7, 7], padding='VALID')
end_points['encoder_0'] = net
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.conv2d],
kernel_size=kernel_size,
stride=2,
activation_fn=tf.nn.relu,
padding='VALID'):
net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
net = slim.conv2d(net, num_filters * 2)
end_points['encoder_1'] = net
net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
net = slim.conv2d(net, num_filters * 4)
end_points['encoder_2'] = net
###################
# Residual Blocks #
###################
with tf.variable_scope('residual_blocks'):
with slim.arg_scope([slim.conv2d],
kernel_size=kernel_size,
stride=1,
activation_fn=tf.nn.relu,
padding='VALID'):
for block_id in xrange(num_resnet_blocks):
with tf.variable_scope('block_{}'.format(block_id)):
res_net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
res_net = slim.conv2d(res_net, num_filters * 4)
res_net = tf.pad(tensor=res_net, paddings=paddings, mode='REFLECT')
res_net = slim.conv2d(res_net, num_filters * 4, activation_fn=None)
net += res_net
end_points['resnet_block_%d' % block_id] = net
###########
# Decoder #
###########
with tf.variable_scope('decoder'):
with slim.arg_scope([slim.conv2d],
kernel_size=kernel_size,
stride=1,
activation_fn=tf.nn.relu):
with tf.variable_scope('decoder1'):
net = upsample_fn(net, num_outputs=num_filters * 2, stride=[2, 2])
end_points['decoder1'] = net
with tf.variable_scope('decoder2'):
net = upsample_fn(net, num_outputs=num_filters, stride=[2, 2])
end_points['decoder2'] = net
with tf.variable_scope('output'):
net = tf.pad(tensor=net, paddings=spatial_pad_3, mode='REFLECT')
logits = slim.conv2d(
net,
num_outputs, [7, 7],
activation_fn=None,
normalizer_fn=None,
padding='valid')
logits = tf.reshape(logits, _dynamic_or_static_shape(images))
end_points['logits'] = logits
end_points['predictions'] = tf.tanh(logits) + logits * tanh_linear_slope
return end_points['predictions'], end_points
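# Illustrative usage sketch (added; not part of the original file). The
# 256x256 RGB input is an assumption; height and width only need to be
# divisible by 4, as documented above.
def _example_cyclegan_generator():
  images = tf.placeholder(tf.float32, [1, 256, 256, 3])
  # `generated` has the same shape as `images`; its values lie roughly in
  # [-1, 1] because of the tanh applied to the logits.
  generated, end_points = cyclegan_generator_resnet(images)
  return generated, end_points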
| 10,888 | 38.310469 | 80 | py |
models | models-master/research/slim/nets/i3d.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for Inflated 3D Inception V1 (I3D).
The network architecture is proposed by:
Joao Carreira and Andrew Zisserman,
Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset.
https://arxiv.org/abs/1705.07750
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import i3d_utils
from nets import s3dg
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
conv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal
def i3d_arg_scope(weight_decay=1e-7,
batch_norm_decay=0.999,
batch_norm_epsilon=0.001,
use_renorm=False,
separable_conv3d=False):
"""Defines default arg_scope for I3D.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
use_renorm: Whether to use batch renormalization or not.
separable_conv3d: Whether to use separable 3d Convs.
Returns:
sc: An arg_scope to use for the models.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
# Turns off fused batch norm.
'fused': False,
'renorm': use_renorm,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': ['moving_vars'],
'moving_variance': ['moving_vars'],
}
}
with slim.arg_scope(
[slim.conv3d, conv3d_spatiotemporal],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with slim.arg_scope(
[conv3d_spatiotemporal], separable=separable_conv3d) as sc:
return sc
def i3d_base(inputs, final_endpoint='Mixed_5c',
scope='InceptionV1'):
"""Defines the I3D base architecture.
Note that we use the names as defined in Inception V1 to facilitate checkpoint
conversion from an image-trained Inception V1 checkpoint to I3D checkpoint.
Args:
inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
channels].
final_endpoint: Specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
scope: Optional variable_scope.
Returns:
A dictionary from components of the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
return s3dg.s3dg_base(
inputs,
first_temporal_kernel_size=7,
temporal_conv_startat='Conv2d_2c_3x3',
gating_startat=None,
final_endpoint=final_endpoint,
min_depth=16,
depth_multiplier=1.0,
data_format='NDHWC',
scope=scope)
def i3d(inputs,
num_classes=1000,
dropout_keep_prob=0.8,
is_training=True,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1'):
"""Defines the I3D architecture.
The default image size used to train this network is 224x224.
Args:
inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
channels].
num_classes: number of predicted classes.
    dropout_keep_prob: the fraction of activation values that are retained.
    is_training: whether the model is being trained or not.
prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with tf.variable_scope(
scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
with slim.arg_scope(
[slim.batch_norm, slim.dropout], is_training=is_training):
net, end_points = i3d_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])
net = slim.avg_pool3d(
net, kernel_size, stride=1, scope='AvgPool_0a_7x7')
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv3d(
net,
num_classes, [1, 1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_0c_1x1')
# Temporal average pooling.
logits = tf.reduce_mean(input_tensor=logits, axis=1)
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
i3d.default_image_size = 224
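# Illustrative usage sketch (added; not part of the original file). The
# 64-frame 224x224 RGB clips and the 400-way classifier (Kinetics-style) are
# assumptions chosen for the example.
def _example_i3d_classification():
  clips = tf.placeholder(tf.float32, [2, 64, 224, 224, 3])
  with slim.arg_scope(i3d_arg_scope()):
    # logits has shape [2, 400] after the spatial squeeze and temporal
    # average pooling.
    logits, end_points = i3d(clips, num_classes=400, is_training=False)
  return logits, end_points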
| 6,381 | 34.455556 | 80 | py |
models | models-master/research/slim/nets/dcgan.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DCGAN generator and discriminator from https://arxiv.org/abs/1511.06434."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import log
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
import tf_slim as slim
def _validate_image_inputs(inputs):
inputs.get_shape().assert_has_rank(4)
inputs.get_shape()[1:3].assert_is_fully_defined()
if inputs.get_shape()[1] != inputs.get_shape()[2]:
raise ValueError('Input tensor does not have equal width and height: ',
inputs.get_shape()[1:3])
width = inputs.get_shape().as_list()[1]
if log(width, 2) != int(log(width, 2)):
raise ValueError('Input tensor `width` is not a power of 2: ', width)
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def discriminator(inputs,
depth=64,
is_training=True,
reuse=None,
scope='Discriminator',
fused_batch_norm=False):
"""Discriminator network for DCGAN.
Construct discriminator network from inputs to the final endpoint.
Args:
inputs: A tensor of size [batch_size, height, width, channels]. Must be
floating point.
depth: Number of channels in first convolution layer.
is_training: Whether the network is for training or not.
reuse: Whether or not the network variables should be reused. `scope`
must be given to be reused.
scope: Optional variable_scope.
fused_batch_norm: If `True`, use a faster, fused implementation of
batch norm.
Returns:
logits: The pre-softmax activations, a tensor of size [batch_size, 1]
end_points: a dictionary from components of the network to their activation.
Raises:
ValueError: If the input image shape is not 4-dimensional, if the spatial
dimensions aren't defined at graph construction time, if the spatial
dimensions aren't square, or if the spatial dimensions aren't a power of
two.
"""
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
_validate_image_inputs(inputs)
inp_shape = inputs.get_shape().as_list()[1]
end_points = {}
with tf.variable_scope(
scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d],
stride=2,
kernel_size=4,
activation_fn=tf.nn.leaky_relu):
net = inputs
for i in xrange(int(log(inp_shape, 2))):
scope = 'conv%i' % (i + 1)
current_depth = depth * 2**i
normalizer_fn_ = None if i == 0 else normalizer_fn
net = slim.conv2d(
net, current_depth, normalizer_fn=normalizer_fn_, scope=scope)
end_points[scope] = net
logits = slim.conv2d(net, 1, kernel_size=1, stride=1, padding='VALID',
normalizer_fn=None, activation_fn=None)
logits = tf.reshape(logits, [-1, 1])
end_points['logits'] = logits
return logits, end_points
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def generator(inputs,
depth=64,
final_size=32,
num_outputs=3,
is_training=True,
reuse=None,
scope='Generator',
fused_batch_norm=False):
"""Generator network for DCGAN.
Construct generator network from inputs to the final endpoint.
Args:
    inputs: A 2-D tensor of shape [batch_size, N], for any N.
    depth: Number of channels in the last deconvolution layer.
    final_size: The spatial size (height and width) of the final output.
num_outputs: Number of output features. For images, this is the number of
channels.
    is_training: whether the network is being trained or not.
    reuse: Whether or not the network and its variables should be reused. To
      be reused, `scope` must be given.
scope: Optional variable_scope.
fused_batch_norm: If `True`, use a faster, fused implementation of
batch norm.
Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, final_size, final_size, num_outputs]
end_points: a dictionary from components of the network to their activation.
Raises:
ValueError: If `inputs` is not 2-dimensional.
ValueError: If `final_size` isn't a power of 2 or is less than 8.
"""
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
inputs.get_shape().assert_has_rank(2)
if log(final_size, 2) != int(log(final_size, 2)):
raise ValueError('`final_size` (%i) must be a power of 2.' % final_size)
if final_size < 8:
    raise ValueError('`final_size` (%i) must be at least 8.' % final_size)
end_points = {}
num_layers = int(log(final_size, 2)) - 1
with tf.variable_scope(
scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d_transpose],
normalizer_fn=normalizer_fn,
stride=2,
kernel_size=4):
net = tf.expand_dims(tf.expand_dims(inputs, 1), 1)
# First upscaling is different because it takes the input vector.
current_depth = depth * 2 ** (num_layers - 1)
scope = 'deconv1'
net = slim.conv2d_transpose(
net, current_depth, stride=1, padding='VALID', scope=scope)
end_points[scope] = net
for i in xrange(2, num_layers):
scope = 'deconv%i' % (i)
current_depth = depth * 2 ** (num_layers - i)
net = slim.conv2d_transpose(net, current_depth, scope=scope)
end_points[scope] = net
# Last layer has different normalizer and activation.
scope = 'deconv%i' % (num_layers)
net = slim.conv2d_transpose(
net, depth, normalizer_fn=None, activation_fn=None, scope=scope)
end_points[scope] = net
# Convert to proper channels.
scope = 'logits'
logits = slim.conv2d(
net,
num_outputs,
normalizer_fn=None,
activation_fn=None,
kernel_size=1,
stride=1,
padding='VALID',
scope=scope)
end_points[scope] = logits
logits.get_shape().assert_has_rank(4)
logits.get_shape().assert_is_compatible_with(
[None, final_size, final_size, num_outputs])
return logits, end_points
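# Illustrative usage sketch (added; not part of the original file). The
# batch size, the 64-dimensional noise vectors and the 32x32 RGB images are
# assumptions chosen for the example.
def _example_dcgan():
  noise = tf.placeholder(tf.float32, [16, 64])
  # The generator maps noise to images of shape [16, 32, 32, 3].
  fake_images, _ = generator(noise, depth=64, final_size=32, num_outputs=3)
  # The discriminator maps each image to a single logit, shape [16, 1].
  disc_logits, _ = discriminator(fake_images, depth=64)
  return fake_images, disc_logits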
| 7,569 | 36.107843 | 80 | py |
models | models-master/research/slim/nets/inception_v1_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception
class InceptionV1Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'InceptionV1/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
mixed_6c, end_points = inception.inception_v1_base(inputs)
self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_6c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',
'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV1/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points.keys())
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v1_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {
'Conv2d_1a_7x7': [5, 112, 112, 64],
'MaxPool_2a_3x3': [5, 56, 56, 64],
'Conv2d_2b_1x1': [5, 56, 56, 64],
'Conv2d_2c_3x3': [5, 56, 56, 192],
'MaxPool_3a_3x3': [5, 28, 28, 192],
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(5607184, total_params)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
inputs = tf.random.uniform((batch_size, height, width, 3))
mixed_5c, _ = inception.inception_v1_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testBuildBaseNetworkWithoutRootBlock(self):
batch_size = 5
height, width = 28, 28
channels = 192
inputs = tf.random.uniform((batch_size, height, width, channels))
_, end_points = inception.inception_v1_base(
inputs, include_root_block=False)
endpoints_shapes = {
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
  def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 224, 224
num_classes = 1000
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_v1(train_inputs, num_classes)
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random.uniform([1, 224, 224, 3])
logits, _ = inception.inception_v1(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v1_arg_scope(batch_norm_scale=True)):
inception.inception_v1(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name
for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
tf.test.main()
| 12,015 | 39.187291 | 80 | py |
models | models-master/research/slim/nets/resnet_v2.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Typical use:
from tf_slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
outputs_collections=None, scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
normalizer_fn=None, activation_fn=None,
scope='shortcut')
residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
normalizer_fn=None, activation_fn=None,
scope='conv3')
output = shortcut + residual
return slim.utils.collect_named_outputs(outputs_collections,
sc.name,
output)
def resnet_v2(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope=None):
"""Generator for v2 (preactivation) ResNet models.
This function generates a family of ResNet v2 models. See the resnet_v2_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks.
If 0 or None, we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it. If excluded, `inputs` should be the
results of an activation-less convolution.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller than 300x300
pixels, in which case the output logit layer does not contain spatial
information and can be removed.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is 0 or None,
then net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is a non-zero integer, net contains the
pre-softmax activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(
scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
# We do not include batch normalization or activation functions in
# conv1 because the first ResNet unit will perform these. Cf.
# Appendix of [2].
with slim.arg_scope([slim.conv2d],
activation_fn=None, normalizer_fn=None):
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
# This is needed because the pre-activation variant does not have batch
# normalization or activation functions in the residual unit output. See
# Appendix of [2].
net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], name='pool5', keepdims=True)
end_points['global_pool'] = net
if num_classes:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
end_points[sc.name + '/logits'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
end_points[sc.name + '/spatial_squeeze'] = net
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v2 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v2 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
resnet_v2.default_image_size = 224
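# Illustrative sketch (added; not part of the original file): assembling a
# small custom network from resnet_v2_block() and resnet_v2(). The depths,
# unit counts, input size and class count are assumptions for the example.
def _example_custom_resnet_v2():
  blocks = [
      resnet_v2_block('block1', base_depth=16, num_units=2, stride=2),
      resnet_v2_block('block2', base_depth=32, num_units=2, stride=1),
  ]
  images = tf.placeholder(tf.float32, [1, 64, 64, 3])
  with slim.arg_scope(resnet_arg_scope()):
    # logits has shape [1, 10] after global pooling and the spatial squeeze.
    logits, _ = resnet_v2(images, blocks, num_classes=10, is_training=False)
  return logits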
def resnet_v2_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_50'):
"""ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_50.default_image_size = resnet_v2.default_image_size
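# Illustrative usage sketch (added; not part of the original file), mirroring
# the "Typical use" snippets in the module docstring. The 321x321 input (a
# multiple of 32 plus 1) and output_stride=16 are assumptions.
def _example_resnet_v2_50_dense_features():
  images = tf.placeholder(tf.float32, [1, 321, 321, 3])
  with slim.arg_scope(resnet_arg_scope()):
    # net has shape [1, 21, 21, 2048]: (321 - 1) / 16 + 1 = 21 per side.
    net, end_points = resnet_v2_50(images, num_classes=None,
                                   is_training=False, global_pool=False,
                                   output_stride=16)
  return net, end_points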
def resnet_v2_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_101'):
"""ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_101.default_image_size = resnet_v2.default_image_size
def resnet_v2_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_152'):
"""ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_152.default_image_size = resnet_v2.default_image_size
def resnet_v2_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_200'):
"""ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_200.default_image_size = resnet_v2.default_image_size
| 15,499 | 44.588235 | 80 | py |
models | models-master/research/slim/nets/inception_v4.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception V4 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception_utils
def block_inception_a(inputs, scope=None, reuse=None):
"""Builds Inception-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(
scope, 'BlockInceptionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
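# Illustrative sketch (added; not part of the original file): applying the
# Inception-A block to a feature map. The 35x35x384 input shape is an
# assumption matching the stem output size described in the paper.
def _example_block_inception_a():
  features = tf.placeholder(tf.float32, [1, 35, 35, 384])
  # Each of the four branches contributes 96 channels, so the concatenated
  # output has shape [1, 35, 35, 384].
  return block_inception_a(features, scope='ExampleBlockInceptionA')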
def block_reduction_a(inputs, scope=None, reuse=None):
"""Builds Reduction-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(
scope, 'BlockReductionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_b(inputs, scope=None, reuse=None):
"""Builds Inception-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(
scope, 'BlockInceptionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def block_reduction_b(inputs, scope=None, reuse=None):
"""Builds Reduction-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(
scope, 'BlockReductionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_c(inputs, scope=None, reuse=None):
"""Builds Inception-C block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(
scope, 'BlockInceptionC', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
"""Creates the Inception V4 network up to the given final endpoint.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
final_endpoint: specifies the endpoint to construct the network up to.
It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
'Mixed_7d']
scope: Optional variable_scope.
Returns:
    net: the output tensor corresponding to the final_endpoint.
end_points: the set of end_points from the inception model.
Raises:
    ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
def add_and_check_final(name, net):
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionV4', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 299 x 299 x 3
net = slim.conv2d(inputs, 32, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
# 149 x 149 x 32
net = slim.conv2d(net, 32, [3, 3], padding='VALID',
scope='Conv2d_2a_3x3')
if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
# 147 x 147 x 64
with tf.variable_scope('Mixed_3a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_0a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
scope='Conv2d_0a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_3a', net): return net, end_points
# 73 x 73 x 160
with tf.variable_scope('Mixed_4a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_4a', net): return net, end_points
# 71 x 71 x 192
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_5a', net): return net, end_points
# 35 x 35 x 384
# 4 x Inception-A blocks
for idx in range(4):
block_scope = 'Mixed_5' + chr(ord('b') + idx)
net = block_inception_a(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 35 x 35 x 384
# Reduction-A block
net = block_reduction_a(net, 'Mixed_6a')
if add_and_check_final('Mixed_6a', net): return net, end_points
# 17 x 17 x 1024
# 7 x Inception-B blocks
for idx in range(7):
block_scope = 'Mixed_6' + chr(ord('b') + idx)
net = block_inception_b(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 17 x 17 x 1024
# Reduction-B block
net = block_reduction_b(net, 'Mixed_7a')
if add_and_check_final('Mixed_7a', net): return net, end_points
# 8 x 8 x 1536
# 3 x Inception-C blocks
for idx in range(3):
block_scope = 'Mixed_7' + chr(ord('b') + idx)
net = block_inception_c(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
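# Minimal usage sketch for inception_v4_base: builds the V4 trunk up to an
# intermediate endpoint instead of classification logits. The placeholder
# below and the chosen endpoint are illustrative, not part of the public API.
def _example_inception_v4_base_features():
  """Returns the 35x35x384 Mixed_5a feature map and all earlier endpoints."""
  images = tf.placeholder(tf.float32, [None, 299, 299, 3])
  with slim.arg_scope(inception_utils.inception_arg_scope()):
    net, end_points = inception_v4_base(images, final_endpoint='Mixed_5a')
  return net, end_points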
def inception_v4(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionV4',
create_aux_logits=True):
"""Creates the Inception V4 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxiliary logits.
Returns:
net: a Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped input to the logits layer
if num_classes is 0 or None.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(
scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v4_base(inputs, scope=scope)
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# Auxiliary Head logits
if create_aux_logits and num_classes:
with tf.variable_scope('AuxLogits'):
# 17 x 17 x 1024
aux_logits = end_points['Mixed_6h']
aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,
padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1],
scope='Conv2d_1b_1x1')
aux_logits = slim.conv2d(aux_logits, 768,
aux_logits.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a')
aux_logits = slim.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes,
activation_fn=None,
scope='Aux_logits')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
# TODO(sguada,arnoegw): Consider adding a parameter global_pool which
# can be set to False to disable pooling here (as in resnet_*()).
with tf.variable_scope('Logits'):
# 8 x 8 x 1536
kernel_size = net.get_shape()[1:3]
if kernel_size.is_fully_defined():
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a')
else:
net = tf.reduce_mean(
input_tensor=net,
axis=[1, 2],
keepdims=True,
name='global_pool')
end_points['global_pool'] = net
if not num_classes:
return net, end_points
# 1 x 1 x 1536
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b')
net = slim.flatten(net, scope='PreLogitsFlatten')
end_points['PreLogitsFlatten'] = net
# 1536
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_v4.default_image_size = 299
inception_v4_arg_scope = inception_utils.inception_arg_scope
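# Minimal end-to-end sketch: builds InceptionV4 logits under the default arg
# scope. `images` is an assumed input batch of 299x299x3 frames, and
# num_classes=1001 follows this module's default (1000 classes + background).
def _example_inception_v4_logits(images, is_training=False):
  """Returns (logits, end_points) for InceptionV4 classification."""
  with slim.arg_scope(inception_v4_arg_scope()):
    return inception_v4(images, num_classes=1001, is_training=is_training)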
| 16,519 | 46.745665 | 80 | py |
models | models-master/research/slim/nets/inception_v3.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v3 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception_utils
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
def inception_v3_base(inputs,
final_endpoint='Mixed_7c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception model from http://arxiv.org/abs/1512.00567.
Constructs an Inception v3 network from inputs to the given final endpoint.
This method can construct the network up to the final inception block
Mixed_7c.
Note that the names of the layers in the paper do not correspond to the names
of the endpoints registered by this function although they build the same
network.
Here is a mapping from the old_names to the new names:
Old name | New name
=======================================
conv0 | Conv2d_1a_3x3
conv1 | Conv2d_2a_3x3
conv2 | Conv2d_2b_3x3
pool1 | MaxPool_3a_3x3
conv3 | Conv2d_3b_1x1
conv4 | Conv2d_4a_3x3
pool2 | MaxPool_5a_3x3
mixed_35x35x256a | Mixed_5b
mixed_35x35x288a | Mixed_5c
mixed_35x35x288b | Mixed_5d
mixed_17x17x768a | Mixed_6a
mixed_17x17x768b | Mixed_6b
mixed_17x17x768c | Mixed_6c
mixed_17x17x768d | Mixed_6d
mixed_17x17x768e | Mixed_6e
mixed_8x8x1280a | Mixed_7a
mixed_8x8x2048a | Mixed_7b
mixed_8x8x2048b | Mixed_7c
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.variable_scope(scope, 'InceptionV3', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='VALID'):
# 299 x 299 x 3
end_point = 'Conv2d_1a_3x3'
net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 149 x 149 x 32
end_point = 'Conv2d_2a_3x3'
net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 147 x 147 x 32
end_point = 'Conv2d_2b_3x3'
net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 147 x 147 x 64
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 73 x 73 x 64
end_point = 'Conv2d_3b_1x1'
net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 73 x 73 x 80.
end_point = 'Conv2d_4a_3x3'
net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 71 x 71 x 192.
end_point = 'MaxPool_5a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 35 x 35 x 192.
# Inception blocks
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_1: 35 x 35 x 288.
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv_1_0c_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1],
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_2: 35 x 35 x 288.
end_point = 'Mixed_5d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_3: 17 x 17 x 768.
end_point = 'Mixed_6a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed4: 17 x 17 x 768.
end_point = 'Mixed_6b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_5: 17 x 17 x 768.
end_point = 'Mixed_6c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_6: 17 x 17 x 768.
end_point = 'Mixed_6d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_7: 17 x 17 x 768.
end_point = 'Mixed_6e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_8: 8 x 8 x 1280.
end_point = 'Mixed_7a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_9: 8 x 8 x 2048.
end_point = 'Mixed_7b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_10: 8 x 8 x 2048.
end_point = 'Mixed_7c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
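# Minimal usage sketch for inception_v3_base: builds the trunk up to Mixed_6e
# (mixed_17x17x768e in the paper's naming above). The placeholder input is an
# assumption of this example; any [batch, 299, 299, 3] tensor works the same.
def _example_inception_v3_base_features():
  """Returns the 17x17x768 Mixed_6e feature map and all earlier endpoints."""
  images = tf.placeholder(tf.float32, [None, 299, 299, 3])
  with slim.arg_scope(inception_utils.inception_arg_scope()):
    net, end_points = inception_v3_base(images, final_endpoint='Mixed_6e')
  return net, end_points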
def inception_v3(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
create_aux_logits=True,
scope='InceptionV3',
global_pool=False):
"""Inception model from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna.
With the default arguments this method constructs the exact model defined in
the paper. However, one can experiment with variations of the inception_v3
network by changing arguments dropout_keep_prob, min_depth and
depth_multiplier.
The default image size used to train this network is 299x299.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is of
shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
create_aux_logits: Whether to create the auxiliary logits.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if 'depth_multiplier' is less than or equal to zero.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.variable_scope(
scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v3_base(
inputs, scope=scope, min_depth=min_depth,
depth_multiplier=depth_multiplier)
# Auxiliary Head logits
if create_aux_logits and num_classes:
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
aux_logits = end_points['Mixed_6e']
with tf.variable_scope('AuxLogits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],
scope='Conv2d_1b_1x1')
# Shape of feature map before the final layer.
kernel_size = _reduced_kernel_size_for_small_input(
aux_logits, [5, 5])
aux_logits = slim.conv2d(
aux_logits, depth(768), kernel_size,
weights_initializer=trunc_normal(0.01),
padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
aux_logits = slim.conv2d(
aux_logits, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, weights_initializer=trunc_normal(0.001),
scope='Conv2d_2b_1x1')
if spatial_squeeze:
aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='GlobalPool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
end_points['AvgPool_1a'] = net
if not num_classes:
return net, end_points
# 1 x 1 x 2048
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
end_points['PreLogits'] = net
# 2048
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
# 1000
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v3.default_image_size = 299
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
  return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                   tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
inception_v3_arg_scope = inception_utils.inception_arg_scope
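# Minimal end-to-end sketch: classification logits from InceptionV3 with the
# default arg scope. The argument names below are assumptions of this example;
# global_pool=True lets the network accept inputs larger than 299x299.
def _example_inception_v3_logits(images, num_classes=1000, is_training=False):
  """Returns (logits, end_points), including the auxiliary head."""
  with slim.arg_scope(inception_v3_arg_scope()):
    return inception_v3(images, num_classes=num_classes,
                        is_training=is_training, global_pool=True)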
| 28,406 | 47.642123 | 80 | py |
models | models-master/research/slim/nets/dcgan_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dcgan."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
from nets import dcgan
class DCGANTest(tf.test.TestCase):
def test_generator_run(self):
tf.set_random_seed(1234)
noise = tf.random.normal([100, 64])
image, _ = dcgan.generator(noise)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
image.eval()
def test_generator_graph(self):
tf.set_random_seed(1234)
# Check graph construction for a number of image size/depths and batch
# sizes.
for i, batch_size in zip(xrange(3, 7), xrange(3, 8)):
tf.reset_default_graph()
final_size = 2 ** i
noise = tf.random.normal([batch_size, 64])
image, end_points = dcgan.generator(
noise,
depth=32,
final_size=final_size)
self.assertAllEqual([batch_size, final_size, final_size, 3],
image.shape.as_list())
expected_names = ['deconv%i' % j for j in xrange(1, i)] + ['logits']
self.assertSetEqual(set(expected_names), set(end_points.keys()))
# Check layer depths.
for j in range(1, i):
layer = end_points['deconv%i' % j]
self.assertEqual(32 * 2**(i-j-1), layer.get_shape().as_list()[-1])
def test_generator_invalid_input(self):
wrong_dim_input = tf.zeros([5, 32, 32])
with self.assertRaises(ValueError):
dcgan.generator(wrong_dim_input)
correct_input = tf.zeros([3, 2])
with self.assertRaisesRegexp(ValueError, 'must be a power of 2'):
dcgan.generator(correct_input, final_size=30)
with self.assertRaisesRegexp(ValueError, 'must be greater than 8'):
dcgan.generator(correct_input, final_size=4)
def test_discriminator_run(self):
image = tf.random.uniform([5, 32, 32, 3], -1, 1)
output, _ = dcgan.discriminator(image)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output.eval()
def test_discriminator_graph(self):
# Check graph construction for a number of image size/depths and batch
# sizes.
for i, batch_size in zip(xrange(1, 6), xrange(3, 8)):
tf.reset_default_graph()
img_w = 2 ** i
image = tf.random.uniform([batch_size, img_w, img_w, 3], -1, 1)
output, end_points = dcgan.discriminator(
image,
depth=32)
self.assertAllEqual([batch_size, 1], output.get_shape().as_list())
expected_names = ['conv%i' % j for j in xrange(1, i+1)] + ['logits']
self.assertSetEqual(set(expected_names), set(end_points.keys()))
# Check layer depths.
for j in range(1, i+1):
layer = end_points['conv%i' % j]
self.assertEqual(32 * 2**(j-1), layer.get_shape().as_list()[-1])
def test_discriminator_invalid_input(self):
wrong_dim_img = tf.zeros([5, 32, 32])
with self.assertRaises(ValueError):
dcgan.discriminator(wrong_dim_img)
spatially_undefined_shape = tf.placeholder(
tf.float32, [5, 32, None, 3])
with self.assertRaises(ValueError):
dcgan.discriminator(spatially_undefined_shape)
not_square = tf.zeros([5, 32, 16, 3])
with self.assertRaisesRegexp(ValueError, 'not have equal width and height'):
dcgan.discriminator(not_square)
not_power_2 = tf.zeros([5, 30, 30, 3])
with self.assertRaisesRegexp(ValueError, 'not a power of 2'):
dcgan.discriminator(not_power_2)
if __name__ == '__main__':
tf.test.main()
| 4,283 | 34.114754 | 80 | py |
models | models-master/research/slim/nets/i3d_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for building I3D network models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
add_arg_scope = slim.add_arg_scope
layers = slim.layers
def center_initializer():
"""Centering Initializer for I3D.
This initializer allows identity mapping for temporal convolution at the
initialization, which is critical for a desired convergence behavior
  for training a separable I3D model.
The centering behavior of this initializer requires an odd-sized kernel,
typically set to 3.
Returns:
A weight initializer op used in temporal convolutional layers.
Raises:
    ValueError: If input tensor data type is not tf.float32 or tf.bfloat16.
ValueError: If input tensor is not a 5-D tensor.
ValueError: If input and output channel dimensions are different.
ValueError: If spatial kernel sizes are not 1.
ValueError: If temporal kernel size is even.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None): # pylint: disable=unused-argument
"""Initializer op."""
if dtype != tf.float32 and dtype != tf.bfloat16:
raise ValueError(
'Input tensor data type has to be tf.float32 or tf.bfloat16.')
if len(shape) != 5:
raise ValueError('Input tensor has to be 5-D.')
if shape[3] != shape[4]:
raise ValueError('Input and output channel dimensions must be the same.')
if shape[1] != 1 or shape[2] != 1:
raise ValueError('Spatial kernel sizes must be 1 (pointwise conv).')
if shape[0] % 2 == 0:
raise ValueError('Temporal kernel size has to be odd.')
center_pos = int(shape[0] / 2)
init_mat = np.zeros(
[shape[0], shape[1], shape[2], shape[3], shape[4]], dtype=np.float32)
for i in range(0, shape[3]):
init_mat[center_pos, 0, 0, i, i] = 1.0
init_op = tf.constant(init_mat, dtype=dtype)
return init_op
return _initializer
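# Minimal sketch of a temporal convolution that satisfies center_initializer's
# constraints: odd temporal kernel, 1x1 spatial kernel, and matching input and
# output channels. `features` is an assumed [batch, time, h, w, 64] tensor.
def _example_centered_temporal_conv(features):
  """Applies a 3x1x1 temporal conv initialized to an identity mapping."""
  return layers.conv3d(
      features,
      num_outputs=64,
      kernel_size=[3, 1, 1],
      weights_initializer=center_initializer(),
      activation_fn=None,
      normalizer_fn=None,
      scope='Conv3d_temporal_identity')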
@add_arg_scope
def conv3d_spatiotemporal(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=None,
normalizer_fn=None,
normalizer_params=None,
weights_regularizer=None,
separable=False,
data_format='NDHWC',
scope=''):
"""A wrapper for conv3d to model spatiotemporal representations.
This allows switching between original 3D convolution and separable 3D
convolutions for spatial and temporal features respectively. On Kinetics,
  separable 3D convolutions yield better classification performance.
Args:
inputs: a 5-D tensor `[batch_size, depth, height, width, channels]`.
num_outputs: integer, the number of output filters.
    kernel_size: a list of length 3
      `[kernel_depth, kernel_height, kernel_width]` of the filters. A scalar
      int is not supported here; the length-3 form is required.
stride: a list of length 3 `[stride_depth, stride_height, stride_width]`.
Can be an int if all strides are the same.
padding: one of `VALID` or `SAME`.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`.
normalizer_params: dictionary of normalization function parameters.
weights_regularizer: Optional regularizer for the weights.
separable: If `True`, use separable spatiotemporal convolutions.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: scope for `variable_scope`.
Returns:
A tensor representing the output of the (separable) conv3d operation.
"""
assert len(kernel_size) == 3
if separable and kernel_size[0] != 1:
spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
temporal_kernel_size = [kernel_size[0], 1, 1]
if isinstance(stride, list) and len(stride) == 3:
spatial_stride = [1, stride[1], stride[2]]
temporal_stride = [stride[0], 1, 1]
else:
spatial_stride = [1, stride, stride]
temporal_stride = [stride, 1, 1]
net = layers.conv3d(
inputs,
num_outputs,
spatial_kernel_size,
stride=spatial_stride,
padding=padding,
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_regularizer=weights_regularizer,
data_format=data_format,
scope=scope)
net = layers.conv3d(
net,
num_outputs,
temporal_kernel_size,
stride=temporal_stride,
padding=padding,
scope=scope + '/temporal',
activation_fn=activation_fn,
normalizer_fn=None,
data_format=data_format,
weights_initializer=center_initializer())
return net
else:
return layers.conv3d(
inputs,
num_outputs,
kernel_size,
stride=stride,
padding=padding,
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_regularizer=weights_regularizer,
data_format=data_format,
scope=scope)
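# Minimal sketch of the separable path: a 3x3x3 spatiotemporal convolution
# factored into a 1x3x3 spatial conv followed by a 3x1x1 temporal conv. The
# input name `video_features` is an assumption of this example.
def _example_separable_conv3d(video_features):
  """Returns a separable spatiotemporal convolution of its 5-D input."""
  return conv3d_spatiotemporal(
      video_features,
      num_outputs=96,
      kernel_size=[3, 3, 3],
      separable=True,
      scope='Conv3d_separable_example')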
@add_arg_scope
def inception_block_v1_3d(inputs,
num_outputs_0_0a,
num_outputs_1_0a,
num_outputs_1_0b,
num_outputs_2_0a,
num_outputs_2_0b,
num_outputs_3_0b,
temporal_kernel_size=3,
self_gating_fn=None,
data_format='NDHWC',
scope=''):
"""A 3D Inception v1 block.
This allows use of separable 3D convolutions and self-gating, as
described in:
Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy,
Rethinking Spatiotemporal Feature Learning For Video Understanding.
https://arxiv.org/abs/1712.04851.
Args:
inputs: a 5-D tensor `[batch_size, depth, height, width, channels]`.
num_outputs_0_0a: integer, the number of output filters for Branch 0,
operation Conv2d_0a_1x1.
num_outputs_1_0a: integer, the number of output filters for Branch 1,
operation Conv2d_0a_1x1.
num_outputs_1_0b: integer, the number of output filters for Branch 1,
operation Conv2d_0b_3x3.
num_outputs_2_0a: integer, the number of output filters for Branch 2,
operation Conv2d_0a_1x1.
num_outputs_2_0b: integer, the number of output filters for Branch 2,
operation Conv2d_0b_3x3.
num_outputs_3_0b: integer, the number of output filters for Branch 3,
operation Conv2d_0b_1x1.
temporal_kernel_size: integer, the size of the temporal convolutional
filters in the conv3d_spatiotemporal blocks.
self_gating_fn: function which optionally performs self-gating.
Must have two arguments, `inputs` and `scope`, and return one output
tensor the same size as `inputs`. If `None`, no self-gating is
applied.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: scope for `variable_scope`.
Returns:
A 5-D tensor `[batch_size, depth, height, width, out_channels]`, where
`out_channels = num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b
+ num_outputs_3_0b`.
"""
use_gating = self_gating_fn is not None
with tf.variable_scope(scope):
with tf.variable_scope('Branch_0'):
branch_0 = layers.conv3d(
inputs, num_outputs_0_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
if use_gating:
branch_0 = self_gating_fn(branch_0, scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = layers.conv3d(
inputs, num_outputs_1_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_1 = conv3d_spatiotemporal(
branch_1, num_outputs_1_0b, [temporal_kernel_size, 3, 3],
scope='Conv2d_0b_3x3')
if use_gating:
branch_1 = self_gating_fn(branch_1, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = layers.conv3d(
inputs, num_outputs_2_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_2 = conv3d_spatiotemporal(
branch_2, num_outputs_2_0b, [temporal_kernel_size, 3, 3],
scope='Conv2d_0b_3x3')
if use_gating:
branch_2 = self_gating_fn(branch_2, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = layers.max_pool3d(inputs, [3, 3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv3d(
branch_3, num_outputs_3_0b, [1, 1, 1], scope='Conv2d_0b_1x1')
if use_gating:
branch_3 = self_gating_fn(branch_3, scope='Conv2d_0b_1x1')
index_c = data_format.index('C')
assert 1 <= index_c <= 4, 'Cannot identify channel dimension.'
output = tf.concat([branch_0, branch_1, branch_2, branch_3], index_c)
return output
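# Minimal sketch of a single block: the filter counts below follow the first
# Inception block of the original I3D trunk and are illustrative only; with
# self_gating_fn=None the feature-gating step is skipped entirely.
def _example_inception_3d_block(net):
  """Applies one inception_block_v1_3d to a 5-D activation tensor."""
  return inception_block_v1_3d(
      net,
      num_outputs_0_0a=64,
      num_outputs_1_0a=96,
      num_outputs_1_0b=128,
      num_outputs_2_0a=16,
      num_outputs_2_0b=32,
      num_outputs_3_0b=32,
      temporal_kernel_size=3,
      self_gating_fn=None,
      scope='Mixed_3b_example')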
def reduced_kernel_size_3d(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size
[batch_size, time, height, width, channels].
kernel_size: desired kernel size of length 3, corresponding to time,
height and width.
Returns:
a tensor with the kernel size.
"""
assert len(kernel_size) == 3
shape = input_tensor.get_shape().as_list()
assert len(shape) == 5
if None in shape[1:4]:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1]),
min(shape[3], kernel_size[2])]
return kernel_size_out
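# Minimal sketch of pooling over short clips: the requested kernel is clipped
# to the actual (time, height, width) extent of `net` whenever that extent is
# known and smaller. `net` is an assumed end-of-network activation.
def _example_global_pool_3d(net):
  """Average-pools `net` with a kernel no larger than its actual shape."""
  kernel_size = reduced_kernel_size_3d(net, [2, 7, 7])
  return layers.avg_pool3d(net, kernel_size, stride=1, scope='AvgPool_0a')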
| 11,210 | 38.199301 | 100 | py |
models | models-master/research/slim/nets/cyclegan_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.slim.nets.cyclegan."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from nets import cyclegan
# TODO(joelshor): Add a test to check generator endpoints.
class CycleganTest(tf.test.TestCase):
def test_generator_inference(self):
"""Check one inference step."""
img_batch = tf.zeros([2, 32, 32, 3])
model_output, _ = cyclegan.cyclegan_generator_resnet(img_batch)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(model_output)
def _test_generator_graph_helper(self, shape):
"""Check that generator can take small and non-square inputs."""
output_imgs, _ = cyclegan.cyclegan_generator_resnet(tf.ones(shape))
self.assertAllEqual(shape, output_imgs.shape.as_list())
def test_generator_graph_small(self):
self._test_generator_graph_helper([4, 32, 32, 3])
def test_generator_graph_medium(self):
self._test_generator_graph_helper([3, 128, 128, 3])
def test_generator_graph_nonsquare(self):
self._test_generator_graph_helper([2, 80, 400, 3])
def test_generator_unknown_batch_dim(self):
"""Check that generator can take unknown batch dimension inputs."""
img = tf.placeholder(tf.float32, shape=[None, 32, None, 3])
output_imgs, _ = cyclegan.cyclegan_generator_resnet(img)
self.assertAllEqual([None, 32, None, 3], output_imgs.shape.as_list())
def _input_and_output_same_shape_helper(self, kernel_size):
img_batch = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
output_img_batch, _ = cyclegan.cyclegan_generator_resnet(
img_batch, kernel_size=kernel_size)
self.assertAllEqual(img_batch.shape.as_list(),
output_img_batch.shape.as_list())
  def test_input_and_output_same_shape_kernel3(self):
    self._input_and_output_same_shape_helper(3)
  def test_input_and_output_same_shape_kernel4(self):
    self._input_and_output_same_shape_helper(4)
  def test_input_and_output_same_shape_kernel5(self):
    self._input_and_output_same_shape_helper(5)
  def test_input_and_output_same_shape_kernel6(self):
    self._input_and_output_same_shape_helper(6)
def _error_if_height_not_multiple_of_four_helper(self, height):
self.assertRaisesRegexp(
ValueError, 'The input height must be a multiple of 4.',
cyclegan.cyclegan_generator_resnet,
tf.placeholder(tf.float32, shape=[None, height, 32, 3]))
def test_error_if_height_not_multiple_of_four_height29(self):
self._error_if_height_not_multiple_of_four_helper(29)
def test_error_if_height_not_multiple_of_four_height30(self):
self._error_if_height_not_multiple_of_four_helper(30)
def test_error_if_height_not_multiple_of_four_height31(self):
self._error_if_height_not_multiple_of_four_helper(31)
def _error_if_width_not_multiple_of_four_helper(self, width):
self.assertRaisesRegexp(
ValueError, 'The input width must be a multiple of 4.',
cyclegan.cyclegan_generator_resnet,
tf.placeholder(tf.float32, shape=[None, 32, width, 3]))
def test_error_if_width_not_multiple_of_four_width29(self):
self._error_if_width_not_multiple_of_four_helper(29)
def test_error_if_width_not_multiple_of_four_width30(self):
self._error_if_width_not_multiple_of_four_helper(30)
def test_error_if_width_not_multiple_of_four_width31(self):
self._error_if_width_not_multiple_of_four_helper(31)
if __name__ == '__main__':
tf.test.main()
| 4,229 | 37.108108 | 80 | py |
models | models-master/research/slim/nets/nets_factory_test.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from nets import nets_factory
class NetworksTest(tf.test.TestCase):
def testGetNetworkFnFirstHalf(self):
batch_size = 5
num_classes = 1000
for net in list(nets_factory.networks_map.keys())[:10]:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes=num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224)
if net not in ['i3d', 's3dg']:
inputs = tf.random.uniform((batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
def testGetNetworkFnSecondHalf(self):
batch_size = 5
num_classes = 1000
for net in list(nets_factory.networks_map.keys())[10:]:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes=num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224)
if net not in ['i3d', 's3dg']:
inputs = tf.random.uniform((batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
def testGetNetworkFnVideoModels(self):
batch_size = 5
num_classes = 400
for net in ['i3d', 's3dg']:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes=num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224) // 2
inputs = tf.random.uniform((batch_size, 10, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
if __name__ == '__main__':
tf.test.main()
| 3,378 | 41.772152 | 80 | py |
models | models-master/research/slim/nets/inception_utils.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common code shared by all inception models.
Usage of arg scope:
with slim.arg_scope(inception_arg_scope()):
logits, end_points = inception.inception_v3(images, num_classes,
is_training=is_training)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
def inception_arg_scope(
weight_decay=0.00004,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
batch_norm_scale=False):
"""Defines the default arg scope for inception models.
Args:
weight_decay: The weight decay to use for regularizing the model.
use_batch_norm: "If `True`, batch_norm is applied after each convolution.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
activation_fn: Activation function for conv2d.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
An `arg_scope` to use for the inception models.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
# collection containing update_ops.
'updates_collections': batch_norm_updates_collections,
# use fused batch norm if possible.
'fused': None,
'scale': batch_norm_scale,
}
if use_batch_norm:
normalizer_fn = slim.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params) as sc:
return sc
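# Minimal usage sketch: the override values below are illustrative, not tuned
# recommendations. The returned scope is intended to wrap construction of any
# of the inception_* networks defined alongside this module.
def _example_custom_inception_scope():
  """Returns an arg_scope with a larger weight decay and looser BN epsilon."""
  return inception_arg_scope(weight_decay=0.0001, batch_norm_epsilon=0.01)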
| 3,079 | 36.108434 | 80 | py |
models | models-master/research/slim/nets/inception_v4_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception_v4."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception
class InceptionTest(tf.test.TestCase):
def testBuildLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertTrue(auxlogits.op.name.startswith('InceptionV4/AuxLogits'))
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(predictions.op.name.startswith(
'InceptionV4/Logits/Predictions'))
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV4/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1536])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildWithoutAuxLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_v4(inputs, num_classes,
create_aux_logits=False)
self.assertFalse('AuxLogits' in endpoints)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testAllEndPointsShapes(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v4(inputs, num_classes)
endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'Mixed_3a': [batch_size, 73, 73, 160],
'Mixed_4a': [batch_size, 71, 71, 192],
'Mixed_5a': [batch_size, 35, 35, 384],
# 4 x Inception-A blocks
'Mixed_5b': [batch_size, 35, 35, 384],
'Mixed_5c': [batch_size, 35, 35, 384],
'Mixed_5d': [batch_size, 35, 35, 384],
'Mixed_5e': [batch_size, 35, 35, 384],
# Reduction-A block
'Mixed_6a': [batch_size, 17, 17, 1024],
# 7 x Inception-B blocks
'Mixed_6b': [batch_size, 17, 17, 1024],
'Mixed_6c': [batch_size, 17, 17, 1024],
'Mixed_6d': [batch_size, 17, 17, 1024],
'Mixed_6e': [batch_size, 17, 17, 1024],
'Mixed_6f': [batch_size, 17, 17, 1024],
'Mixed_6g': [batch_size, 17, 17, 1024],
'Mixed_6h': [batch_size, 17, 17, 1024],
                        # Reduction-B block
'Mixed_7a': [batch_size, 8, 8, 1536],
# 3 x Inception-C blocks
'Mixed_7b': [batch_size, 8, 8, 1536],
'Mixed_7c': [batch_size, 8, 8, 1536],
'Mixed_7d': [batch_size, 8, 8, 1536],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'global_pool': [batch_size, 1, 1, 1536],
'PreLogitsFlatten': [batch_size, 1536],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v4_base(inputs)
self.assertTrue(net.op.name.startswith(
'InceptionV4/Mixed_7d'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 8, 8, 1536])
expected_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
for name, op in end_points.items():
self.assertTrue(op.name.startswith('InceptionV4/' + name))
def testBuildOnlyUpToFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
all_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
for index, endpoint in enumerate(all_endpoints):
with tf.Graph().as_default():
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v4_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV4/' + endpoint))
self.assertItemsEqual(all_endpoints[:index + 1], end_points.keys())
def testVariablesSetDevice(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
inception.inception_v4(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
inception.inception_v4(inputs, num_classes)
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 1536])
def testGlobalPool(self):
batch_size = 1
height, width = 350, 400
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 9, 11, 1536])
def testGlobalPoolUnknownImageShape(self):
batch_size = 1
height, width = 350, 400
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (batch_size, None, None, 3))
logits, end_points = inception.inception_v4(
inputs, num_classes, create_aux_logits=False)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
logits_out, pre_pool_out = sess.run([logits, pre_pool],
{inputs: images.eval()})
self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
self.assertTupleEqual(pre_pool_out.shape, (batch_size, 9, 11, 1536))
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_v4(train_inputs, num_classes)
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False,
reuse=True)
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testNoBatchNormScaleByDefault(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v4_arg_scope()):
inception.inception_v4(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v4_arg_scope(batch_norm_scale=True)):
inception.inception_v4(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name
for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
tf.test.main()
| 13,029 | 44.243056 | 80 | py |
models | models-master/research/slim/nets/inception_v3_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception
class InceptionV3Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'InceptionV3/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV3/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 2048])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random.uniform((batch_size, height, width, 3))
final_endpoint, end_points = inception.inception_v3_base(inputs)
self.assertTrue(final_endpoint.op.name.startswith(
'InceptionV3/Mixed_7c'))
self.assertListEqual(final_endpoint.get_shape().as_list(),
[batch_size, 8, 8, 2048])
expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v3_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV3/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points.keys())
def testBuildAndCheckAllEndPointsUptoMixed7c(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3_base(
inputs, final_endpoint='Mixed_7c')
endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'MaxPool_3a_3x3': [batch_size, 73, 73, 64],
'Conv2d_3b_1x1': [batch_size, 73, 73, 80],
'Conv2d_4a_3x3': [batch_size, 71, 71, 192],
'MaxPool_5a_3x3': [batch_size, 35, 35, 192],
'Mixed_5b': [batch_size, 35, 35, 256],
'Mixed_5c': [batch_size, 35, 35, 288],
'Mixed_5d': [batch_size, 35, 35, 288],
'Mixed_6a': [batch_size, 17, 17, 768],
'Mixed_6b': [batch_size, 17, 17, 768],
'Mixed_6c': [batch_size, 17, 17, 768],
'Mixed_6d': [batch_size, 17, 17, 768],
'Mixed_6e': [batch_size, 17, 17, 768],
'Mixed_7a': [batch_size, 8, 8, 1280],
'Mixed_7b': [batch_size, 8, 8, 2048],
'Mixed_7c': [batch_size, 8, 8, 2048]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v3_arg_scope()):
inception.inception_v3_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(21802784, total_params)
def testBuildEndPoints(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue('Logits' in end_points)
logits = end_points['Logits']
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('AuxLogits' in end_points)
aux_logits = end_points['AuxLogits']
self.assertListEqual(aux_logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Mixed_7c' in end_points)
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 8, 2048])
self.assertTrue('PreLogits' in end_points)
pre_logits = end_points['PreLogits']
self.assertListEqual(pre_logits.get_shape().as_list(),
[batch_size, 1, 1, 2048])
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v3(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v3(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception.inception_v3(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception.inception_v3(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 2048])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 299, 299
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 1
height, width = 330, 400
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v3(inputs, num_classes,
global_pool=True)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 11, 2048])
  def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_v3(train_inputs, num_classes)
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False, reuse=True)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random.uniform([1, 299, 299, 3])
logits, _ = inception.inception_v3(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v3_arg_scope()):
inception.inception_v3(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v3_arg_scope(batch_norm_scale=True)):
inception.inception_v3(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name
for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
tf.test.main()
| 14,544 | 40.676218 | 80 | py |
models | models-master/research/slim/nets/lenet.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a variant of the LeNet model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
def lenet(images, num_classes=10, is_training=False,
dropout_keep_prob=0.5,
prediction_fn=slim.softmax,
scope='LeNet'):
"""Creates a variant of the LeNet model.
Note that since the output is a set of 'logits', the values fall in the
interval of (-infinity, infinity). Consequently, to convert the outputs to a
probability distribution over the characters, one will need to convert them
using the softmax function:
logits = lenet.lenet(images, is_training=False)
probabilities = tf.nn.softmax(logits)
predictions = tf.argmax(logits, 1)
Args:
images: A batch of `Tensors` of size [batch_size, height, width, channels].
num_classes: the number of classes in the dataset. If 0 or None, the logits
layer is omitted and the input features to the logits layer are returned
instead.
is_training: specifies whether or not we're currently training the model.
This variable will determine the behaviour of the dropout layer.
dropout_keep_prob: the percentage of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
scope: Optional variable_scope.
Returns:
net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
"""
end_points = {}
with tf.variable_scope(scope, 'LeNet', [images]):
net = end_points['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1')
net = end_points['pool1'] = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
net = end_points['conv2'] = slim.conv2d(net, 64, [5, 5], scope='conv2')
net = end_points['pool2'] = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
net = slim.flatten(net)
end_points['Flatten'] = net
net = end_points['fc3'] = slim.fully_connected(net, 1024, scope='fc3')
if not num_classes:
return net, end_points
net = end_points['dropout3'] = slim.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout3')
logits = end_points['Logits'] = slim.fully_connected(
net, num_classes, activation_fn=None, scope='fc4')
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
lenet.default_image_size = 28
def lenet_arg_scope(weight_decay=0.0):
"""Defines the default lenet argument scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
Returns:
    An `arg_scope` to use for the lenet model.
"""
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
activation_fn=tf.nn.relu) as sc:
return sc
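# Illustrative usage sketch (not part of the original file). It mirrors the
# docstring above: build the network under the default argument scope, then
# read softmax probabilities from the returned end points. The batch size,
# input resolution and weight decay below are assumptions made for this
# example.
def example_lenet_usage(is_training=False):
  """Builds LeNet on a random MNIST-sized batch (example only)."""
  images = tf.random.uniform((8, 28, 28, 1))
  with slim.arg_scope(lenet_arg_scope(weight_decay=4e-5)):
    logits, end_points = lenet(images, num_classes=10,
                               is_training=is_training)
  probabilities = end_points['Predictions']
  predictions = tf.argmax(logits, 1)
  return logits, probabilities, predictions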
| 3,852 | 38.721649 | 80 | py |
models | models-master/research/slim/nets/__init__.py | 1 | 0 | 0 | py |
|
models | models-master/research/slim/nets/inception_resnet_v2.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception Resnet V2 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
scaled_up = up * scale
if activation_fn == tf.nn.relu6:
# Use clip_by_value to simulate bandpass activation.
scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
net += scaled_up
if activation_fn:
net = activation_fn(net)
return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
scope='Conv2d_0b_1x7')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
scope='Conv2d_0c_7x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
scaled_up = up * scale
if activation_fn == tf.nn.relu6:
# Use clip_by_value to simulate bandpass activation.
scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
net += scaled_up
if activation_fn:
net = activation_fn(net)
return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
scope='Conv2d_0b_1x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
scope='Conv2d_0c_3x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
scaled_up = up * scale
if activation_fn == tf.nn.relu6:
# Use clip_by_value to simulate bandpass activation.
scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
net += scaled_up
if activation_fn:
net = activation_fn(net)
return net
def inception_resnet_v2_base(inputs,
final_endpoint='Conv2d_7b_1x1',
output_stride=16,
align_feature_maps=False,
scope=None,
activation_fn=tf.nn.relu):
"""Inception model from http://arxiv.org/abs/1602.07261.
Constructs an Inception Resnet v2 network from inputs to the given final
endpoint. This method can construct the network up to the final inception
block Conv2d_7b_1x1.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
'Mixed_5b', 'Mixed_6a', 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
output_stride: A scalar that specifies the requested ratio of input to
output spatial resolution. Only supports 8 and 16.
align_feature_maps: When true, changes all the VALID paddings in the network
to SAME padding so that the feature maps are aligned.
scope: Optional variable_scope.
activation_fn: Activation function for block scopes.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or if the output_stride is not 8 or 16, or if the output_stride is 8 and
we request an end point after 'PreAuxLogits'.
"""
if output_stride != 8 and output_stride != 16:
raise ValueError('output_stride must be 8 or 16.')
padding = 'SAME' if align_feature_maps else 'VALID'
end_points = {}
def add_and_check_final(name, net):
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding,
scope='Conv2d_1a_3x3')
if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding=padding,
scope='Conv2d_2a_3x3')
if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding=padding,
scope='MaxPool_3a_3x3')
if add_and_check_final('MaxPool_3a_3x3', net): return net, end_points
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding=padding,
scope='Conv2d_3b_1x1')
if add_and_check_final('Conv2d_3b_1x1', net): return net, end_points
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding=padding,
scope='Conv2d_4a_3x3')
if add_and_check_final('Conv2d_4a_3x3', net): return net, end_points
# 35 x 35 x 192
net = slim.max_pool2d(net, 3, stride=2, padding=padding,
scope='MaxPool_5a_3x3')
if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points
# 35 x 35 x 320
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
scope='AvgPool_0a_3x3')
tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
scope='Conv2d_0b_1x1')
net = tf.concat(
[tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3)
if add_and_check_final('Mixed_5b', net): return net, end_points
# TODO(alemi): Register intermediate endpoints
net = slim.repeat(net, 10, block35, scale=0.17,
activation_fn=activation_fn)
# 17 x 17 x 1088 if output_stride == 8,
# 33 x 33 x 1088 if output_stride == 16
use_atrous = output_stride == 8
with tf.variable_scope('Mixed_6a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
scope='Conv2d_0b_3x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
if add_and_check_final('Mixed_6a', net): return net, end_points
# TODO(alemi): register intermediate endpoints
with slim.arg_scope([slim.conv2d], rate=2 if use_atrous else 1):
net = slim.repeat(net, 20, block17, scale=0.10,
activation_fn=activation_fn)
if add_and_check_final('PreAuxLogits', net): return net, end_points
if output_stride == 8:
# TODO(gpapan): Properly support output_stride for the rest of the net.
raise ValueError('output_stride==8 is only supported up to the '
                         'PreAuxLogits end_point for now.')
# 8 x 8 x 2080
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2,
padding=padding,
scope='MaxPool_1a_3x3')
net = tf.concat(
[tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
if add_and_check_final('Mixed_7a', net): return net, end_points
# TODO(alemi): register intermediate endpoints
net = slim.repeat(net, 9, block8, scale=0.20, activation_fn=activation_fn)
net = block8(net, activation_fn=None)
# 8 x 8 x 1536
net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
if add_and_check_final('Conv2d_7b_1x1', net): return net, end_points
  raise ValueError('final_endpoint (%s) not recognized' % final_endpoint)
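# Illustrative usage sketch (not part of the original file). It uses the base
# network as a dense feature extractor: stop at the 'PreAuxLogits' endpoint,
# which is the only endpoint reachable with output_stride=8 (see the docstring
# above), and align the feature maps for dense prediction. In practice this is
# combined with `inception_resnet_v2_arg_scope()` (defined further below); the
# is_training handling here is an assumption made for this example.
def example_inception_resnet_v2_base_usage(images, is_training=False):
  """Extracts stride-8 'PreAuxLogits' features (example only)."""
  with slim.arg_scope([slim.batch_norm, slim.dropout],
                      is_training=is_training):
    net, end_points = inception_resnet_v2_base(
        images,
        final_endpoint='PreAuxLogits',
        output_stride=8,
        align_feature_maps=True)
  return net, end_points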
def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionResnetV2',
create_aux_logits=True,
activation_fn=tf.nn.relu):
"""Creates the Inception Resnet V2 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
Dimension batch_size may be undefined. If create_aux_logits is false,
also height and width may be undefined.
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
    create_aux_logits: Whether to include the auxiliary logits.
activation_fn: Activation function for conv2d.
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the non-dropped-out input to the logits layer (if num_classes is 0 or
None).
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(
scope, 'InceptionResnetV2', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_resnet_v2_base(inputs, scope=scope,
activation_fn=activation_fn)
if create_aux_logits and num_classes:
with tf.variable_scope('AuxLogits'):
aux = end_points['PreAuxLogits']
aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID',
scope='Conv2d_1a_3x3')
aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')
aux = slim.conv2d(aux, 768, aux.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a_5x5')
aux = slim.flatten(aux)
aux = slim.fully_connected(aux, num_classes, activation_fn=None,
scope='Logits')
end_points['AuxLogits'] = aux
with tf.variable_scope('Logits'):
# TODO(sguada,arnoegw): Consider adding a parameter global_pool which
# can be set to False to disable pooling here (as in resnet_*()).
kernel_size = net.get_shape()[1:3]
if kernel_size.is_fully_defined():
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a_8x8')
else:
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='Dropout')
end_points['PreLogitsFlatten'] = net
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_resnet_v2.default_image_size = 299
def inception_resnet_v2_arg_scope(
weight_decay=0.00004,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
batch_norm_scale=False):
"""Returns the scope with the default parameters for inception_resnet_v2.
Args:
weight_decay: the weight decay for weights variables.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float added to variance to avoid dividing by zero.
activation_fn: Activation function for conv2d.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
a arg_scope with the parameters needed for inception_resnet_v2.
"""
# Set weight_decay for weights in conv2d and fully_connected layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_regularizer=slim.l2_regularizer(weight_decay)):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'updates_collections': batch_norm_updates_collections,
'fused': None, # Use fused batch norm if possible.
'scale': batch_norm_scale,
}
# Set activation_fn and parameters for batch_norm.
with slim.arg_scope([slim.conv2d], activation_fn=activation_fn,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as scope:
return scope
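# Illustrative usage sketch (not part of the original file). It wires the
# pieces above together for classification: the arg scope supplies the shared
# regularizer and batch-norm settings, and the full model returns logits plus
# the end-point dictionary. The placeholder shape, batch-norm decay and number
# of classes are assumptions made for this example.
def example_inception_resnet_v2_usage(is_training=False):
  """Builds the full classifier on a 299x299 placeholder (example only)."""
  images = tf.placeholder(tf.float32, (None, 299, 299, 3))
  with slim.arg_scope(inception_resnet_v2_arg_scope(batch_norm_decay=0.997)):
    logits, end_points = inception_resnet_v2(
        images, num_classes=1001, is_training=is_training)
  return logits, end_points['Predictions']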
| 18,455 | 44.346437 | 80 | py |
models | models-master/research/slim/nets/overfeat_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.overfeat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import overfeat
class OverFeatTest(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes)
self.assertEquals(logits.op.name, 'overfeat/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 281, 281
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'overfeat/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testGlobalPool(self):
batch_size = 1
height, width = 281, 281
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
self.assertEquals(logits.op.name, 'overfeat/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 1, 1, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = overfeat.overfeat(inputs, num_classes)
expected_names = ['overfeat/conv1',
'overfeat/pool1',
'overfeat/conv2',
'overfeat/pool2',
'overfeat/conv3',
'overfeat/conv4',
'overfeat/conv5',
'overfeat/pool5',
'overfeat/fc6',
'overfeat/fc7',
'overfeat/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testNoClasses(self):
batch_size = 5
height, width = 231, 231
num_classes = None
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = overfeat.overfeat(inputs, num_classes)
expected_names = ['overfeat/conv1',
'overfeat/pool1',
'overfeat/conv2',
'overfeat/pool2',
'overfeat/conv3',
'overfeat/conv4',
'overfeat/conv5',
'overfeat/pool5',
'overfeat/fc6',
'overfeat/fc7'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
self.assertTrue(net.op.name.startswith('overfeat/fc7'))
def testModelVariables(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
overfeat.overfeat(inputs, num_classes)
expected_names = ['overfeat/conv1/weights',
'overfeat/conv1/biases',
'overfeat/conv2/weights',
'overfeat/conv2/biases',
'overfeat/conv3/weights',
'overfeat/conv3/biases',
'overfeat/conv4/weights',
'overfeat/conv4/biases',
'overfeat/conv5/weights',
'overfeat/conv5/biases',
'overfeat/fc6/weights',
'overfeat/fc6/biases',
'overfeat/fc7/weights',
'overfeat/fc7/biases',
'overfeat/fc8/weights',
'overfeat/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 231, 231
num_classes = 1000
with self.test_session():
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(input=logits, axis=1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 231, 231
eval_height, eval_width = 281, 281
num_classes = 1000
with self.test_session():
train_inputs = tf.random.uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = overfeat.overfeat(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
predictions = tf.argmax(input=logits, axis=1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 231, 231
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
| 7,142 | 39.129213 | 80 | py |
models | models-master/research/slim/nets/alexnet_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import alexnet
class AlexnetV2Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 4, 7, num_classes])
def testGlobalPool(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 1, 1, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7',
'alexnet_v2/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testNoClasses(self):
batch_size = 5
height, width = 224, 224
num_classes = None
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
self.assertTrue(net.op.name.startswith('alexnet_v2/fc7'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 1, 1, 4096])
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
'alexnet_v2/conv2/weights',
'alexnet_v2/conv2/biases',
'alexnet_v2/conv3/weights',
'alexnet_v2/conv3/biases',
'alexnet_v2/conv4/weights',
'alexnet_v2/conv4/biases',
'alexnet_v2/conv5/weights',
'alexnet_v2/conv5/biases',
'alexnet_v2/fc6/weights',
'alexnet_v2/fc6/biases',
'alexnet_v2/fc7/weights',
'alexnet_v2/fc7/biases',
'alexnet_v2/fc8/weights',
'alexnet_v2/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(input=logits, axis=1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random.uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
predictions = tf.argmax(input=logits, axis=1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
| 7,342 | 39.794444 | 80 | py |
models | models-master/research/slim/nets/mobilenet_v1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for MobileNet v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import mobilenet_v1
class MobilenetV1Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'MobilenetV1/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'MobilenetV1/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points.keys())
def testBuildCustomNetworkUsingConvDefs(self):
batch_size = 5
height, width = 224, 224
conv_defs = [
mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)
]
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 56, 56, 512])
expected_endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise',
use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 14, 14, 256],
'Conv2d_6_pointwise': [batch_size, 14, 14, 512],
'Conv2d_7_depthwise': [batch_size, 14, 14, 512],
'Conv2d_7_pointwise': [batch_size, 14, 14, 512],
'Conv2d_8_depthwise': [batch_size, 14, 14, 512],
'Conv2d_8_pointwise': [batch_size, 14, 14, 512],
'Conv2d_9_depthwise': [batch_size, 14, 14, 512],
'Conv2d_9_pointwise': [batch_size, 14, 14, 512],
'Conv2d_10_depthwise': [batch_size, 14, 14, 512],
'Conv2d_10_pointwise': [batch_size, 14, 14, 512],
'Conv2d_11_depthwise': [batch_size, 14, 14, 512],
'Conv2d_11_pointwise': [batch_size, 14, 14, 512],
'Conv2d_12_depthwise': [batch_size, 7, 7, 512],
'Conv2d_12_pointwise': [batch_size, 7, 7, 1024],
'Conv2d_13_depthwise': [batch_size, 7, 7, 1024],
'Conv2d_13_pointwise': [batch_size, 7, 7, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testOutputStride16BuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
output_stride = 16
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 14, 14, 256],
'Conv2d_6_pointwise': [batch_size, 14, 14, 512],
'Conv2d_7_depthwise': [batch_size, 14, 14, 512],
'Conv2d_7_pointwise': [batch_size, 14, 14, 512],
'Conv2d_8_depthwise': [batch_size, 14, 14, 512],
'Conv2d_8_pointwise': [batch_size, 14, 14, 512],
'Conv2d_9_depthwise': [batch_size, 14, 14, 512],
'Conv2d_9_pointwise': [batch_size, 14, 14, 512],
'Conv2d_10_depthwise': [batch_size, 14, 14, 512],
'Conv2d_10_pointwise': [batch_size, 14, 14, 512],
'Conv2d_11_depthwise': [batch_size, 14, 14, 512],
'Conv2d_11_pointwise': [batch_size, 14, 14, 512],
'Conv2d_12_depthwise': [batch_size, 14, 14, 512],
'Conv2d_12_pointwise': [batch_size, 14, 14, 1024],
'Conv2d_13_depthwise': [batch_size, 14, 14, 1024],
'Conv2d_13_pointwise': [batch_size, 14, 14, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
output_stride = 8
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 28, 28, 256],
'Conv2d_6_pointwise': [batch_size, 28, 28, 512],
'Conv2d_7_depthwise': [batch_size, 28, 28, 512],
'Conv2d_7_pointwise': [batch_size, 28, 28, 512],
'Conv2d_8_depthwise': [batch_size, 28, 28, 512],
'Conv2d_8_pointwise': [batch_size, 28, 28, 512],
'Conv2d_9_depthwise': [batch_size, 28, 28, 512],
'Conv2d_9_pointwise': [batch_size, 28, 28, 512],
'Conv2d_10_depthwise': [batch_size, 28, 28, 512],
'Conv2d_10_pointwise': [batch_size, 28, 28, 512],
'Conv2d_11_depthwise': [batch_size, 28, 28, 512],
'Conv2d_11_pointwise': [batch_size, 28, 28, 512],
'Conv2d_12_depthwise': [batch_size, 28, 28, 512],
'Conv2d_12_pointwise': [batch_size, 28, 28, 1024],
'Conv2d_13_depthwise': [batch_size, 28, 28, 1024],
'Conv2d_13_pointwise': [batch_size, 28, 28, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildAndCheckAllEndPointsApproximateFaceNet(self):
batch_size = 5
height, width = 128, 128
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75)
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75,
use_explicit_padding=True)
# For the Conv2d_0 layer FaceNet has depth=16
endpoints_shapes = {'Conv2d_0': [batch_size, 64, 64, 24],
'Conv2d_1_depthwise': [batch_size, 64, 64, 24],
'Conv2d_1_pointwise': [batch_size, 64, 64, 48],
'Conv2d_2_depthwise': [batch_size, 32, 32, 48],
'Conv2d_2_pointwise': [batch_size, 32, 32, 96],
'Conv2d_3_depthwise': [batch_size, 32, 32, 96],
'Conv2d_3_pointwise': [batch_size, 32, 32, 96],
'Conv2d_4_depthwise': [batch_size, 16, 16, 96],
'Conv2d_4_pointwise': [batch_size, 16, 16, 192],
'Conv2d_5_depthwise': [batch_size, 16, 16, 192],
'Conv2d_5_pointwise': [batch_size, 16, 16, 192],
'Conv2d_6_depthwise': [batch_size, 8, 8, 192],
'Conv2d_6_pointwise': [batch_size, 8, 8, 384],
'Conv2d_7_depthwise': [batch_size, 8, 8, 384],
'Conv2d_7_pointwise': [batch_size, 8, 8, 384],
'Conv2d_8_depthwise': [batch_size, 8, 8, 384],
'Conv2d_8_pointwise': [batch_size, 8, 8, 384],
'Conv2d_9_depthwise': [batch_size, 8, 8, 384],
'Conv2d_9_pointwise': [batch_size, 8, 8, 384],
'Conv2d_10_depthwise': [batch_size, 8, 8, 384],
'Conv2d_10_pointwise': [batch_size, 8, 8, 384],
'Conv2d_11_depthwise': [batch_size, 8, 8, 384],
'Conv2d_11_pointwise': [batch_size, 8, 8, 384],
'Conv2d_12_depthwise': [batch_size, 4, 4, 384],
'Conv2d_12_pointwise': [batch_size, 4, 4, 768],
'Conv2d_13_depthwise': [batch_size, 4, 4, 768],
'Conv2d_13_pointwise': [batch_size, 4, 4, 768]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
mobilenet_v1.mobilenet_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(3217920, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')]
_, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = mobilenet_v1.mobilenet_v1(
inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = mobilenet_v1.mobilenet_v1(
inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
  def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
reuse=True)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random.uniform([1, 224, 224, 3])
logits, _ = mobilenet_v1.mobilenet_v1(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)
self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
slim.batch_norm)])
def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=False)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet_v1.mobilenet_v1_arg_scope()
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
if __name__ == '__main__':
tf.test.main()
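# A minimal usage sketch of the API exercised by the tests above (not part of
# the test suite itself; shapes follow the test defaults):
#
#   inputs = tf.random.uniform((1, 224, 224, 3))
#   logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes=1000)
#   features, _ = mobilenet_v1.mobilenet_v1_base(
#       inputs, final_endpoint='Conv2d_13_pointwise')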
| 27,003 | 49.380597 | 80 | py |
models | models-master/research/slim/nets/inception_v2_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception
class InceptionV2Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'InceptionV2/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV2/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
mixed_5c, end_points = inception.inception_v2_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV2/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a',
'Mixed_5b', 'Mixed_5c', 'Conv2d_1a_7x7',
'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3']
self.assertItemsEqual(list(end_points.keys()), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'Mixed_4a', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_5a', 'Mixed_5b', 'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v2_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV2/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], list(end_points.keys()))
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {'Mixed_3b': [batch_size, 28, 28, 256],
'Mixed_3c': [batch_size, 28, 28, 320],
'Mixed_4a': [batch_size, 14, 14, 576],
'Mixed_4b': [batch_size, 14, 14, 576],
'Mixed_4c': [batch_size, 14, 14, 576],
'Mixed_4d': [batch_size, 14, 14, 576],
'Mixed_4e': [batch_size, 14, 14, 576],
'Mixed_5a': [batch_size, 7, 7, 1024],
'Mixed_5b': [batch_size, 7, 7, 1024],
'Mixed_5c': [batch_size, 7, 7, 1024],
'Conv2d_1a_7x7': [batch_size, 112, 112, 64],
'MaxPool_2a_3x3': [batch_size, 56, 56, 64],
'Conv2d_2b_1x1': [batch_size, 56, 56, 64],
'Conv2d_2c_3x3': [batch_size, 56, 56, 192],
'MaxPool_3a_3x3': [batch_size, 28, 28, 192]}
self.assertItemsEqual(
list(endpoints_shapes.keys()), list(end_points.keys()))
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v2_arg_scope()):
inception.inception_v2_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(10173112, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v2(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v2(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception.inception_v2(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception.inception_v2(inputs, num_classes, depth_multiplier=0.0)
def testBuildEndPointsWithUseSeparableConvolutionFalse(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2_base(inputs)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_replacement = inception.inception_v2_base(
inputs, use_separable_conv=False)
# The endpoint shapes must be equal to the original shape even when the
# separable convolution is replaced with a normal convolution.
for key in endpoint_keys:
original_shape = end_points[key].get_shape().as_list()
self.assertTrue(key in end_points_with_replacement)
new_shape = end_points_with_replacement[key].get_shape().as_list()
self.assertListEqual(original_shape, new_shape)
def testBuildEndPointsNCHWDataFormat(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2_base(inputs)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
inputs_in_nchw = tf.random.uniform((batch_size, 3, height, width))
_, end_points_with_replacement = inception.inception_v2_base(
inputs_in_nchw, use_separable_conv=False, data_format='NCHW')
# With the 'NCHW' data format, all endpoint activations have a transposed
# shape from the original shape with the 'NHWC' layout.
for key in endpoint_keys:
transposed_original_shape = tf.transpose(
a=end_points[key], perm=[0, 3, 1, 2]).get_shape().as_list()
self.assertTrue(key in end_points_with_replacement)
new_shape = end_points_with_replacement[key].get_shape().as_list()
self.assertListEqual(transposed_original_shape, new_shape)
def testBuildErrorsForDataFormats(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random.uniform((batch_size, height, width, 3))
# 'NCWH' data format is not supported.
with self.assertRaises(ValueError):
_ = inception.inception_v2_base(inputs, data_format='NCWH')
# 'NCHW' data format is not supported for separable convolution.
with self.assertRaises(ValueError):
_ = inception.inception_v2_base(inputs, data_format='NCHW')
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testBuildBaseNetworkWithoutRootBlock(self):
batch_size = 5
height, width = 28, 28
channels = 192
inputs = tf.random.uniform((batch_size, height, width, channels))
_, end_points = inception.inception_v2_base(
inputs, include_root_block=False)
endpoints_shapes = {
'Mixed_3b': [batch_size, 28, 28, 256],
'Mixed_3c': [batch_size, 28, 28, 320],
'Mixed_4a': [batch_size, 14, 14, 576],
'Mixed_4b': [batch_size, 14, 14, 576],
'Mixed_4c': [batch_size, 14, 14, 576],
'Mixed_4d': [batch_size, 14, 14, 576],
'Mixed_4e': [batch_size, 14, 14, 576],
'Mixed_5a': [batch_size, 7, 7, 1024],
'Mixed_5b': [batch_size, 7, 7, 1024],
'Mixed_5c': [batch_size, 7, 7, 1024]
}
self.assertItemsEqual(
list(endpoints_shapes.keys()), list(end_points.keys()))
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v2(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
  def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v2(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_v2(train_inputs, num_classes)
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random.uniform([1, 224, 224, 3])
logits, _ = inception.inception_v2(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v2_arg_scope()):
inception.inception_v2(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v2_arg_scope(batch_norm_scale=True)):
inception.inception_v2(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name
for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
tf.test.main()
| 16,902 | 40.126521 | 80 | py |
models | models-master/research/slim/nets/overfeat.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Usage:
with slim.arg_scope(overfeat.overfeat_arg_scope()):
outputs, end_points = overfeat.overfeat(inputs)
@@overfeat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def overfeat(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='overfeat',
global_pool=False):
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 231x231. To use in fully
convolutional mode, set spatial_squeeze to false.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original OverFeat.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the non-dropped-out input to the logits layer (if num_classes is 0 or
None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.conv2d(net, 512, [3, 3], scope='conv3')
net = slim.conv2d(net, 1024, [3, 3], scope='conv4')
net = slim.conv2d(net, 1024, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
with slim.arg_scope(
[slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.constant_initializer(0.1)):
net = slim.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into an end_point dict.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
overfeat.default_image_size = 231
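# A minimal usage sketch (assuming the input images and training loop are
# provided elsewhere; the 231x231 size matches default_image_size above):
#
#   images = tf.placeholder(tf.float32, [None, 231, 231, 3])
#   with slim.arg_scope(overfeat_arg_scope()):
#     logits, end_points = overfeat(images, num_classes=1000, is_training=True)
#
# For fully convolutional use on larger inputs, pass spatial_squeeze=False so
# the logits keep their spatial dimensions.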
| 5,965 | 42.231884 | 80 | py |
models | models-master/research/slim/nets/inception_v1.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception_utils
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
def inception_v1_base(inputs,
final_endpoint='Mixed_5c',
include_root_block=True,
scope='InceptionV1'):
"""Defines the Inception V1 base architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']. If
include_root_block is False, ['Conv2d_1a_7x7', 'MaxPool_2a_3x3',
'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3'] will not be available.
include_root_block: If True, include the convolution and max-pooling layers
before the inception modules. If False, excludes those layers.
scope: Optional variable_scope.
  Returns:
    net: the output tensor corresponding to final_endpoint.
    end_points: a dictionary from components of the network to the
      corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
stride=1, padding='SAME'):
net = inputs
if include_root_block:
end_point = 'Conv2d_1a_7x7'
net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Conv2d_2b_1x1'
net = slim.conv2d(net, 64, [1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Conv2d_2c_3x3'
net = slim.conv2d(net, 192, [3, 3], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_4a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_5a_2x2'
net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v1(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1',
global_pool=False):
"""Defines the Inception V1 architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
is_training: whether is training or not.
    dropout_keep_prob: the fraction of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is of
      shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse them, 'scope' must be given.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with tf.variable_scope(
scope, 'InceptionV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
net = slim.avg_pool2d(net, [7, 7], stride=1, scope='AvgPool_0a_7x7')
end_points['AvgPool_0a_7x7'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v1.default_image_size = 224
inception_v1_arg_scope = inception_utils.inception_arg_scope
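# A minimal usage sketch (assuming `images` comes from an input pipeline
# defined elsewhere):
#
#   with slim.arg_scope(inception_v1_arg_scope()):
#     logits, end_points = inception_v1(images, num_classes=1000,
#                                       is_training=True)
#
# To use the trunk as a feature extractor, call inception_v1_base directly and
# read the desired activation from end_points (e.g. end_points['Mixed_4f']).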
| 16,875 | 47.774566 | 80 | py |
models | models-master/research/slim/nets/inception_v2.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v2 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception_utils
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
def inception_v2_base(inputs,
final_endpoint='Mixed_5c',
min_depth=16,
depth_multiplier=1.0,
use_separable_conv=True,
data_format='NHWC',
include_root_block=True,
scope=None):
"""Inception v2 (6a2).
Constructs an Inception v2 network from inputs to the given final endpoint.
This method can construct the network up to the layer inception(5b) as
described in http://arxiv.org/abs/1502.03167.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
'Mixed_5c']. If include_root_block is False, ['Conv2d_1a_7x7',
'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3'] will
not be available.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
use_separable_conv: Use a separable convolution for the first layer
Conv2d_1a_7x7. If this is False, use a normal convolution instead.
data_format: Data format of the activations ('NHWC' or 'NCHW').
include_root_block: If True, include the convolution and max-pooling layers
before the inception modules. If False, excludes those layers.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a dictionary of activations for external use, for example
      summaries or losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
# Used to find thinned depths for each layer.
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
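  # For example, with depth_multiplier=0.5 and min_depth=16:
  #   depth(64) == 32, depth(192) == 96, and depth(16) == 16 (clamped to
  #   min_depth).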
if data_format != 'NHWC' and data_format != 'NCHW':
raise ValueError('data_format must be either NHWC or NCHW.')
if data_format == 'NCHW' and use_separable_conv:
raise ValueError(
'separable convolution only supports NHWC layout. NCHW data format can'
' only be used when use_separable_conv is False.'
)
concat_dim = 3 if data_format == 'NHWC' else 1
with tf.variable_scope(scope, 'InceptionV2', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
padding='SAME',
data_format=data_format):
net = inputs
if include_root_block:
# Note that sizes in the comments below assume an input spatial size of
        # 224x224; however, the inputs can be of any size greater than 32x32.
# 224 x 224 x 3
end_point = 'Conv2d_1a_7x7'
if use_separable_conv:
# depthwise_multiplier here is different from depth_multiplier.
# depthwise_multiplier determines the output channels of the initial
# depthwise conv (see docs for tf.nn.separable_conv2d), while
# depth_multiplier controls the # channels of the subsequent 1x1
          # convolution. Must have
          #   in_channels * depthwise_multiplier <= out_channels
          # so that the separable convolution is not overparameterized.
depthwise_multiplier = min(int(depth(64) / 3), 8)
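          # For example, with depth_multiplier=1.0, depth(64) == 64, so
          # depthwise_multiplier == min(64 // 3, 8) == min(21, 8) == 8.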
net = slim.separable_conv2d(
inputs,
depth(64), [7, 7],
depth_multiplier=depthwise_multiplier,
stride=2,
padding='SAME',
weights_initializer=trunc_normal(1.0),
scope=end_point)
else:
# Use a normal convolution instead of a separable convolution.
net = slim.conv2d(
inputs,
depth(64), [7, 7],
stride=2,
weights_initializer=trunc_normal(1.0),
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 112 x 112 x 64
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = slim.conv2d(
net,
depth(64), [1, 1],
scope=end_point,
weights_initializer=trunc_normal(0.1))
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2c_3x3'
net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 192
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 192
# Inception module.
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(32), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat(
axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 28 x 28 x 256
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(64), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat(
axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 28 x 28 x 320
end_point = 'Mixed_4a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat(
axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat(
axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat(
axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat(
axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_5a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
scope='MaxPool_1a_3x3')
net = tf.concat(
axis=concat_dim, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat(
axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat(
axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
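# Illustrative sketch (not part of the original file): builds the base network
# only up to an intermediate endpoint, which is how detection and segmentation
# models typically consume inception_v2_base. The batch size and the 224x224
# input size below are assumptions chosen for the example.
def _example_inception_v2_base_endpoint():
  images = tf.random.uniform((4, 224, 224, 3))
  net, end_points = inception_v2_base(images, final_endpoint='Mixed_4e')
  # net is the 14x14x576 'Mixed_4e' activation; earlier endpoints such as
  # 'Mixed_3c' remain available in end_points.
  return net, end_points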
def inception_v2(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV2',
global_pool=False):
"""Inception v2 model for classification.
Constructs an Inception v2 network for classification as described in
http://arxiv.org/abs/1502.03167.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
    is_training: whether the model is being trained or not.
dropout_keep_prob: the percentage of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is of
shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
# Final pooling and prediction
with tf.variable_scope(
scope, 'InceptionV2', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v2_base(
inputs, scope=scope, min_depth=min_depth,
depth_multiplier=depth_multiplier)
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
end_points['AvgPool_1a'] = net
if not num_classes:
return net, end_points
# 1 x 1 x 1024
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
end_points['PreLogits'] = net
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v2.default_image_size = 224
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                       tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
inception_v2_arg_scope = inception_utils.inception_arg_scope
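# Illustrative sketch (not part of the original file): end-to-end
# classification use of inception_v2 under its default arg scope. The batch
# size and the 1000-class head are assumptions chosen for the example.
def _example_inception_v2_classification():
  size = inception_v2.default_image_size
  images = tf.random.uniform((8, size, size, 3))
  with slim.arg_scope(inception_v2_arg_scope()):
    logits, end_points = inception_v2(images, num_classes=1000,
                                      is_training=False)
  # logits: [8, 1000]; end_points['Predictions'] holds the softmax output.
  return logits, end_points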
| 26,623 | 43.746218 | 80 | py |
models | models-master/research/slim/nets/resnet_utils.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
import tf_slim as slim
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
net = subsample(net, factor=stride)
whereas
net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
padding='SAME', scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
rate=rate, padding='VALID', scope=scope)
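# Illustrative sketch (not part of the original file): exercises the shape
# contract described in the conv2d_same docstring. The input size and filter
# count are assumptions chosen for the example.
def _example_conv2d_same():
  images = tf.random.uniform((1, 224, 224, 3))
  # Explicit zero-padding followed by a 'VALID' convolution yields a
  # 112x112x64 output, matching the spatial size of
  # slim.conv2d(images, 64, 7, stride=2, padding='SAME'), but with padding
  # that no longer depends on the input parity.
  return conv2d_same(images, 64, 7, stride=2, scope='conv1')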
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None,
store_non_strided_activations=False,
outputs_collections=None):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
factor of 2 when transitioning between consecutive ResNet blocks. This results
to a nominal ResNet output_stride equal to 8. If we set the output_stride to
half the nominal network stride (e.g., output_stride=4), then we compute
responses twice.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
    dense prediction problems, but increases the computation and memory cost
    at the last unit of each block by 4x.
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]) as sc:
block_stride = 1
for i, unit in enumerate(block.args):
if store_non_strided_activations and i == len(block.args) - 1:
# Move stride from the block's last unit to the end of the block.
block_stride = unit.get('stride', 1)
unit = dict(unit, stride=1)
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
# Collect activations at the block's end before performing subsampling.
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
# Subsampling of the block's output activations.
if output_stride is not None and current_stride == output_stride:
rate *= block_stride
else:
net = subsample(net, block_stride)
current_stride *= block_stride
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
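# Illustrative sketch (not part of the original file): shows how `Block`
# definitions drive stack_blocks_dense and how output_stride trades stride for
# atrous rate. The toy unit_fn below is an assumption standing in for the
# bottleneck units defined in resnet_v1.py / resnet_v2.py.
def _example_stack_blocks_dense():
  def toy_unit(net, depth, stride, rate=1, scope=None):
    # Stand-in residual unit: a single 3x3 conv honouring the stride/rate
    # contract that stack_blocks_dense expects from unit_fn.
    return conv2d_same(net, depth, 3, stride=stride, rate=rate, scope=scope)
  blocks = [
      Block('block1', toy_unit, [{'depth': 64, 'stride': 1},
                                 {'depth': 64, 'stride': 2}]),
      Block('block2', toy_unit, [{'depth': 128, 'stride': 1},
                                 {'depth': 128, 'stride': 2}]),
  ]
  inputs = tf.random.uniform((1, 64, 64, 3))
  # Once the running stride reaches output_stride=2, later strides are
  # converted into atrous rates, so the 64x64 input only shrinks to 32x32.
  return stack_blocks_dense(inputs, blocks, output_stride=2)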
def resnet_arg_scope(
weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
activation_fn=tf.nn.relu,
use_batch_norm=True,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Whether or not to use batch normalization.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': batch_norm_updates_collections,
'fused': None, # Use fused batch norm if possible.
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
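# Illustrative sketch (not part of the original file): any layer created under
# resnet_arg_scope picks up the L2 regularizer, batch normalization and ReLU
# configured above, which is how resnet_v1.py / resnet_v2.py use it.
def _example_resnet_arg_scope():
  images = tf.random.uniform((1, 224, 224, 3))
  with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
    # This convolution inherits the regularizer and normalizer from the scope.
    net = slim.conv2d(images, 64, 7, stride=2, scope='conv1')
  return net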
| 11,826 | 41.696751 | 80 | py |
models | models-master/research/slim/nets/alexnet.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a model definition for AlexNet.
This work was first described in:
ImageNet Classification with Deep Convolutional Neural Networks
Alex Krizhevsky, Ilya Sutskever and Geoffrey E. Hinton
and later refined in:
One weird trick for parallelizing convolutional neural networks
Alex Krizhevsky, 2014
Here we provide the implementation proposed in "One weird trick" and not
"ImageNet Classification", as per the paper, the LRN layers have been removed.
Usage:
with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
outputs, end_points = alexnet.alexnet_v2(inputs)
@@alexnet_v2
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
def alexnet_v2_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
biases_initializer=tf.constant_initializer(0.1),
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def alexnet_v2(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='alexnet_v2',
global_pool=False):
"""AlexNet version 2.
Described in: http://arxiv.org/pdf/1404.5997v2.pdf
Parameters from:
github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
layers-imagenet-1gpu.cfg
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224 or set
global_pool=True. To use in fully convolutional mode, set
spatial_squeeze to false.
        The LRN layers have been removed and the initializers changed from
        random_normal_initializer to xavier_initializer.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: the number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not should squeeze the spatial dimensions of the
logits. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original AlexNet.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the non-dropped-out input to the logits layer (if num_classes is 0
or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=[end_points_collection]):
net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
scope='conv1')
net = slim.max_pool2d(net, [3, 3], 2, scope='pool1')
net = slim.conv2d(net, 192, [5, 5], scope='conv2')
net = slim.max_pool2d(net, [3, 3], 2, scope='pool2')
net = slim.conv2d(net, 384, [3, 3], scope='conv3')
net = slim.conv2d(net, 384, [3, 3], scope='conv4')
net = slim.conv2d(net, 256, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')
# Use conv2d instead of fully_connected layers.
with slim.arg_scope(
[slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.constant_initializer(0.1)):
net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
alexnet_v2.default_image_size = 224
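# Illustrative sketch (not part of the original file): fully convolutional use
# of alexnet_v2 with spatial_squeeze disabled, so the logits keep their
# spatial extent. The input size and the 21-class head are assumptions chosen
# for the example.
def _example_alexnet_v2_fully_convolutional():
  images = tf.random.uniform((1, 300, 400, 3))
  with slim.arg_scope(alexnet_v2_arg_scope()):
    logits, _ = alexnet_v2(images, num_classes=21, spatial_squeeze=False,
                           is_training=False)
  # logits is a [1, height', width', 21] score map instead of a [1, 21] vector.
  return logits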
| 6,151 | 41.427586 | 80 | py |
models | models-master/research/slim/nets/mobilenet_v1.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""MobileNet v1.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and different
head (for example: embeddings, localization and classification).
As described in https://arxiv.org/abs/1704.04861.
MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications
Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,
Tobias Weyand, Marco Andreetto, Hartwig Adam
100% Mobilenet V1 (base) with input size 224x224:
See mobilenet_v1()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 864 10,838,016
MobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672
MobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112
MobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336
MobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112
MobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672
MobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224
MobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168
MobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112
MobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336
MobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224
MobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584
MobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112
MobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792
MobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112
MobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584
MobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224
--------------------------------------------------------------------------------
Total: 3,185,088 567,716,352
75% Mobilenet V1 (base) with input size 128x128:
See mobilenet_v1_075()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 648 2,654,208
MobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736
MobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592
MobilenetV1/Conv2d_2_depthwise/depthwise: 432 442,368
MobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 4,718,592
MobilenetV1/Conv2d_3_depthwise/depthwise: 864 884,736
MobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184
MobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184
MobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592
MobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368
MobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184
MobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592
MobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592
MobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296
MobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 4,718,592
MobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592
MobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184
--------------------------------------------------------------------------------
Total: 1,800,144 106,002,432
"""
# Tensorflow mandates these.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture
# Conv defines 3x3 convolution layers
# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.
# stride is the stride of the convolution
# depth is the number of channels or filters in a layer
Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])
# MOBILENETV1_CONV_DEFS specifies the MobileNet body
MOBILENETV1_CONV_DEFS = [
Conv(kernel=[3, 3], stride=2, depth=32),
DepthSepConv(kernel=[3, 3], stride=1, depth=64),
DepthSepConv(kernel=[3, 3], stride=2, depth=128),
DepthSepConv(kernel=[3, 3], stride=1, depth=128),
DepthSepConv(kernel=[3, 3], stride=2, depth=256),
DepthSepConv(kernel=[3, 3], stride=1, depth=256),
DepthSepConv(kernel=[3, 3], stride=2, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=2, depth=1024),
DepthSepConv(kernel=[3, 3], stride=1, depth=1024)
]
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
[0, 0]])
return padded_inputs
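# Illustrative sketch (not part of the original file): checks the padding
# contract described in the _fixed_padding docstring. The input size, filter
# count and stride are assumptions chosen for the example.
def _example_fixed_padding():
  images = tf.random.uniform((1, 225, 225, 3))
  padded = _fixed_padding(images, [3, 3])
  # A 'VALID' convolution on the padded tensor produces the same 113x113
  # spatial shape as a 'SAME' convolution on the raw tensor, but the zero
  # padding is now placed independently of the input size.
  valid = slim.conv2d(padded, 8, [3, 3], stride=2, padding='VALID')
  same = slim.conv2d(images, 8, [3, 3], stride=2, padding='SAME')
  return valid, same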
def mobilenet_v1_base(inputs,
final_endpoint='Conv2d_13_pointwise',
min_depth=8,
depth_multiplier=1.0,
conv_defs=None,
output_stride=None,
use_explicit_padding=False,
scope=None):
"""Mobilenet v1.
Constructs a Mobilenet v1 network from inputs to the given final endpoint.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
      'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5_pointwise',
'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
conv_defs: A list of ConvDef namedtuples specifying the net architecture.
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 8 (accurate fully convolutional
mode), 16 (fast fully convolutional mode), 32 (classification mode).
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
depth = lambda d: max(int(d * depth_multiplier), min_depth)
end_points = {}
# Used to find thinned depths for each layer.
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
if conv_defs is None:
conv_defs = MOBILENETV1_CONV_DEFS
if output_stride is not None and output_stride not in [8, 16, 32]:
raise ValueError('Only allowed output_stride values are 8, 16, 32.')
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
with tf.variable_scope(scope, 'MobilenetV1', [inputs]):
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
for i, conv_def in enumerate(conv_defs):
end_point_base = 'Conv2d_%d' % i
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= conv_def.stride
else:
layer_stride = conv_def.stride
layer_rate = 1
current_stride *= conv_def.stride
if isinstance(conv_def, Conv):
end_point = end_point_base
if use_explicit_padding:
net = _fixed_padding(net, conv_def.kernel)
net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,
stride=conv_def.stride,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
elif isinstance(conv_def, DepthSepConv):
end_point = end_point_base + '_depthwise'
# By passing filters=None
# separable_conv2d produces only a depthwise convolution layer
if use_explicit_padding:
net = _fixed_padding(net, conv_def.kernel, layer_rate)
net = slim.separable_conv2d(net, None, conv_def.kernel,
depth_multiplier=1,
stride=layer_stride,
rate=layer_rate,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
end_point = end_point_base + '_pointwise'
net = slim.conv2d(net, depth(conv_def.depth), [1, 1],
stride=1,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
else:
raise ValueError('Unknown convolution type %s for layer %d'
                           % (type(conv_def).__name__, i))
raise ValueError('Unknown final endpoint %s' % final_endpoint)
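# Illustrative sketch (not part of the original file): feature-extractor use
# of mobilenet_v1_base with a thinner network and a capped output stride, as
# described in the docstring above. The input size, depth multiplier and
# endpoint are assumptions chosen for the example.
def _example_mobilenet_v1_base_features():
  images = tf.random.uniform((2, 224, 224, 3))
  net, end_points = mobilenet_v1_base(
      images,
      final_endpoint='Conv2d_11_pointwise',
      depth_multiplier=0.5,
      output_stride=16)
  # net is a [2, 14, 14, 256] feature map (output stride 16, 512 * 0.5
  # channels); every earlier endpoint remains available in end_points.
  return net, end_points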
def mobilenet_v1(inputs,
num_classes=1000,
dropout_keep_prob=0.999,
is_training=True,
min_depth=8,
depth_multiplier=1.0,
conv_defs=None,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='MobilenetV1',
global_pool=False):
"""Mobilenet v1 model for classification.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
dropout_keep_prob: the percentage of activation values that are retained.
    is_training: whether the model is being trained or not.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
conv_defs: A list of ConvDef namedtuples specifying the net architecture.
prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: Input rank is invalid.
"""
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
len(input_shape))
with tf.variable_scope(
scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = mobilenet_v1_base(inputs, scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier,
conv_defs=conv_defs)
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a')
end_points['AvgPool_1a'] = net
if not num_classes:
return net, end_points
# 1 x 1 x 1024
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
mobilenet_v1.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)
mobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)
mobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
def mobilenet_v1_arg_scope(
is_training=True,
weight_decay=0.00004,
stddev=0.09,
regularize_depthwise=False,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
normalizer_fn=slim.batch_norm):
"""Defines the default MobilenetV1 arg scope.
Args:
is_training: Whether or not we're training the model. If this is set to
None, the parameter is not added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
regularize_depthwise: Whether or not apply regularization on depthwise.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
normalizer_fn: Normalization function to apply after convolution.
Returns:
An `arg_scope` to use for the mobilenet v1 model.
"""
batch_norm_params = {
'center': True,
'scale': True,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'updates_collections': batch_norm_updates_collections,
}
if is_training is not None:
batch_norm_params['is_training'] = is_training
# Set weight_decay for weights in Conv and DepthSepConv layers.
weights_init = tf.truncated_normal_initializer(stddev=stddev)
regularizer = slim.l2_regularizer(weight_decay)
if regularize_depthwise:
depthwise_regularizer = regularizer
else:
depthwise_regularizer = None
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
weights_initializer=weights_init,
activation_fn=tf.nn.relu6, normalizer_fn=normalizer_fn):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
with slim.arg_scope([slim.separable_conv2d],
weights_regularizer=depthwise_regularizer) as sc:
return sc
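# Illustrative sketch (not part of the original file): classification use of
# the 75% width model under the arg scope defined above. The batch size and
# the 1001-class head are assumptions chosen for the example.
def _example_mobilenet_v1_075_classification():
  images = tf.random.uniform((8, 224, 224, 3))
  with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
    logits, end_points = mobilenet_v1_075(images, num_classes=1001,
                                          is_training=False)
  # logits: [8, 1001]; end_points['Predictions'] holds the softmax output.
  return logits, end_points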
| 22,516 | 45.910417 | 80 | py |
models | models-master/research/slim/nets/nasnet/nasnet_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nasnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.nasnet import nasnet
class NASNetTest(tf.test.TestCase):
def testBuildLogitsCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
logits, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
logits, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
logits, end_points = nasnet.build_nasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
net, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 768])
def testBuildPreLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
net, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1056])
def testBuildPreLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
net, end_points = nasnet.build_nasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 4032])
def testAllEndPointsShapesCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 32, 32, 96],
'Cell_0': [batch_size, 32, 32, 192],
'Cell_1': [batch_size, 32, 32, 192],
'Cell_2': [batch_size, 32, 32, 192],
'Cell_3': [batch_size, 32, 32, 192],
'Cell_4': [batch_size, 32, 32, 192],
'Cell_5': [batch_size, 32, 32, 192],
'Cell_6': [batch_size, 16, 16, 384],
'Cell_7': [batch_size, 16, 16, 384],
'Cell_8': [batch_size, 16, 16, 384],
'Cell_9': [batch_size, 16, 16, 384],
'Cell_10': [batch_size, 16, 16, 384],
'Cell_11': [batch_size, 16, 16, 384],
'Cell_12': [batch_size, 8, 8, 768],
'Cell_13': [batch_size, 8, 8, 768],
'Cell_14': [batch_size, 8, 8, 768],
'Cell_15': [batch_size, 8, 8, 768],
'Cell_16': [batch_size, 8, 8, 768],
'Cell_17': [batch_size, 8, 8, 768],
'Reduction_Cell_0': [batch_size, 16, 16, 256],
'Reduction_Cell_1': [batch_size, 8, 8, 512],
'global_pool': [batch_size, 768],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.cifar_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, end_points = nasnet.build_nasnet_cifar(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testAllEndPointsShapesMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
_, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 28, 28, 88],
'Cell_0': [batch_size, 28, 28, 264],
'Cell_1': [batch_size, 28, 28, 264],
'Cell_2': [batch_size, 28, 28, 264],
'Cell_3': [batch_size, 28, 28, 264],
'Cell_4': [batch_size, 14, 14, 528],
'Cell_5': [batch_size, 14, 14, 528],
'Cell_6': [batch_size, 14, 14, 528],
'Cell_7': [batch_size, 14, 14, 528],
'Cell_8': [batch_size, 7, 7, 1056],
'Cell_9': [batch_size, 7, 7, 1056],
'Cell_10': [batch_size, 7, 7, 1056],
'Cell_11': [batch_size, 7, 7, 1056],
'Reduction_Cell_0': [batch_size, 14, 14, 352],
'Reduction_Cell_1': [batch_size, 7, 7, 704],
'global_pool': [batch_size, 1056],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
_, end_points = nasnet.build_nasnet_mobile(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testAllEndPointsShapesLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
_, end_points = nasnet.build_nasnet_large(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 42, 42, 336],
'Cell_0': [batch_size, 42, 42, 1008],
'Cell_1': [batch_size, 42, 42, 1008],
'Cell_2': [batch_size, 42, 42, 1008],
'Cell_3': [batch_size, 42, 42, 1008],
'Cell_4': [batch_size, 42, 42, 1008],
'Cell_5': [batch_size, 42, 42, 1008],
'Cell_6': [batch_size, 21, 21, 2016],
'Cell_7': [batch_size, 21, 21, 2016],
'Cell_8': [batch_size, 21, 21, 2016],
'Cell_9': [batch_size, 21, 21, 2016],
'Cell_10': [batch_size, 21, 21, 2016],
'Cell_11': [batch_size, 21, 21, 2016],
'Cell_12': [batch_size, 11, 11, 4032],
'Cell_13': [batch_size, 11, 11, 4032],
'Cell_14': [batch_size, 11, 11, 4032],
'Cell_15': [batch_size, 11, 11, 4032],
'Cell_16': [batch_size, 11, 11, 4032],
'Cell_17': [batch_size, 11, 11, 4032],
'Reduction_Cell_0': [batch_size, 21, 21, 1344],
'Reduction_Cell_1': [batch_size, 11, 11, 2688],
'global_pool': [batch_size, 4032],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
_, end_points = nasnet.build_nasnet_large(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testVariablesSetDeviceMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
# Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
nasnet.build_nasnet_mobile(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
nasnet.build_nasnet_mobile(inputs, num_classes)
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testUnknownBatchSizeMobileModel(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
logits, _ = nasnet.build_nasnet_mobile(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch_size, num_classes))
def testEvaluationMobileModel(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
logits, _ = nasnet.build_nasnet_mobile(eval_inputs,
num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
      self.assertEqual(output.shape, (batch_size,))
def testOverrideHParamsCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.cifar_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, end_points = nasnet.build_nasnet_cifar(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 96, 32, 32])
def testOverrideHParamsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
_, end_points = nasnet.build_nasnet_mobile(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 88, 28, 28])
def testOverrideHParamsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
_, end_points = nasnet.build_nasnet_large(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 336, 42, 42])
def testCurrentStepCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
inputs = tf.random.uniform((batch_size, height, width, 3))
global_step = tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
logits, end_points = nasnet.build_nasnet_cifar(inputs,
num_classes,
current_step=global_step)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
  def testUseBoundedActivationCifarModel(self):
batch_size = 1
height, width = 32, 32
num_classes = 10
for use_bounded_activation in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
config = nasnet.cifar_config()
config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, _ = nasnet.build_nasnet_cifar(
inputs, num_classes, config=config)
for node in tf.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation)
if __name__ == '__main__':
tf.test.main()
| 19,056 | 45.254854 | 80 | py |
models | models-master/research/slim/nets/nasnet/pnasnet.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for the PNASNet classification networks.
Paper: https://arxiv.org/abs/1712.00559
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import training as contrib_training
from nets.nasnet import nasnet
from nets.nasnet import nasnet_utils
arg_scope = slim.arg_scope
def large_imagenet_config():
"""Large ImageNet configuration based on PNASNet-5."""
return contrib_training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
num_conv_filters=216,
drop_path_keep_prob=0.6,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
def mobile_imagenet_config():
"""Mobile ImageNet configuration based on PNASNet-5."""
return contrib_training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=9,
filter_scaling_rate=2.0,
num_conv_filters=54,
drop_path_keep_prob=1.0,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
def pnasnet_large_arg_scope(weight_decay=4e-5, batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Default arg scope for the PNASNet Large ImageNet model."""
return nasnet.nasnet_large_arg_scope(
weight_decay, batch_norm_decay, batch_norm_epsilon)
def pnasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Default arg scope for the PNASNet Mobile ImageNet model."""
return nasnet.nasnet_mobile_arg_scope(weight_decay, batch_norm_decay,
batch_norm_epsilon)
def _build_pnasnet_base(images,
normal_cell,
num_classes,
hparams,
is_training,
final_endpoint=None):
"""Constructs a PNASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
  # Find where to place the reduction cells (implemented as strided normal cells)
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
# pylint: disable=protected-access
stem = lambda: nasnet._imagenet_stem(images, hparams, normal_cell)
# pylint: enable=protected-access
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net):
return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
for cell_num in range(hparams.num_cells):
is_reduction = cell_num in reduction_indices
stride = 2 if is_reduction else 1
if is_reduction: filter_scaling *= hparams.filter_scaling_rate
if hparams.skip_reduction_layer_input or not is_reduction:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = activation_fn(net)
# pylint: disable=protected-access
nasnet._build_aux_head(aux_net, end_points, num_classes, hparams,
scope='aux_{}'.format(cell_num))
# pylint: enable=protected-access
# Final softmax layer
with tf.variable_scope('final_layer'):
net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
def build_pnasnet_large(images,
num_classes,
is_training=True,
final_endpoint=None,
config=None):
"""Build PNASNet Large model for the ImageNet Dataset."""
hparams = copy.deepcopy(config) if config else large_imagenet_config()
# pylint: disable=protected-access
nasnet._update_hparams(hparams, is_training)
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network.
# There is no distinction between reduction and normal cells in PNAS so the
# total number of cells is equal to the number normal cells plus the number
# of stem cells (two by default).
total_num_cells = hparams.num_cells + 2
normal_cell = PNasNetNormalCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d,
slim.batch_norm, slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_pnasnet_base(
images,
normal_cell=normal_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
final_endpoint=final_endpoint)
build_pnasnet_large.default_image_size = 331
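# Illustrative usage sketch (added for documentation): `_example_build_pnasnet_large`
# is a hypothetical helper, not part of the original API. It mirrors the calls made
# in pnasnet_test.py and assumes TF1-style graph mode with tf_slim available.
def _example_build_pnasnet_large(batch_size=2, num_classes=1000):
  """Builds the large PNASNet graph on random inputs and returns its outputs."""
  image_size = build_pnasnet_large.default_image_size
  images = tf.random.uniform((batch_size, image_size, image_size, 3))
  tf.train.create_global_step()
  with arg_scope(pnasnet_large_arg_scope()):
    logits, end_points = build_pnasnet_large(images, num_classes)
  # end_points contains 'AuxLogits', 'Predictions' and the per-cell activations.
  return logits, end_points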
def build_pnasnet_mobile(images,
num_classes,
is_training=True,
final_endpoint=None,
config=None):
"""Build PNASNet Mobile model for the ImageNet Dataset."""
hparams = copy.deepcopy(config) if config else mobile_imagenet_config()
# pylint: disable=protected-access
nasnet._update_hparams(hparams, is_training)
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network.
# There is no distinction between reduction and normal cells in PNAS so the
# total number of cells is equal to the number normal cells plus the number
# of stem cells (two by default).
total_num_cells = hparams.num_cells + 2
normal_cell = PNasNetNormalCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_pnasnet_base(
images,
normal_cell=normal_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
final_endpoint=final_endpoint)
build_pnasnet_mobile.default_image_size = 224
class PNasNetNormalCell(nasnet_utils.NasNetABaseCell):
"""PNASNet Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
# Configuration for the PNASNet-5 model.
operations = [
'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3',
'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3',
'separable_3x3_2', 'none'
]
used_hiddenstates = [1, 1, 0, 0, 0, 0, 0]
hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0]
super(PNasNetNormalCell, self).__init__(
num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,
drop_path_keep_prob, total_num_cells, total_training_steps,
use_bounded_activation)
| 10,541 | 36.119718 | 80 | py |
models | models-master/research/slim/nets/nasnet/nasnet_utils.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
arg_scope = slim.arg_scope
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
INVALID = 'null'
# The cap for tf.clip_by_value; the observed activation distribution suggests
# that the majority of activation values lie in the range [-6, 6].
CLIP_BY_VALUE_CAP = 6
def calc_reduction_layers(num_cells, num_reduction_layers):
"""Figure out what layers should have reductions."""
reduction_layers = []
for pool_num in range(1, num_reduction_layers + 1):
layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
layer_num = int(layer_num)
reduction_layers.append(layer_num)
return reduction_layers
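# Worked example (added for illustration; matches
# nasnet_utils_test.testCalcReductionLayers): with num_cells=18 and
# num_reduction_layers=2 the loop yields int(18/3) = 6 and int(2*18/3) = 12,
# so calc_reduction_layers(18, 2) == [6, 12].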
@slim.add_arg_scope
def get_channel_index(data_format=INVALID):
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
return axis
@slim.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
assert data_format != INVALID
assert len(shape) == 4
if data_format == 'NHWC':
return int(shape[3])
elif data_format == 'NCHW':
return int(shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
@slim.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
"""Average pool away the height and width spatial dimensions of x."""
assert data_format != INVALID
assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4
if data_format == 'NHWC':
return tf.reduce_mean(input_tensor=x, axis=[1, 2])
else:
return tf.reduce_mean(input_tensor=x, axis=[2, 3])
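# Shape example (added; see nasnet_utils_test.testGlobalAvgPool): an input of
# shape [5, 10, 20, 10] reduces to [5, 10] under both 'NHWC' and 'NCHW', since
# only the two spatial dimensions are averaged away.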
@slim.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
"""Reduces the shape of net without information loss due to striding."""
assert data_format != INVALID
if stride == 1:
net = slim.conv2d(net, output_filters, 1, scope='path_conv')
net = slim.batch_norm(net, scope='path_bn')
return net
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
# Skip path 1
path1 = tf.nn.avg_pool2d(
net,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's on the right and bottom, then shift the filter to
# include those 0's that were added.
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(tensor=net, paddings=pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(tensor=net, paddings=pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.nn.avg_pool2d(
path2,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
# If odd number of filters, add an additional one to the second path.
final_filter_size = int(output_filters / 2) + int(output_filters % 2)
path2 = slim.conv2d(path2, final_filter_size, 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
return final_path
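# Shape example (added; the stride=1 case matches
# nasnet_utils_test.test_factorized_reduction): an NHWC input of shape
# [5, 10, 20, 10] with output_filters=16 and stride=1 becomes [5, 10, 20, 16]
# via the 1x1 'path_conv'; with stride=2 the two pooled paths would each carry
# output_filters // 2 = 8 channels before concatenation.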
@slim.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability."""
if is_training:
batch_size = tf.shape(input=net)[0]
noise_shape = [batch_size, 1, 1, 1]
random_tensor = keep_prob
random_tensor += tf.random.uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
net = net * keep_prob_inv * binary_tensor
return net
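# Worked example (added): with keep_prob=0.6, random_tensor = 0.6 + U[0, 1)
# per example, so tf.floor() is 1 with probability 0.6 and 0 otherwise.
# Surviving examples are rescaled by 1/0.6, which keeps the expected
# activation value unchanged during training.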
def _operation_to_filter_shape(operation):
splitted_operation = operation.split('x')
filter_shape = int(splitted_operation[0][-1])
assert filter_shape == int(
splitted_operation[1][0]), 'Rectangular filters not supported.'
return filter_shape
def _operation_to_num_layers(operation):
splitted_operation = operation.split('_')
if 'x' in splitted_operation[-1]:
return 1
return int(splitted_operation[-1])
def _operation_to_info(operation):
"""Takes in operation name and returns meta information.
  An example would be 'separable_3x3_4' -> (4, 3).
  Args:
    operation: String that corresponds to convolution operation.
  Returns:
    Tuple of (num layers, filter shape).
"""
num_layers = _operation_to_num_layers(operation)
filter_shape = _operation_to_filter_shape(operation)
return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size,
use_bounded_activation):
"""Takes in an operations and parses it to the correct sep operation."""
num_layers, kernel_size = _operation_to_info(operation)
activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
for layer_num in range(num_layers - 1):
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
return net
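# Example (added for illustration): the operation string 'separable_5x5_2'
# parses to num_layers=2 and kernel_size=5, i.e. relu -> 5x5 separable conv
# (carrying the stride) -> batch norm -> relu -> 5x5 separable conv (stride 1)
# -> batch norm.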
def _operation_to_pooling_type(operation):
"""Takes in the operation string and returns the pooling type."""
splitted_operation = operation.split('_')
return splitted_operation[0]
def _operation_to_pooling_shape(operation):
"""Takes in the operation string and returns the pooling kernel shape."""
splitted_operation = operation.split('_')
shape = splitted_operation[-1]
assert 'x' in shape
filter_height, filter_width = shape.split('x')
assert filter_height == filter_width
return int(filter_height)
def _operation_to_pooling_info(operation):
"""Parses the pooling operation string to return its type and shape."""
pooling_type = _operation_to_pooling_type(operation)
pooling_shape = _operation_to_pooling_shape(operation)
return pooling_type, pooling_shape
def _pooling(net, stride, operation, use_bounded_activation):
"""Parses operation and performs the correct pooling operation on net."""
padding = 'SAME'
pooling_type, pooling_shape = _operation_to_pooling_info(operation)
if use_bounded_activation:
net = tf.nn.relu6(net)
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
elif pooling_type == 'max':
net = slim.max_pool2d(net, pooling_shape, stride=stride, padding=padding)
else:
raise NotImplementedError('Unimplemented pooling type: ', pooling_type)
return net
class NasNetABaseCell(object):
"""NASNet Cell class that is used as a 'layer' in image architectures.
Args:
num_conv_filters: The number of filters for each convolution operation.
operations: List of operations that are performed in the NASNet Cell in
order.
used_hiddenstates: Binary array that signals if the hiddenstate was used
within the cell. This is used to determine what outputs of the cell
should be concatenated together.
    hiddenstate_indices: Determines what hiddenstates should be combined
      together with the specified operations to create the NASNet cell.
    drop_path_keep_prob: Probability of keeping a path during drop_path
      regularization; 1.0 disables drop_path.
    total_num_cells: Total number of cells in the network, used to scale the
      per-layer drop_path rate.
    total_training_steps: Total number of training steps, used to anneal the
      drop_path rate over the course of training.
use_bounded_activation: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
"""
def __init__(self, num_conv_filters, operations, used_hiddenstates,
hiddenstate_indices, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
self._num_conv_filters = num_conv_filters
self._operations = operations
self._used_hiddenstates = used_hiddenstates
self._hiddenstate_indices = hiddenstate_indices
self._drop_path_keep_prob = drop_path_keep_prob
self._total_num_cells = total_num_cells
self._total_training_steps = total_training_steps
self._use_bounded_activation = use_bounded_activation
def _reduce_prev_layer(self, prev_layer, curr_layer):
"""Matches dimension of prev_layer to the curr_layer."""
# Set the prev layer to the current layer if it is none
if prev_layer is None:
return curr_layer
curr_num_filters = self._filter_size
prev_num_filters = get_channel_dim(prev_layer.shape)
curr_filter_shape = int(curr_layer.shape[2])
prev_filter_shape = int(prev_layer.shape[2])
activation_fn = tf.nn.relu6 if self._use_bounded_activation else tf.nn.relu
if curr_filter_shape != prev_filter_shape:
prev_layer = activation_fn(prev_layer)
prev_layer = factorized_reduction(
prev_layer, curr_num_filters, stride=2)
elif curr_num_filters != prev_num_filters:
prev_layer = activation_fn(prev_layer)
prev_layer = slim.conv2d(
prev_layer, curr_num_filters, 1, scope='prev_1x1')
prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
return prev_layer
def _cell_base(self, net, prev_layer):
"""Runs the beginning of the conv cell before the predicted ops are run."""
num_filters = self._filter_size
# Check to be sure prev layer stuff is setup correctly
prev_layer = self._reduce_prev_layer(prev_layer, net)
net = tf.nn.relu6(net) if self._use_bounded_activation else tf.nn.relu(net)
net = slim.conv2d(net, num_filters, 1, scope='1x1')
net = slim.batch_norm(net, scope='beginning_bn')
    # Wrap in a list so that subsequent hidden states can be appended to it.
net = [net]
net.append(prev_layer)
return net
def __call__(self, net, scope=None, filter_scaling=1, stride=1,
prev_layer=None, cell_num=-1, current_step=None):
"""Runs the conv cell."""
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._num_conv_filters * filter_scaling)
i = 0
with tf.variable_scope(scope):
net = self._cell_base(net, prev_layer)
for iteration in range(5):
with tf.variable_scope('comb_iter_{}'.format(iteration)):
left_hiddenstate_idx, right_hiddenstate_idx = (
self._hiddenstate_indices[i],
self._hiddenstate_indices[i + 1])
original_input_left = left_hiddenstate_idx < 2
original_input_right = right_hiddenstate_idx < 2
h1 = net[left_hiddenstate_idx]
h2 = net[right_hiddenstate_idx]
operation_left = self._operations[i]
operation_right = self._operations[i+1]
i += 2
# Apply conv operations
with tf.variable_scope('left'):
h1 = self._apply_conv_operation(h1, operation_left,
stride, original_input_left,
current_step)
with tf.variable_scope('right'):
h2 = self._apply_conv_operation(h2, operation_right,
stride, original_input_right,
current_step)
# Combine hidden states using 'add'.
with tf.variable_scope('combine'):
h = h1 + h2
if self._use_bounded_activation:
h = tf.nn.relu6(h)
# Add hiddenstate to the list of hiddenstates we can choose from
net.append(h)
with tf.variable_scope('cell_output'):
net = self._combine_unused_states(net)
return net
def _apply_conv_operation(self, net, operation,
stride, is_from_original_input, current_step):
"""Applies the predicted conv operation to net."""
    # Don't stride if this is not one of the original hiddenstates.
if stride > 1 and not is_from_original_input:
stride = 1
input_filters = get_channel_dim(net.shape)
filter_size = self._filter_size
if 'separable' in operation:
net = _stacked_separable_conv(net, stride, operation, filter_size,
self._use_bounded_activation)
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif operation in ['none']:
if self._use_bounded_activation:
net = tf.nn.relu6(net)
# Check if a stride is needed, then use a strided 1x1 here
if stride > 1 or (input_filters != filter_size):
if not self._use_bounded_activation:
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif 'pool' in operation:
net = _pooling(net, stride, operation, self._use_bounded_activation)
if input_filters != filter_size:
net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
else:
raise ValueError('Unimplemented operation', operation)
if operation != 'none':
net = self._apply_drop_path(net, current_step=current_step)
return net
def _combine_unused_states(self, net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
final_num_filters = get_channel_dim(net[-1].shape)
assert len(used_hiddenstates) == len(net)
for idx, used_h in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
curr_num_filters = get_channel_dim(net[idx].shape)
# Determine if a reduction should be applied to make the number of
# filters match.
should_reduce = final_num_filters != curr_num_filters
should_reduce = (final_height != curr_height) or should_reduce
should_reduce = should_reduce and not used_h
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)):
net[idx] = factorized_reduction(
net[idx], final_num_filters, stride)
states_to_combine = (
[h for h, is_used in zip(net, used_hiddenstates) if not is_used])
# Return the concat of all the states
concat_axis = get_channel_index()
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
@slim.add_arg_scope # No public API. For internal use only.
def _apply_drop_path(self, net, current_step=None,
use_summaries=False, drop_connect_version='v3'):
"""Apply drop_path regularization.
Args:
net: the Tensor that gets drop_path regularization applied.
current_step: a float32 Tensor with the current global_step value,
to be divided by hparams.total_training_steps. Usually None, which
        defaults to tf.train.get_or_create_global_step() properly cast.
use_summaries: a Python boolean. If set to False, no summaries are output.
drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether
the dropout rate is scaled by current_step (v1), layer (v2), or
both (v3, the default).
Returns:
The dropped-out value of `net`.
"""
drop_path_keep_prob = self._drop_path_keep_prob
if drop_path_keep_prob < 1.0:
assert drop_connect_version in ['v1', 'v2', 'v3']
if drop_connect_version in ['v2', 'v3']:
# Scale keep prob by layer number
assert self._cell_num != -1
# The added 2 is for the reduction cells
num_cells = self._total_num_cells
layer_ratio = (self._cell_num + 1)/float(num_cells)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('layer_ratio', layer_ratio)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
if drop_connect_version in ['v1', 'v3']:
# Decrease the keep probability over time
if current_step is None:
current_step = tf.train.get_or_create_global_step()
current_step = tf.cast(current_step, tf.float32)
drop_path_burn_in_steps = self._total_training_steps
current_ratio = current_step / drop_path_burn_in_steps
current_ratio = tf.minimum(1.0, current_ratio)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('current_ratio', current_ratio)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob)
net = drop_path(net, drop_path_keep_prob)
return net
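  # Worked example of the default 'v3' schedule (added for illustration): with
  # drop_path_keep_prob=0.6, cell_num=5 out of 12 total cells and training
  # half complete, layer_ratio = 6/12 = 0.5 gives keep_prob = 1 - 0.5*0.4 = 0.8,
  # and current_ratio = 0.5 then gives keep_prob = 1 - 0.5*0.2 = 0.9, which is
  # the value drop_path() finally receives.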
class NasNetANormalCell(NasNetABaseCell):
"""NASNetA Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_3x3_2',
'separable_5x5_2',
'separable_3x3_2',
'avg_pool_3x3',
'none',
'avg_pool_3x3',
'avg_pool_3x3',
'separable_3x3_2',
'none']
used_hiddenstates = [1, 0, 0, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0]
super(NasNetANormalCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
class NasNetAReductionCell(NasNetABaseCell):
"""NASNetA Reduction Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_7x7_2',
'max_pool_3x3',
'separable_7x7_2',
'avg_pool_3x3',
'separable_5x5_2',
'none',
'avg_pool_3x3',
'separable_3x3_2',
'max_pool_3x3']
used_hiddenstates = [1, 1, 1, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0]
super(NasNetAReductionCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
| 20,453 | 37.519774 | 80 | py |
models | models-master/research/slim/nets/nasnet/nasnet_utils_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.nasnet.nasnet_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from nets.nasnet import nasnet_utils
class NasnetUtilsTest(tf.test.TestCase):
def testCalcReductionLayers(self):
num_cells = 18
num_reduction_layers = 2
reduction_layers = nasnet_utils.calc_reduction_layers(
num_cells, num_reduction_layers)
self.assertEqual(len(reduction_layers), 2)
self.assertEqual(reduction_layers[0], 6)
self.assertEqual(reduction_layers[1], 12)
def testGetChannelIndex(self):
data_formats = ['NHWC', 'NCHW']
for data_format in data_formats:
index = nasnet_utils.get_channel_index(data_format)
correct_index = 3 if data_format == 'NHWC' else 1
self.assertEqual(index, correct_index)
def testGetChannelDim(self):
data_formats = ['NHWC', 'NCHW']
shape = [10, 20, 30, 40]
for data_format in data_formats:
dim = nasnet_utils.get_channel_dim(shape, data_format)
correct_dim = shape[3] if data_format == 'NHWC' else shape[1]
self.assertEqual(dim, correct_dim)
def testGlobalAvgPool(self):
data_formats = ['NHWC', 'NCHW']
inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
for data_format in data_formats:
output = nasnet_utils.global_avg_pool(
inputs, data_format)
self.assertEqual(output.shape, [5, 10])
def test_factorized_reduction(self):
data_format = 'NHWC'
output_shape = (5, 10, 20, 16)
inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
output = nasnet_utils.factorized_reduction(
inputs, 16, stride=1, data_format=data_format)
self.assertSequenceEqual(output_shape, output.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 2,508 | 34.842857 | 80 | py |
models | models-master/research/slim/nets/nasnet/pnasnet_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.pnasnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.nasnet import pnasnet
class PNASNetTest(tf.test.TestCase):
def testBuildLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildNonExistingLayerLargeModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random.uniform((5, 331, 331, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
pnasnet.build_pnasnet_large(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildNonExistingLayerMobileModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random.uniform((5, 224, 224, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
pnasnet.build_pnasnet_mobile(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildPreLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])
def testBuildPreLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
net, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1080])
def testAllEndPointsShapesLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 42, 42, 540],
'Cell_0': [batch_size, 42, 42, 1080],
'Cell_1': [batch_size, 42, 42, 1080],
'Cell_2': [batch_size, 42, 42, 1080],
'Cell_3': [batch_size, 42, 42, 1080],
'Cell_4': [batch_size, 21, 21, 2160],
'Cell_5': [batch_size, 21, 21, 2160],
'Cell_6': [batch_size, 21, 21, 2160],
'Cell_7': [batch_size, 21, 21, 2160],
'Cell_8': [batch_size, 11, 11, 4320],
'Cell_9': [batch_size, 11, 11, 4320],
'Cell_10': [batch_size, 11, 11, 4320],
'Cell_11': [batch_size, 11, 11, 4320],
'global_pool': [batch_size, 4320],
# Logits and predictions
'AuxLogits': [batch_size, 1000],
'Predictions': [batch_size, 1000],
'Logits': [batch_size, 1000],
}
self.assertEqual(len(end_points), 17)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testAllEndPointsShapesMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
endpoints_shapes = {
'Stem': [batch_size, 28, 28, 135],
'Cell_0': [batch_size, 28, 28, 270],
'Cell_1': [batch_size, 28, 28, 270],
'Cell_2': [batch_size, 28, 28, 270],
'Cell_3': [batch_size, 14, 14, 540],
'Cell_4': [batch_size, 14, 14, 540],
'Cell_5': [batch_size, 14, 14, 540],
'Cell_6': [batch_size, 7, 7, 1080],
'Cell_7': [batch_size, 7, 7, 1080],
'Cell_8': [batch_size, 7, 7, 1080],
'global_pool': [batch_size, 1080],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
}
self.assertEqual(len(end_points), 14)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testNoAuxHeadMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testOverrideHParamsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 540, 42, 42])
def testOverrideHParamsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertListEqual(end_points['Stem'].shape.as_list(),
[batch_size, 135, 28, 28])
  def testUseBoundedActivationMobileModel(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
for use_bounded_activation in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, _ = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
for node in tf.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation)
if __name__ == '__main__':
tf.test.main()
| 11,211 | 42.796875 | 80 | py |
models | models-master/research/slim/nets/nasnet/nasnet.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for the NASNet classification networks.
Paper: https://arxiv.org/abs/1707.07012
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import training as contrib_training
from nets.nasnet import nasnet_utils
arg_scope = slim.arg_scope
# Notes for training NASNet Cifar Model
# -------------------------------------
# batch_size: 32
# learning rate: 0.025
# cosine (single period) learning rate decay
# auxiliary head loss weighting: 0.4
# clip global norm of all gradients by 5
def cifar_config():
return contrib_training.HParams(
stem_multiplier=3.0,
drop_path_keep_prob=0.6,
num_cells=18,
use_aux_head=1,
num_conv_filters=32,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=0,
# 600 epochs with a batch size of 32
# This is used for the drop path probabilities since it needs to increase
# the drop out probability over the course of training.
total_training_steps=937500,
use_bounded_activation=False,
)
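# Hedged usage sketch (added for documentation; mirrors the override tests in
# nasnet_test.py): the returned HParams can be adjusted before being passed
# back through the `config` argument of build_nasnet_cifar, e.g.
#
#   config = cifar_config()
#   config.set_hparam('use_aux_head', 0)
#   config.set_hparam('data_format', 'NCHW')
#   logits, end_points = build_nasnet_cifar(images, 10, config=config)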
# Notes for training large NASNet model on ImageNet
# -------------------------------------
# batch size (per replica): 16
# learning rate: 0.015 * 100
# learning rate decay factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 100 replicas
# auxiliary head loss weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def large_imagenet_config():
return contrib_training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=18,
filter_scaling_rate=2.0,
num_conv_filters=168,
drop_path_keep_prob=0.7,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
# Notes for training the mobile NASNet ImageNet model
# -------------------------------------
# batch size (per replica): 32
# learning rate: 0.04 * 50
# learning rate scaling factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 50 replicas
# auxiliary head weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def mobile_imagenet_config():
return contrib_training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
drop_path_keep_prob=1.0,
num_conv_filters=44,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=0,
total_training_steps=250000,
use_bounded_activation=False,
)
def _update_hparams(hparams, is_training):
"""Update hparams for given is_training option."""
if not is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
def nasnet_cifar_arg_scope(weight_decay=5e-4,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5):
"""Defines the default arg scope for the NASNet-A Cifar model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Cifar Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = slim.l2_regularizer(weight_decay)
weights_initializer = slim.variance_scaling_initializer(mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Mobile ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Mobile Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = slim.l2_regularizer(weight_decay)
weights_initializer = slim.variance_scaling_initializer(mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_large_arg_scope(weight_decay=5e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Large ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Large Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = slim.l2_regularizer(weight_decay)
weights_initializer = slim.variance_scaling_initializer(mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def _build_aux_head(net, end_points, num_classes, hparams, scope):
"""Auxiliary head used for all models across all datasets."""
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
with tf.variable_scope(scope):
aux_logits = tf.identity(net)
with tf.variable_scope('aux_logits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
aux_logits = activation_fn(aux_logits)
# Shape of feature map before the final layer.
shape = aux_logits.shape
if hparams.data_format == 'NHWC':
shape = shape[1:3]
else:
shape = shape[2:4]
aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
aux_logits = activation_fn(aux_logits)
aux_logits = slim.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes)
end_points['AuxLogits'] = aux_logits
def _imagenet_stem(inputs, hparams, stem_cell, current_step=None):
"""Stem used for models trained on ImageNet."""
num_stem_cells = 2
# 149 x 149 x 32
num_stem_filters = int(32 * hparams.stem_multiplier)
net = slim.conv2d(
inputs, num_stem_filters, [3, 3], stride=2, scope='conv0',
padding='VALID')
net = slim.batch_norm(net, scope='conv0_bn')
# Run the reduction cells
cell_outputs = [None, net]
filter_scaling = 1.0 / (hparams.filter_scaling_rate**num_stem_cells)
for cell_num in range(num_stem_cells):
net = stem_cell(
net,
scope='cell_stem_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=cell_num,
current_step=current_step)
cell_outputs.append(net)
filter_scaling *= hparams.filter_scaling_rate
return net, cell_outputs
def _cifar_stem(inputs, hparams):
"""Stem used for models trained on Cifar."""
num_stem_filters = int(hparams.num_conv_filters * hparams.stem_multiplier)
net = slim.conv2d(
inputs,
num_stem_filters,
3,
scope='l1_stem_3x3')
net = slim.batch_norm(net, scope='l1_stem_bn')
return net, [None, net]
def build_nasnet_cifar(images, num_classes,
is_training=True,
config=None,
current_step=None):
"""Build NASNet model for the Cifar Dataset."""
hparams = cifar_config() if config is None else copy.deepcopy(config)
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='cifar',
current_step=current_step)
build_nasnet_cifar.default_image_size = 32
def build_nasnet_mobile(images, num_classes,
is_training=True,
final_endpoint=None,
config=None,
current_step=None):
"""Build NASNet Mobile model for the ImageNet Dataset."""
hparams = (mobile_imagenet_config() if config is None
else copy.deepcopy(config))
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint,
current_step=current_step)
build_nasnet_mobile.default_image_size = 224
def build_nasnet_large(images, num_classes,
is_training=True,
final_endpoint=None,
config=None,
current_step=None):
"""Build NASNet Large model for the ImageNet Dataset."""
hparams = (large_imagenet_config() if config is None
else copy.deepcopy(config))
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint,
current_step=current_step)
build_nasnet_large.default_image_size = 331
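# Illustrative sketch (not part of the original library code): the builders
# above are typically wrapped in their matching arg scope so that the weight
# regularizer, initializer and batch-norm settings are applied consistently.
# The helper below is an assumption-only example; `images` is expected to be a
# 4-D float tensor of shape [batch, 331, 331, 3].
def _example_build_nasnet_large_for_inference(images, num_classes=1001):
  """Minimal usage sketch combining the arg scope with build_nasnet_large."""
  with arg_scope(nasnet_large_arg_scope()):
    logits, end_points = build_nasnet_large(
        images, num_classes=num_classes, is_training=False)
  return logits, end_points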
def _build_nasnet_base(images,
normal_cell,
reduction_cell,
num_classes,
hparams,
is_training,
stem_type,
final_endpoint=None,
current_step=None):
"""Constructs a NASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
stem_cell = reduction_cell
if stem_type == 'imagenet':
stem = lambda: _imagenet_stem(images, hparams, stem_cell)
elif stem_type == 'cifar':
stem = lambda: _cifar_stem(images, hparams)
else:
    raise ValueError('Unknown stem_type: %s' % stem_type)
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net): return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2 if stem_type == 'imagenet' else 0
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
for cell_num in range(hparams.num_cells):
stride = 1
if hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
if cell_num in reduction_indices:
filter_scaling *= hparams.filter_scaling_rate
net = reduction_cell(
net,
scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=true_cell_num,
current_step=current_step)
if add_and_check_endpoint(
'Reduction_Cell_{}'.format(reduction_indices.index(cell_num)), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if not hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num,
current_step=current_step)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = activation_fn(net)
_build_aux_head(aux_net, end_points, num_classes, hparams,
scope='aux_{}'.format(cell_num))
cell_outputs.append(net)
# Final softmax layer
with tf.variable_scope('final_layer'):
net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
| 20,805 | 36.897996 | 80 | py |
models | models-master/research/slim/nets/nasnet/__init__.py | 1 | 0 | 0 | py |
|
models | models-master/research/slim/nets/mobilenet/mobilenet_v2.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Mobilenet V2.
Architecture: https://arxiv.org/abs/1801.04381
The base model gives 72.2% accuracy on ImageNet, with 300MMadds,
3.4 M parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
op = lib.op
expand_input = ops.expand_input_by_factor
# pyformat: disable
# Architecture: https://arxiv.org/abs/1801.04381
V2_DEF = dict(
defaults={
# Note: these parameters of batch norm affect the architecture
# that's why they are here and not in training_scope.
(slim.batch_norm,): {'center': True, 'scale': True},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
},
(ops.expanded_conv,): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(ops.expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16),
op(ops.expanded_conv, stride=2, num_outputs=24),
op(ops.expanded_conv, stride=1, num_outputs=24),
op(ops.expanded_conv, stride=2, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=2, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=2, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=320),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
],
)
# pyformat: enable
# Mobilenet v2 Definition with group normalization.
V2_DEF_GROUP_NORM = copy.deepcopy(V2_DEF)
V2_DEF_GROUP_NORM['defaults'] = {
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.group_norm, # pylint: disable=C0330
'activation_fn': tf.nn.relu6, # pylint: disable=C0330
}, # pylint: disable=C0330
(ops.expanded_conv,): {
'expansion_size': ops.expand_input_by_factor(6),
'split_expansion': 1,
'normalizer_fn': slim.group_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {
'padding': 'SAME'
}
}
@slim.add_arg_scope
def mobilenet(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='MobilenetV2',
conv_defs=None,
finegrain_classification_mode=False,
min_depth=None,
divisible_by=None,
activation_fn=None,
**kwargs):
"""Creates mobilenet V2 network.
  Inference mode is created by default. To create a training graph, use the
  training_scope below.
with slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer.
scope: Scope of the operator
conv_defs: Allows to override default conv def.
    finegrain_classification_mode: When set to True, the model
      will keep the last layer large even for small multipliers.
      https://arxiv.org/abs/1801.04381 suggests that this improves performance
      for ImageNet-type problems.
      *Note* ignored if final_endpoint makes the builder exit earlier.
min_depth: If provided, will ensure that all layers will have that
many channels after application of depth multiplier.
divisible_by: If provided will ensure that all layers # channels
will be divisible by this number.
activation_fn: Activation function to use, defaults to tf.nn.relu6 if not
specified.
**kwargs: passed directly to mobilenet.mobilenet:
prediction_fn- what prediction function to use.
      reuse- whether to reuse variables (if reuse is set to true, scope
must be given).
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if conv_defs is None:
conv_defs = V2_DEF
if 'multiplier' in kwargs:
    raise ValueError('mobilenetv2 doesn\'t support generic '
                     'multiplier parameter, use "depth_multiplier" instead.')
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
if depth_multiplier < 1:
conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
if activation_fn:
conv_defs = copy.deepcopy(conv_defs)
defaults = conv_defs['defaults']
conv_defaults = (
defaults[(slim.conv2d, slim.fully_connected, slim.separable_conv2d)])
conv_defaults['activation_fn'] = activation_fn
depth_args = {}
# NB: do not set depth_args unless they are provided to avoid overriding
# whatever default depth_multiplier might have thanks to arg_scope.
if min_depth is not None:
depth_args['min_depth'] = min_depth
if divisible_by is not None:
depth_args['divisible_by'] = divisible_by
with slim.arg_scope((lib.depth_multiplier,), **depth_args):
return lib.mobilenet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
multiplier=depth_multiplier,
**kwargs)
mobilenet.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
# Wrappers for mobilenet v2 with depth-multipliers. Note that
# 'finegrain_classification_mode' is set to True, which means the embedding
# layer will not be shrunk when given a depth multiplier < 1.0.
mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.4)
mobilenet_v2_050 = wrapped_partial(mobilenet, depth_multiplier=0.50,
finegrain_classification_mode=True)
mobilenet_v2_035 = wrapped_partial(mobilenet, depth_multiplier=0.35,
finegrain_classification_mode=True)
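# Illustrative sketch (not part of the original file): the wrappers above are
# plain callables, so a reduced-width model can be built directly. Inference
# mode is the default; wrap the call in training_scope() for training. The
# input tensor shape is an assumption for the example only.
def _example_mobilenet_v2_035_logits(images, num_classes=1001):
  """Minimal usage sketch for the 0.35 depth-multiplier MobilenetV2."""
  logits, endpoints = mobilenet_v2_035(images, num_classes=num_classes)
  return logits, endpoints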
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
return mobilenet(input_tensor,
depth_multiplier=depth_multiplier,
base_only=True, **kwargs)
@slim.add_arg_scope
def mobilenet_base_group_norm(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
kwargs['conv_defs'] = V2_DEF_GROUP_NORM
kwargs['conv_defs']['defaults'].update({
(slim.group_norm,): {
'groups': kwargs.pop('groups', 8)
}
})
return mobilenet(
input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs)
def training_scope(**kwargs):
"""Defines MobilenetV2 training scope.
Usage:
with slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
**kwargs: Passed to mobilenet.training_scope. The following parameters
are supported:
weight_decay- The weight decay to use for regularizing the model.
stddev- Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob- dropout keep probability
bn_decay- decay for the batch norm moving averages.
Returns:
An `arg_scope` to use for the mobilenet v2 model.
"""
return lib.training_scope(**kwargs)
__all__ = ['training_scope', 'mobilenet_base', 'mobilenet', 'V2_DEF']
| 8,994 | 35.714286 | 80 | py |
models | models-master/research/slim/nets/mobilenet/conv_blocks.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolution blocks for mobilenet."""
import contextlib
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
  kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
                           kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
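# Worked example (added for illustration; values traced through the function
# above): channel counts are rounded to the nearest multiple of `divisor`, and
# bumped up one step whenever rounding would drop below 90% of the request.
def _example_make_divisible():
  """Sketch showing how _make_divisible rounds channel counts."""
  assert _make_divisible(30, 8) == 32  # nearest multiple of 8 is 32
  assert _make_divisible(17, 8) == 16  # rounds down when that is closer
  assert _make_divisible(10, 8) == 16  # 8 would be < 0.9 * 10, so bump to 16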
def _split_divisible(num, num_ways, divisible_by=8):
"""Evenly splits num, num_ways so each piece is a multiple of divisible_by."""
assert num % divisible_by == 0
assert num / num_ways >= divisible_by
# Note: want to round down, we adjust each split to match the total.
base = num // num_ways // divisible_by * divisible_by
result = []
accumulated = 0
for i in range(num_ways):
r = base
while accumulated + r < num * (i + 1) / num_ways:
r += divisible_by
result.append(r)
accumulated += r
assert accumulated == num
return result
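# Worked example (added for illustration; values traced through the function
# above): the splits always sum to `num`, with earlier pieces absorbing any
# remainder in `divisible_by`-sized steps.
def _example_split_divisible():
  """Sketch showing how _split_divisible partitions channel counts."""
  assert _split_divisible(48, 3, divisible_by=8) == [16, 16, 16]
  assert _split_divisible(56, 3, divisible_by=8) == [24, 16, 16]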
@contextlib.contextmanager
def _v1_compatible_scope_naming(scope):
"""v1 compatible scope naming."""
  if scope is None:  # Create uniquified separable blocks.
with tf.variable_scope(None, default_name='separable') as s, \
tf.name_scope(s.original_name_scope):
yield ''
else:
    # We use scope_depthwise, scope_pointwise for compatibility with V1 ckpts,
    # which provide numbered scopes.
scope += '_'
yield scope
@slim.add_arg_scope
def split_separable_conv2d(input_tensor,
num_outputs,
scope=None,
normalizer_fn=None,
stride=1,
rate=1,
endpoints=None,
use_explicit_padding=False):
"""Separable mobilenet V1 style convolution.
  Depthwise convolution, with default non-linearity,
  followed by a 1x1 pointwise convolution. This is similar to
  slim.separable_conv2d, but differs in that it applies batch
  normalization and non-linearity to the depthwise convolution. This matches
  the basic building block of the Mobilenet paper
  (https://arxiv.org/abs/1704.04861).
Args:
input_tensor: input
num_outputs: number of outputs
scope: optional name of the scope. Note if provided it will use
      scope_depthwise for depthwise, and scope_pointwise for pointwise.
normalizer_fn: which normalizer function to use for depthwise/pointwise
stride: stride
rate: output rate (also known as dilation rate)
endpoints: optional, if provided, will export additional tensors to it.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
Returns:
    output tensor
"""
with _v1_compatible_scope_naming(scope) as scope:
dw_scope = scope + 'depthwise'
endpoints = endpoints if endpoints is not None else {}
kernel_size = [3, 3]
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
input_tensor = _fixed_padding(input_tensor, kernel_size, rate)
net = slim.separable_conv2d(
input_tensor,
None,
kernel_size,
depth_multiplier=1,
stride=stride,
rate=rate,
normalizer_fn=normalizer_fn,
padding=padding,
scope=dw_scope)
endpoints[dw_scope] = net
pw_scope = scope + 'pointwise'
net = slim.conv2d(
net,
num_outputs, [1, 1],
stride=1,
normalizer_fn=normalizer_fn,
scope=pw_scope)
endpoints[pw_scope] = net
return net
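# Illustrative sketch (not part of the original file): a typical call to the
# V1-style separable block above -- a batch-normalized 3x3 depthwise followed
# by a 1x1 pointwise projection to 64 channels. The NHWC input is an
# assumption for the example only.
def _example_split_separable_conv2d(input_tensor):
  """Minimal usage sketch for split_separable_conv2d."""
  return split_separable_conv2d(
      input_tensor, num_outputs=64, normalizer_fn=slim.batch_norm)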
def expand_input_by_factor(n, divisible_by=8):
return lambda num_inputs, **_: _make_divisible(num_inputs * n, divisible_by)
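# Worked example (added for illustration): the factory above returns a callable
# that expanded_conv uses to size its expansion layer from the input depth.
def _example_expand_input_by_factor():
  """Sketch: a 6x expansion of a 16-channel input yields 96 channels."""
  assert expand_input_by_factor(6)(num_inputs=16) == 96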
def split_conv(input_tensor,
num_outputs,
num_ways,
scope,
divisible_by=8,
**kwargs):
"""Creates a split convolution.
  Split convolution splits the input and output into
  'num_ways' blocks of approximately the same size each,
  and only connects the $i$-th input block to the $i$-th output block.
Args:
input_tensor: input tensor
num_outputs: number of output filters
num_ways: num blocks to split by.
scope: scope for all the operators.
    divisible_by: make sure that every part is divisible by this.
**kwargs: will be passed directly into conv2d operator
Returns:
tensor
"""
b = input_tensor.get_shape().as_list()[3]
if num_ways == 1 or min(b // num_ways,
num_outputs // num_ways) < divisible_by:
    # Don't do any splitting if we would end up with fewer than `divisible_by`
    # filters on either side.
return slim.conv2d(input_tensor, num_outputs, [1, 1], scope=scope, **kwargs)
outs = []
input_splits = _split_divisible(b, num_ways, divisible_by=divisible_by)
output_splits = _split_divisible(
num_outputs, num_ways, divisible_by=divisible_by)
inputs = tf.split(input_tensor, input_splits, axis=3, name='split_' + scope)
base = scope
for i, (input_tensor, out_size) in enumerate(zip(inputs, output_splits)):
scope = base + '_part_%d' % (i,)
n = slim.conv2d(input_tensor, out_size, [1, 1], scope=scope, **kwargs)
n = tf.identity(n, scope + '_output')
outs.append(n)
return tf.concat(outs, 3, name=scope + '_concat')
@slim.add_arg_scope
def expanded_conv(input_tensor,
num_outputs,
expansion_size=expand_input_by_factor(6),
stride=1,
rate=1,
kernel_size=(3, 3),
residual=True,
normalizer_fn=None,
split_projection=1,
split_expansion=1,
split_divisible_by=8,
expansion_transform=None,
depthwise_location='expansion',
depthwise_channel_multiplier=1,
endpoints=None,
use_explicit_padding=False,
padding='SAME',
inner_activation_fn=None,
depthwise_activation_fn=None,
project_activation_fn=tf.identity,
depthwise_fn=slim.separable_conv2d,
expansion_fn=split_conv,
projection_fn=split_conv,
scope=None):
"""Depthwise Convolution Block with expansion.
Builds a composite convolution that has the following structure
expansion (1x1) -> depthwise (kernel_size) -> projection (1x1)
Args:
input_tensor: input
num_outputs: number of outputs in the final layer.
    expansion_size: the size of expansion, could be a constant or a callable.
      If the latter, it will be provided 'num_inputs' as an input. For forward
      compatibility it should accept arbitrary keyword arguments.
      Default will expand the input by a factor of 6.
stride: depthwise stride
rate: depthwise rate
kernel_size: depthwise kernel
residual: whether to include residual connection between input
and output.
normalizer_fn: batchnorm or otherwise
split_projection: how many ways to split projection operator
(that is conv expansion->bottleneck)
    split_expansion: how many ways to split the expansion op
      (that is conv bottleneck->expansion); ops will keep depth divisible
      by this value.
split_divisible_by: make sure every split group is divisible by this number.
expansion_transform: Optional function that takes expansion
as a single input and returns output.
    depthwise_location: where to put the depthwise convolutions. Supported
      values: None, 'input', 'output', 'expansion'.
    depthwise_channel_multiplier: depthwise channel multiplier:
      each input will be replicated (with different filters)
      that many times. So if the input had c channels,
      the output will have c x depthwise_channel_multiplier.
endpoints: An optional dictionary into which intermediate endpoints are
placed. The keys "expansion_output", "depthwise_output",
"projection_output" and "expansion_transform" are always populated, even
if the corresponding functions are not invoked.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
padding: Padding type to use if `use_explicit_padding` is not set.
inner_activation_fn: activation function to use in all inner convolutions.
If none, will rely on slim default scopes.
    depthwise_activation_fn: activation function to use for depthwise only.
If not provided will rely on slim default scopes. If both
inner_activation_fn and depthwise_activation_fn are provided,
depthwise_activation_fn takes precedence over inner_activation_fn.
project_activation_fn: activation function for the project layer.
(note this layer is not affected by inner_activation_fn)
depthwise_fn: Depthwise convolution function.
    expansion_fn: Expansion convolution function. If a custom function is used,
      then "split_expansion" and "split_divisible_by" will be ignored.
    projection_fn: Projection convolution function. If a custom function is
      used, then "split_projection" and "split_divisible_by" will be ignored.
scope: optional scope.
Returns:
Tensor of depth num_outputs
Raises:
    TypeError: on invalid arguments.
"""
conv_defaults = {}
dw_defaults = {}
if inner_activation_fn is not None:
conv_defaults['activation_fn'] = inner_activation_fn
dw_defaults['activation_fn'] = inner_activation_fn
if depthwise_activation_fn is not None:
dw_defaults['activation_fn'] = depthwise_activation_fn
# pylint: disable=g-backslash-continuation
with tf.variable_scope(scope, default_name='expanded_conv') as s, \
tf.name_scope(s.original_name_scope), \
slim.arg_scope((slim.conv2d,), **conv_defaults), \
slim.arg_scope((slim.separable_conv2d,), **dw_defaults):
prev_depth = input_tensor.get_shape().as_list()[3]
if depthwise_location not in [None, 'input', 'output', 'expansion']:
raise TypeError('%r is unknown value for depthwise_location' %
depthwise_location)
if use_explicit_padding:
if padding != 'SAME':
raise TypeError('`use_explicit_padding` should only be used with '
'"SAME" padding.')
padding = 'VALID'
depthwise_func = functools.partial(
depthwise_fn,
num_outputs=None,
kernel_size=kernel_size,
depth_multiplier=depthwise_channel_multiplier,
stride=stride,
rate=rate,
normalizer_fn=normalizer_fn,
padding=padding,
scope='depthwise')
# b1 -> b2 * r -> b2
# i -> (o * r) (bottleneck) -> o
input_tensor = tf.identity(input_tensor, 'input')
net = input_tensor
if depthwise_location == 'input':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if callable(expansion_size):
inner_size = expansion_size(num_inputs=prev_depth)
else:
inner_size = expansion_size
if inner_size > net.shape[3]:
if expansion_fn == split_conv:
expansion_fn = functools.partial(
expansion_fn,
num_ways=split_expansion,
divisible_by=split_divisible_by,
stride=1)
net = expansion_fn(
net,
inner_size,
scope='expand',
normalizer_fn=normalizer_fn)
net = tf.identity(net, 'expansion_output')
if endpoints is not None:
endpoints['expansion_output'] = net
if depthwise_location == 'expansion':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if expansion_transform:
net = expansion_transform(expansion_tensor=net, input_tensor=input_tensor)
# Note in contrast with expansion, we always have
# projection to produce the desired output size.
if projection_fn == split_conv:
projection_fn = functools.partial(
projection_fn,
num_ways=split_projection,
divisible_by=split_divisible_by,
stride=1)
net = projection_fn(
net,
num_outputs,
scope='project',
normalizer_fn=normalizer_fn,
activation_fn=project_activation_fn)
if endpoints is not None:
endpoints['projection_output'] = net
if depthwise_location == 'output':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if callable(residual): # custom residual
net = residual(input_tensor=input_tensor, output_tensor=net)
elif (residual and
# stride check enforces that we don't add residuals when spatial
# dimensions are None
stride == 1 and
# Depth matches
net.get_shape().as_list()[3] ==
input_tensor.get_shape().as_list()[3]):
net += input_tensor
return tf.identity(net, name='output')
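# Illustrative sketch (not part of the original file): one stride-1 inverted
# residual block as used by MobilenetV2 -- 1x1 expansion by 6x, 3x3 depthwise,
# 1x1 linear projection, plus the residual connection that fires because the
# stride is 1 and the output depth matches the input depth. The NHWC input
# with a static channel dimension is an assumption for the example only.
def _example_expanded_conv_block(input_tensor):
  """Minimal usage sketch for a residual expanded_conv block."""
  num_channels = input_tensor.get_shape().as_list()[3]
  return expanded_conv(
      input_tensor,
      num_outputs=num_channels,
      expansion_size=expand_input_by_factor(6),
      stride=1,
      normalizer_fn=slim.batch_norm)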
@slim.add_arg_scope
def squeeze_excite(input_tensor,
divisible_by=8,
squeeze_factor=3,
inner_activation_fn=tf.nn.relu,
gating_fn=tf.sigmoid,
squeeze_input_tensor=None,
pool=None):
"""Squeeze excite block for Mobilenet V3.
If the squeeze_input_tensor - or the input_tensor if squeeze_input_tensor is
None - contains variable dimensions (Nonetype in tensor shape), perform
average pooling (as the first step in the squeeze operation) by calling
reduce_mean across the H/W of the input tensor.
Args:
input_tensor: input tensor to apply SE block to.
divisible_by: ensures all inner dimensions are divisible by this number.
squeeze_factor: the factor of squeezing in the inner fully connected layer
inner_activation_fn: non-linearity to be used in inner layer.
gating_fn: non-linearity to be used for final gating function
squeeze_input_tensor: custom tensor to use for computing gating activation.
If provided the result will be input_tensor * SE(squeeze_input_tensor)
instead of input_tensor * SE(input_tensor).
pool: if number is provided will average pool with that kernel size
to compute inner tensor, followed by bilinear upsampling.
Returns:
Gated input_tensor. (e.g. X * SE(X))
"""
with tf.variable_scope('squeeze_excite'):
if squeeze_input_tensor is None:
squeeze_input_tensor = input_tensor
input_size = input_tensor.shape.as_list()[1:3]
pool_height, pool_width = squeeze_input_tensor.shape.as_list()[1:3]
stride = 1
if pool is not None and pool_height >= pool:
pool_height, pool_width, stride = pool, pool, pool
input_channels = squeeze_input_tensor.shape.as_list()[3]
output_channels = input_tensor.shape.as_list()[3]
squeeze_channels = _make_divisible(
input_channels / squeeze_factor, divisor=divisible_by)
if pool is None:
pooled = tf.reduce_mean(squeeze_input_tensor, axis=[1, 2], keepdims=True)
else:
pooled = tf.nn.avg_pool(
squeeze_input_tensor, (1, pool_height, pool_width, 1),
strides=(1, stride, stride, 1),
padding='VALID')
squeeze = slim.conv2d(
pooled,
kernel_size=(1, 1),
num_outputs=squeeze_channels,
normalizer_fn=None,
activation_fn=inner_activation_fn)
excite_outputs = output_channels
excite = slim.conv2d(squeeze, num_outputs=excite_outputs,
kernel_size=[1, 1],
normalizer_fn=None,
activation_fn=gating_fn)
if pool is not None:
# Note: As of 03/20/2019 only BILINEAR (the default) with
# align_corners=True has gradients implemented in TPU.
excite = tf.image.resize_images(
excite, input_size,
align_corners=True)
result = input_tensor * excite
return result
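# Illustrative sketch (not part of the original file): gating a feature map
# with the squeeze-excite block above. squeeze_factor=4 mirrors the reduction
# commonly used by MobilenetV3 bottlenecks; the input is assumed to be NHWC
# with a static channel dimension.
def _example_squeeze_excite(feature_map):
  """Minimal usage sketch for squeeze_excite gating."""
  return squeeze_excite(feature_map, squeeze_factor=4)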
| 18,334 | 37.681435 | 80 | py |
models | models-master/research/slim/nets/mobilenet/mobilenet_v2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
def find_ops(optype):
"""Find ops of a given type in graphdef or a graph.
Args:
optype: operation type (e.g. Conv2D)
Returns:
List of operations.
"""
gd = tf.get_default_graph()
return [var for var in gd.get_operations() if var.type == optype]
class MobilenetV2Test(tf.test.TestCase):
def testCreation(self):
spec = dict(mobilenet_v2.V2_DEF)
_, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
# This is mostly a sanity test. No deep reason for these particular
# constants.
#
    # All but the first 2 and the last one have two convolutions, and there is
    # one extra conv that is not in the spec (the logits).
self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)
# Check that depthwise are exposed.
for i in range(2, 17):
self.assertIn('layer_%d/depthwise_output' % i, ep)
def testCreationNoClasses(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
net, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=spec,
num_classes=None)
self.assertIs(net, ep['global_pool'])
def testImageSizes(self):
for input_size, output_size in [(224, 7), (192, 6), (160, 5),
(128, 4), (96, 3)]:
tf.reset_default_graph()
_, ep = mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, input_size, input_size, 3)))
self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],
[output_size] * 2)
def testWithSplits(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
spec['overrides'] = {
(ops.expanded_conv,): dict(split_expansion=2),
}
_, _ = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
    # All but 3 ops have 3 conv operators, the remaining 3 have one,
    # and there is one unaccounted for.
self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)
def testWithOutputStride8(self):
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testDivisibleBy(self):
tf.reset_default_graph()
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
divisible_by=16,
min_depth=32)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,
1001], s)
def testDivisibleByWithArgScope(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF,
depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements(s, [32, 192, 128, 1001])
def testFineGrained(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF,
depth_multiplier=0.01,
finegrain_classification_mode=True)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
# All convolutions will be 8->48, except for the last one.
self.assertSameElements(s, [8, 48, 1001, 1280])
def testMobilenetBase(self):
tf.reset_default_graph()
# Verifies that mobilenet_base returns pre-pooling layer.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
net, _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
depth_multiplier=0.1)
self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])
def testWithOutputStride16(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testMultiplier(self):
op = mobilenet.op
new_def = copy.deepcopy(mobilenet_v2.V2_DEF)
def inverse_multiplier(output_params, multiplier):
output_params['num_outputs'] = int(
output_params['num_outputs'] / multiplier)
new_def['spec'][0] = op(
slim.conv2d,
kernel_size=(3, 3),
multiplier_func=inverse_multiplier,
num_outputs=16)
_ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=new_def,
depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
# Expect first layer to be 160 (16 / 0.1), and other layers
# their max(original size * 0.1, 8)
self.assertEqual([160, 8, 48, 8, 48], s[:5])
def testWithOutputStride8AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testWithOutputStride16AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16,
use_explicit_padding=True)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
sc = mobilenet.training_scope(is_training=None)
self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
slim.batch_norm)])
def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
sc = mobilenet.training_scope(is_training=False)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope(is_training=True)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope()
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
if __name__ == '__main__':
tf.test.main()
| 7,902 | 35.929907 | 80 | py |
models | models-master/research/slim/nets/mobilenet/mobilenet_v3.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet V3 conv defs and helper functions.
# pylint: disable=line-too-long
Model definitions and layer breakdowns:
==================
==== V3 LARGE ====
==================
op name ActMem ConvInput ConvParameters Madds OutputTensor
Conv2D MobilenetV3/Conv/Conv2D 351.2 k 1x224x224x3 432.0 5.42 M 1x112x112x16
Relu6 MobilenetV3/Conv/hard_swish/Relu6 ? - ? ? 1x112x112x16
DepthConv MobilenetV3/expanded_conv/depthwise/depthwise 401.4 k - 144.0 1.81 M 1x112x112x16
Relu MobilenetV3/expanded_conv/depthwise/Relu ? - ? ? 1x112x112x16
Conv2D MobilenetV3/expanded_conv/project/Conv2D 401.4 k 1x112x112x16 256.0 3.21 M 1x112x112x16
Conv2D MobilenetV3/expanded_conv_1/expand/Conv2D 1.00 M 1x112x112x16 1.02 k 12.8 M 1x112x112x64
Relu MobilenetV3/expanded_conv_1/expand/Relu ? - ? ? 1x112x112x64
DepthConv MobilenetV3/expanded_conv_1/depthwise/depthwise 1.00 M - 576.0 1.81 M 1x56x56x64
Relu MobilenetV3/expanded_conv_1/depthwise/Relu ? - ? ? 1x56x56x64
Conv2D MobilenetV3/expanded_conv_1/project/Conv2D 276.0 k 1x56x56x64 1.54 k 4.82 M 1x56x56x24
Conv2D MobilenetV3/expanded_conv_2/expand/Conv2D 301.1 k 1x56x56x24 1.73 k 5.42 M 1x56x56x72
Relu MobilenetV3/expanded_conv_2/expand/Relu ? - ? ? 1x56x56x72
DepthConv MobilenetV3/expanded_conv_2/depthwise/depthwise 451.6 k - 648.0 2.03 M 1x56x56x72
Relu MobilenetV3/expanded_conv_2/depthwise/Relu ? - ? ? 1x56x56x72
Conv2D MobilenetV3/expanded_conv_2/project/Conv2D 301.1 k 1x56x56x72 1.73 k 5.42 M 1x56x56x24
Conv2D MobilenetV3/expanded_conv_3/expand/Conv2D 301.1 k 1x56x56x24 1.73 k 5.42 M 1x56x56x72
Relu MobilenetV3/expanded_conv_3/expand/Relu ? - ? ? 1x56x56x72
DepthConv MobilenetV3/expanded_conv_3/depthwise/depthwise 282.2 k - 1.80 k 1.41 M 1x28x28x72
Relu MobilenetV3/expanded_conv_3/depthwise/Relu ? - ? ? 1x28x28x72
Conv2D MobilenetV3/expanded_conv_3/squeeze_excite/Conv/Conv2D 96.0 1x1x1x72 1.73 k 1.73 k 1x1x1x24
Relu MobilenetV3/expanded_conv_3/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x24
Conv2D MobilenetV3/expanded_conv_3/squeeze_excite/Conv_1/Conv2D 96.0 1x1x1x24 1.73 k 1.73 k 1x1x1x72
Relu6 MobilenetV3/expanded_conv_3/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x72
Conv2D MobilenetV3/expanded_conv_3/project/Conv2D 87.8 k 1x28x28x72 2.88 k 2.26 M 1x28x28x40
Conv2D MobilenetV3/expanded_conv_4/expand/Conv2D 125.4 k 1x28x28x40 4.80 k 3.76 M 1x28x28x120
Relu MobilenetV3/expanded_conv_4/expand/Relu ? - ? ? 1x28x28x120
DepthConv MobilenetV3/expanded_conv_4/depthwise/depthwise 188.2 k - 3.00 k 2.35 M 1x28x28x120
Relu MobilenetV3/expanded_conv_4/depthwise/Relu ? - ? ? 1x28x28x120
Conv2D MobilenetV3/expanded_conv_4/squeeze_excite/Conv/Conv2D 152.0 1x1x1x120 3.84 k 3.84 k 1x1x1x32
Relu MobilenetV3/expanded_conv_4/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x32
Conv2D MobilenetV3/expanded_conv_4/squeeze_excite/Conv_1/Conv2D 152.0 1x1x1x32 3.84 k 3.84 k 1x1x1x120
Relu6 MobilenetV3/expanded_conv_4/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x120
Conv2D MobilenetV3/expanded_conv_4/project/Conv2D 125.4 k 1x28x28x120 4.80 k 3.76 M 1x28x28x40
Conv2D MobilenetV3/expanded_conv_5/expand/Conv2D 125.4 k 1x28x28x40 4.80 k 3.76 M 1x28x28x120
Relu MobilenetV3/expanded_conv_5/expand/Relu ? - ? ? 1x28x28x120
DepthConv MobilenetV3/expanded_conv_5/depthwise/depthwise 188.2 k - 3.00 k 2.35 M 1x28x28x120
Relu MobilenetV3/expanded_conv_5/depthwise/Relu ? - ? ? 1x28x28x120
Conv2D MobilenetV3/expanded_conv_5/squeeze_excite/Conv/Conv2D 152.0 1x1x1x120 3.84 k 3.84 k 1x1x1x32
Relu MobilenetV3/expanded_conv_5/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x32
Conv2D MobilenetV3/expanded_conv_5/squeeze_excite/Conv_1/Conv2D 152.0 1x1x1x32 3.84 k 3.84 k 1x1x1x120
Relu6 MobilenetV3/expanded_conv_5/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x120
Conv2D MobilenetV3/expanded_conv_5/project/Conv2D 125.4 k 1x28x28x120 4.80 k 3.76 M 1x28x28x40
Conv2D MobilenetV3/expanded_conv_6/expand/Conv2D 219.5 k 1x28x28x40 9.60 k 7.53 M 1x28x28x240
Relu6 MobilenetV3/expanded_conv_6/expand/hard_swish/Relu6 ? - ? ? 1x28x28x240
DepthConv MobilenetV3/expanded_conv_6/depthwise/depthwise 235.2 k - 2.16 k 423.4 k 1x14x14x240
Relu6 MobilenetV3/expanded_conv_6/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x240
Conv2D MobilenetV3/expanded_conv_6/project/Conv2D 62.7 k 1x14x14x240 19.2 k 3.76 M 1x14x14x80
Conv2D MobilenetV3/expanded_conv_7/expand/Conv2D 54.9 k 1x14x14x80 16.0 k 3.14 M 1x14x14x200
Relu6 MobilenetV3/expanded_conv_7/expand/hard_swish/Relu6 ? - ? ? 1x14x14x200
DepthConv MobilenetV3/expanded_conv_7/depthwise/depthwise 78.4 k - 1.80 k 352.8 k 1x14x14x200
Relu6 MobilenetV3/expanded_conv_7/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x200
Conv2D MobilenetV3/expanded_conv_7/project/Conv2D 54.9 k 1x14x14x200 16.0 k 3.14 M 1x14x14x80
Conv2D MobilenetV3/expanded_conv_8/expand/Conv2D 51.7 k 1x14x14x80 14.7 k 2.89 M 1x14x14x184
Relu6 MobilenetV3/expanded_conv_8/expand/hard_swish/Relu6 ? - ? ? 1x14x14x184
DepthConv MobilenetV3/expanded_conv_8/depthwise/depthwise 72.1 k - 1.66 k 324.6 k 1x14x14x184
Relu6 MobilenetV3/expanded_conv_8/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x184
Conv2D MobilenetV3/expanded_conv_8/project/Conv2D 51.7 k 1x14x14x184 14.7 k 2.89 M 1x14x14x80
Conv2D MobilenetV3/expanded_conv_9/expand/Conv2D 51.7 k 1x14x14x80 14.7 k 2.89 M 1x14x14x184
Relu6 MobilenetV3/expanded_conv_9/expand/hard_swish/Relu6 ? - ? ? 1x14x14x184
DepthConv MobilenetV3/expanded_conv_9/depthwise/depthwise 72.1 k - 1.66 k 324.6 k 1x14x14x184
Relu6 MobilenetV3/expanded_conv_9/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x184
Conv2D MobilenetV3/expanded_conv_9/project/Conv2D 51.7 k 1x14x14x184 14.7 k 2.89 M 1x14x14x80
Conv2D MobilenetV3/expanded_conv_10/expand/Conv2D 109.8 k 1x14x14x80 38.4 k 7.53 M 1x14x14x480
Relu6 MobilenetV3/expanded_conv_10/expand/hard_swish/Relu6 ? - ? ? 1x14x14x480
DepthConv MobilenetV3/expanded_conv_10/depthwise/depthwise 188.2 k - 4.32 k 846.7 k 1x14x14x480
Relu6 MobilenetV3/expanded_conv_10/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x480
Conv2D MobilenetV3/expanded_conv_10/squeeze_excite/Conv/Conv2D 600.0 1x1x1x480 57.6 k 57.6 k 1x1x1x120
Relu MobilenetV3/expanded_conv_10/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x120
Conv2D MobilenetV3/expanded_conv_10/squeeze_excite/Conv_1/Conv2D 600.0 1x1x1x120 57.6 k 57.6 k 1x1x1x480
Relu6 MobilenetV3/expanded_conv_10/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x480
Conv2D MobilenetV3/expanded_conv_10/project/Conv2D 116.0 k 1x14x14x480 53.8 k 10.5 M 1x14x14x112
Conv2D MobilenetV3/expanded_conv_11/expand/Conv2D 153.7 k 1x14x14x112 75.3 k 14.8 M 1x14x14x672
Relu6 MobilenetV3/expanded_conv_11/expand/hard_swish/Relu6 ? - ? ? 1x14x14x672
DepthConv MobilenetV3/expanded_conv_11/depthwise/depthwise 263.4 k - 6.05 k 1.19 M 1x14x14x672
Relu6 MobilenetV3/expanded_conv_11/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x672
Conv2D MobilenetV3/expanded_conv_11/squeeze_excite/Conv/Conv2D 840.0 1x1x1x672 112.9 k 112.9 k 1x1x1x168
Relu MobilenetV3/expanded_conv_11/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x168
Conv2D MobilenetV3/expanded_conv_11/squeeze_excite/Conv_1/Conv2D 840.0 1x1x1x168 112.9 k 112.9 k 1x1x1x672
Relu6 MobilenetV3/expanded_conv_11/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x672
Conv2D MobilenetV3/expanded_conv_11/project/Conv2D 153.7 k 1x14x14x672 75.3 k 14.8 M 1x14x14x112
Conv2D MobilenetV3/expanded_conv_12/expand/Conv2D 153.7 k 1x14x14x112 75.3 k 14.8 M 1x14x14x672
Relu6 MobilenetV3/expanded_conv_12/expand/hard_swish/Relu6 ? - ? ? 1x14x14x672
DepthConv MobilenetV3/expanded_conv_12/depthwise/depthwise 164.6 k - 16.8 k 823.2 k 1x7x7x672
Relu6 MobilenetV3/expanded_conv_12/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x672
Conv2D MobilenetV3/expanded_conv_12/squeeze_excite/Conv/Conv2D 840.0 1x1x1x672 112.9 k 112.9 k 1x1x1x168
Relu MobilenetV3/expanded_conv_12/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x168
Conv2D MobilenetV3/expanded_conv_12/squeeze_excite/Conv_1/Conv2D 840.0 1x1x1x168 112.9 k 112.9 k 1x1x1x672
Relu6 MobilenetV3/expanded_conv_12/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x672
Conv2D MobilenetV3/expanded_conv_12/project/Conv2D 40.8 k 1x7x7x672 107.5 k 5.27 M 1x7x7x160
Conv2D MobilenetV3/expanded_conv_13/expand/Conv2D 54.9 k 1x7x7x160 153.6 k 7.53 M 1x7x7x960
Relu6 MobilenetV3/expanded_conv_13/expand/hard_swish/Relu6 ? - ? ? 1x7x7x960
DepthConv MobilenetV3/expanded_conv_13/depthwise/depthwise 94.1 k - 24.0 k 1.18 M 1x7x7x960
Relu6 MobilenetV3/expanded_conv_13/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x960
Conv2D MobilenetV3/expanded_conv_13/squeeze_excite/Conv/Conv2D 1.20 k 1x1x1x960 230.4 k 230.4 k 1x1x1x240
Relu MobilenetV3/expanded_conv_13/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x240
Conv2D MobilenetV3/expanded_conv_13/squeeze_excite/Conv_1/Conv2D 1.20 k 1x1x1x240 230.4 k 230.4 k 1x1x1x960
Relu6 MobilenetV3/expanded_conv_13/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x960
Conv2D MobilenetV3/expanded_conv_13/project/Conv2D 54.9 k 1x7x7x960 153.6 k 7.53 M 1x7x7x160
Conv2D MobilenetV3/expanded_conv_14/expand/Conv2D 54.9 k 1x7x7x160 153.6 k 7.53 M 1x7x7x960
Relu6 MobilenetV3/expanded_conv_14/expand/hard_swish/Relu6 ? - ? ? 1x7x7x960
DepthConv MobilenetV3/expanded_conv_14/depthwise/depthwise 94.1 k - 24.0 k 1.18 M 1x7x7x960
Relu6 MobilenetV3/expanded_conv_14/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x960
Conv2D MobilenetV3/expanded_conv_14/squeeze_excite/Conv/Conv2D 1.20 k 1x1x1x960 230.4 k 230.4 k 1x1x1x240
Relu MobilenetV3/expanded_conv_14/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x240
Conv2D MobilenetV3/expanded_conv_14/squeeze_excite/Conv_1/Conv2D 1.20 k 1x1x1x240 230.4 k 230.4 k 1x1x1x960
Relu6 MobilenetV3/expanded_conv_14/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x960
Conv2D MobilenetV3/expanded_conv_14/project/Conv2D 54.9 k 1x7x7x960 153.6 k 7.53 M 1x7x7x160
Conv2D MobilenetV3/Conv_1/Conv2D 54.9 k 1x7x7x160 153.6 k 7.53 M 1x7x7x960
Relu6 MobilenetV3/Conv_1/hard_swish/Relu6 ? - ? ? 1x7x7x960
AvgPool MobilenetV3/AvgPool2D/AvgPool ? 1x7x7x960 ? 47.0 k 1x1x1x960
Conv2D MobilenetV3/Conv_2/Conv2D 2.24 k 1x1x1x960 1.23 M 1.23 M 1x1x1x1280
Relu6 MobilenetV3/Conv_2/hard_swish/Relu6 ? - ? ? 1x1x1x1280
Conv2D MobilenetV3/Logits/Conv2d_1c_1x1/Conv2D 2.28 k 1x1x1x1280 1.28 M 1.28 M 1x1x1x1001
-----
==================
==== V3 SMALL ====
==================
op name ActMem ConvInput ConvParameters Madds OutputTensor
Conv2D MobilenetV3/Conv/Conv2D 351.2 k 1x224x224x3 432.0 5.42 M 1x112x112x16
Relu6 MobilenetV3/Conv/hard_swish/Relu6 ? - ? ? 1x112x112x16
DepthConv MobilenetV3/expanded_conv/depthwise/depthwise 250.9 k - 144.0 451.6 k 1x56x56x16
Relu MobilenetV3/expanded_conv/depthwise/Relu ? - ? ? 1x56x56x16
Conv2D MobilenetV3/expanded_conv/squeeze_excite/Conv/Conv2D 24.0 1x1x1x16 128.0 128.0 1x1x1x8
Relu MobilenetV3/expanded_conv/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x8
Conv2D MobilenetV3/expanded_conv/squeeze_excite/Conv_1/Conv2D 24.0 1x1x1x8 128.0 128.0 1x1x1x16
Relu6 MobilenetV3/expanded_conv/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x16
Conv2D MobilenetV3/expanded_conv/project/Conv2D 100.4 k 1x56x56x16 256.0 802.8 k 1x56x56x16
Conv2D MobilenetV3/expanded_conv_1/expand/Conv2D 276.0 k 1x56x56x16 1.15 k 3.61 M 1x56x56x72
Relu MobilenetV3/expanded_conv_1/expand/Relu ? - ? ? 1x56x56x72
DepthConv MobilenetV3/expanded_conv_1/depthwise/depthwise 282.2 k - 648.0 508.0 k 1x28x28x72
Relu MobilenetV3/expanded_conv_1/depthwise/Relu ? - ? ? 1x28x28x72
Conv2D MobilenetV3/expanded_conv_1/project/Conv2D 75.3 k 1x28x28x72 1.73 k 1.35 M 1x28x28x24
Conv2D MobilenetV3/expanded_conv_2/expand/Conv2D 87.8 k 1x28x28x24 2.11 k 1.66 M 1x28x28x88
Relu MobilenetV3/expanded_conv_2/expand/Relu ? - ? ? 1x28x28x88
DepthConv MobilenetV3/expanded_conv_2/depthwise/depthwise 138.0 k - 792.0 620.9 k 1x28x28x88
Relu MobilenetV3/expanded_conv_2/depthwise/Relu ? - ? ? 1x28x28x88
Conv2D MobilenetV3/expanded_conv_2/project/Conv2D 87.8 k 1x28x28x88 2.11 k 1.66 M 1x28x28x24
Conv2D MobilenetV3/expanded_conv_3/expand/Conv2D 94.1 k 1x28x28x24 2.30 k 1.81 M 1x28x28x96
Relu6 MobilenetV3/expanded_conv_3/expand/hard_swish/Relu6 ? - ? ? 1x28x28x96
DepthConv MobilenetV3/expanded_conv_3/depthwise/depthwise 94.1 k - 2.40 k 470.4 k 1x14x14x96
Relu6 MobilenetV3/expanded_conv_3/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x96
Conv2D MobilenetV3/expanded_conv_3/squeeze_excite/Conv/Conv2D 120.0 1x1x1x96 2.30 k 2.30 k 1x1x1x24
Relu MobilenetV3/expanded_conv_3/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x24
Conv2D MobilenetV3/expanded_conv_3/squeeze_excite/Conv_1/Conv2D 120.0 1x1x1x24 2.30 k 2.30 k 1x1x1x96
Relu6 MobilenetV3/expanded_conv_3/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x96
Conv2D MobilenetV3/expanded_conv_3/project/Conv2D 26.7 k 1x14x14x96 3.84 k 752.6 k 1x14x14x40
Conv2D MobilenetV3/expanded_conv_4/expand/Conv2D 54.9 k 1x14x14x40 9.60 k 1.88 M 1x14x14x240
Relu6 MobilenetV3/expanded_conv_4/expand/hard_swish/Relu6 ? - ? ? 1x14x14x240
DepthConv MobilenetV3/expanded_conv_4/depthwise/depthwise 94.1 k - 6.00 k 1.18 M 1x14x14x240
Relu6 MobilenetV3/expanded_conv_4/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x240
Conv2D MobilenetV3/expanded_conv_4/squeeze_excite/Conv/Conv2D 304.0 1x1x1x240 15.4 k 15.4 k 1x1x1x64
Relu MobilenetV3/expanded_conv_4/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x64
Conv2D MobilenetV3/expanded_conv_4/squeeze_excite/Conv_1/Conv2D 304.0 1x1x1x64 15.4 k 15.4 k 1x1x1x240
Relu6 MobilenetV3/expanded_conv_4/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x240
Conv2D MobilenetV3/expanded_conv_4/project/Conv2D 54.9 k 1x14x14x240 9.60 k 1.88 M 1x14x14x40
Conv2D MobilenetV3/expanded_conv_5/expand/Conv2D 54.9 k 1x14x14x40 9.60 k 1.88 M 1x14x14x240
Relu6 MobilenetV3/expanded_conv_5/expand/hard_swish/Relu6 ? - ? ? 1x14x14x240
DepthConv MobilenetV3/expanded_conv_5/depthwise/depthwise 94.1 k - 6.00 k 1.18 M 1x14x14x240
Relu6 MobilenetV3/expanded_conv_5/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x240
Conv2D MobilenetV3/expanded_conv_5/squeeze_excite/Conv/Conv2D 304.0 1x1x1x240 15.4 k 15.4 k 1x1x1x64
Relu MobilenetV3/expanded_conv_5/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x64
Conv2D MobilenetV3/expanded_conv_5/squeeze_excite/Conv_1/Conv2D 304.0 1x1x1x64 15.4 k 15.4 k 1x1x1x240
Relu6 MobilenetV3/expanded_conv_5/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x240
Conv2D MobilenetV3/expanded_conv_5/project/Conv2D 54.9 k 1x14x14x240 9.60 k 1.88 M 1x14x14x40
Conv2D MobilenetV3/expanded_conv_6/expand/Conv2D 31.4 k 1x14x14x40 4.80 k 940.8 k 1x14x14x120
Relu6 MobilenetV3/expanded_conv_6/expand/hard_swish/Relu6 ? - ? ? 1x14x14x120
DepthConv MobilenetV3/expanded_conv_6/depthwise/depthwise 47.0 k - 3.00 k 588.0 k 1x14x14x120
Relu6 MobilenetV3/expanded_conv_6/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x120
Conv2D MobilenetV3/expanded_conv_6/squeeze_excite/Conv/Conv2D 152.0 1x1x1x120 3.84 k 3.84 k 1x1x1x32
Relu MobilenetV3/expanded_conv_6/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x32
Conv2D MobilenetV3/expanded_conv_6/squeeze_excite/Conv_1/Conv2D 152.0 1x1x1x32 3.84 k 3.84 k 1x1x1x120
Relu6 MobilenetV3/expanded_conv_6/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x120
Conv2D MobilenetV3/expanded_conv_6/project/Conv2D 32.9 k 1x14x14x120 5.76 k 1.13 M 1x14x14x48
Conv2D MobilenetV3/expanded_conv_7/expand/Conv2D 37.6 k 1x14x14x48 6.91 k 1.35 M 1x14x14x144
Relu6 MobilenetV3/expanded_conv_7/expand/hard_swish/Relu6 ? - ? ? 1x14x14x144
DepthConv MobilenetV3/expanded_conv_7/depthwise/depthwise 56.4 k - 3.60 k 705.6 k 1x14x14x144
Relu6 MobilenetV3/expanded_conv_7/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x144
Conv2D MobilenetV3/expanded_conv_7/squeeze_excite/Conv/Conv2D 184.0 1x1x1x144 5.76 k 5.76 k 1x1x1x40
Relu MobilenetV3/expanded_conv_7/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x40
Conv2D MobilenetV3/expanded_conv_7/squeeze_excite/Conv_1/Conv2D 184.0 1x1x1x40 5.76 k 5.76 k 1x1x1x144
Relu6 MobilenetV3/expanded_conv_7/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x144
Conv2D MobilenetV3/expanded_conv_7/project/Conv2D 37.6 k 1x14x14x144 6.91 k 1.35 M 1x14x14x48
Conv2D MobilenetV3/expanded_conv_8/expand/Conv2D 65.9 k 1x14x14x48 13.8 k 2.71 M 1x14x14x288
Relu6 MobilenetV3/expanded_conv_8/expand/hard_swish/Relu6 ? - ? ? 1x14x14x288
DepthConv MobilenetV3/expanded_conv_8/depthwise/depthwise 70.6 k - 7.20 k 352.8 k 1x7x7x288
Relu6 MobilenetV3/expanded_conv_8/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x288
Conv2D MobilenetV3/expanded_conv_8/squeeze_excite/Conv/Conv2D 360.0 1x1x1x288 20.7 k 20.7 k 1x1x1x72
Relu MobilenetV3/expanded_conv_8/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x72
Conv2D MobilenetV3/expanded_conv_8/squeeze_excite/Conv_1/Conv2D 360.0 1x1x1x72 20.7 k 20.7 k 1x1x1x288
Relu6 MobilenetV3/expanded_conv_8/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x288
Conv2D MobilenetV3/expanded_conv_8/project/Conv2D 18.8 k 1x7x7x288 27.6 k 1.35 M 1x7x7x96
Conv2D MobilenetV3/expanded_conv_9/expand/Conv2D 32.9 k 1x7x7x96 55.3 k 2.71 M 1x7x7x576
Relu6 MobilenetV3/expanded_conv_9/expand/hard_swish/Relu6 ? - ? ? 1x7x7x576
DepthConv MobilenetV3/expanded_conv_9/depthwise/depthwise 56.4 k - 14.4 k 705.6 k 1x7x7x576
Relu6 MobilenetV3/expanded_conv_9/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x576
Conv2D MobilenetV3/expanded_conv_9/squeeze_excite/Conv/Conv2D 720.0 1x1x1x576 82.9 k 82.9 k 1x1x1x144
Relu MobilenetV3/expanded_conv_9/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x144
Conv2D MobilenetV3/expanded_conv_9/squeeze_excite/Conv_1/Conv2D 720.0 1x1x1x144 82.9 k 82.9 k 1x1x1x576
Relu6 MobilenetV3/expanded_conv_9/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x576
Conv2D MobilenetV3/expanded_conv_9/project/Conv2D 32.9 k 1x7x7x576 55.3 k 2.71 M 1x7x7x96
Conv2D MobilenetV3/expanded_conv_10/expand/Conv2D 32.9 k 1x7x7x96 55.3 k 2.71 M 1x7x7x576
Relu6 MobilenetV3/expanded_conv_10/expand/hard_swish/Relu6 ? - ? ? 1x7x7x576
DepthConv MobilenetV3/expanded_conv_10/depthwise/depthwise 56.4 k - 14.4 k 705.6 k 1x7x7x576
Relu6 MobilenetV3/expanded_conv_10/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x576
Conv2D MobilenetV3/expanded_conv_10/squeeze_excite/Conv/Conv2D 720.0 1x1x1x576 82.9 k 82.9 k 1x1x1x144
Relu MobilenetV3/expanded_conv_10/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x144
Conv2D MobilenetV3/expanded_conv_10/squeeze_excite/Conv_1/Conv2D 720.0 1x1x1x144 82.9 k 82.9 k 1x1x1x576
Relu6 MobilenetV3/expanded_conv_10/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x576
Conv2D MobilenetV3/expanded_conv_10/project/Conv2D 32.9 k 1x7x7x576 55.3 k 2.71 M 1x7x7x96
Conv2D MobilenetV3/Conv_1/Conv2D 32.9 k 1x7x7x96 55.3 k 2.71 M 1x7x7x576
Relu6 MobilenetV3/Conv_1/hard_swish/Relu6 ? - ? ? 1x7x7x576
AvgPool MobilenetV3/AvgPool2D/AvgPool ? 1x7x7x576 ? 28.2 k 1x1x1x576
Conv2D MobilenetV3/Conv_2/Conv2D 1.60 k 1x1x1x576 589.8 k 589.8 k 1x1x1x1024
Relu6 MobilenetV3/Conv_2/hard_swish/Relu6 ? - ? ? 1x1x1x1024
Conv2D MobilenetV3/Logits/Conv2d_1c_1x1/Conv2D 2.02 k 1x1x1x1024 1.03 M 1.03 M 1x1x1x1001
-----
Total Total 2.96 M - 2.53 M 56.5 M -
====================
==== V3 EDGETPU ====
====================
op name ActMem ConvInput ConvParameters Madds OutputTensor
Conv2D MobilenetEdgeTPU/Conv/Conv2D 551.9 k 1x224x224x3 864.0 10.8 M 1x112x112x32
Relu MobilenetEdgeTPU/Conv/Relu ? - ? ? 1x112x112x32
Conv2D MobilenetEdgeTPU/expanded_conv/project/Conv2D 602.1 k 1x112x112x32 512.0 6.42 M 1x112x112x16
Conv2D MobilenetEdgeTPU/expanded_conv_1/expand/Conv2D 602.1 k 1x112x112x16 18.4 k 57.8 M 1x56x56x128
Relu MobilenetEdgeTPU/expanded_conv_1/expand/Relu ? - ? ? 1x56x56x128
Conv2D MobilenetEdgeTPU/expanded_conv_1/project/Conv2D 501.8 k 1x56x56x128 4.10 k 12.8 M 1x56x56x32
Conv2D MobilenetEdgeTPU/expanded_conv_2/expand/Conv2D 501.8 k 1x56x56x32 36.9 k 115.6 M 1x56x56x128
Relu MobilenetEdgeTPU/expanded_conv_2/expand/Relu ? - ? ? 1x56x56x128
Conv2D MobilenetEdgeTPU/expanded_conv_2/project/Conv2D 501.8 k 1x56x56x128 4.10 k 12.8 M 1x56x56x32
Conv2D MobilenetEdgeTPU/expanded_conv_3/expand/Conv2D 501.8 k 1x56x56x32 36.9 k 115.6 M 1x56x56x128
Relu MobilenetEdgeTPU/expanded_conv_3/expand/Relu ? - ? ? 1x56x56x128
Conv2D MobilenetEdgeTPU/expanded_conv_3/project/Conv2D 501.8 k 1x56x56x128 4.10 k 12.8 M 1x56x56x32
Conv2D MobilenetEdgeTPU/expanded_conv_4/expand/Conv2D 501.8 k 1x56x56x32 36.9 k 115.6 M 1x56x56x128
Relu MobilenetEdgeTPU/expanded_conv_4/expand/Relu ? - ? ? 1x56x56x128
Conv2D MobilenetEdgeTPU/expanded_conv_4/project/Conv2D 501.8 k 1x56x56x128 4.10 k 12.8 M 1x56x56x32
Conv2D MobilenetEdgeTPU/expanded_conv_5/expand/Conv2D 301.1 k 1x56x56x32 73.7 k 57.8 M 1x28x28x256
Relu MobilenetEdgeTPU/expanded_conv_5/expand/Relu ? - ? ? 1x28x28x256
Conv2D MobilenetEdgeTPU/expanded_conv_5/project/Conv2D 238.3 k 1x28x28x256 12.3 k 9.63 M 1x28x28x48
Conv2D MobilenetEdgeTPU/expanded_conv_6/expand/Conv2D 188.2 k 1x28x28x48 82.9 k 65.0 M 1x28x28x192
Relu MobilenetEdgeTPU/expanded_conv_6/expand/Relu ? - ? ? 1x28x28x192
Conv2D MobilenetEdgeTPU/expanded_conv_6/project/Conv2D 188.2 k 1x28x28x192 9.22 k 7.23 M 1x28x28x48
Conv2D MobilenetEdgeTPU/expanded_conv_7/expand/Conv2D 188.2 k 1x28x28x48 82.9 k 65.0 M 1x28x28x192
Relu MobilenetEdgeTPU/expanded_conv_7/expand/Relu ? - ? ? 1x28x28x192
Conv2D MobilenetEdgeTPU/expanded_conv_7/project/Conv2D 188.2 k 1x28x28x192 9.22 k 7.23 M 1x28x28x48
Conv2D MobilenetEdgeTPU/expanded_conv_8/expand/Conv2D 188.2 k 1x28x28x48 82.9 k 65.0 M 1x28x28x192
Relu MobilenetEdgeTPU/expanded_conv_8/expand/Relu ? - ? ? 1x28x28x192
Conv2D MobilenetEdgeTPU/expanded_conv_8/project/Conv2D 188.2 k 1x28x28x192 9.22 k 7.23 M 1x28x28x48
Conv2D MobilenetEdgeTPU/expanded_conv_9/expand/Conv2D 338.7 k 1x28x28x48 18.4 k 14.5 M 1x28x28x384
Relu MobilenetEdgeTPU/expanded_conv_9/expand/Relu ? - ? ? 1x28x28x384
DepthConv MobilenetEdgeTPU/expanded_conv_9/depthwise/depthwise 376.3 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_9/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_9/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_10/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_10/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_10/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_10/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_10/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_11/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_11/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_11/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_11/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_11/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_12/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_12/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_12/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_12/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_12/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_13/expand/Conv2D 169.3 k 1x14x14x96 73.7 k 14.5 M 1x14x14x768
Relu MobilenetEdgeTPU/expanded_conv_13/expand/Relu ? - ? ? 1x14x14x768
DepthConv MobilenetEdgeTPU/expanded_conv_13/depthwise/depthwise 301.1 k - 6.91 k 1.35 M 1x14x14x768
Relu MobilenetEdgeTPU/expanded_conv_13/depthwise/Relu ? - ? ? 1x14x14x768
Conv2D MobilenetEdgeTPU/expanded_conv_13/project/Conv2D 169.3 k 1x14x14x768 73.7 k 14.5 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_14/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_14/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_14/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_14/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_14/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_15/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_15/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_15/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_15/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_15/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_16/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_16/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_16/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_16/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_16/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_17/expand/Conv2D 169.3 k 1x14x14x96 73.7 k 14.5 M 1x14x14x768
Relu MobilenetEdgeTPU/expanded_conv_17/expand/Relu ? - ? ? 1x14x14x768
DepthConv MobilenetEdgeTPU/expanded_conv_17/depthwise/depthwise 188.2 k - 19.2 k 940.8 k 1x7x7x768
Relu MobilenetEdgeTPU/expanded_conv_17/depthwise/Relu ? - ? ? 1x7x7x768
Conv2D MobilenetEdgeTPU/expanded_conv_17/project/Conv2D 45.5 k 1x7x7x768 122.9 k 6.02 M 1x7x7x160
Conv2D MobilenetEdgeTPU/expanded_conv_18/expand/Conv2D 39.2 k 1x7x7x160 102.4 k 5.02 M 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_18/expand/Relu ? - ? ? 1x7x7x640
DepthConv MobilenetEdgeTPU/expanded_conv_18/depthwise/depthwise 62.7 k - 16.0 k 784.0 k 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_18/depthwise/Relu ? - ? ? 1x7x7x640
Conv2D MobilenetEdgeTPU/expanded_conv_18/project/Conv2D 39.2 k 1x7x7x640 102.4 k 5.02 M 1x7x7x160
Conv2D MobilenetEdgeTPU/expanded_conv_19/expand/Conv2D 39.2 k 1x7x7x160 102.4 k 5.02 M 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_19/expand/Relu ? - ? ? 1x7x7x640
DepthConv MobilenetEdgeTPU/expanded_conv_19/depthwise/depthwise 62.7 k - 16.0 k 784.0 k 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_19/depthwise/Relu ? - ? ? 1x7x7x640
Conv2D MobilenetEdgeTPU/expanded_conv_19/project/Conv2D 39.2 k 1x7x7x640 102.4 k 5.02 M 1x7x7x160
Conv2D MobilenetEdgeTPU/expanded_conv_20/expand/Conv2D 39.2 k 1x7x7x160 102.4 k 5.02 M 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_20/expand/Relu ? - ? ? 1x7x7x640
DepthConv MobilenetEdgeTPU/expanded_conv_20/depthwise/depthwise 62.7 k - 16.0 k 784.0 k 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_20/depthwise/Relu ? - ? ? 1x7x7x640
Conv2D MobilenetEdgeTPU/expanded_conv_20/project/Conv2D 39.2 k 1x7x7x640 102.4 k 5.02 M 1x7x7x160
Conv2D MobilenetEdgeTPU/expanded_conv_21/expand/Conv2D 70.6 k 1x7x7x160 204.8 k 10.0 M 1x7x7x1280
Relu MobilenetEdgeTPU/expanded_conv_21/expand/Relu ? - ? ? 1x7x7x1280
DepthConv MobilenetEdgeTPU/expanded_conv_21/depthwise/depthwise 125.4 k - 11.5 k 564.5 k 1x7x7x1280
Relu MobilenetEdgeTPU/expanded_conv_21/depthwise/Relu ? - ? ? 1x7x7x1280
Conv2D MobilenetEdgeTPU/expanded_conv_21/project/Conv2D 72.1 k 1x7x7x1280 245.8 k 12.0 M 1x7x7x192
Conv2D MobilenetEdgeTPU/Conv_1/Conv2D 72.1 k 1x7x7x192 245.8 k 12.0 M 1x7x7x1280
Relu MobilenetEdgeTPU/Conv_1/Relu ? - ? ? 1x7x7x1280
AvgPool MobilenetEdgeTPU/Logits/AvgPool2D ? 1x7x7x1280 ? 62.7 k 1x1x1x1280
Conv2D MobilenetEdgeTPU/Logits/Conv2d_1c_1x1/Conv2D 2.28 k 1x1x1x1280 1.28 M 1.28 M 1x1x1x1001
-----
Total Total 11.6 M - 4.05 M 990.7 M -
# pylint: enable=line-too-long
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
op = lib.op
expand_input = ops.expand_input_by_factor
# Squeeze Excite with all parameters filled in; we use hard-sigmoid
# for the gating function and relu for the inner activation function.
squeeze_excite = functools.partial(
ops.squeeze_excite, squeeze_factor=4,
inner_activation_fn=tf.nn.relu,
gating_fn=lambda x: tf.nn.relu6(x+3)*0.16667)
# Wrap squeeze excite op as expansion_transform that takes
# both expansion and input tensor.
_se4 = lambda expansion_tensor, input_tensor: squeeze_excite(expansion_tensor)
def hard_swish(x):
with tf.name_scope('hard_swish'):
return x * tf.nn.relu6(x + np.float32(3)) * np.float32(1. / 6.)
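# Illustrative behaviour of hard_swish (an added note, not part of the original
# file): the op approximates swish = x * sigmoid(x) with cheap piecewise-linear
# pieces, e.g.
#   hard_swish(-3.) == 0.0     # relu6(x + 3) clips to 0 for x <= -3
#   hard_swish(1.)  ~= 0.667   # 1 * relu6(4) / 6
#   hard_swish(3.)  == 3.0     # relu6(x + 3) saturates at 6 for x >= 3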
def reduce_to_1x1(input_tensor, default_size=7, **kwargs):
h, w = input_tensor.shape.as_list()[1:3]
if h is not None and w == h:
k = [h, h]
else:
k = [default_size, default_size]
return slim.avg_pool2d(input_tensor, kernel_size=k, **kwargs)
def mbv3_op(ef, n, k, s=1, act=tf.nn.relu, se=None, **kwargs):
"""Defines a single Mobilenet V3 convolution block.
Args:
ef: expansion factor
n: number of output channels
    k: kernel size of the depthwise convolution
    s: stride
act: activation function in inner layers
se: squeeze excite function.
**kwargs: passed to expanded_conv
Returns:
An object (lib._Op) for inserting in conv_def, representing this operation.
"""
return op(
ops.expanded_conv,
expansion_size=expand_input(ef),
kernel_size=(k, k),
stride=s,
num_outputs=n,
inner_activation_fn=act,
expansion_transform=se,
**kwargs)
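# Illustrative reading of one spec entry (an added note, not part of the
# original file): mbv3_op(ef=4, n=24, k=3, s=2) describes a block that expands
# the incoming channels 4x with a 1x1 conv, runs a 3x3 depthwise conv with
# stride 2, and projects back to 24 output channels with a final 1x1 conv.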
def mbv3_fused(ef, n, k, s=1, **kwargs):
"""Defines a single Mobilenet V3 convolution block.
Args:
ef: expansion factor
n: number of output channels
k: stride of depthwise
s: stride
**kwargs: will be passed to mbv3_op
Returns:
An object (lib._Op) for inserting in conv_def, representing this operation.
"""
expansion_fn = functools.partial(slim.conv2d, kernel_size=k, stride=s)
return mbv3_op(
ef,
n,
k=1,
s=s,
depthwise_location=None,
expansion_fn=expansion_fn,
**kwargs)
mbv3_op_se = functools.partial(mbv3_op, se=_se4)
DEFAULTS = {
(ops.expanded_conv,):
dict(
normalizer_fn=slim.batch_norm,
residual=True),
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm,
'activation_fn': tf.nn.relu,
},
(slim.batch_norm,): {
'center': True,
'scale': True
},
}
DEFAULTS_GROUP_NORM = {
(ops.expanded_conv,): dict(normalizer_fn=slim.group_norm, residual=True),
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.group_norm,
'activation_fn': tf.nn.relu,
},
(slim.group_norm,): {
'groups': 8
},
}
# Compatible checkpoint: http://mldash/5511169891790690458#scalars
V3_LARGE = dict(
defaults=dict(DEFAULTS),
spec=([
# stage 1
op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3),
activation_fn=hard_swish),
mbv3_op(ef=1, n=16, k=3),
mbv3_op(ef=4, n=24, k=3, s=2),
mbv3_op(ef=3, n=24, k=3, s=1),
mbv3_op_se(ef=3, n=40, k=5, s=2),
mbv3_op_se(ef=3, n=40, k=5, s=1),
mbv3_op_se(ef=3, n=40, k=5, s=1),
mbv3_op(ef=6, n=80, k=3, s=2, act=hard_swish),
mbv3_op(ef=2.5, n=80, k=3, s=1, act=hard_swish),
mbv3_op(ef=184/80., n=80, k=3, s=1, act=hard_swish),
mbv3_op(ef=184/80., n=80, k=3, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=112, k=3, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=112, k=3, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=160, k=5, s=2, act=hard_swish),
mbv3_op_se(ef=6, n=160, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=160, k=5, s=1, act=hard_swish),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=960,
activation_fn=hard_swish),
op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280,
normalizer_fn=None, activation_fn=hard_swish)
]))
# 72.2% accuracy.
V3_LARGE_MINIMALISTIC = dict(
defaults=dict(DEFAULTS),
spec=([
# stage 1
op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3)),
mbv3_op(ef=1, n=16, k=3),
mbv3_op(ef=4, n=24, k=3, s=2),
mbv3_op(ef=3, n=24, k=3, s=1),
mbv3_op(ef=3, n=40, k=3, s=2),
mbv3_op(ef=3, n=40, k=3, s=1),
mbv3_op(ef=3, n=40, k=3, s=1),
mbv3_op(ef=6, n=80, k=3, s=2),
mbv3_op(ef=2.5, n=80, k=3, s=1),
mbv3_op(ef=184 / 80., n=80, k=3, s=1),
mbv3_op(ef=184 / 80., n=80, k=3, s=1),
mbv3_op(ef=6, n=112, k=3, s=1),
mbv3_op(ef=6, n=112, k=3, s=1),
mbv3_op(ef=6, n=160, k=3, s=2),
mbv3_op(ef=6, n=160, k=3, s=1),
mbv3_op(ef=6, n=160, k=3, s=1),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=960),
op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'),
op(slim.conv2d,
stride=1,
kernel_size=[1, 1],
num_outputs=1280,
normalizer_fn=None)
]))
# Compatible run: http://mldash/2023283040014348118#scalars
V3_SMALL = dict(
defaults=dict(DEFAULTS),
spec=([
# stage 1
op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3),
activation_fn=hard_swish),
mbv3_op_se(ef=1, n=16, k=3, s=2),
mbv3_op(ef=72./16, n=24, k=3, s=2),
mbv3_op(ef=(88./24), n=24, k=3, s=1),
mbv3_op_se(ef=4, n=40, k=5, s=2, act=hard_swish),
mbv3_op_se(ef=6, n=40, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=40, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=3, n=48, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=3, n=48, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=96, k=5, s=2, act=hard_swish),
mbv3_op_se(ef=6, n=96, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=96, k=5, s=1, act=hard_swish),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=576,
activation_fn=hard_swish),
op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1024,
normalizer_fn=None, activation_fn=hard_swish)
]))
# 62% accuracy.
V3_SMALL_MINIMALISTIC = dict(
defaults=dict(DEFAULTS),
spec=([
# stage 1
op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3)),
mbv3_op(ef=1, n=16, k=3, s=2),
mbv3_op(ef=72. / 16, n=24, k=3, s=2),
mbv3_op(ef=(88. / 24), n=24, k=3, s=1),
mbv3_op(ef=4, n=40, k=3, s=2),
mbv3_op(ef=6, n=40, k=3, s=1),
mbv3_op(ef=6, n=40, k=3, s=1),
mbv3_op(ef=3, n=48, k=3, s=1),
mbv3_op(ef=3, n=48, k=3, s=1),
mbv3_op(ef=6, n=96, k=3, s=2),
mbv3_op(ef=6, n=96, k=3, s=1),
mbv3_op(ef=6, n=96, k=3, s=1),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=576),
op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'),
op(slim.conv2d,
stride=1,
kernel_size=[1, 1],
num_outputs=1024,
normalizer_fn=None)
]))
# EdgeTPU friendly variant of MobilenetV3 that uses fused convolutions
# instead of depthwise in the early layers.
V3_EDGETPU = dict(
defaults=dict(DEFAULTS),
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=(3, 3)),
mbv3_fused(k=3, s=1, ef=1, n=16),
mbv3_fused(k=3, s=2, ef=8, n=32),
mbv3_fused(k=3, s=1, ef=4, n=32),
mbv3_fused(k=3, s=1, ef=4, n=32),
mbv3_fused(k=3, s=1, ef=4, n=32),
mbv3_fused(k=3, s=2, ef=8, n=48),
mbv3_fused(k=3, s=1, ef=4, n=48),
mbv3_fused(k=3, s=1, ef=4, n=48),
mbv3_fused(k=3, s=1, ef=4, n=48),
mbv3_op(k=3, s=2, ef=8, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=8, n=96, residual=False),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=5, s=2, ef=8, n=160),
mbv3_op(k=5, s=1, ef=4, n=160),
mbv3_op(k=5, s=1, ef=4, n=160),
mbv3_op(k=5, s=1, ef=4, n=160),
mbv3_op(k=3, s=1, ef=8, n=192),
op(slim.conv2d, stride=1, num_outputs=1280, kernel_size=(1, 1)),
])
@slim.add_arg_scope
def mobilenet(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='MobilenetV3',
conv_defs=None,
finegrain_classification_mode=False,
use_groupnorm=False,
**kwargs):
"""Creates mobilenet V3 network.
  Inference mode is created by default. To create a training network use
  training_scope below.
with slim.arg_scope(mobilenet_v3.training_scope()):
logits, endpoints = mobilenet_v3.mobilenet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer.
scope: Scope of the operator
conv_defs: Which version to create. Could be large/small or
any conv_def (see mobilenet_v3.py for examples).
finegrain_classification_mode: When set to True, the model
will keep the last layer large even for small multipliers. Following
https://arxiv.org/abs/1801.04381
it improves performance for ImageNet-type of problems.
*Note* ignored if final_endpoint makes the builder exit earlier.
use_groupnorm: When set to True, use group_norm as normalizer_fn.
**kwargs: passed directly to mobilenet.mobilenet:
      prediction_fn: what prediction function to use.
      reuse: whether to reuse variables (if reuse is set to True, scope
        must be given).
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if conv_defs is None:
conv_defs = V3_LARGE
if 'multiplier' in kwargs:
    raise ValueError('mobilenetv3 doesn\'t support the generic '
                     '"multiplier" parameter; use "depth_multiplier" instead.')
if use_groupnorm:
conv_defs = copy.deepcopy(conv_defs)
conv_defs['defaults'] = dict(DEFAULTS_GROUP_NORM)
conv_defs['defaults'].update({
(slim.group_norm,): {
'groups': kwargs.pop('groups', 8)
}
})
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
conv_defs['spec'][-1] = conv_defs['spec'][-1]._replace(
multiplier_func=lambda params, multiplier: params)
depth_args = {}
with slim.arg_scope((lib.depth_multiplier,), **depth_args):
return lib.mobilenet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
multiplier=depth_multiplier,
**kwargs)
mobilenet.default_image_size = 224
training_scope = lib.training_scope
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
return mobilenet(
input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs)
def wrapped_partial(func, new_defaults=None,
**kwargs):
"""Partial function with new default parameters and updated docstring."""
if not new_defaults:
new_defaults = {}
def func_wrapper(*f_args, **f_kwargs):
new_kwargs = dict(new_defaults)
new_kwargs.update(f_kwargs)
return func(*f_args, **new_kwargs)
functools.update_wrapper(func_wrapper, func)
partial_func = functools.partial(func_wrapper, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
large = wrapped_partial(mobilenet, conv_defs=V3_LARGE)
small = wrapped_partial(mobilenet, conv_defs=V3_SMALL)
edge_tpu = wrapped_partial(mobilenet,
new_defaults={'scope': 'MobilenetEdgeTPU'},
conv_defs=V3_EDGETPU)
edge_tpu_075 = wrapped_partial(
mobilenet,
new_defaults={'scope': 'MobilenetEdgeTPU'},
conv_defs=V3_EDGETPU,
depth_multiplier=0.75,
finegrain_classification_mode=True)
# Minimalistic model that does not have Squeeze Excite blocks,
# Hardswish, or 5x5 depthwise convolution.
# This makes the model very friendly for a wide range of hardware
large_minimalistic = wrapped_partial(mobilenet, conv_defs=V3_LARGE_MINIMALISTIC)
small_minimalistic = wrapped_partial(mobilenet, conv_defs=V3_SMALL_MINIMALISTIC)
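# Illustrative usage of the wrapped partials above (assumes a float tensor
# `images` of shape [batch, 224, 224, 3]; this example is not part of the
# original file):
#   with slim.arg_scope(mobilenet_v3.training_scope(is_training=False)):
#     logits, endpoints = mobilenet_v3.large(images, depth_multiplier=0.75)
# `small`, `edge_tpu` and the *_minimalistic variants are called the same way.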
def _reduce_consecutive_layers(conv_defs, start_id, end_id, multiplier=0.5):
"""Reduce the outputs of consecutive layers with multiplier.
Args:
conv_defs: Mobilenet conv_defs.
start_id: 0-based index of the starting conv_def to be reduced.
end_id: 0-based index of the last conv_def to be reduced.
multiplier: The multiplier by which to reduce the conv_defs.
Returns:
Mobilenet conv_defs where the output sizes from layers [start_id, end_id],
inclusive, are reduced by multiplier.
Raises:
ValueError if any layer to be reduced does not have the 'num_outputs'
attribute.
"""
defs = copy.deepcopy(conv_defs)
for d in defs['spec'][start_id:end_id+1]:
d.params.update({
        'num_outputs': int(np.round(d.params['num_outputs'] * multiplier))
})
return defs
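# Illustrative effect of the detection variants below (an added note, not part
# of the original file): for V3_LARGE, entries 13..16 of the spec are the three
# 160-channel blocks and the 960-channel 1x1 conv, so halving them yields
# 80-channel blocks and a 480-channel conv, shrinking the most expensive stage.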
V3_LARGE_DETECTION = _reduce_consecutive_layers(V3_LARGE, 13, 16)
V3_SMALL_DETECTION = _reduce_consecutive_layers(V3_SMALL, 9, 12)
__all__ = ['training_scope', 'mobilenet', 'V3_LARGE', 'V3_SMALL', 'large',
'small', 'V3_LARGE_DETECTION', 'V3_SMALL_DETECTION']
| 68,394 | 88.757218 | 171 | py |
models | models-master/research/slim/nets/mobilenet/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/slim/nets/mobilenet/mobilenet.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
return activation_fn(x, name=name) if activation_fn else x
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
  kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
                           kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
[0, 0]])
return padded_inputs
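# Worked example for _fixed_padding (an added note, not part of the original
# file): for kernel_size=[3, 3] and rate=1 the effective kernel size is 3, so
# pad_total=2 and one pixel of zeros is added on each side; with rate=2 the
# effective size is 5, giving two pixels per side. A following 'VALID'
# convolution then matches the spatial output of 'SAME' padding.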
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return int(new_v)
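# Worked examples for _make_divisible (added notes, not part of the original
# file):
#   _make_divisible(24 * 0.75, 8) == 24  # 18 rounds to 16, but 16 < 0.9 * 18,
#                                        # so it is bumped up by one divisor.
#   _make_divisible(32 * 0.75, 8) == 24  # 24 is already a multiple of 8.
#   _make_divisible(32 * 0.35, 8) == 16  # 11.2 -> 8, bumped to 16 (>10% drop).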
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
"""Sets arg scope defaults for all items present in defaults.
Args:
defaults: dictionary/list of pairs, containing a mapping from
function to a dictionary of default args.
Yields:
context manager where all defaults are set.
"""
if hasattr(defaults, 'items'):
items = list(defaults.items())
else:
items = defaults
if not items:
yield
else:
func, default_arg = items[0]
with slim.arg_scope(func, **default_arg):
with _set_arg_scope_defaults(items[1:]):
yield
@slim.add_arg_scope
def depth_multiplier(output_params,
multiplier,
divisible_by=8,
min_depth=8,
**unused_kwargs):
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
min_depth)
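# Illustrative example (an added note, not part of the original file):
# depth_multiplier mutates the op params in place, e.g.
#   params = {'num_outputs': 32}
#   depth_multiplier(params, 0.75)  # params['num_outputs'] is now 24
#   depth_multiplier({'num_outputs': 32}, 0.35)  # would set num_outputs to 16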
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])
def op(opfunc, multiplier_func=depth_multiplier, **params):
multiplier = params.pop('multiplier_transform', multiplier_func)
return _Op(opfunc, params=params, multiplier_func=multiplier)
class NoOpScope(object):
"""No-op context manager."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
def safe_arg_scope(funcs, **kwargs):
"""Returns `slim.arg_scope` with all None arguments removed.
Args:
funcs: Functions to pass to `arg_scope`.
**kwargs: Arguments to pass to `arg_scope`.
Returns:
arg_scope or No-op context manager.
  Note: this can be useful when a None value should be interpreted as "do not
    overwrite this parameter value".
"""
filtered_args = {name: value for name, value in kwargs.items()
if value is not None}
if filtered_args:
return slim.arg_scope(funcs, **filtered_args)
else:
return NoOpScope()
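# Illustrative example (an added note, not part of the original file):
#   safe_arg_scope([slim.batch_norm], is_training=None) returns a NoOpScope, so
#   batch_norm keeps whatever is_training an outer arg_scope (or its default)
#   provides, while safe_arg_scope([slim.batch_norm], is_training=False)
#   behaves like a regular slim.arg_scope and forces inference-mode batch norm.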
@slim.add_arg_scope
def mobilenet_base( # pylint: disable=invalid-name
inputs,
conv_defs,
multiplier=1.0,
final_endpoint=None,
output_stride=None,
use_explicit_padding=False,
scope=None,
is_training=False):
"""Mobilenet base network.
Constructs a network from inputs to the given final endpoint. By default
the network is constructed in inference mode. To create network
in training mode use:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_base(...)
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
conv_defs: A list of op(...) layers specifying the net architecture.
multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
    final_endpoint: The name of the last layer, for early termination.
      For V1-based networks the last layer is "layer_14", for V2: "layer_20".
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 1 or any even number, excluding
zero. Typical values are 8 (accurate fully convolutional mode), 16
(fast fully convolutional mode), and 32 (classification mode).
      NOTE: output_stride relies on all subsequent operators to support dilated
      convolution via the "rate" parameter. This might require wrapping
      non-conv operators to operate properly.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: optional variable scope.
    is_training: How to set up batch_norm and other ops. Note: most of the time
      this does not need to be set directly. Use mobilenet.training_scope() to
      set up training instead. This parameter is here for backward
      compatibility only. It is safe to set it to the value matching
      training_scope(is_training=...). It is also safe to explicitly set
      it to False, even if there is an outer training_scope set to training.
(The network will be built in inference mode). If this is set to None,
no arg_scope is added for slim.batch_norm's is_training parameter.
Returns:
tensor_out: output tensor.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
if multiplier <= 0:
raise ValueError('multiplier is not greater than zero.')
# Set conv defs defaults and overrides.
conv_defs_defaults = conv_defs.get('defaults', {})
conv_defs_overrides = conv_defs.get('overrides', {})
if use_explicit_padding:
conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
conv_defs_overrides[
(slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
if output_stride is not None:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
# a) Set the tensorflow scope
# b) set padding to default: note we might consider removing this
# since it is also set by mobilenet_scope
# c) set all defaults
# d) set all extra overrides.
# pylint: disable=g-backslash-continuation
with _scope_all(scope, default_scope='Mobilenet'), \
safe_arg_scope([slim.batch_norm], is_training=is_training), \
_set_arg_scope_defaults(conv_defs_defaults), \
_set_arg_scope_defaults(conv_defs_overrides):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
# Insert default parameters before the base scope which includes
# any custom overrides set in mobilenet.
end_points = {}
scopes = {}
for i, opdef in enumerate(conv_defs['spec']):
params = dict(opdef.params)
opdef.multiplier_func(params, multiplier)
stride = params.get('stride', 1)
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
# Update params.
params['stride'] = layer_stride
# Only insert rate to params if rate > 1 and kernel size is not [1, 1].
if layer_rate > 1:
if tuple(params.get('kernel_size', [])) != (1, 1):
# We will apply atrous rate in the following cases:
# 1) When kernel_size is not in params, the operation then uses
# default kernel size 3x3.
# 2) When kernel_size is in params, and if the kernel_size is not
# equal to (1, 1) (there is no need to apply atrous convolution to
# any 1x1 convolution).
params['rate'] = layer_rate
# Set padding
if use_explicit_padding:
if 'kernel_size' in params:
net = _fixed_padding(net, params['kernel_size'], layer_rate)
else:
params['use_explicit_padding'] = True
end_point = 'layer_%d' % (i + 1)
try:
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' % (i, opdef, params))
raise
end_points[end_point] = net
scope = os.path.dirname(net.name)
scopes[scope] = end_point
if final_endpoint is not None and end_point == final_endpoint:
break
# Add all tensors that end with 'output' to
# endpoints
for t in net.graph.get_operations():
scope = os.path.dirname(t.name)
bn = os.path.basename(t.name)
if scope in scopes and t.name.endswith('output'):
end_points[scopes[scope] + '/' + bn] = t.outputs[0]
return net, end_points
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
with tf.variable_scope(scope, default_name=default_scope) as s,\
tf.name_scope(s.original_name_scope):
yield s
@slim.add_arg_scope
def mobilenet(inputs,
num_classes=1001,
prediction_fn=slim.softmax,
reuse=None,
scope='Mobilenet',
base_only=False,
use_reduce_mean_for_pooling=False,
**mobilenet_args):
"""Mobilenet model for classification, supports both V1 and V2.
Note: default mode is inference, use mobilenet.training_scope to create
training network.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
prediction_fn: a function to get predictions out of logits
(default softmax).
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
base_only: if True will only create the base of the network (no pooling
and no logits).
    use_reduce_mean_for_pooling: if True use reduce_mean for pooling. If
      False use the global_pool function that provides some optimization.
**mobilenet_args: passed to mobilenet_base verbatim.
- conv_defs: list of conv defs
- multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
- output_stride: will ensure that the last layer has at most total stride.
If the architecture calls for more stride than that provided
(e.g. output_stride=16, but the architecture has 5 stride=2 operators),
it will replace output_stride with fractional convolutions using Atrous
Convolutions.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation tensor.
Raises:
ValueError: Input rank is invalid.
"""
is_training = mobilenet_args.get('is_training', False)
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input')
net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = global_pool(net, use_reduce_mean_for_pooling)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, scope='Dropout', is_training=is_training)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, 'Predictions')
return logits, end_points
def global_pool(input_tensor,
use_reduce_mean_for_pooling=False,
pool_op=tf.nn.avg_pool2d):
"""Applies avg pool to produce 1x1 output.
  NOTE: This function is functionally equivalent to reduce_mean, but it has a
  baked-in average pool which has better support across hardware.
Args:
input_tensor: input tensor
use_reduce_mean_for_pooling: if True use reduce_mean for pooling
pool_op: pooling op (avg pool is default)
Returns:
a tensor batch_size x 1 x 1 x depth.
"""
if use_reduce_mean_for_pooling:
return tf.reduce_mean(
input_tensor, [1, 2], keepdims=True, name='ReduceMean')
else:
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size = tf.convert_to_tensor(value=[
1,
tf.shape(input=input_tensor)[1],
tf.shape(input=input_tensor)[2], 1
])
else:
kernel_size = [1, shape[1], shape[2], 1]
output = pool_op(
input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
# Recover output shape, for unknown shape.
output.set_shape([None, 1, 1, None])
return output
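# Illustrative behaviour (an added note, not part of the original file): a
# 1x7x7x576 input produces a 1x1x1x576 output. When the spatial dimensions are
# unknown at graph-construction time, the pooling kernel is computed from
# tf.shape at run time and the static shape is reset to [None, 1, 1, None].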
def training_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
dropout_keep_prob=0.8,
bn_decay=0.997):
"""Defines Mobilenet training scope.
Usage:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
     # the network created will be trainable with dropout/batch norm
# initialized appropriately.
Args:
is_training: if set to False this will ensure that all customizations are
set to non-training mode. This might be helpful for code that is reused
across both training/evaluation, but most of the time training_scope with
      value False is not needed. If this is set to None, the parameter is not
      added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
    stddev: Standard deviation for initialization; if negative, uses Xavier.
dropout_keep_prob: dropout keep probability (not set if equals to None).
bn_decay: decay for the batch norm moving averages (not set if equals to
None).
Returns:
An argument scope to use via arg_scope.
"""
# Note: do not introduce parameters that would change the inference
# model here (for example whether to use bias), modify conv_def instead.
batch_norm_params = {
'decay': bn_decay,
'is_training': is_training
}
  if stddev < 0:
    weight_initializer = slim.initializers.xavier_initializer()
  else:
    weight_initializer = tf.truncated_normal_initializer(
        stddev=stddev)
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected, slim.separable_conv2d],
      weights_initializer=weight_initializer,
normalizer_fn=slim.batch_norm), \
slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
safe_arg_scope([slim.batch_norm], **batch_norm_params), \
safe_arg_scope([slim.dropout], is_training=is_training,
keep_prob=dropout_keep_prob), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
return s
| 18,440 | 36.711656 | 80 | py |
models | models-master/research/slim/nets/mobilenet/mobilenet_v3_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.third_party.tensorflow_models.slim.nets.mobilenet.mobilenet_v3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from nets.mobilenet import mobilenet_v3
from google3.testing.pybase import parameterized
class MobilenetV3Test(tf.test.TestCase, parameterized.TestCase):
# pylint: disable = g-unreachable-test-method
def assertVariablesHaveNormalizerFn(self, use_groupnorm):
global_variables = [v.name for v in tf.global_variables()]
has_batch_norm = False
has_group_norm = False
for global_variable in global_variables:
if 'BatchNorm' in global_variable:
has_batch_norm = True
if 'GroupNorm' in global_variable:
has_group_norm = True
if use_groupnorm:
self.assertFalse(has_batch_norm)
self.assertTrue(has_group_norm)
else:
self.assertTrue(has_batch_norm)
self.assertFalse(has_group_norm)
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3Large(self, use_groupnorm):
logits, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
use_groupnorm=use_groupnorm)
self.assertEqual(endpoints['layer_19'].shape, [1, 1, 1, 1280])
self.assertEqual(logits.shape, [1, 1001])
self.assertVariablesHaveNormalizerFn(use_groupnorm)
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3Small(self, use_groupnorm):
_, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_SMALL,
use_groupnorm=use_groupnorm)
self.assertEqual(endpoints['layer_15'].shape, [1, 1, 1, 1024])
self.assertVariablesHaveNormalizerFn(use_groupnorm)
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetEdgeTpu(self, use_groupnorm):
_, endpoints = mobilenet_v3.edge_tpu(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
use_groupnorm=use_groupnorm)
self.assertIn('Inference mode is created by default',
mobilenet_v3.edge_tpu.__doc__)
self.assertEqual(endpoints['layer_24'].shape, [1, 7, 7, 1280])
self.assertStartsWith(
endpoints['layer_24'].name, 'MobilenetEdgeTPU')
self.assertVariablesHaveNormalizerFn(use_groupnorm)
def testMobilenetEdgeTpuChangeScope(self):
_, endpoints = mobilenet_v3.edge_tpu(
tf.placeholder(tf.float32, (1, 224, 224, 3)), scope='Scope')
self.assertStartsWith(
endpoints['layer_24'].name, 'Scope')
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3BaseOnly(self, use_groupnorm):
result, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_LARGE,
use_groupnorm=use_groupnorm,
base_only=True,
final_endpoint='layer_17')
    # Get the last layer before average pool.
self.assertEqual(endpoints['layer_17'].shape, [1, 7, 7, 960])
self.assertEqual(result, endpoints['layer_17'])
self.assertVariablesHaveNormalizerFn(use_groupnorm)
def testMobilenetV3BaseOnly_VariableInput(self):
result, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (None, None, None, 3)),
conv_defs=mobilenet_v3.V3_LARGE,
base_only=True,
final_endpoint='layer_17')
    # Get the last layer before average pool.
self.assertEqual(endpoints['layer_17'].shape.as_list(),
[None, None, None, 960])
self.assertEqual(result, endpoints['layer_17'])
# Use reduce mean for pooling and check for operation 'ReduceMean' in graph
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3WithReduceMean(self, use_groupnorm):
_, _ = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_SMALL,
use_groupnorm=use_groupnorm,
use_reduce_mean_for_pooling=True)
g = tf.get_default_graph()
reduce_mean = [v for v in g.get_operations() if 'ReduceMean' in v.name]
self.assertNotEmpty(reduce_mean)
self.assertVariablesHaveNormalizerFn(use_groupnorm)
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3WithOutReduceMean(self, use_groupnorm):
_, _ = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_SMALL,
use_groupnorm=use_groupnorm,
use_reduce_mean_for_pooling=False)
g = tf.get_default_graph()
reduce_mean = [v for v in g.get_operations() if 'ReduceMean' in v.name]
self.assertEmpty(reduce_mean)
self.assertVariablesHaveNormalizerFn(use_groupnorm)
if __name__ == '__main__':
# absltest.main()
tf.test.main()
| 5,913 | 40.943262 | 87 | py |
models | models-master/research/slim/deployment/model_deploy.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deploy Slim models across multiple clones and replicas.
# TODO(sguada) docstring paragraph by (a) motivating the need for the file and
# (b) defining clones.
# TODO(sguada) describe the high-level components of model deployment.
# E.g. "each model deployment is composed of several parts: a DeploymentConfig,
# which captures A, B and C, an input_fn which loads data.. etc
To easily train a model on multiple GPUs or across multiple machines this
module provides a set of helper functions: `create_clones`,
`optimize_clones` and `deploy`.
Usage:
g = tf.Graph()
# Set up DeploymentConfig
config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
# Create the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.create_global_step()
# Define the inputs
with tf.device(config.inputs_device()):
images, labels = LoadData(...)
inputs_queue = slim.data.prefetch_queue((images, labels))
# Define the optimizer.
with tf.device(config.optimizer_device()):
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Define the model including the loss.
def model_fn(inputs_queue):
images, labels = inputs_queue.dequeue()
predictions = CreateNetwork(images)
slim.losses.log_loss(predictions, labels)
model_dp = model_deploy.deploy(config, model_fn, [inputs_queue],
optimizer=optimizer)
# Run training.
slim.learning.train(model_dp.train_op, my_log_dir,
summary_op=model_dp.summary_op)
The Clone namedtuple holds together the values associated with each call to
model_fn:
* outputs: The return values of the calls to `model_fn()`.
* scope: The scope used to create the clone.
* device: The device used to create the clone.
DeployedModel namedtuple, holds together the values needed to train multiple
clones:
* train_op: An operation that run the optimizer training op and include
all the update ops created by `model_fn`. Present only if an optimizer
was specified.
* summary_op: An operation that run the summaries created by `model_fn`
and process_gradients.
* total_loss: A `Tensor` that contains the sum of all losses created by
`model_fn` plus the regularization losses.
* clones: List of `Clone` tuples returned by `create_clones()`.
DeploymentConfig parameters:
* num_clones: Number of model clones to deploy in each replica.
* clone_on_cpu: True if clones should be placed on CPU.
* replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
* num_replicas: Number of replicas to use.
* num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
* worker_job_name: A name for the worker job.
* ps_job_name: A name for the parameter server job.
TODO(sguada):
- describe side effect to the graph.
- what happens to summaries and update_ops.
- which graph collections are altered.
- write a tutorial on how to use this.
- analyze the possibility of calling deploy more than once.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
import tf_slim as slim
__all__ = ['create_clones',
'deploy',
'optimize_clones',
'DeployedModel',
'DeploymentConfig',
'Clone',
]
# Namedtuple used to represent a clone during deployment.
Clone = collections.namedtuple('Clone',
['outputs', # Whatever model_fn() returned.
'scope', # The scope used to create it.
'device', # The device used to create.
])
# Namedtuple used to represent a DeployedModel, returned by deploy().
DeployedModel = collections.namedtuple('DeployedModel',
['train_op', # The `train_op`
'summary_op', # The `summary_op`
'total_loss', # The loss `Tensor`
'clones', # A list of `Clones` tuples.
])
# Default parameters for DeploymentConfig
_deployment_params = {'num_clones': 1,
'clone_on_cpu': False,
'replica_id': 0,
'num_replicas': 1,
'num_ps_tasks': 0,
'worker_job_name': 'worker',
'ps_job_name': 'ps'}
def create_clones(config, model_fn, args=None, kwargs=None):
"""Creates multiple clones according to config using a `model_fn`.
The returned values of `model_fn(*args, **kwargs)` are collected along with
the scope and device used to created it in a namedtuple
`Clone(outputs, scope, device)`
Note: it is assumed that any loss created by `model_fn` is collected at
the tf.GraphKeys.LOSSES collection.
To recover the losses, summaries or update_ops created by the clone use:
```python
losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, clone.scope)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
```
The deployment options are specified by the config object and support
deploying one or several clones on different GPUs and one or several replicas
of such clones.
The argument `model_fn` is called `config.num_clones` times to create the
model clones as `model_fn(*args, **kwargs)`.
If `config` specifies deployment on multiple replicas then the default
  tensorflow device is set appropriately for each call to `model_fn` and for the
slim variable creation functions: model and global variables will be created
on the `ps` device, the clone operations will be on the `worker` device.
Args:
config: A DeploymentConfig object.
model_fn: A callable. Called as `model_fn(*args, **kwargs)`
args: Optional list of arguments to pass to `model_fn`.
    kwargs: Optional dict of keyword arguments to pass to `model_fn`.
Returns:
A list of namedtuples `Clone`.
"""
clones = []
args = args or []
kwargs = kwargs or {}
with slim.arg_scope([slim.model_variable, slim.variable],
device=config.variables_device()):
# Create clones.
for i in range(0, config.num_clones):
with tf.name_scope(config.clone_scope(i)) as clone_scope:
clone_device = config.clone_device(i)
with tf.device(clone_device):
with tf.variable_scope(tf.get_variable_scope(),
reuse=True if i > 0 else None):
outputs = model_fn(*args, **kwargs)
clones.append(Clone(outputs, clone_scope, clone_device))
return clones
def _gather_clone_loss(clone, num_clones, regularization_losses):
"""Gather the loss for a single clone.
Args:
clone: A Clone namedtuple.
num_clones: The number of clones being deployed.
regularization_losses: Possibly empty list of regularization_losses
to add to the clone losses.
Returns:
A tensor for the total loss for the clone. Can be None.
"""
# The return value.
sum_loss = None
# Individual components of the loss that will need summaries.
clone_loss = None
regularization_loss = None
# Compute and aggregate losses on the clone device.
with tf.device(clone.device):
all_losses = []
clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
if clone_losses:
clone_loss = tf.add_n(clone_losses, name='clone_loss')
if num_clones > 1:
clone_loss = tf.div(clone_loss, 1.0 * num_clones,
name='scaled_clone_loss')
all_losses.append(clone_loss)
if regularization_losses:
regularization_loss = tf.add_n(regularization_losses,
name='regularization_loss')
all_losses.append(regularization_loss)
if all_losses:
sum_loss = tf.add_n(all_losses)
# Add the summaries out of the clone device block.
if clone_loss is not None:
tf.summary.scalar('/'.join(filter(None,
['Losses', clone.scope, 'clone_loss'])),
clone_loss)
if regularization_loss is not None:
tf.summary.scalar('Losses/regularization_loss', regularization_loss)
return sum_loss
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
**kwargs):
"""Compute losses and gradients for a single clone.
Args:
optimizer: A tf.Optimizer object.
clone: A Clone namedtuple.
num_clones: The number of clones being deployed.
regularization_losses: Possibly empty list of regularization_losses
to add to the clone losses.
    **kwargs: Dict of kwargs to pass to compute_gradients().
Returns:
A tuple (clone_loss, clone_grads_and_vars).
- clone_loss: A tensor for the total loss for the clone. Can be None.
- clone_grads_and_vars: List of (gradient, variable) for the clone.
Can be empty.
"""
sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
clone_grad = None
if sum_loss is not None:
with tf.device(clone.device):
clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
return sum_loss, clone_grad
def optimize_clones(clones, optimizer,
regularization_losses=None,
**kwargs):
"""Compute clone losses and gradients for the given list of `Clones`.
Note: The regularization_losses are added to the first clone losses.
Args:
clones: List of `Clones` created by `create_clones()`.
optimizer: An `Optimizer` object.
regularization_losses: Optional list of regularization losses. If None it
will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
exclude them.
    **kwargs: Optional dict of keyword arguments to pass to `compute_gradients`.
Returns:
A tuple (total_loss, grads_and_vars).
- total_loss: A Tensor containing the average of the clone losses including
the regularization loss.
- grads_and_vars: A List of tuples (gradient, variable) containing the sum
of the gradients for each variable.
"""
grads_and_vars = []
clones_losses = []
num_clones = len(clones)
if regularization_losses is None:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
clone_loss, clone_grad = _optimize_clone(
optimizer, clone, num_clones, regularization_losses, **kwargs)
if clone_loss is not None:
clones_losses.append(clone_loss)
grads_and_vars.append(clone_grad)
# Only use regularization_losses for the first clone
regularization_losses = None
# Compute the total_loss summing all the clones_losses.
total_loss = tf.add_n(clones_losses, name='total_loss')
# Sum the gradients across clones.
grads_and_vars = _sum_clones_gradients(grads_and_vars)
return total_loss, grads_and_vars
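# Illustrative arithmetic (an added note, not part of the original file): with
# two clones, _gather_clone_loss scales each clone loss by 1/2, so
#   total_loss = (loss_0 + loss_1) / 2 + regularization_loss
# and the returned grads_and_vars hold the per-variable sum of the clone
# gradients, which is exactly the gradient of that averaged total loss.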
def deploy(config,
model_fn,
args=None,
kwargs=None,
optimizer=None,
summarize_gradients=False):
"""Deploys a Slim-constructed model across multiple clones.
The deployment options are specified by the config object and support
deploying one or several clones on different GPUs and one or several replicas
of such clones.
The argument `model_fn` is called `config.num_clones` times to create the
model clones as `model_fn(*args, **kwargs)`.
The optional argument `optimizer` is an `Optimizer` object. If not `None`,
the deployed model is configured for training with that optimizer.
If `config` specifies deployment on multiple replicas then the default
  tensorflow device is set appropriately for each call to `model_fn` and for the
slim variable creation functions: model and global variables will be created
on the `ps` device, the clone operations will be on the `worker` device.
Args:
config: A `DeploymentConfig` object.
model_fn: A callable. Called as `model_fn(*args, **kwargs)`
args: Optional list of arguments to pass to `model_fn`.
    kwargs: Optional dict of keyword arguments to pass to `model_fn`.
optimizer: Optional `Optimizer` object. If passed the model is deployed
for training with that optimizer.
summarize_gradients: Whether or not add summaries to the gradients.
Returns:
A `DeployedModel` namedtuple.
"""
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Create Clones.
clones = create_clones(config, model_fn, args, kwargs)
first_clone = clones[0]
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone.scope)
train_op = None
total_loss = None
with tf.device(config.optimizer_device()):
if optimizer:
# Place the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.get_or_create_global_step()
# Compute the gradients for the clones.
total_loss, clones_gradients = optimize_clones(clones, optimizer)
if clones_gradients:
if summarize_gradients:
# Add summaries to the gradients.
summaries |= set(_add_gradients_summaries(clones_gradients))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_op = tf.identity(total_loss, name='train_op')
else:
clones_losses = []
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
clone_loss = _gather_clone_loss(clone, len(clones),
regularization_losses)
if clone_loss is not None:
clones_losses.append(clone_loss)
# Only use regularization_losses for the first clone
regularization_losses = None
if clones_losses:
total_loss = tf.add_n(clones_losses, name='total_loss')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone.scope))
if total_loss is not None:
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
if summaries:
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
else:
summary_op = None
return DeployedModel(train_op, summary_op, total_loss, clones)
def _sum_clones_gradients(clone_grads):
"""Calculate the sum gradient for each shared variable across all clones.
This function assumes that the clone_grads has been scaled appropriately by
1 / num_clones.
Args:
clone_grads: A List of List of tuples (gradient, variable), one list per
`Clone`.
Returns:
List of tuples of (gradient, variable) where the gradient has been summed
across all clones.
"""
sum_grads = []
for grad_and_vars in zip(*clone_grads):
# Note that each grad_and_vars looks like the following:
# ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
grads = []
var = grad_and_vars[0][1]
for g, v in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
return sum_grads
def _add_gradients_summaries(grads_and_vars):
"""Add histogram summaries to gradients.
Note: The summaries are also added to the SUMMARIES collection.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The _list_ of the added summaries for grads_and_vars.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(tf.summary.histogram(var.op.name + ':gradient',
grad_values))
summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
tf.global_norm([grad_values])))
else:
tf.logging.info('Var %s has no gradient', var.op.name)
return summaries
class DeploymentConfig(object):
"""Configuration for deploying a model with `deploy()`.
You can pass an instance of this class to `deploy()` to specify exactly
  how to deploy the model being built. If you do not pass one, an instance
  built from the default deployment_hparams will be used.
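  Example (a minimal sketch; the values below follow the defaults exercised in
  model_deploy_test.py):
    config = DeploymentConfig(num_clones=2)
    config.clone_device(0)      # '/device:GPU:0'
    config.clone_scope(1)       # 'clone_1'
    config.variables_device()   # '/device:CPU:0'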
"""
def __init__(self,
num_clones=1,
clone_on_cpu=False,
replica_id=0,
num_replicas=1,
num_ps_tasks=0,
worker_job_name='worker',
ps_job_name='ps'):
"""Create a DeploymentConfig.
The config describes how to deploy a model across multiple clones and
replicas. The model will be replicated `num_clones` times in each replica.
    If `clone_on_cpu` is True, each clone will be placed on CPU.
If `num_replicas` is 1, the model is deployed via a single process. In that
case `worker_device`, `num_ps_tasks`, and `ps_device` are ignored.
If `num_replicas` is greater than 1, then `worker_device` and `ps_device`
must specify TensorFlow devices for the `worker` and `ps` jobs and
`num_ps_tasks` must be positive.
Args:
num_clones: Number of model clones to deploy in each replica.
      clone_on_cpu: If True, clones will be placed on CPU.
replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
num_replicas: Number of replicas to use.
num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
worker_job_name: A name for the worker job.
ps_job_name: A name for the parameter server job.
Raises:
ValueError: If the arguments are invalid.
"""
if num_replicas > 1:
if num_ps_tasks < 1:
raise ValueError('When using replicas num_ps_tasks must be positive')
if num_replicas > 1 or num_ps_tasks > 0:
if not worker_job_name:
raise ValueError('Must specify worker_job_name when using replicas')
if not ps_job_name:
raise ValueError('Must specify ps_job_name when using parameter server')
if replica_id >= num_replicas:
raise ValueError('replica_id must be less than num_replicas')
self._num_clones = num_clones
self._clone_on_cpu = clone_on_cpu
self._replica_id = replica_id
self._num_replicas = num_replicas
self._num_ps_tasks = num_ps_tasks
self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''
@property
def num_clones(self):
return self._num_clones
@property
def clone_on_cpu(self):
return self._clone_on_cpu
@property
def replica_id(self):
return self._replica_id
@property
def num_replicas(self):
return self._num_replicas
@property
def num_ps_tasks(self):
return self._num_ps_tasks
@property
def ps_device(self):
return self._ps_device
@property
def worker_device(self):
return self._worker_device
def caching_device(self):
"""Returns the device to use for caching variables.
Variables are cached on the worker CPU when using replicas.
Returns:
A device string or None if the variables do not need to be cached.
"""
if self._num_ps_tasks > 0:
return lambda op: op.device
else:
return None
def clone_device(self, clone_index):
"""Device used to create the clone and all the ops inside the clone.
Args:
clone_index: Int, representing the clone_index.
Returns:
A value suitable for `tf.device()`.
Raises:
      ValueError: if `clone_index` is greater than or equal to the number of
        clones.
"""
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
if self._clone_on_cpu:
device += '/device:CPU:0'
else:
device += '/device:GPU:%d' % clone_index
return device
def clone_scope(self, clone_index):
"""Name scope to create the clone.
Args:
clone_index: Int, representing the clone_index.
Returns:
A name_scope suitable for `tf.name_scope()`.
Raises:
      ValueError: if `clone_index` is greater than or equal to the number of
        clones.
"""
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
scope = ''
if self._num_clones > 1:
scope = 'clone_%d' % clone_index
return scope
def optimizer_device(self):
"""Device to use with the optimizer.
Returns:
A value suitable for `tf.device()`.
"""
if self._num_ps_tasks > 0 or self._num_clones > 0:
return self._worker_device + '/device:CPU:0'
else:
return ''
def inputs_device(self):
"""Device to use to build the inputs.
Returns:
A value suitable for `tf.device()`.
"""
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
device += '/device:CPU:0'
return device
def variables_device(self):
"""Returns the device to use for variables created inside the clone.
Returns:
A value suitable for `tf.device()`.
"""
device = ''
if self._num_ps_tasks > 0:
device += self._ps_device
device += '/device:CPU:0'
class _PSDeviceChooser(object):
"""Slim device chooser for variables when using PS."""
def __init__(self, device, tasks):
self._device = device
self._tasks = tasks
self._task = 0
def choose(self, op):
if op.device:
return op.device
node_def = op if isinstance(op, tf.NodeDef) else op.node_def
if node_def.op.startswith('Variable'):
t = self._task
self._task = (self._task + 1) % self._tasks
d = '%s/task:%d' % (self._device, t)
return d
else:
return op.device
if not self._num_ps_tasks:
return device
else:
chooser = _PSDeviceChooser(device, self._num_ps_tasks)
return chooser.choose
| 23,855 | 34.237814 | 80 | py |
models | models-master/research/slim/deployment/model_deploy_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_deploy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from deployment import model_deploy
class DeploymentConfigTest(tf.test.TestCase):
def testDefaults(self):
deploy_config = model_deploy.DeploymentConfig()
self.assertEqual(slim.get_variables(), [])
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testCPUonly(self):
deploy_config = model_deploy.DeploymentConfig(clone_on_cpu=True)
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'CPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testMultiGPU(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1), 'GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testPS(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
def testMultiGPUPS(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=2, num_ps_tasks=1)
self.assertEqual(deploy_config.caching_device()(tf.no_op()), '')
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1),
'/job:worker/device:GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testReplicasPS(self):
deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
num_ps_tasks=2)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testReplicasMultiGPUPS(self):
deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
num_clones=2,
num_ps_tasks=2)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1),
'/job:worker/device:GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testVariablesPS(self):
deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
def LogisticClassifier(inputs, labels, scope=None, reuse=None):
with tf.variable_scope(scope, 'LogisticClassifier', [inputs, labels],
reuse=reuse):
predictions = slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid,
scope='fully_connected')
slim.losses.log_loss(predictions, labels)
return predictions
def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
with tf.variable_scope(scope, 'BatchNormClassifier', [inputs, labels],
reuse=reuse):
inputs = slim.batch_norm(inputs, decay=0.1, fused=True)
predictions = slim.fully_connected(inputs, 1,
activation_fn=tf.sigmoid,
scope='fully_connected')
slim.losses.log_loss(predictions, labels)
return predictions
class CreatecloneTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCreateLogisticClassifier(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = LogisticClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
clone = clones[0]
self.assertEqual(len(slim.get_variables()), 2)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(clone.outputs.op.name,
'LogisticClassifier/fully_connected/Sigmoid')
self.assertEqual(clone.scope, '')
self.assertDeviceEqual(clone.device, 'GPU:0')
self.assertEqual(len(slim.losses.get_losses()), 1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(update_ops, [])
def testCreateSingleclone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
clone = clones[0]
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(clone.outputs.op.name,
'BatchNormClassifier/fully_connected/Sigmoid')
self.assertEqual(clone.scope, '')
self.assertDeviceEqual(clone.device, 'GPU:0')
self.assertEqual(len(slim.losses.get_losses()), 1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
def testCreateMulticlone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(len(clones), num_clones)
for i, clone in enumerate(clones):
self.assertEqual(
clone.outputs.op.name,
'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
self.assertEqual(len(update_ops), 2)
self.assertEqual(clone.scope, 'clone_%d/' % i)
self.assertDeviceEqual(clone.device, 'GPU:%d' % i)
def testCreateOnecloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1,
num_ps_tasks=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(clones), 1)
clone = clones[0]
self.assertEqual(clone.outputs.op.name,
'BatchNormClassifier/fully_connected/Sigmoid')
self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
self.assertEqual(clone.scope, '')
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
self.assertDeviceEqual(v.device, v.value().device)
def testCreateMulticloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=2,
num_ps_tasks=2)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
for i, v in enumerate(slim.get_variables()):
t = i % 2
self.assertDeviceEqual(v.device, '/job:ps/task:%d/device:CPU:0' % t)
self.assertDeviceEqual(v.device, v.value().device)
self.assertEqual(len(clones), 2)
for i, clone in enumerate(clones):
self.assertEqual(
clone.outputs.op.name,
'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
self.assertEqual(clone.scope, 'clone_%d/' % i)
self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:%d' % i)
class OptimizeclonesTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCreateLogisticClassifier(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = LogisticClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 2)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(update_ops, [])
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, 'GPU:0')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateSingleclone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, 'GPU:0')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateMulticlone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), num_clones * 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateMulticloneCPU(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
clone_on_cpu=True)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), num_clones * 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateOnecloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1,
num_ps_tasks=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '/job:worker/device:GPU:0')
self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
class DeployTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _addBesselsCorrection(self, sample_size, expected_var):
correction_factor = sample_size / (sample_size - 1)
expected_var *= correction_factor
return expected_var
def testLocalTrainOp(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=2,
clone_on_cpu=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
self.assertEqual(slim.get_variables(), [])
model = model_deploy.deploy(deploy_config, model_fn, model_args,
optimizer=optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 4)
self.assertEqual(len(model.clones), 2)
self.assertEqual(model.total_loss.op.name, 'total_loss')
self.assertEqual(model.summary_op.op.name, 'summary_op/summary_op')
self.assertEqual(model.train_op.op.name, 'train_op')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
moving_mean = slim.get_variables_by_name('moving_mean')[0]
moving_variance = slim.get_variables_by_name('moving_variance')[0]
initial_loss = sess.run(model.total_loss)
initial_mean, initial_variance = sess.run([moving_mean,
moving_variance])
self.assertAllClose(initial_mean, [0.0, 0.0, 0.0, 0.0])
self.assertAllClose(initial_variance, [1.0, 1.0, 1.0, 1.0])
for _ in range(10):
sess.run(model.train_op)
final_loss = sess.run(model.total_loss)
self.assertLess(final_loss, initial_loss / 5.0)
final_mean, final_variance = sess.run([moving_mean,
moving_variance])
expected_mean = np.array([0.125, 0.25, 0.375, 0.25])
expected_var = np.array([0.109375, 0.1875, 0.234375, 0.1875])
expected_var = self._addBesselsCorrection(16, expected_var)
self.assertAllClose(final_mean, expected_mean)
self.assertAllClose(final_variance, expected_var)
def testNoSummariesOnGPU(self):
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = slim.l2_regularizer(0.001)
slim.fully_connected(inputs, 30, weights_regularizer=reg)
model = model_deploy.deploy(
deploy_config, ModelFn,
optimizer=tf.train.GradientDescentOptimizer(1.0))
# The model summary op should have a few summary inputs and all of them
# should be on the CPU.
self.assertTrue(model.summary_op.op.inputs)
for inp in model.summary_op.op.inputs:
self.assertEqual('/device:CPU:0', inp.device)
def testNoSummariesOnGPUForEvals(self):
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = slim.l2_regularizer(0.001)
slim.fully_connected(inputs, 30, weights_regularizer=reg)
# No optimizer here, it's an eval.
model = model_deploy.deploy(deploy_config, ModelFn)
# The model summary op should have a few summary inputs and all of them
# should be on the CPU.
self.assertTrue(model.summary_op.op.inputs)
for inp in model.summary_op.op.inputs:
self.assertEqual('/device:CPU:0', inp.device)
if __name__ == '__main__':
tf.test.main()
| 24,284 | 41.530648 | 80 | py |
models | models-master/research/slim/deployment/__init__.py | 1 | 0 | 0 | py |
|
models | models-master/research/slim/datasets/download_and_convert_visualwakewords_lib.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Helper functions to generate the Visual WakeWords dataset.
It filters the raw COCO annotations file down to Visual WakeWords dataset
annotations. The resulting annotations and COCO images are then converted
to TF records.
See download_and_convert_visualwakewords.py for the sample usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import io
import json
import os
import contextlib2
import PIL.Image
import six
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.flags.DEFINE_string(
'coco_train_url',
'http://images.cocodataset.org/zips/train2014.zip',
'Link to zip file containing coco training data')
tf.app.flags.DEFINE_string(
'coco_validation_url',
'http://images.cocodataset.org/zips/val2014.zip',
'Link to zip file containing coco validation data')
tf.app.flags.DEFINE_string(
'coco_annotations_url',
'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',
'Link to zip file containing coco annotation data')
FLAGS = tf.app.flags.FLAGS
def download_coco_dataset(dataset_dir):
"""Download the coco dataset.
Args:
dataset_dir: Path where coco dataset should be downloaded.
"""
dataset_utils.download_and_uncompress_zipfile(FLAGS.coco_train_url,
dataset_dir)
dataset_utils.download_and_uncompress_zipfile(FLAGS.coco_validation_url,
dataset_dir)
dataset_utils.download_and_uncompress_zipfile(FLAGS.coco_annotations_url,
dataset_dir)
def create_labels_file(foreground_class_of_interest,
visualwakewords_labels_file):
"""Generate visualwakewords labels file.
Args:
    foreground_class_of_interest: the COCO category that the Visual WakeWords
      dataset keeps as its foreground class
visualwakewords_labels_file: output visualwakewords label file
"""
labels_to_class_names = {0: 'background', 1: foreground_class_of_interest}
with open(visualwakewords_labels_file, 'w') as fp:
for label in labels_to_class_names:
fp.write(str(label) + ':' + str(labels_to_class_names[label]) + '\n')
def create_visual_wakeword_annotations(annotations_file,
visualwakewords_annotations_file,
small_object_area_threshold,
foreground_class_of_interest):
"""Generate visual wakewords annotations file.
  Loads the COCO annotations JSON file and generates the Visual WakeWords
  annotations file.
Args:
annotations_file: JSON file containing COCO bounding box annotations
visualwakewords_annotations_file: path to output annotations file
small_object_area_threshold: threshold on fraction of image area below which
small object bounding boxes are filtered
    foreground_class_of_interest: the COCO category that the Visual WakeWords
      dataset keeps as its foreground class
"""
# default object of interest is person
foreground_class_of_interest_id = 1
with tf.gfile.GFile(annotations_file, 'r') as fid:
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
# Create category index
category_index = {}
for category in groundtruth_data['categories']:
if category['name'] == foreground_class_of_interest:
foreground_class_of_interest_id = category['id']
category_index[category['id']] = category
# Create annotations index, a map of image_id to it's annotations
tf.logging.info('Building annotations index...')
annotations_index = collections.defaultdict(
lambda: collections.defaultdict(list))
# structure is { "image_id": {"objects" : [list of the image annotations]}}
for annotation in groundtruth_data['annotations']:
annotations_index[annotation['image_id']]['objects'].append(annotation)
missing_annotation_count = len(images) - len(annotations_index)
tf.logging.info('%d images are missing annotations.',
missing_annotation_count)
# Create filtered annotations index
annotations_index_filtered = {}
for idx, image in enumerate(images):
if idx % 100 == 0:
tf.logging.info('On image %d of %d', idx, len(images))
annotations = annotations_index[image['id']]
annotations_filtered = _filter_annotations(
annotations, image, small_object_area_threshold,
foreground_class_of_interest_id)
annotations_index_filtered[image['id']] = annotations_filtered
with open(visualwakewords_annotations_file, 'w') as fp:
json.dump(
{
'images': images,
'annotations': annotations_index_filtered,
'categories': category_index
}, fp)
def _filter_annotations(annotations, image, small_object_area_threshold,
foreground_class_of_interest_id):
"""Filters COCO annotations to visual wakewords annotations.
Args:
annotations: dicts with keys: {
u'objects': [{u'id', u'image_id', u'category_id', u'segmentation',
u'area', u'bbox' : [x,y,width,height], u'iscrowd'}] } Notice
that bounding box coordinates in the official COCO dataset
are given as [x, y, width, height] tuples using absolute
coordinates where x, y represent the top-left (0-indexed)
corner.
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
small_object_area_threshold: threshold on fraction of image area below which
small objects are filtered
    foreground_class_of_interest_id: id of the COCO category that the Visual
      WakeWords dataset keeps as its foreground class
Returns:
annotations_filtered: dict with keys: {
u'objects': [{"area", "bbox" : [x,y,width,height]}],
u'label',
}
"""
objects = []
image_area = image['height'] * image['width']
for annotation in annotations['objects']:
normalized_object_area = annotation['area'] / image_area
category_id = int(annotation['category_id'])
# Filter valid bounding boxes
if category_id == foreground_class_of_interest_id and \
normalized_object_area > small_object_area_threshold:
objects.append({
u'area': annotation['area'],
u'bbox': annotation['bbox'],
})
label = 1 if objects else 0
return {
'objects': objects,
'label': label,
}
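# A worked example of the filter above (hypothetical numbers): with
# small_object_area_threshold=0.005 and a 640x480 image (image_area=307200),
# a person box of area 1200 has a normalized area of 1200 / 307200 ~= 0.0039
# and is dropped, while a box of area 2000 (~0.0065) is kept, so the image
# label becomes 1.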
def create_tf_record_for_visualwakewords_dataset(annotations_file, image_dir,
output_path, num_shards):
"""Loads Visual WakeWords annotations/images and converts to tf.Record format.
Args:
annotations_file: JSON file containing bounding box annotations.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
num_shards: number of output file shards.
"""
with contextlib2.ExitStack() as tf_record_close_stack, \
tf.gfile.GFile(annotations_file, 'r') as fid:
output_tfrecords = dataset_utils.open_sharded_output_tfrecords(
tf_record_close_stack, output_path, num_shards)
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
annotations_index = groundtruth_data['annotations']
annotations_index = {int(k): v for k, v in six.iteritems(annotations_index)}
# convert 'unicode' key to 'int' key after we parse the json file
for idx, image in enumerate(images):
if idx % 100 == 0:
tf.logging.info('On image %d of %d', idx, len(images))
annotations = annotations_index[image['id']]
tf_example = _create_tf_example(image, annotations, image_dir)
shard_idx = idx % num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
def _create_tf_example(image, annotations, image_dir):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
annotations: dict with objects (a list of image annotations) and a label.
{u'objects':[{"area", "bbox" : [x,y,width,height}], u'label'}. Notice
that bounding box coordinates in the COCO dataset are given as[x, y,
width, height] tuples using absolute coordinates where x, y represent
the top-left (0-indexed) corner. This function also converts to the format
that can be used by the Tensorflow Object Detection API (which is [ymin,
xmin, ymax, xmax] with coordinates normalized relative to image size).
image_dir: directory containing the image files.
Returns:
tf_example: The converted tf.Example
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin, xmax, ymin, ymax, area = [], [], [], [], []
for obj in annotations['objects']:
(x, y, width, height) = tuple(obj['bbox'])
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
area.append(obj['area'])
feature_dict = {
'image/height':
dataset_utils.int64_feature(image_height),
'image/width':
dataset_utils.int64_feature(image_width),
'image/filename':
dataset_utils.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_utils.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_utils.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_utils.bytes_feature(encoded_jpg),
'image/format':
dataset_utils.bytes_feature('jpeg'.encode('utf8')),
'image/class/label':
dataset_utils.int64_feature(annotations['label']),
'image/object/bbox/xmin':
dataset_utils.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_utils.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_utils.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_utils.float_list_feature(ymax),
'image/object/area':
dataset_utils.float_list_feature(area),
}
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example
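# A worked example of the bbox conversion in _create_tf_example (hypothetical
# numbers): a COCO box [x=64, y=48, width=128, height=96] in a 640x480 image
# is stored as xmin=0.1, xmax=0.3, ymin=0.1, ymax=0.3 after normalization.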
| 11,608 | 39.309028 | 80 | py |
models | models-master/research/slim/datasets/dataset_factory.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A factory-pattern class which returns classification image/label pairs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets import cifar10
from datasets import flowers
from datasets import imagenet
from datasets import mnist
from datasets import visualwakewords
datasets_map = {
'cifar10': cifar10,
'flowers': flowers,
'imagenet': imagenet,
'mnist': mnist,
'visualwakewords': visualwakewords,
}
def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader=None):
"""Given a dataset name and a split_name returns a Dataset.
Args:
name: String, the name of the dataset.
split_name: A train/test split name.
dataset_dir: The directory where the dataset files are stored.
file_pattern: The file pattern to use for matching the dataset source files.
reader: The subclass of tf.ReaderBase. If left as `None`, then the default
reader defined by each dataset is used.
Returns:
A `Dataset` class.
Raises:
ValueError: If the dataset `name` is unknown.
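  Example (a minimal sketch; the dataset directory is a placeholder and must
  already contain the converted TFRecords and labels file):
    dataset = get_dataset('flowers', 'train', '/tmp/flowers')
    # dataset.num_samples and dataset.num_classes describe this split; a
    # slim DatasetDataProvider can then read (image, label) pairs from it.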
"""
if name not in datasets_map:
raise ValueError('Name of dataset unknown %s' % name)
return datasets_map[name].get_split(
split_name,
dataset_dir,
file_pattern,
reader)
| 1,995 | 32.266667 | 80 | py |
models | models-master/research/slim/datasets/dataset_utils.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utilities for downloading and converting datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tarfile
import zipfile
from six.moves import urllib
import tensorflow.compat.v1 as tf
LABELS_FILENAME = 'labels.txt'
def int64_feature(values):
"""Returns a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def bytes_list_feature(values):
"""Returns a TF-Feature of list of bytes.
Args:
values: A string or list of strings.
Returns:
A TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def float_list_feature(values):
"""Returns a TF-Feature of list of floats.
Args:
values: A float or list of floats.
Returns:
A TF-Feature.
"""
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def bytes_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
A TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def float_feature(values):
"""Returns a TF-Feature of floats.
Args:
    values: A scalar or list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def image_to_tfexample(image_data, image_format, height, width, class_id):
return tf.train.Example(features=tf.train.Features(feature={
'image/encoded': bytes_feature(image_data),
'image/format': bytes_feature(image_format),
'image/class/label': int64_feature(class_id),
'image/height': int64_feature(height),
'image/width': int64_feature(width),
}))
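# A decoding sketch for records written by image_to_tfexample() (a minimal
# example using the standard tf.parse_single_example API; `serialized_example`
# is a placeholder, not part of this module):
#
#   keys_to_features = {
#       'image/encoded': tf.FixedLenFeature((), tf.string),
#       'image/format': tf.FixedLenFeature((), tf.string),
#       'image/class/label': tf.FixedLenFeature([], tf.int64),
#   }
#   parsed = tf.parse_single_example(serialized_example, keys_to_features)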
def download_url(url, dataset_dir):
"""Downloads the tarball or zip file from url into filepath.
Args:
url: The URL of a tarball or zip file.
dataset_dir: The directory where the temporary files are stored.
Returns:
filepath: path where the file is downloaded.
"""
filename = url.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
def download_and_uncompress_tarball(tarball_url, dataset_dir):
"""Downloads the `tarball_url` and uncompresses it locally.
Args:
tarball_url: The URL of a tarball file.
dataset_dir: The directory where the temporary files are stored.
"""
filepath = download_url(tarball_url, dataset_dir)
tarfile.open(filepath, 'r:gz').extractall(dataset_dir)
def download_and_uncompress_zipfile(zip_url, dataset_dir):
"""Downloads the `zip_url` and uncompresses it locally.
Args:
zip_url: The URL of a zip file.
dataset_dir: The directory where the temporary files are stored.
"""
filename = zip_url.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
if tf.gfile.Exists(filepath):
    print('File {filename} has already been downloaded at {filepath}. '
'Unzipping it....'.format(filename=filename, filepath=filepath))
else:
filepath = download_url(zip_url, dataset_dir)
with zipfile.ZipFile(filepath, 'r') as zip_file:
for member in zip_file.namelist():
memberpath = os.path.join(dataset_dir, member)
# extract only if file doesn't exist
if not (os.path.exists(memberpath) or os.path.isfile(memberpath)):
zip_file.extract(member, dataset_dir)
def write_label_file(labels_to_class_names,
dataset_dir,
filename=LABELS_FILENAME):
"""Writes a file with the list of class names.
Args:
labels_to_class_names: A map of (integer) labels to class names.
dataset_dir: The directory in which the labels file should be written.
filename: The filename where the class names are written.
"""
labels_filename = os.path.join(dataset_dir, filename)
with tf.gfile.Open(labels_filename, 'w') as f:
for label in labels_to_class_names:
class_name = labels_to_class_names[label]
f.write('%d:%s\n' % (label, class_name))
def has_labels(dataset_dir, filename=LABELS_FILENAME):
"""Specifies whether or not the dataset directory contains a label map file.
Args:
dataset_dir: The directory in which the labels file is found.
filename: The filename where the class names are written.
Returns:
`True` if the labels file exists and `False` otherwise.
"""
return tf.gfile.Exists(os.path.join(dataset_dir, filename))
def read_label_file(dataset_dir, filename=LABELS_FILENAME):
"""Reads the labels file and returns a mapping from ID to class name.
Args:
dataset_dir: The directory in which the labels file is found.
filename: The filename where the class names are written.
Returns:
A map from a label (integer) to class name.
"""
labels_filename = os.path.join(dataset_dir, filename)
with tf.gfile.Open(labels_filename, 'rb') as f:
lines = f.read().decode()
lines = lines.split('\n')
lines = filter(None, lines)
labels_to_class_names = {}
for line in lines:
index = line.index(':')
labels_to_class_names[int(line[:index])] = line[index+1:]
return labels_to_class_names
def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
"""Opens all TFRecord shards for writing and adds them to an exit stack.
Args:
    exit_stack: A contextlib2.ExitStack used to automatically close the
      TFRecords opened in this function.
base_path: The base path for all shards
num_shards: The number of shards
Returns:
The list of opened TFRecords. Position k in the list corresponds to shard k.
"""
tf_record_output_filenames = [
'{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
for idx in range(num_shards)
]
tfrecords = [
exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name))
for file_name in tf_record_output_filenames
]
return tfrecords
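# A usage sketch for open_sharded_output_tfrecords() (mirrors
# create_tf_record_for_visualwakewords_dataset; `examples` is a placeholder
# list of tf.train.Example protos):
#
#   with contextlib2.ExitStack() as tf_record_close_stack:
#     writers = open_sharded_output_tfrecords(
#         tf_record_close_stack, '/tmp/output.tfrecord', num_shards=10)
#     for idx, example in enumerate(examples):
#       writers[idx % 10].write(example.SerializeToString())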
| 7,151 | 28.676349 | 80 | py |
models | models-master/research/slim/datasets/build_imagenet_data.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
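# A usage sketch for _convert_to_example() (hypothetical values; `image_buffer`
# and `writer` are placeholders, and the bounding box is one
# [xmin, ymin, xmax, ymax] entry):
#
#   example = _convert_to_example('/data/n01440764/n01440764_10026.JPEG',
#                                 image_buffer, label=1, synset='n01440764',
#                                 human='tench, Tinca tinca',
#                                 bbox=[[0.1, 0.2, 0.8, 0.9]],
#                                 height=480, width=640)
#   writer.write(example.SerializeToString())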
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
  image_data = tf.gfile.GFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
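# --- Illustrative sketch (not part of the original script) ---
# A hypothetical end-to-end path for a single image: decode/clean the JPEG
# with ImageCoder, then wrap it into an Example proto. The file name, label,
# synset, human label and box below are made-up placeholders.
def _single_image_example_sketch():
  coder = ImageCoder()
  filename = '/tmp/imagenet/n02119022/n02119022_1234.JPEG'  # hypothetical path
  image_buffer, height, width = _process_image(filename, coder)
  example = _convert_to_example(filename, image_buffer, label=7,
                                synset='n02119022',
                                human='red fox, Vulpes vulpes',
                                bbox=[[0.1, 0.2, 0.8, 0.9]],
                                height=height, width=width)
  return example.SerializeToString()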
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch to run; the index is within
      [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries corresponding to the number of
      bounding box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
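# --- Illustrative sketch (assumption, not part of the original script) ---
# The shard arithmetic above, worked through for a hypothetical run with
# 8 shards, 2 threads and 100 files: thread 0 covers files [0, 50) and writes
# shards 0-3, thread 1 covers files [50, 100) and writes shards 4-7.
def _shard_math_sketch():
  num_shards, num_threads, num_files = 8, 2, 100
  spacing = np.linspace(0, num_files, num_threads + 1).astype(int)
  ranges = [[spacing[i], spacing[i + 1]] for i in range(len(spacing) - 1)]
  num_shards_per_batch = int(num_shards / num_threads)
  shard_ranges = np.linspace(ranges[0][0], ranges[0][1],
                             num_shards_per_batch + 1).astype(int)
  return ranges, shard_ranges  # [[0, 50], [50, 100]], [0, 12, 25, 37, 50]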
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries corresponding to the number of
      bounding box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [
l.strip() for l in tf.gfile.GFile(labels_file, 'r').readlines()
]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
  shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
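# --- Illustrative sketch (assumption, not part of the original script) ---
# How the loop above assigns integer labels, using two hypothetical synsets
# and fake file lists; label 0 stays reserved for the background class.
def _label_assignment_sketch():
  files_per_synset = {'n01440764': ['a.JPEG', 'b.JPEG'], 'n01443537': ['c.JPEG']}
  labels = []
  label_index = 1
  for synset in ['n01440764', 'n01443537']:
    labels.extend([label_index] * len(files_per_synset[synset]))
    label_index += 1
  return labels  # [1, 1, 2]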
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
    List of bounding boxes for each image. Note that each entry in this
    list might contain 0 or more entries corresponding to the number of
    bounding box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.GFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.GFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
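# --- Illustrative sketch (assumption, not part of the original script) ---
# Parsing a single annotation line (the example line from the docstring above)
# into the {filename: [box, ...]} structure built by the loop above.
def _bbox_line_sketch():
  line = 'n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940'
  parts = line.split(',')
  filename = parts[0]
  box = [float(p) for p in parts[1:]]  # [xmin, ymin, xmax, ymax]
  return {filename: [box]}  # {'n00007846_64193.JPEG': [[0.006, 0.262, 0.7545, 0.994]]}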
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
| 26,229 | 36.152975 | 87 | py |
models | models-master/research/slim/datasets/download_and_convert_mnist.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts MNIST data to TFRecords of TF-Example protos.
This module downloads the MNIST data, uncompresses it, reads the files
that make up the MNIST data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import numpy as np
from six.moves import urllib
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
# The URLs where the MNIST data can be downloaded.
_DATA_URL = 'http://yann.lecun.com/exdb/mnist/'
_TRAIN_DATA_FILENAME = 'train-images-idx3-ubyte.gz'
_TRAIN_LABELS_FILENAME = 'train-labels-idx1-ubyte.gz'
_TEST_DATA_FILENAME = 't10k-images-idx3-ubyte.gz'
_TEST_LABELS_FILENAME = 't10k-labels-idx1-ubyte.gz'
_IMAGE_SIZE = 28
_NUM_CHANNELS = 1
# The names of the classes.
_CLASS_NAMES = [
'zero',
'one',
'two',
'three',
'four',
'five',
    'six',
'seven',
'eight',
'nine',
]
def _extract_images(filename, num_images):
"""Extract the images into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
"""
print('Extracting images from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(
_IMAGE_SIZE * _IMAGE_SIZE * num_images * _NUM_CHANNELS)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
return data
def _extract_labels(filename, num_labels):
"""Extract the labels into a vector of int64 label IDs.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A numpy array of shape [number_of_labels]
"""
print('Extracting labels from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def _add_to_tfrecord(data_filename, labels_filename, num_images,
tfrecord_writer):
"""Loads data from the binary MNIST files and writes files to a TFRecord.
Args:
data_filename: The filename of the MNIST images.
labels_filename: The filename of the MNIST labels.
num_images: The number of images in the dataset.
tfrecord_writer: The TFRecord writer to use for writing.
"""
images = _extract_images(data_filename, num_images)
labels = _extract_labels(labels_filename, num_images)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(num_images):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, num_images))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(
png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
tfrecord_writer.write(example.SerializeToString())
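# --- Illustrative sketch (assumption, not part of the original script) ---
# How a record written above could be parsed back. The feature keys assume
# dataset_utils.image_to_tfexample uses the usual 'image/encoded' and
# 'image/class/label' names.
def _parse_mnist_record_sketch(serialized_example):
  features = tf.parse_single_example(
      serialized_example,
      features={
          'image/encoded': tf.FixedLenFeature((), tf.string),
          'image/class/label': tf.FixedLenFeature([], tf.int64),
      })
  image = tf.image.decode_png(features['image/encoded'], channels=_NUM_CHANNELS)
  return image, features['image/class/label']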
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The directory where the temporary files are stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/mnist_%s.tfrecord' % (dataset_dir, split_name)
def _download_dataset(dataset_dir):
"""Downloads MNIST locally.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
print('Downloading file %s...' % filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %.1f%%' % (
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(_DATA_URL + filename,
filepath,
_progress)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
_download_dataset(dataset_dir)
# First, process the training data:
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TRAIN_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TRAIN_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 60000, tfrecord_writer)
# Next, process the testing data:
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TEST_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TEST_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 10000, tfrecord_writer)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the MNIST dataset!')
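# --- Illustrative usage (assumption, not part of the original module) ---
# This module is normally driven by download_and_convert_data.py, but calling
# run() directly with a scratch directory would produce mnist_train.tfrecord,
# mnist_test.tfrecord and labels.txt in that directory, e.g.:
#   run('/tmp/mnist')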
| 7,399 | 32.333333 | 80 | py |
models | models-master/research/slim/datasets/preprocess_imagenet_validation_data.py | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
Associate the ImageNet 2012 Challenge validation data set with labels.
The raw ImageNet validation data set is expected to reside in JPEG files
located in the following directory structure.
data_dir/ILSVRC2012_val_00000001.JPEG
data_dir/ILSVRC2012_val_00000002.JPEG
...
data_dir/ILSVRC2012_val_00050000.JPEG
This script moves the files into a directory structure like such:
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
This directory reorganization requires a mapping from validation image
number (i.e. suffix of the original file) to the associated label. This
is provided in the ImageNet development kit via a Matlab file.
In order to make life easier and divorce ourselves from Matlab, we instead
supply a custom text file that provides this mapping for us.
Sample usage:
./preprocess_imagenet_validation_data.py ILSVRC2012_img_val \
imagenet_2012_validation_synset_labels.txt
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Invalid usage\n'
'usage: preprocess_imagenet_validation_data.py '
'<validation data dir> <validation labels file>')
sys.exit(-1)
data_dir = sys.argv[1]
validation_labels_file = sys.argv[2]
# Read in the 50000 synsets associated with the validation data set.
labels = [l.strip() for l in open(validation_labels_file).readlines()]
unique_labels = set(labels)
# Make all sub-directories in the validation data dir.
for label in unique_labels:
labeled_data_dir = os.path.join(data_dir, label)
os.makedirs(labeled_data_dir)
  # Move all of the images to the appropriate sub-directory.
for i in xrange(len(labels)):
basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1)
original_filename = os.path.join(data_dir, basename)
if not os.path.exists(original_filename):
print('Failed to find: ', original_filename)
sys.exit(-1)
new_filename = os.path.join(data_dir, labels[i], basename)
os.rename(original_filename, new_filename)
| 3,069 | 35.547619 | 80 | py |
models | models-master/research/slim/datasets/download_and_convert_cifar10.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts cifar10 data to TFRecords of TF-Example protos.
This module downloads the cifar10 data, uncompresses it, reads the files
that make up the cifar10 data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take several minutes to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tarfile
import numpy as np
from six.moves import cPickle
from six.moves import urllib
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
# The URL where the CIFAR data can be downloaded.
_DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# The number of training files.
_NUM_TRAIN_FILES = 5
# The height and width of each image.
_IMAGE_SIZE = 32
# The names of the classes.
_CLASS_NAMES = [
'airplane',
'automobile',
'bird',
'cat',
'deer',
'dog',
'frog',
'horse',
'ship',
'truck',
]
def _add_to_tfrecord(filename, tfrecord_writer, offset=0):
"""Loads data from the cifar10 pickle files and writes files to a TFRecord.
Args:
filename: The filename of the cifar10 pickle file.
tfrecord_writer: The TFRecord writer to use for writing.
offset: An offset into the absolute number of images previously written.
Returns:
The new offset.
"""
with tf.gfile.Open(filename, 'rb') as f:
if sys.version_info < (3,):
data = cPickle.load(f)
else:
data = cPickle.load(f, encoding='bytes')
images = data[b'data']
num_images = images.shape[0]
images = images.reshape((num_images, 3, 32, 32))
labels = data[b'labels']
with tf.Graph().as_default():
image_placeholder = tf.placeholder(dtype=tf.uint8)
encoded_image = tf.image.encode_png(image_placeholder)
with tf.Session('') as sess:
for j in range(num_images):
sys.stdout.write('\r>> Reading file [%s] image %d/%d' % (
filename, offset + j + 1, offset + num_images))
sys.stdout.flush()
image = np.squeeze(images[j]).transpose((1, 2, 0))
label = labels[j]
png_string = sess.run(encoded_image,
feed_dict={image_placeholder: image})
example = dataset_utils.image_to_tfexample(
png_string, b'png', _IMAGE_SIZE, _IMAGE_SIZE, label)
tfrecord_writer.write(example.SerializeToString())
return offset + num_images
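# --- Illustrative sketch (assumption, not part of the original script) ---
# The cifar10 pickle stores each image as a flat 3072-byte vector in CHW
# order; the loop above reshapes and transposes it to HWC before PNG encoding.
# A tiny self-contained check of that layout conversion:
def _chw_to_hwc_sketch():
  flat = np.zeros(3 * 32 * 32, dtype=np.uint8)  # stand-in for one row of data[b'data']
  chw = flat.reshape(3, 32, 32)
  hwc = chw.transpose((1, 2, 0))
  return hwc.shape  # (32, 32, 3), the layout expected by tf.image.encode_png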
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The dataset directory where the dataset is stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/cifar10_%s.tfrecord' % (dataset_dir, split_name)
def _download_and_uncompress_dataset(dataset_dir):
"""Downloads cifar10 and uncompresses it locally.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dataset_dir)
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'cifar-10-batches-py')
tf.gfile.DeleteRecursively(tmp_dir)
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
# First, process the training data:
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
offset = 0
for i in range(_NUM_TRAIN_FILES):
filename = os.path.join(dataset_dir,
'cifar-10-batches-py',
'data_batch_%d' % (i + 1)) # 1-indexed.
offset = _add_to_tfrecord(filename, tfrecord_writer, offset)
# Next, process the testing data:
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
filename = os.path.join(dataset_dir,
'cifar-10-batches-py',
'test_batch')
_add_to_tfrecord(filename, tfrecord_writer)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the Cifar10 dataset!')
| 6,335 | 30.839196 | 80 | py |
models | models-master/research/slim/datasets/download_and_convert_flowers.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts Flowers data to TFRecords of TF-Example protos.
This module downloads the Flowers data, uncompresses it, reads the files
that make up the Flowers data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
# The URL where the Flowers data can be downloaded.
_DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
# The number of images in the validation set.
_NUM_VALIDATION = 350
# Seed for repeatability.
_RANDOM_SEED = 0
# The number of shards per dataset split.
_NUM_SHARDS = 5
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_jpeg(sess, image_data)
return image.shape[0], image.shape[1]
def decode_jpeg(self, sess, image_data):
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
A list of image file paths, relative to `dataset_dir` and the list of
subdirectories, representing class names.
"""
flower_root = os.path.join(dataset_dir, 'flower_photos')
directories = []
class_names = []
for filename in os.listdir(flower_root):
path = os.path.join(flower_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
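# --- Illustrative sketch (assumption, not part of the original script) ---
# With the standard flower_photos layout (daisy/, dandelion/, roses/,
# sunflowers/, tulips/), run() below maps the sorted class names to the ids
# 0..4. The same mapping, built on hypothetical hard-coded names:
def _class_id_sketch():
  class_names = sorted(['tulips', 'daisy', 'roses', 'sunflowers', 'dandelion'])
  return dict(zip(class_names, range(len(class_names))))
  # {'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflowers': 3, 'tulips': 4}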
def _get_dataset_filename(dataset_dir, split_name, shard_id):
output_filename = 'flowers_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, _NUM_SHARDS)
return os.path.join(dataset_dir, output_filename)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(filenames), shard_id))
sys.stdout.flush()
# Read the filename:
image_data = tf.gfile.GFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
image_data, b'jpg', height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'flower_photos')
tf.gfile.DeleteRecursively(tmp_dir)
def _dataset_exists(dataset_dir):
for split_name in ['train', 'validation']:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
if not tf.gfile.Exists(output_filename):
return False
return True
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
if _dataset_exists(dataset_dir):
print('Dataset files already exist. Exiting without re-creating them.')
return
dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
class_names_to_ids = dict(
list(zip(class_names, list(range(len(class_names))))))
# Divide into train and test:
random.seed(_RANDOM_SEED)
random.shuffle(photo_filenames)
training_filenames = photo_filenames[_NUM_VALIDATION:]
validation_filenames = photo_filenames[:_NUM_VALIDATION]
# First, convert the training and validation sets.
_convert_dataset('train', training_filenames, class_names_to_ids,
dataset_dir)
_convert_dataset('validation', validation_filenames, class_names_to_ids,
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(
list(zip(list(range(len(class_names))), class_names)))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the Flowers dataset!')
| 7,298 | 32.791667 | 80 | py |
models | models-master/research/slim/datasets/cifar10.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the Cifar10 dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_cifar10.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
_FILE_PATTERN = 'cifar10_%s.tfrecord'
SPLITS_TO_SIZES = {'train': 50000, 'test': 10000}
_NUM_CLASSES = 10
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [32 x 32 x 3] color image.',
'label': 'A single integer between 0 and 9',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading cifar10.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if not reader:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[32, 32, 3]),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
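# --- Illustrative usage (assumption, not part of the original module) ---
# A typical way this dataset is consumed with slim's data provider; the
# directory is a placeholder and must already contain the converted TFRecords
# produced by download_and_convert_cifar10.py.
def _get_split_usage_sketch():
  dataset = get_split('train', '/tmp/cifar10')
  provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
  image, label = provider.get(['image', 'label'])
  return image, label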
| 3,246 | 32.132653 | 80 | py |
models | models-master/research/slim/datasets/visualwakewords.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for Visual WakeWords Dataset with images+labels.
Visual WakeWords Dataset derives from the COCO dataset to design tiny models
classifying two classes, such as person/not-person. The COCO annotations
are filtered to two classes: person and not-person (or another user-defined
category). Bounding boxes for small objects with area less than 5% of the image
area are filtered out.
See build_visualwakewords_data.py which generates the Visual WakeWords dataset
annotations from the raw COCO dataset and converts them to TFRecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
_FILE_PATTERN = '%s.record-*'
_SPLITS_TO_SIZES = {
'train': 82783,
'val': 40504,
}
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying height and width.',
'label': 'The label id of the image, an integer in {0, 1}',
'object/bbox': 'A list of bounding boxes.',
}
_NUM_CLASSES = 2
# labels file
LABELS_FILENAME = 'labels.txt'
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading ImageNet.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources. It
is assumed that the pattern contains a '%s' string so that the split name
can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/class/label':
tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/object/bbox/xmin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(dtype=tf.float32),
}
items_to_handlers = {
'image':
slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'label':
slim.tfexample_decoder.Tensor('image/class/label'),
'object/bbox':
slim.tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
labels_to_names = None
labels_file = os.path.join(dataset_dir, LABELS_FILENAME)
if tf.gfile.Exists(labels_file):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
| 4,353 | 33.015625 | 80 | py |
models | models-master/research/slim/datasets/__init__.py | 1 | 0 | 0 | py |
|
models | models-master/research/slim/datasets/imagenet.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the ImageNet ILSVRC 2012 Dataset plus some bounding boxes.
Some images have one or more bounding boxes associated with the label of the
image. See details here: http://image-net.org/download-bboxes
ImageNet is based upon WordNet 3.0. To uniquely identify a synset, we use
"WordNet ID" (wnid), which is a concatenation of POS ( i.e. part of speech )
and SYNSET OFFSET of WordNet. For more information, please refer to the
WordNet documentation[http://wordnet.princeton.edu/wordnet/documentation/].
"There are bounding boxes for over 3000 popular synsets available.
For each synset, there are on average 150 images with bounding boxes."
WARNING: Don't use for object detection, in this case all the bounding boxes
of the image belong to just one class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import urllib
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
# TODO(nsilberman): Add tfrecord file type once the script is updated.
_FILE_PATTERN = '%s-*'
_SPLITS_TO_SIZES = {
'train': 1281167,
'validation': 50000,
}
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying height and width.',
'label': 'The label id of the image, integer between 0 and 999',
'label_text': 'The text of the label.',
'object/bbox': 'A list of bounding boxes.',
'object/label': 'A list of labels, one per each object.',
}
_NUM_CLASSES = 1001
# If set to false, will not try to set label_to_names in dataset
# by reading them from labels.txt or github.
LOAD_READABLE_NAMES = True
def create_readable_names_for_imagenet_labels():
"""Create a dict mapping label id to human readable string.
Returns:
    labels_to_names: dictionary where keys are integers from 0 to 1000
and values are human-readable names.
We retrieve a synset file, which contains a list of valid synset labels used
  by the ILSVRC competition. There is one synset per line, e.g.
# n01440764
# n01443537
We also retrieve a synset_to_human_file, which contains a mapping from synsets
to human-readable names for every synset in Imagenet. These are stored in a
tsv format, as follows:
# n02119247 black fox
# n02119359 silver fox
We assign each synset (in alphabetical order) an integer, starting from 1
(since 0 is reserved for the background class).
Code is based on
https://github.com/tensorflow/models/blob/master/research/inception/inception/data/build_imagenet_data.py#L463
"""
# pylint: disable=g-line-too-long
base_url = 'https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/'
synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)
synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)
filename, _ = urllib.request.urlretrieve(synset_url)
synset_list = [s.strip() for s in open(filename).readlines()]
num_synsets_in_ilsvrc = len(synset_list)
assert num_synsets_in_ilsvrc == 1000
filename, _ = urllib.request.urlretrieve(synset_to_human_url)
synset_to_human_list = open(filename).readlines()
num_synsets_in_all_imagenet = len(synset_to_human_list)
assert num_synsets_in_all_imagenet == 21842
synset_to_human = {}
for s in synset_to_human_list:
parts = s.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
label_index = 1
labels_to_names = {0: 'background'}
for synset in synset_list:
name = synset_to_human[synset]
labels_to_names[label_index] = name
label_index += 1
return labels_to_names
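# --- Illustrative note (assumption, not part of the original module) ---
# The resulting mapping keeps id 0 for the background class and assigns ids
# 1..1000 to the synsets in alphabetical order, e.g.:
#   labels_to_names[0]  -> 'background'
#   labels_to_names[1]  -> the human name for 'n01440764', the first synset
#                          (commonly listed as 'tench, Tinca tinca')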
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading ImageNet.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/format': tf.FixedLenFeature(
(), tf.string, default_value='jpeg'),
'image/class/label': tf.FixedLenFeature(
[], dtype=tf.int64, default_value=-1),
'image/class/text': tf.FixedLenFeature(
[], dtype=tf.string, default_value=''),
'image/object/bbox/xmin': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(
dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(
dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
'label_text': slim.tfexample_decoder.Tensor('image/class/text'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if LOAD_READABLE_NAMES:
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
else:
labels_to_names = create_readable_names_for_imagenet_labels()
dataset_utils.write_label_file(labels_to_names, dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
| 7,295 | 35.848485 | 112 | py |
models | models-master/research/slim/datasets/download_and_convert_visualwakewords.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts VisualWakewords data to TFRecords of TF-Example protos.
This module downloads the COCO dataset, uncompresses it, derives the
VisualWakeWords dataset to create two TFRecord datasets: one for
train and one for test. Each TFRecord dataset is comprised of a set of
TF-Example protocol buffers, each of which contain a single image and label.
The script should take several minutes to run.
Please note that this tool creates sharded output files.
VisualWakeWords dataset is used to design tiny models classifying two classes,
such as person/not-person. The two steps to generate the VisualWakeWords
dataset from the COCO dataset are given below:
1. Use COCO annotations to create VisualWakeWords annotations:
Note: A bounding box is 'valid' if it has the foreground_class_of_interest
  (e.g. person) and its area is greater than 0.5% of the image area.
The resulting annotations file has the following fields, where 'images' are
the same as COCO dataset. 'categories' only contains information about the
foreground_class_of_interest (e.g. person) and 'annotations' maps an image to
objects (a list of valid bounding boxes) and label (value is 1 if it has
  at least one valid bounding box, otherwise 0)
images[{
"id", "width", "height", "file_name", "flickr_url", "coco_url",
"license", "date_captured",
}]
categories{
"id": {"id", "name", "supercategory"}
}
annotations{
"image_id": {"objects":[{"area", "bbox" : [x,y,width,height]}], "label"}
}
2. Use VisualWakeWords annotations to create TFRecords:
The resulting TFRecord file contains the following features:
{ image/height, image/width, image/source_id, image/encoded,
image/class/label_text, image/class/label,
image/object/class/text,
image/object/bbox/ymin, image/object/bbox/xmin, image/object/bbox/ymax,
image/object/bbox/xmax, image/object/area
image/filename, image/format, image/key/sha256}
For classification models, you need the image/encoded and image/class/label.
Example usage:
Run download_and_convert_data.py in the parent directory as follows:
python download_and_convert_visualwakewords.py --logtostderr \
--dataset_name=visualwakewords \
--dataset_dir="${DATASET_DIR}" \
--small_object_area_threshold=0.005 \
--foreground_class_of_interest='person'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from datasets import download_and_convert_visualwakewords_lib
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.flags.DEFINE_string(
'coco_dirname', 'coco_dataset',
'A subdirectory in visualwakewords dataset directory'
'containing the coco dataset')
FLAGS = tf.app.flags.FLAGS
def run(dataset_dir, small_object_area_threshold, foreground_class_of_interest):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
small_object_area_threshold: Threshold of fraction of image area below which
small objects are filtered
foreground_class_of_interest: Build a binary classifier based on the
presence or absence of this object in the image.
"""
# 1. Download the coco dataset into a subdirectory under the visualwakewords
# dataset directory
coco_dir = os.path.join(dataset_dir, FLAGS.coco_dirname)
if not tf.gfile.IsDirectory(coco_dir):
tf.gfile.MakeDirs(coco_dir)
download_and_convert_visualwakewords_lib.download_coco_dataset(coco_dir)
# Path to COCO annotations
train_annotations_file = os.path.join(coco_dir, 'annotations',
'instances_train2014.json')
val_annotations_file = os.path.join(coco_dir, 'annotations',
'instances_val2014.json')
train_image_dir = os.path.join(coco_dir, 'train2014')
val_image_dir = os.path.join(coco_dir, 'val2014')
# Path to VisualWakeWords annotations
visualwakewords_annotations_train = os.path.join(
dataset_dir, 'instances_visualwakewords_train2014.json')
visualwakewords_annotations_val = os.path.join(
dataset_dir, 'instances_visualwakewords_val2014.json')
visualwakewords_labels_filename = os.path.join(dataset_dir, 'labels.txt')
train_output_path = os.path.join(dataset_dir, 'train.record')
val_output_path = os.path.join(dataset_dir, 'val.record')
# 2. Create a labels file
tf.logging.info('Creating a labels file...')
download_and_convert_visualwakewords_lib.create_labels_file(
foreground_class_of_interest, visualwakewords_labels_filename)
# 3. Use COCO annotations to create VisualWakeWords annotations
tf.logging.info('Creating train VisualWakeWords annotations...')
download_and_convert_visualwakewords_lib.create_visual_wakeword_annotations(
train_annotations_file, visualwakewords_annotations_train,
small_object_area_threshold, foreground_class_of_interest)
tf.logging.info('Creating validation VisualWakeWords annotations...')
download_and_convert_visualwakewords_lib.create_visual_wakeword_annotations(
val_annotations_file, visualwakewords_annotations_val,
small_object_area_threshold, foreground_class_of_interest)
# 4. Use VisualWakeWords annotations to create the TFRecords
tf.logging.info('Creating train TFRecords for VisualWakeWords dataset...')
download_and_convert_visualwakewords_lib.create_tf_record_for_visualwakewords_dataset(
visualwakewords_annotations_train,
train_image_dir,
train_output_path,
num_shards=100)
tf.logging.info(
'Creating validation TFRecords for VisualWakeWords dataset...')
download_and_convert_visualwakewords_lib.create_tf_record_for_visualwakewords_dataset(
visualwakewords_annotations_val,
val_image_dir,
val_output_path,
num_shards=10)
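# --- Illustrative usage (assumption, not part of the original module) ---
# This module is normally invoked through download_and_convert_data.py with
# the flags shown in the module docstring; a direct call would look like:
#   run('/tmp/visualwakewords', small_object_area_threshold=0.005,
#       foreground_class_of_interest='person')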
| 6,539 | 40.132075 | 88 | py |
models | models-master/research/slim/datasets/process_bounding_boxes.py | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
This script is called as
process_bounding_boxes.py <dir> [synsets-file]
Where <dir> is a directory containing the downloaded and unpacked bounding box
data. If [synsets-file] is supplied, then only the bounding boxes whose
synsets are contained within this file are returned. Note that the
[synsets-file] file contains synset ids, one per line.
The script dumps out a CSV text file in which each line contains an entry.
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
The entry can be read as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
The bounding box for <JPEG file name> contains two points (xmin, ymin) and
(xmax, ymax) specifying the lower-left corner and upper-right corner of a
bounding box in *relative* coordinates.
The user supplies a directory where the XML files reside. The directory
structure in the directory <dir> is assumed to look like this:
<dir>/nXXXXXXXX/nXXXXXXXX_YYYY.xml
Each XML file contains a bounding box annotation. The script:
(1) Parses the XML file and extracts the filename, label and bounding box info.
(2) The bounding box is specified in the XML files as integer (xmin, ymin) and
(xmax, ymax) *relative* to image size displayed to the human annotator. The
size of the image displayed to the human annotator is stored in the XML file
as integer (height, width).
Note that the displayed size will differ from the actual size of the image
downloaded from image-net.org. To make the bounding box annotation usable,
we convert the bounding box to floating point numbers relative to the
displayed height and width of the image.
Note that each XML file might contain N bounding box annotations.
Note that the points are all clamped at a range of [0.0, 1.0] because some
human annotations extend outside the range of the supplied image.
See details here: http://image-net.org/download-bboxes
(3) By default, the script outputs all valid bounding boxes. If a
[synsets-file] is supplied, only the subset of bounding boxes associated
with those synsets are outputted. Importantly, one can supply a list of
synsets in the ImageNet Challenge and output the list of bounding boxes
associated with the training images of the ILSVRC.
We use these bounding boxes to inform the random distortion of images
supplied to the network.
If you run this script successfully, you will see the following output
to stderr:
> Finished processing 544546 XML files.
> Skipped 0 XML files not in ImageNet Challenge.
> Skipped 0 bounding boxes not in ImageNet Challenge.
> Wrote 615299 bounding boxes from 544546 annotated images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import sys
import xml.etree.ElementTree as ET
from six.moves import xrange # pylint: disable=redefined-builtin
class BoundingBox(object):
pass
def GetItem(name, root, index=0):
count = 0
for item in root.iter(name):
if count == index:
return item.text
count += 1
# Failed to find "index" occurrence of item.
return -1
def GetInt(name, root, index=0):
return int(GetItem(name, root, index))
def FindNumberBoundingBoxes(root):
index = 0
while True:
if GetInt('xmin', root, index) == -1:
break
index += 1
return index
def ProcessXMLAnnotation(xml_file):
"""Process a single XML file containing a bounding box."""
# pylint: disable=broad-except
try:
tree = ET.parse(xml_file)
except Exception:
print('Failed to parse: ' + xml_file, file=sys.stderr)
return None
# pylint: enable=broad-except
root = tree.getroot()
num_boxes = FindNumberBoundingBoxes(root)
boxes = []
for index in xrange(num_boxes):
box = BoundingBox()
# Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index)
box.ymin = GetInt('ymin', root, index)
box.xmax = GetInt('xmax', root, index)
box.ymax = GetInt('ymax', root, index)
box.width = GetInt('width', root)
box.height = GetInt('height', root)
box.filename = GetItem('filename', root) + '.JPEG'
box.label = GetItem('name', root)
xmin = float(box.xmin) / float(box.width)
xmax = float(box.xmax) / float(box.width)
ymin = float(box.ymin) / float(box.height)
ymax = float(box.ymax) / float(box.height)
# Some images contain bounding box annotations that
# extend outside of the supplied image. See, e.g.
# n03127925/n03127925_147.xml
# Additionally, for some bounding boxes, the min > max
# or the box is entirely outside of the image.
min_x = min(xmin, xmax)
max_x = max(xmin, xmax)
box.xmin_scaled = min(max(min_x, 0.0), 1.0)
box.xmax_scaled = min(max(max_x, 0.0), 1.0)
min_y = min(ymin, ymax)
max_y = max(ymin, ymax)
box.ymin_scaled = min(max(min_y, 0.0), 1.0)
box.ymax_scaled = min(max(max_y, 0.0), 1.0)
boxes.append(box)
return boxes
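# Example (a sketch for illustration; the XML path below is an assumed
# placeholder, not a file shipped with this script):
#
#   boxes = ProcessXMLAnnotation('n02084071/n02084071_1234.xml')
#   for box in boxes:
#     print(box.filename, box.xmin_scaled, box.ymin_scaled,
#           box.xmax_scaled, box.ymax_scaled)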
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print('Invalid usage\n'
'usage: process_bounding_boxes.py <dir> [synsets-file]',
file=sys.stderr)
sys.exit(-1)
xml_files = glob.glob(sys.argv[1] + '/*/*.xml')
print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]),
file=sys.stderr)
if len(sys.argv) == 3:
labels = set([l.strip() for l in open(sys.argv[2]).readlines()])
print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]),
file=sys.stderr)
else:
labels = None
skipped_boxes = 0
skipped_files = 0
saved_boxes = 0
saved_files = 0
for file_index, one_file in enumerate(xml_files):
# Example: <...>/n06470073/n00141669_6790.xml
label = os.path.basename(os.path.dirname(one_file))
# Determine if the annotation is from an ImageNet Challenge label.
if labels is not None and label not in labels:
skipped_files += 1
continue
bboxes = ProcessXMLAnnotation(one_file)
assert bboxes is not None, 'No bounding boxes found in ' + one_file
found_box = False
for bbox in bboxes:
if labels is not None:
if bbox.label != label:
# Note: There is a slight bug in the bounding box annotation data.
# Many of the dog labels have the human label 'Scottish_deerhound'
# instead of the synset ID 'n02092002' in the bbox.label field. As a
# simple hack to overcome this issue, we only exclude bbox labels
# *which are synset ID's* that do not match original synset label for
# the XML file.
if bbox.label in labels:
skipped_boxes += 1
continue
# Guard against improperly specified boxes.
if (bbox.xmin_scaled >= bbox.xmax_scaled or
bbox.ymin_scaled >= bbox.ymax_scaled):
skipped_boxes += 1
continue
# Note bbox.filename occasionally contains '%s' in the name. This is
# data set noise that is fixed by just using the basename of the XML file.
image_filename = os.path.splitext(os.path.basename(one_file))[0]
print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' %
(image_filename,
bbox.xmin_scaled, bbox.ymin_scaled,
bbox.xmax_scaled, bbox.ymax_scaled))
saved_boxes += 1
found_box = True
if found_box:
saved_files += 1
else:
skipped_files += 1
if not file_index % 5000:
print('--> processed %d of %d XML files.' %
(file_index + 1, len(xml_files)),
file=sys.stderr)
print('--> skipped %d boxes and %d XML files.' %
(skipped_boxes, skipped_files), file=sys.stderr)
print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr)
print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files,
file=sys.stderr)
print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes,
file=sys.stderr)
print('Wrote %d bounding boxes from %d annotated images.' %
(saved_boxes, saved_files),
file=sys.stderr)
print('Finished.', file=sys.stderr)
| 8,858 | 33.877953 | 80 | py |
models | models-master/research/slim/datasets/mnist.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the MNIST dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_mnist.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
_FILE_PATTERN = 'mnist_%s.tfrecord'
_SPLITS_TO_SIZES = {'train': 60000, 'test': 10000}
_NUM_CLASSES = 10
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [28 x 28 x 1] grayscale image.',
'label': 'A single integer between 0 and 9',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading MNIST.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
'image/class/label': tf.FixedLenFeature(
[1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[28, 28, 1], channels=1),
'label': slim.tfexample_decoder.Tensor('image/class/label', shape=[]),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
num_classes=_NUM_CLASSES,
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
labels_to_names=labels_to_names)
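# Example usage (a minimal sketch, not part of the original module; the
# dataset directory is an assumed placeholder and the provider-based pipeline
# relies on the tf_slim DatasetDataProvider API):
#
#   dataset = get_split('train', '/tmp/mnist')
#   provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
#   image, label = provider.get(['image', 'label'])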
| 3,273 | 32.408163 | 80 | py |
models | models-master/research/slim/datasets/flowers.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the flowers dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_flowers.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
_FILE_PATTERN = 'flowers_%s_*.tfrecord'
SPLITS_TO_SIZES = {'train': 3320, 'validation': 350}
_NUM_CLASSES = 5
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying size.',
'label': 'A single integer between 0 and 4',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading flowers.
Args:
split_name: A train/validation split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/validation split.
"""
if split_name not in SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
| 3,251 | 32.183673 | 80 | py |
models | models-master/research/slim/preprocessing/inception_preprocessing.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
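# Example (for illustration only; `image` is assumed to be a float image
# tensor in [0, 1]). Each call randomly picks one of the four color-distortion
# orderings defined in distort_color below, mirroring how preprocess_for_train
# applies the color distortions:
#
#   distorted = apply_with_random_selector(
#       image,
#       lambda x, ordering: distort_color(x, ordering, fast_mode=False),
#       num_cases=4)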
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image,
height,
width,
bbox,
fast_mode=True,
scope=None,
add_image_summaries=True,
random_crop=True,
use_grayscale=False):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
  Additionally, it can create image summaries to display the different
  transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the
      range is [0, MAX], where MAX is the largest positive representable number
      for int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
add_image_summaries: Enable image summaries.
random_crop: Enable random cropping of images during preprocessing for
training.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
if add_image_summaries:
tf.summary.image('image_with_bounding_boxes', image_with_box)
if not random_crop:
distorted_image = image
else:
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
if add_image_summaries:
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
    # This resizing operation may distort the images because the aspect
    # ratio is not respected. We select a resize method at random for each
    # image via apply_with_random_selector rather than in a fixed order.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [height, width], method),
num_cases=num_resize_cases)
if add_image_summaries:
tf.summary.image(('cropped_' if random_crop else '') + 'resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 1 or 4 ways to do it.
num_distort_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=num_distort_cases)
if use_grayscale:
distorted_image = tf.image.rgb_to_grayscale(distorted_image)
if add_image_summaries:
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
distorted_image = tf.subtract(distorted_image, 0.5)
distorted_image = tf.multiply(distorted_image, 2.0)
return distorted_image
def preprocess_for_eval(image,
height,
width,
central_fraction=0.875,
scope=None,
central_crop=True,
use_grayscale=False):
"""Prepare one image for evaluation.
  If height and width are specified, it outputs an image of that size by
  applying resize_bilinear.
  If central_fraction is specified, it crops the central fraction of the
  input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the
      range is [0, MAX], where MAX is the largest positive representable number
      for int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
central_crop: Enable central cropping of images during preprocessing for
evaluation.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_crop and central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def preprocess_image(image,
height,
width,
is_training=False,
bbox=None,
fast_mode=True,
add_image_summaries=True,
crop_image=True,
use_grayscale=False):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image. If dtype is
      tf.float32 then the range should be [0, 1], otherwise it would be
      converted to tf.float32 assuming that the range is [0, MAX], where MAX is
      the largest positive representable number for int(8/16/32) data type (see
`tf.image.convert_image_dtype` for details).
height: integer, image expected height.
width: integer, image expected width.
    is_training: Boolean. If true, it transforms an image for training;
      otherwise it transforms it for evaluation.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
add_image_summaries: Enable image summaries.
crop_image: Whether to enable cropping of images during preprocessing for
both training and evaluation.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
return preprocess_for_train(
image,
height,
width,
bbox,
fast_mode,
add_image_summaries=add_image_summaries,
random_crop=crop_image,
use_grayscale=use_grayscale)
else:
return preprocess_for_eval(
image,
height,
width,
central_crop=crop_image,
use_grayscale=use_grayscale)
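# Example usage (a sketch; `raw_image` is assumed to be a decoded uint8 RGB
# image tensor, e.g. from tf.image.decode_jpeg):
#
#   train_image = preprocess_image(raw_image, 299, 299, is_training=True)
#   eval_image = preprocess_image(raw_image, 299, 299, is_training=False)
#
# Both calls return float32 tensors scaled to [-1, 1], the input range
# expected by the Inception-family networks in this repository.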
| 15,551 | 41.961326 | 80 | py |
models | models-master/research/slim/preprocessing/vgg_preprocessing.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.
The preprocessing steps for VGG were introduced in the following technical
report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: an image of shape [height, width, channels].
offset_height: a scalar tensor indicating the height offset.
offset_width: a scalar tensor indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
    the cropped image.
Raises:
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
return tf.reshape(image, cropped_shape)
def _random_crop(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = _random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
the image_list with cropped images.
Raises:
ValueError: if there are multiple image inputs provided with different size
or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong rank for tensor %s [expected] [actual]',
image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert)
with tf.control_dependencies([rank_assertions[0]]):
image_shape = tf.shape(image_list[0])
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
with tf.control_dependencies([rank_assertions[i]]):
shape = tf.shape(image)
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
with tf.control_dependencies(asserts):
max_offset_height = tf.reshape(image_height - crop_height + 1, [])
with tf.control_dependencies(asserts):
max_offset_width = tf.reshape(image_width - crop_width + 1, [])
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
def _central_crop(image_list, crop_height, crop_width):
"""Performs central crops of the given image list.
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
Returns:
the list of cropped images.
"""
outputs = []
for image in image_list:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
offset_height = (image_height - crop_height) / 2
offset_width = (image_width - crop_width) / 2
outputs.append(_crop(image, offset_height, offset_width,
crop_height, crop_width))
return outputs
def _mean_image_subtraction(image, means):
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image = _mean_image_subtraction(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
num_channels = image.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(axis=2, values=channels)
def _smallest_size_at_least(height, width, smallest_side):
"""Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
smallest_side = tf.to_float(smallest_side)
scale = tf.cond(tf.greater(height, width),
lambda: smallest_side / width,
lambda: smallest_side / height)
new_height = tf.to_int32(tf.rint(height * scale))
new_width = tf.to_int32(tf.rint(width * scale))
return new_height, new_width
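# Worked example (for illustration only): for a 400 x 600 image with
# smallest_side=256, height < width, so scale = 256 / 400 = 0.64 and the
# result is new_height=256, new_width=384, preserving the 2:3 aspect ratio.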
def _aspect_preserving_resize(image, smallest_side):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
shape = tf.shape(image)
height = shape[0]
width = shape[1]
new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
image = tf.expand_dims(image, 0)
resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
align_corners=False)
resized_image = tf.squeeze(resized_image)
resized_image.set_shape([None, None, 3])
return resized_image
def preprocess_for_train(image,
output_height,
output_width,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX,
use_grayscale=False):
"""Preprocesses the given image for training.
Note that the actual resizing scale is sampled from
  [`resize_side_min`, `resize_side_max`].
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
resize_side = tf.random_uniform(
[], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32)
image = _aspect_preserving_resize(image, resize_side)
image = _random_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
image = tf.image.random_flip_left_right(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_for_eval(image,
output_height,
output_width,
resize_side,
use_grayscale=False):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side: The smallest side of the image for aspect-preserving resizing.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
image = _aspect_preserving_resize(image, resize_side)
image = _central_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_image(image,
output_height,
output_width,
is_training=False,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX,
use_grayscale=False):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, then this value
is used for rescaling.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, this value is
ignored. Otherwise, the resize side is sampled from
      [resize_side_min, resize_side_max].
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(image, output_height, output_width,
resize_side_min, resize_side_max,
use_grayscale)
else:
return preprocess_for_eval(image, output_height, output_width,
resize_side_min, use_grayscale)
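# Example usage (a sketch; `raw_image` is assumed to be a decoded uint8 RGB
# image tensor):
#
#   train_image = preprocess_image(raw_image, 224, 224, is_training=True)
#   eval_image = preprocess_image(raw_image, 224, 224, is_training=False)
#
# Unlike the Inception preprocessing, the result is a float32 image with the
# per-channel means (_R_MEAN, _G_MEAN, _B_MEAN) subtracted; it is not scaled
# to [-1, 1].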
| 13,583 | 34.560209 | 80 | py |
models | models-master/research/slim/preprocessing/preprocessing_factory.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from preprocessing import cifarnet_preprocessing
from preprocessing import inception_preprocessing
from preprocessing import lenet_preprocessing
from preprocessing import vgg_preprocessing
def get_preprocessing(name, is_training=False, use_grayscale=False):
"""Returns preprocessing_fn(image, height, width, **kwargs).
Args:
name: The name of the preprocessing function.
is_training: `True` if the model is being used for training and `False`
otherwise.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
    preprocessing_fn: A function that preprocesses a single image (pre-batch).
It has the following signature:
image = preprocessing_fn(image, output_height, output_width, ...).
Raises:
ValueError: If Preprocessing `name` is not recognized.
"""
preprocessing_fn_map = {
'cifarnet': cifarnet_preprocessing,
'inception': inception_preprocessing,
'inception_v1': inception_preprocessing,
'inception_v2': inception_preprocessing,
'inception_v3': inception_preprocessing,
'inception_v4': inception_preprocessing,
'inception_resnet_v2': inception_preprocessing,
'lenet': lenet_preprocessing,
'mobilenet_v1': inception_preprocessing,
'mobilenet_v2': inception_preprocessing,
'mobilenet_v2_035': inception_preprocessing,
'mobilenet_v3_small': inception_preprocessing,
'mobilenet_v3_large': inception_preprocessing,
'mobilenet_v3_small_minimalistic': inception_preprocessing,
'mobilenet_v3_large_minimalistic': inception_preprocessing,
'mobilenet_edgetpu': inception_preprocessing,
'mobilenet_edgetpu_075': inception_preprocessing,
'mobilenet_v2_140': inception_preprocessing,
'nasnet_mobile': inception_preprocessing,
'nasnet_large': inception_preprocessing,
'pnasnet_mobile': inception_preprocessing,
'pnasnet_large': inception_preprocessing,
'resnet_v1_50': vgg_preprocessing,
'resnet_v1_101': vgg_preprocessing,
'resnet_v1_152': vgg_preprocessing,
'resnet_v1_200': vgg_preprocessing,
'resnet_v2_50': vgg_preprocessing,
'resnet_v2_101': vgg_preprocessing,
'resnet_v2_152': vgg_preprocessing,
'resnet_v2_200': vgg_preprocessing,
'vgg': vgg_preprocessing,
'vgg_a': vgg_preprocessing,
'vgg_16': vgg_preprocessing,
'vgg_19': vgg_preprocessing,
}
if name not in preprocessing_fn_map:
raise ValueError('Preprocessing name [%s] was not recognized' % name)
def preprocessing_fn(image, output_height, output_width, **kwargs):
return preprocessing_fn_map[name].preprocess_image(
image,
output_height,
output_width,
is_training=is_training,
use_grayscale=use_grayscale,
**kwargs)
return preprocessing_fn
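# Example usage (a sketch; `image` is assumed to be a decoded image tensor):
#
#   image_preprocessing_fn = get_preprocessing('inception_v3', is_training=True)
#   processed_image = image_preprocessing_fn(image, 299, 299)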
| 3,692 | 38.287234 | 80 | py |