seed (string, lengths 25–2.89k) | seed_api (string, lengths 14–102) | index (int64, 0–14.8k)
---|---|---|
import tensorflow as tf
if do_norm:
deconv = tf.layers.batch_normalization(deconv, momentum = 0.9)
if activation_function == "relu":
deconv = tf.nn.relu(deconv, name = 'relu')
if activation_function == "leakyrelu":
deconv = tf.nn.leaky_relu(deconv, alpha=relu_factor)
if activation_function == "elu":
deconv = tf.nn.elu(deconv, name = 'elu')
return deconv
| tensorflow.nn.elu | 11,000 |
import tensorflow as tf
super().__init__(*args, num_parameters=2, input_shape=input_shape, name=name, **kwargs)
self.epsilon = epsilon
def _forward(self, x1, x2, **kwargs):
"""
A log normal RV X = exp(mu + sigma*Z) where Z ~ N(0,I).
The forward pass scales to a standard log normal with mu=0, sigma=1 by computing:
exp(Z) = (X / exp(mu))^(1/sigma)
"""
params = self.parameterizer(x1)
mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]
# map x2 to a standard log normal using the predicted mus and log_sigmas
z2, ldj = log_gaussianize(x2, mus, log_sigmas)
z2 = tf.where(x2 > self.epsilon, z2, x2)
ldj = tf.where(x2 > self.epsilon, ldj, tf.zeros_like(ldj))
return z2, tf.math.reduce_sum(ldj, axis=[1,2,3])
def _inverse(self, x1, z2, **kwargs):
params = self.parameterizer(x1)
mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]
x2, ldj = log_gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True))
x2 = tf.where(z2 > self.epsilon, x2, z2)
ldj = tf.where(z2 > self.epsilon, ldj, tf.zeros_like(ldj))
return x2, tf.math.reduce_sum(ldj, axis=[1,2,3])
def half_gaussianize(x, log_sigmas, inverse=tf.constant(False)):
if inverse:
z = tf.math.exp(log_sigmas)*x
| tensorflow.zeros_like | 11,001 |
import tensorflow as tf
self.grads_and_vars, global_step=tf.contrib.framework.get_global_step())
def predict(self, state, sess=None):
sess = sess or tf.get_default_session()
state=featurize_state(state);
return sess.run(self.action, { self.state: [state] })[0]
| tensorflow.get_default_session | 11,002 |
import tensorflow as tf
self._eval_image_summary('pred', expected)
self._eval_image_summary('nois', noisy)
def _eval_image_summary(self, name, encoding_batch):
summary = self.image_summaries[name].eval(feed_dict={self.encoding: encoding_batch})
self.summary_writer.add_summary(summary, global_step=self.get_past_epochs())
def _add_decoding_summary(self, name, var, collection='train'):
var = var[:FLAGS.visualiza_max]
var = tf.concat(tf.unstack(var), axis=0)
var = tf.expand_dims(var, dim=0)
color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max)
var = tf.expand_dims(var[..., 3], dim=3)
bw_s = tf.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max)
return tf.summary.merge([color_s, bw_s])
# TRAINING PROGRESS EVENTS
def _on_training_start(self, sess):
# Writers and savers
self.summary_writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph)
self.saver = tf.train.Saver()
self._build_embedding_saver(sess)
self._restore_model(sess)
| tensorflow.summary.image | 11,003 |
import tensorflow as tf
# All of the `with` blocks inside the add_layer function exist only to add structure for TensorBoard.
def add_layer(inputs, in_size, out_size, activation_function=None,nameScope="layer"):
# add one more layer and return the output of this layer
with tf.name_scope(nameScope):
with tf.name_scope('weights'):
Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
with tf.name_scope('biases'):
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
with tf.name_scope('Wx_plus_b'):
Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b, )
return outputs
# This is what makes the difference when visualizing in TensorBoard:
# using `with tf.name_scope('inputs')` wraps xs and ys together
# into one large graph node whose name is the argument passed to tf.name_scope().
with tf.name_scope('inputs'):
| tensorflow.matmul | 11,004 |
import tensorflow as tf
shape = x.get_shape().as_list()
with tf.variable_scope(name):
beta = tf.get_variable('beta', [shape[-1]], initializer=tf.constant_initializer(0.))
gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02))
pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False)
pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False)
if pop_mean not in tf.moving_average_variables():
tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_mean)
tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_var)
def func1():
# execute at training time
batch_mean, batch_var = tf.nn.moments(x, range(len(shape) - 1))
update_mean = tf.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean))
update_var = tf.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var))
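# Each tf.assign_sub above realizes the moving-average update pop <- decay * pop + (1 - decay) * batch.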
with tf.control_dependencies([update_mean, update_var]):
return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)
def func2():
# execute at test time
return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon)
return tf.cond(train, func1, func2)
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
| tensorflow.assign_sub | 11,005 |
import tensorflow as tf
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size],
name='y')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
| tensorflow.placeholder | 11,006 |
import tensorflow as tf
self.loss_d_sum = tf.summary.scalar("d_loss", self.discriminator_loss)
self.db_loss_real_sum = tf.summary.scalar("db_loss_real", self.D_B_loss_real)
self.db_loss_fake_sum = tf.summary.scalar("db_loss_fake", self.D_B_loss_fake)
| tensorflow.summary.scalar | 11,007 |
import tensorflow as tf
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
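# per_example_loss is the standard softmax cross-entropy, i.e. -log p(correct label) for each example.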
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
| tensorflow.nn.log_softmax | 11,008 |
import tensorflow as tf
with tf.variable_scope('x_entropy'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
| tensorflow.nn.softmax_cross_entropy_with_logits | 11,009 |
import tensorflow as tf
def main(_, run_eval_loop=True):
with tf.name_scope('inputs'):
noise, one_hot_labels = _get_generator_inputs(
| tensorflow.name_scope | 11,010 |
import tensorflow as tf
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
_, enc_state = tf.nn.rnn(
tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
| tensorflow.nn.seq2seq.rnn_decoder | 11,011 |
import tensorflow as tf
X = tf.nn.relu(X)
X = tf.nn.conv2d(X, W, (1, 1, 1, 1), padding='SAME')
X = self._add_batch_norm(X, out_ch, is_train=is_train)
X = tf.reshape(X, (-1, w, h, out_ch)) # Sanity shape check
return X
| tensorflow.reshape | 11,012 |
import tensorflow as tf
if decoder.use_dropout:
size = tf.shape(output_)[1]
noise_shape = [1, size] if decoder.pervasive_dropout else None
output_ = tf.nn.dropout(output_, keep_prob=decoder.deep_layer_keep_prob, noise_shape=noise_shape)
else:
if decoder.pred_maxout_layer:
| tensorflow.nn.dropout | 11,013 |
import tensorflow as tf
mask=mask
)
feature_hidden_size = feature_hidden.shape[-1]
# reshape back to original first and second dimension
if len(original_feature_hidden.shape) > 2:
sequence_length = original_feature_hidden.shape[1]
feature_hidden = tf.reshape(
feature_hidden,
[-1, sequence_length, feature_hidden_size]
)
return feature_hidden
| tensorflow.reshape | 11,014 |
from tensorflow.python.framework import tensor_util
def _TopKShape(op):
"""Shape function for TopK and TopKV2 ops."""
input_shape = op.inputs[0].get_shape().with_rank_at_least(1)
if len(op.inputs) >= 2:
k = tensor_util.constant_value(op.inputs[1])
else:
k = op.get_attr("k")
last = input_shape[-1].value
| tensorflow.python.framework.tensor_util.constant_value | 11,015 |
import tensorflow as tf
Integer representation of this number.
"""
x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))
x_labels = []
| tensorflow.reshape | 11,016 |
import tensorflow as tf
# a_fc3 = tf.layers.dense(a_fc2, 64, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc3_e')
self.q_eval = tf.layers.dense(a_fc1, self.num_a, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='q_e')
# ------------------ build target_net ------------------
with tf.variable_scope('target_net'):
a_fc1_ = tf.layers.dense(self.s_, 128, tf.nn.relu, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='agent_fc1_t')
# a_fc2_ = tf.layers.dense(a_fc1_, 128, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc2_t')
# a_fc3_ = tf.layers.dense(a_fc2_, 64, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc3_t')
self.q_next = tf.layers.dense(a_fc1_, self.num_a, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='q_t')
# [batch*n_agents, 1]
self.q_selected = tf.reduce_sum(tf.multiply(self.q_eval, self.a), axis=1)
# ------------------ build mixing_net ------------------
with tf.variable_scope('mixing_net'):
# [batch, n_agents]
self.q_concat = tf.reshape(self.q_selected, [-1, self.n_agents])
self.q_concat_ = tf.reshape(self.q_m_, [-1, self.n_agents])
with tf.variable_scope('eval_hyper'):
| tensorflow.layers.dense | 11,017 |
import tensorflow as tf
# [1, none, none, 2]; the first "none" should be (rows * 9)
# the second time through it is [1, none, none, 18]
to_tf = tf.transpose(reshaped, [0, 2, 3, 1])
return to_tf
def _softmax_layer(self, bottom, name):
if name == 'rpn_cls_prob_reshape':
input_shape = tf.shape(bottom)
# Using -1 in tf.reshape(): -1 means that when you do not know the right value for a dimension, you can let it be inferred from the original array and the other dimensions.
# Each row holds the foreground/background scores of one anchor: first the first anchor type produced at every location, then the second anchor type at every location, and so on.
bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]])
reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)
return tf.reshape(reshaped_score, input_shape) # [1,none,none,2]
return tf.nn.softmax(bottom, name=name)
def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
with tf.variable_scope(name):
rois, rpn_scores = tf.py_func(proposal_top_layer,
[rpn_cls_prob, rpn_bbox_pred, self._im_info,
self._feat_stride, self._anchors, self._num_anchors],
[tf.float32, tf.float32])
rois.set_shape([cfg.FLAGS.rpn_top_n, 5])
rpn_scores.set_shape([cfg.FLAGS.rpn_top_n, 1])
return rois, rpn_scores
| tensorflow.reshape | 11,018 |
import tensorflow as tf
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])
# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
batch_size = int(chars.shape[1])
for l in range(len(self.cells)):
cell = self.cells[l]
outputs = []
state = cell.zero_state(batch_size, tf.float32)
# Unstack the inputs to obtain a list of batches, one for each time step.
chars = tf.unstack(chars, axis=0)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
| tensorflow.unstack | 11,019 |
import tensorflow as tf
num_additional_channels=predict_input_config.num_additional_channels)
input_dict = transform_fn(decoder.decode(example))
images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32)
images = tf.expand_dims(images, axis=0)
true_image_shape = tf.expand_dims(
input_dict[fields.InputDataFields.true_image_shape], axis=0)
| tensorflow.expand_dims | 11,020 |
import tensorflow as tf
# These should hold all of the variables of the Q-function network and target network,
# respectively. A convenient way to get these is to make use of TF's "scope" feature.
# For example, you can create your Q-function network with the scope "q_func" like this:
# <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
# And then you can obtain the variables like this:
# q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
# Older versions of TensorFlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES"
# Tip: use huber_loss (from dqn_utils) instead of squared error when defining self.total_error
self.Q_vals = q_func(obs_t_float, self.num_actions, 'q_func', reuse = tf.AUTO_REUSE)
q_func_ph = tf.gather_nd(self.Q_vals, tf.stack([tf.range(tf.shape(self.Q_vals)[0]), self.act_t_ph], axis=1))
target_q_ph = q_func(obs_tp1_float, self.num_actions, 'target_q_func', reuse = tf.AUTO_REUSE)
if double_q:
target_index = tf.math.argmax(q_func(obs_tp1_float, self.num_actions, 'q_func', reuse = tf.AUTO_REUSE), axis = 1, output_type = tf.int32)
target_v_ph = tf.gather_nd(target_q_ph, tf.stack([tf.range(tf.shape(target_q_ph)[0]), target_index], axis=1))
else:
target_v_ph = tf.math.reduce_max(target_q_ph, axis = 1)
| tensorflow.shape | 11,021 |
from tensorflow.python.framework import tensor_util
x = ops.convert_to_tensor(x, name="x")
def slice_shape(start_sum, size, name):
"""Closure to slice out shape."""
start_sum = start_sum if start_sum else (
array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
if (x.get_shape().ndims is not None and
self._is_all_constant_helper(size, *start_sum)):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
slice_ = x.get_shape()[start:stop].as_list()
if all(s is not None for s in slice_):
return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
# Fall-through intended.
return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,))
| tensorflow.python.framework.tensor_util.constant_value | 11,022 |
import tensorflow as tf
tfe.define_private_input(data_owner.player_name, data_owner.compute_gradient)
for data_owner in data_owners
))
with tf.name_scope('secure_aggregation'):
aggregated_model_grads = [
tfe.add_n(grads) / len(grads)
for grads in model_grads
| tensorflow.name_scope | 11,023 |
import tensorflow as tf
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
| tensorflow.flags.DEFINE_string | 11,024 |
import tensorflow as tf
qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
ch_emb = conv(ch_emb, d,
bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=None)
qh_emb = conv(qh_emb, d,
bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=True)
ch_emb = tf.reduce_max(ch_emb, axis=1)
qh_emb = tf.reduce_max(qh_emb, axis=1)
ch_emb = tf.reshape(ch_emb, [N * self.max_p_num, PL, -1])
qh_emb = tf.reshape(qh_emb, [N * self.max_p_num, QL, -1])
c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout)
q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout)
c_emb = tf.concat([c_emb, ch_emb], axis=2)
q_emb = tf.concat([q_emb, qh_emb], axis=2)
self.c_emb = highway(c_emb, size=d, scope="highway", dropout=self.dropout, reuse=None)
self.q_emb = highway(q_emb, size=d, scope="highway", dropout=self.dropout, reuse=True)
def _encode(self):
N, PL, QL, CL, d, dc, nh = self._params()
if self.config.fix_pretrained_vector:
dc = self.char_mat.get_shape()[-1]
| tensorflow.nn.embedding_lookup | 11,025 |
import tensorflow as tf
def l2_loss(tensor, weight=1.0, scope=None):
"""Define a L2Loss, useful for regularize, i.e. weight decay.
Args:
tensor: tensor to regularize.
weight: an optional weight to modulate the loss.
scope: Optional scope for op_scope.
Returns:
the L2 loss op.
"""
with tf.name_scope(scope):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')
return loss
def lppool(inpOp, pnorm, kH, kW, dH, dW, padding, name):
with tf.variable_scope(name):
if pnorm == 2:
pwr = tf.square(inpOp)
else:
pwr = tf.pow(inpOp, pnorm)
subsamp = tf.nn.avg_pool(pwr,
ksize=[1, kH, kW, 1],
strides=[1, dH, dW, 1],
padding=padding)
| tensorflow.nn.l2_loss | 11,026 |
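A brief usage sketch for the l2_loss helper defined above; the variable name, shape, and the 1e-4 coefficient are hypothetical, and task_loss is assumed to be defined elsewhere:
weights = tf.get_variable('fc_w', [256, 128])
weight_decay = l2_loss(weights, weight=1e-4, scope='weight_decay')  # scaled L2 penalty on the weights
total_loss = task_loss + weight_decay  # add the regularizer to the task objective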
import tensorflow as tf
tokens: 1-D integer `Tensor` [num_timesteps*batch_size]. Each element is an
id from the vocab.
vocab_size: an `int`, the vocabulary size of the problem
Returns:
Float 1-D `Tensor` same shape as tokens, whose values are 1.0 on the end of
sequence and 0.0 on the others.
"""
eos_id = vocab_size - 1
return tf.cast(tf.equal(tokens, eos_id), tf.float32)
def _kl_divergence_with_logits(q_logits, p_logits, weights, num_classes):
"""Returns weighted KL divergence between distributions q and p.
Args:
q_logits: logits for 1st argument of KL divergence shape
[num_timesteps * batch_size, num_classes] if num_classes > 2, and
| tensorflow.equal | 11,027 |
import tensorflow as tf
copy the parameters from the optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' lags behind Q to stabilize the learning. For example, for Atari
Q' is set to Q once every 10000 updates training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
def default_param_noise_filter(var):
if var not in tf.trainable_variables():
# We never perturb non-trainable vars.
return False
if "fully_connected" in var.name:
# We perturb fully-connected layers.
return True
# The remaining layers are likely conv or layer norm layers, which we do not wish to
# perturb (in the former case because they only extract features, in the latter case because
# we use them for normalization purposes). If you change your network, you will likely want
# to re-consider which layers to perturb and which to keep untouched.
return False
def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None):
| tensorflow.trainable_variables | 11,028 |
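A minimal sketch of the error described in the docstring above; the tensors q_values, target_q_values, actions, rewards, done_mask and the constant gamma are hypothetical names, not taken from the snippet, and U.huber_loss is assumed to be available from the baselines.common.tf_util import shown above:
q_sa = tf.reduce_sum(q_values * tf.one_hot(actions, num_actions), axis=1)  # Q(s, a) for the taken actions
td_target = rewards + gamma * (1.0 - done_mask) * tf.reduce_max(target_q_values, axis=1)  # r + gamma * max_a' Q'(s', a')
td_error = q_sa - tf.stop_gradient(td_target)  # no gradient flows through the lagging target network
loss = tf.reduce_mean(U.huber_loss(td_error))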
from tensorflow.python.framework import ops
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
| tensorflow.python.framework.ops.RegisterShape | 11,029 |
import tensorflow as tf
@tf.function
def ctc_decode(inputs, batch_size, seq_length, blank_index, def_val, shift, beam_width=10):
"""Perform ctc decoding"""
# Decode uses time major
inputs = tf.transpose(a=inputs, perm=[1, 0, 2])
seq_lengths = tf.fill([batch_size], seq_length)
# Perform beam search
indices, values, shape, indices_u, values_u, shape_u, log_probs = ctc_ext_beam_search_decoder(
inputs=inputs, sequence_length=seq_lengths,
beam_width=beam_width, blank_index=blank_index, top_paths=1,
blank_label=0)
| tensorflow.fill | 11,030 |
import tensorflow as tf
return logits
def top(self, body_output, features):
if isinstance(body_output, dict):
logits = {}
for k, v in body_output.iteritems():
logits[k] = self._top_single(v, features)
return logits
else:
return self._top_single(body_output, features)
def _loss_single(self, logits, features):
if not self._problem_hparams:
log_warn(_no_problem_err("loss"))
return (tf.constant(0., dtype=tf.float32),
tf.constant(1., dtype=tf.float32))
target_modality = self._problem_hparams.target_modality
loss_num, loss_den = target_modality.loss(logits, features["targets"])
loss_num *= self._problem_hparams.loss_multiplier
return loss_num, loss_den
def loss(self, logits, features):
if isinstance(logits, dict):
losses = {}
for k, v in logits.iteritems():
losses[k] = self._loss_single(v, features)
return tf.add_n([n / d for n, d in losses.values()])
else:
return self._loss_single(logits, features)
| tensorflow.constant | 11,031 |
import tensorflow as tf
def mmd_loss(source_samples, target_samples, weight, name='mmd_loss'):
"""Adds a similarity loss term, the MMD between two representations.
This Maximum Mean Discrepancy (MMD) loss is calculated with a number of
different Gaussian kernels.
Args:
source_samples: a tensor of shape [num_samples, num_features].
target_samples: a tensor of shape [num_samples, num_features].
weight: the weight of the MMD loss.
name: optional name scope for summary tags.
Returns:
a scalar tensor representing the MMD loss value.
"""
with tf.name_scope(name):
sigmas = [
1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6
]
gaussian_kernel = partial(util.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))
loss_value = maximum_mean_discrepancy(source_samples, target_samples, kernel=gaussian_kernel)
loss_value = tf.maximum(1e-4, loss_value) * weight
assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])
with tf.control_dependencies([assert_op]):
tag = 'MMD_Loss'
barrier = tf.no_op(tag)
return loss_value
| tensorflow.name_scope | 11,032 |
import tensorflow as tf
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
| tensorflow.nn.conv2d | 11,033 |
import tensorflow as tf
context_vectors.append(context_vector)
weights.append(weights_)
context_vector = tf.concat(context_vectors, axis=-1)
weights = sum(weights) / len(weights)
if encoder.attn_mapping:
with tf.variable_scope(scope):
context_vector = dense(context_vector, encoder.attn_mapping, use_bias=False, name='output')
return context_vector, weights
def multi_attention(state, hidden_states, encoders, encoder_input_length, pos=None, aggregation_method='sum',
| tensorflow.variable_scope | 11,034 |
import tensorflow as tf
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')
return loss
def lppool(inpOp, pnorm, kH, kW, dH, dW, padding, name):
with tf.variable_scope(name):
if pnorm == 2:
pwr = tf.square(inpOp)
else:
pwr = tf.pow(inpOp, pnorm)
| tensorflow.variable_scope | 11,035 |
import tensorflow as tf
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
position: a Tensor with shape [batch, seq_len]
min_timescale: a float
max_timescale: a float
Returns:
a Tensor of timing signals [batch, seq_len, channels]
"""
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
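# inv_timescales forms a geometric sequence, yielding sinusoids whose wavelengths grow geometrically across channels (Transformer-style timing signal).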
scaled_time = (
tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
tf.expand_dims(inv_timescales, 0), 0))
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
return signal
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
| tensorflow.range | 11,036 |
import tensorflow as tf
if __name__ == '__main__':
tf.test.main()
| tensorflow.test.main | 11,037 |
import tensorflow as tf
loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small) + margin)
| tensorflow.maximum | 11,038 |
import tensorflow as tf
Return:
A tuple of lists: (nodes, adjcents)
nodes: A list of N + 1 `tf.Tensor` of `int64`, where N is the number of
hops. Specifies the node set of each hop, including the root.
adjcents: A list of N `tf.SparseTensor` of `int64`. Specifies the adjacency
matrix between consecutive hops.
"""
nodes = tf.reshape(nodes, [-1])
nodes_list = [nodes]
adj_list = []
for hop_edge_types in edge_types:
neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
next_values = weight.values
next_shape = [tf.size(nodes), tf.size(next_nodes)]
next_adj = tf.sparse.SparseTensor(next_indices, next_values, next_shape)
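# In this adjacency matrix, rows index the current hop's nodes and columns index the unique neighbors of the next hop.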
next_adj = tf.sparse.reorder(next_adj)
nodes_list.append(next_nodes)
adj_list.append(next_adj)
nodes = next_nodes
return nodes_list, adj_list
| tensorflow.unique | 11,039 |
import tensorflow as tf
#print(image.shape)
#print(label.shape)
images,labels=tf.train.shuffle_batch([image,label],
batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=200)
return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
def get_test_batch(image,label,batch_size):
images,labels=tf.train.batch([image,label],batch_size=batch_size)
return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
def get_valid_batch(image,label,batch_size):
images,labels=tf.train.batch([image,label],batch_size=batch_size)
return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
class trainwork(object):
def __init__(self):
with tf.variable_scope('scop'):
self.w1=tf.get_variable('w1', [4096,1024],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.w2=tf.get_variable('w2', [1024,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.b1 = tf.get_variable('b1', [1024],initializer=tf.constant_initializer(0.0))
self.b2 = tf.get_variable('b2', [classnum],initializer=tf.constant_initializer(0.0))
def inference(self,images):
images=tf.cast(images,tf.float32)/255.0
| tensorflow.reshape | 11,040 |
import tensorflow as tf
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def main(_):
| tensorflow.constant | 11,041 |
import tensorflow as tf
def add_train_stats(model, hparams):
with tf.variable_scope("stats") as scope:
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tf.summary.scalar("before_loss", model.before_loss)
tf.summary.scalar("after_loss", model.after_loss)
if hparams.predict_linear:
tf.summary.scalar("linear_loss", model.linear_loss)
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
| tensorflow.summary.scalar | 11,042 |
import tensorflow as tf
if categorical:
x_dtype = x.dtype
x = x if x_dtype == tf.string else tf.strings.as_string(x)
elements, counts = count_per_key(x)
if x_dtype != elements.dtype:
elements = tf.strings.to_number(elements, tf.int64)
return counts, elements
if boundaries is None:
boundaries = tf.range(11, dtype=tf.float32) / 10.0
elif isinstance(boundaries, int) or (isinstance(boundaries, tf.Tensor) and
boundaries.get_shape().ndims == 0):
min_value, max_value = _min_and_max(x, True)
boundaries = tf.linspace(
tf.cast(min_value, tf.float32), tf.cast(max_value, tf.float32),
tf.cast(boundaries, tf.int64))
# Shift the boundaries slightly to account for floating point errors,
# and due to the fact that the rightmost boundary is essentially ignored.
boundaries = tf.expand_dims(tf.cast(boundaries, tf.float32), 0) - 0.0001
bucket_indices = tf_utils.assign_buckets(
tf.cast(x, tf.float32), remove_leftmost_boundary(boundaries))
bucket_vocab, counts = count_per_key(tf.strings.as_string(bucket_indices))
counts = tf_utils.reorder_histogram(bucket_vocab, counts,
tf.size(boundaries) - 1)
return counts, boundaries
| tensorflow.cast | 11,043 |
import tensorflow as tf
class Model(object):
def __init__(self, vars):
self.saver = tf.train.Saver(vars)
def session(self, sess):
if sess is not None:
self.sess = sess
else:
config_proto = tf.ConfigProto()
config_proto.gpu_options.allow_growth = True
self.sess = tf.Session(config=config_proto)
def initialize(self):
self.sess.run(tf.global_variables_initializer())
def save(self, path):
self.saver.save(self.sess, path)
| tensorflow.ConfigProto | 11,044 |
import tensorflow as tf
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
@classmethod
def _read_csv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter=",")
lines = []
for line in reader:
lines.append(line)
return lines
class FAQProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_csv(os.path.join(data_dir, "train.csv")), "train")
| tensorflow.gfile.Open | 11,045 |
import tensorflow as tf
# Extract the 8 strongest features (k-max pooling with k=8) as described in the paper
self.k_pooled = tf.nn.top_k(tf.transpose(self.layers[-1], [0,2,1]), k=8, name='k_pool', sorted=False)[0]
print("8-maxpooling:", self.k_pooled.get_shape())
self.flatten = tf.reshape(self.k_pooled, (-1, 512*8))
# fc1
with tf.variable_scope('fc1'):
w = tf.get_variable('w', [self.flatten.get_shape()[1], 2048], initializer=he_normal,
regularizer=regularizer)
b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
out = tf.matmul(self.flatten, w) + b
self.fc1 = tf.nn.relu(out)
# fc2
with tf.variable_scope('fc2'):
w = tf.get_variable('w', [self.fc1.get_shape()[1], 2048], initializer=he_normal,
regularizer=regularizer)
b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
out = tf.matmul(self.fc1, w) + b
self.fc2 = tf.nn.relu(out)
| tensorflow.matmul | 11,046 |
import tensorflow as tf
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
areas = self._decode_areas(parsed_tensors)
is_crowds = tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool)) # pylint: disable=line-too-long
if self._include_mask:
masks = self._decode_masks(parsed_tensors)
| tensorflow.shape | 11,047 |
import tensorflow.contrib.graph_editor as ge
# partial derivatives to the checkpointed tensors and xs
ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys],
stop_at_ts=checkpoints, within_ops=fwd_ops)
debug_print("Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s",
len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints)
debug_print("ops_to_copy = %s", ops_to_copy)
debug_print("Processing list %s", ys)
copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for origin_op, op in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print("Copied %s to %s", ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops)
debug_print("Rewired %s in place of %s restricted to %s",
checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops)
| tensorflow.contrib.graph_editor.sgv | 11,048 |
import tensorflow as tf
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
| tensorflow.trainable_variables | 11,049 |
import tensorflow as tf
"""
For Pendulum-v0
"""
class PolicyEstimator_Pendulum():
def __init__(self, entropy_beta=0.01, learning_rate=0.01, par_idx=0,scope="policy_estimator"):
w_init = tf.random_normal_initializer(0.,.1);
with tf.variable_scope(scope+"_"+str(par_idx)):
# state, target and action
self.state = tf.placeholder(tf.float32, [None,num_state], name="state")
self.target = tf.placeholder(tf.float32,[None,1], name="target")
self.a_his = tf.placeholder(tf.float32, [None, num_action], name="action_hist")
# layers
l_a = tf.layers.dense(self.state, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
self.mu = tf.layers.dense(l_a, num_action, tf.nn.tanh, kernel_initializer=w_init, name='mu') # estimated action value
self.sigma = tf.layers.dense(l_a, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma') # estimated variance
| tensorflow.placeholder | 11,050 |
from tensorflow.contrib import metrics as contrib_metrics
concat1 = contrib_metrics.streaming_concat(logits)
concat2 = contrib_metrics.streaming_concat(label_ids)
| tensorflow.contrib.metrics.streaming_concat | 11,051 |
import tensorflow as tf
# data input pipeline
with tf.variable_scope(self.data_scope):
| tensorflow.variable_scope | 11,052 |
import tensorflow as tf
tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input)
# Model output
raw_output = tf.reduce_sum(tf.mul(tf.mul(head_embed, rel_embed), tail_embed), 1)
self.output, self.loss = self._create_output_and_loss(raw_output)
# Optimization
self.train_step = self.opt.minimize(self.loss)
if self.maxnorm is not None:
# Post-processing to limit embedding vars to L2 ball
head_constraint = self._norm_constraint_op(self.head_embedding_vars,
tf.unique(self.head_input)[0],
self.maxnorm)
rel_constraint = self._norm_constraint_op(self.rel_embedding_vars,
tf.unique(self.rel_input)[0],
self.maxnorm)
tail_constraint = self._norm_constraint_op(self.tail_embedding_vars,
tf.unique(self.tail_input)[0],
self.maxnorm)
self.post_step = [head_constraint, rel_constraint, tail_constraint]
| tensorflow.unique | 11,053 |
import tensorflow as tf
self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
self.is_demo_ph = tf.placeholder(tf.float32, shape=(None, 1), name='is_demonstrations')
self.weight_ph = tf.placeholder(tf.float32, shape=(None, 1), name='importance_weight')
self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape,
name='actions')
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
if self.n_step:
self.next_observations_ph_n = self.target_policy.obs_ph
self.processed_next_obs_ph_n = self.target_policy.processed_obs
self.rewards_ph_n = tf.placeholder(tf.float32, shape=(None, 1), name='n_step_rewards')
self.terminals_ph_n = tf.placeholder(tf.float32, shape=(None, 1), name='n_step_terminals')
with tf.variable_scope("model", reuse=False):
# Create the policy
# first return value corresponds to deterministic actions
# policy_out corresponds to stochastic actions, used for training
# logp_pi is the log probability of actions taken by the policy
self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph)
# Monitor the entropy of the policy,
# this is not used for training
self.entropy = tf.reduce_mean(self.policy_tf.entropy)
self.obs_ph, self.actions_ph, self.deterministic_actions_ph = self._get_pretrain_placeholders()
# Use two Q-functions to improve performance by reducing overestimation bias.
qf1, qf2, value_fn = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph,
create_qf=True, create_vf=True)
| tensorflow.variable_scope | 11,054 |
import tensorflow as tf
self.y = tf.placeholder(tf.float32,
[batch_size, max_sequence_len, out_vocab_size],
name='y')
# The bidirectional rnn code requires seq_lens as int64
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
embeddings = c2v.GetEmbeddings(self.x)
self._inputs = [tf.squeeze(input_, [1]) for input_ in
| tensorflow.placeholder | 11,055 |
import tensorflow as tf
self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))
self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))
self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))
| tensorflow.placeholder | 11,056 |
import tensorflow as tf
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
| tensorflow.trainable_variables | 11,057 |
import tensorflow as tf
with tf.variable_scope(decoderscope) as scope:
if reuse_decoder: scope.reuse_variables()
# print('vnet scope', is_train, reuse_unet)
# print('VNET Latent:', X.get_shape().as_list())
with tf.variable_scope('decoder'):
X = decoder_conf('d3', X, 512, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 12 > 14
if self.args.skip_connections: X = tf.concat((X, X2), axis=-1)
X = decoder_conf('u4', X, 256, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 14 > 28
X = decoder_conf('d4', X, 256, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 28 > 30
if self.args.skip_connections: X = tf.concat((X, X1), axis=-1)
X = decoder_conf('u5', X, 128, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 30 > 60
X_LATE = X
X = decoder_conf('d5', X, 128, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 60 > 62
if self.args.skip_connections: X = tf.concat((X, X0), axis=-1)
X = decoder_conf('u6', X, 64, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 62 > 124
X = decoder_conf('d6', X, 64, 5, 1, norm, reuse_decoder, is_train, self.args.dropout) # 124 > 128
X = decoder_conf('out', X, self.args.num_classes, 1, 1, '', reuse_decoder, is_train, slope=1.0, stddev=0.02,
use_bias=False)
| tensorflow.concat | 11,058 |
import tensorflow as tf
scope_name = str(micros)
op_list = []
with tf.name_scope(scope_name):
yield op_list
g = tf.get_default_graph()
op_list.extend(ge.select_ops(scope_name+"/.*", graph=g))
def _to_op(tensor_or_op):
if hasattr(tensor_or_op, "op"):
| tensorflow.get_default_graph | 11,059 |
import tensorflow as tf
initializer=tf.constant_initializer(self.dale_out),
trainable=False)
# Connectivity weight matrices:
self.input_Connectivity = tf.get_variable('input_Connectivity', [N_rec, N_in],
initializer=tf.constant_initializer(
self.input_connectivity_mask),
trainable=False)
self.rec_Connectivity = tf.get_variable('rec_Connectivity', [N_rec, N_rec],
initializer=tf.constant_initializer(
self.recurrent_connectivity_mask),
trainable=False)
self.output_Connectivity = tf.get_variable('output_Connectivity', [N_out, N_rec],
initializer=tf.constant_initializer(
self.output_connectivity_mask),
trainable=False)
# ------------------------------------------------
# Network loss
# ------------------------------------------------
self.predictions, self.states = self.compute_predictions()
self.error = self.mean_square_error()
self.loss = self.error + self.regularization()
# regularized loss function
def reg_loss(self):
| tensorflow.constant_initializer | 11,060 |
import tensorflow as tf
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| tensorflow.app.run | 11,061 |
from tensorflow.python.ops import array_ops
with ops.Graph().as_default(), ops.device("/device:GPU:0"):
model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)
params_size_t = model.params_size()
input_data = variables.Variable(
array_ops.ones([seq_length, batch_size, num_units]))
input_h = variables.Variable(
array_ops.ones([num_layers, batch_size, num_units]))
input_c = variables.Variable(
array_ops.ones([num_layers, batch_size, num_units]))
params = variables.Variable(
array_ops.ones([params_size_t]), validate_shape=False)
output, output_h, output_c = model(
is_training=True,
| tensorflow.python.ops.array_ops.ones | 11,062 |
import tensorflow as tf
rnn_state=self.rnn_cell.zero_state(batch_size, dtype),
latent_encoded=tf.zeros(
| tensorflow.zeros | 11,063 |
import tensorflow as tf
with tf.name_scope('local_grad'):
grads = tf.gradients(self.loss, self.var_list)
grads, _ = tf.clip_by_global_norm(grads, 40)
with tf.name_scope('sync'): # worker和global的同步过程
with tf.name_scope('pull'): # 获取global参数,复制到local—net
self.pull_params_op = tf.group(*[v1.assign(v2)
for v1, v2 in zip(self.var_list, globalAC.var_list)])
with tf.name_scope('push'): # 将参数传送到gloabl中去
self.update_params_op = OPT.apply_gradients(zip(grads, globalAC.var_list))
# 其中传送的是local—net的actor和critic的参数梯度grads,具体计算在上面定义
| tensorflow.name_scope | 11,064 |
import tensorflow as tf
all_params = []
all_drop = {}
@deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release
def __init__(self, prev_layer, name=None):
if name is None:
raise ValueError('Layer must have a name.')
scope_name = tf.get_variable_scope().name
if scope_name:
name = scope_name + '/' + name
self.name = name
# get all properties of previous layer(s)
if isinstance(prev_layer, Layer): # 1. for normal layer have only 1 input i.e. DenseLayer
# Hint : list(), dict() is pass by value (shallow), without them,
# it is pass by reference.
| tensorflow.get_variable_scope | 11,065 |
import tensorflow as tf
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
use_mlir_converter: Whether or not to use MLIRConverter to convert the
model.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
converter.experimental_enable_mlir_converter = use_mlir_converter
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
try:
interpreter.allocate_tensors()
except ValueError:
assert False
input_index = (interpreter.get_input_details()[0]["index"])
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = (interpreter.get_output_details()[0]["index"])
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
| tensorflow.lite.Interpreter | 11,066 |
import tensorflow as tf
target_samples = tf.nn.l2_normalize(target_samples, 1)
source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
target_cov = tf.matmul(tf.transpose(target_samples), target_samples)
corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight
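# Penalizes the mean squared difference between the source and target feature Gram matrices (a correlation-alignment style loss).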
assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
with tf.control_dependencies([assert_op]):
tag = 'Correlation Loss'
barrier = tf.no_op(tag)
return corr_loss
def maximum_mean_discrepancy(x,
y,
kernel=util.gaussian_kernel_matrix,
| tensorflow.no_op | 11,067 |
import tensorflow as tf
# The name of the placeholder for keep_prob is the same as the name
# of the Layer.
if is_fix:
self.outputs = tf.nn.dropout(self.inputs, keep, seed=seed, name=name)
else:
LayersConfig.set_keep[name] = tf.placeholder(tf.float32)
self.outputs = tf.nn.dropout(self.inputs, LayersConfig.set_keep[name], seed=seed, name=name) # 1.2
# self.all_layers = list(layer.all_layers)
# self.all_params = list(layer.all_params)
| tensorflow.placeholder | 11,068 |
import tensorflow as tf
z = tf.random_normal([2, 10], dtype=tf.float32)
x, _ = networks.generator(
z, progress, _num_filters_stub,
networks.ResolutionSchedule(
start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
fake_loss = tf.reduce_sum(tf.square(x))
grad_norms = [
_get_grad_norm(
fake_loss, tf.trainable_variables('.*/progressive_gan_block_1/.*')),
_get_grad_norm(
fake_loss, tf.trainable_variables('.*/progressive_gan_block_2/.*')),
_get_grad_norm(
fake_loss, tf.trainable_variables('.*/progressive_gan_block_3/.*'))
]
grad_norms_output = None
with self.test_session(use_gpu=True) as sess:
sess.run(tf.global_variables_initializer())
x1_np = sess.run(x, feed_dict={current_image_id_ph: 0.12})
x2_np = sess.run(x, feed_dict={current_image_id_ph: 1.8})
| tensorflow.trainable_variables | 11,069 |
import tensorflow as tf
logit_probs = predictions[:, :, :, :nr_mix]
predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3])
means = predictions[:, :, :, :, :nr_mix]
log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.)
coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix])
inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix])
m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :],
[inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
m3 = tf.reshape(
means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] +
coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :],
[inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
means = tf.concat([
tf.reshape(means[:, :, :, 0, :],
[inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]), m2, m3
],
axis=3)
centered_inputs = inputs - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_inputs + 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
min_in = inv_stdv * (centered_inputs - 1. / 255.)
cdf_min = tf.nn.sigmoid(min_in)
log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
log_one_minus_cdf_min = -tf.nn.softplus(min_in)
cdf_delta = cdf_plus - cdf_min
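# cdf_delta is the probability mass the logistic places in the width-2/255 bin centered on the pixel value (inputs rescaled to [-1, 1]).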
| tensorflow.reshape | 11,070 |
import tensorflow as tf
with tf.variable_scope('source2token_self_attn'):
inter_block_logits = bn_dense_layer(self_attn_result, ivec, True, 0., 'bn_dense_map', 'linear',
False, wd, keep_prob, is_train) # bs,bn,bl,vec
inter_block_logits_masked = exp_mask_for_high_rank(inter_block_logits, rep_mask_split) # bs,bn,bl,vec
inter_block_soft = tf.nn.softmax(inter_block_logits_masked, 2) # bs,bn,bl,vec
inter_block_attn_output = tf.reduce_sum(self_attn_result * inter_block_soft, 2) # bs,bn,vec
with tf.variable_scope('self_attn_inter_block'):
inter_block_attn_output_mask = tf.cast(tf.ones([bs, bn], tf.int32), tf.bool)
| tensorflow.nn.softmax | 11,071 |
import tensorflow as tf
return 1.0 if x == 0.0 else 0.0
x = tf.constant(0.0, tf.float64)
y, = tf.py_func(literal, [x], [tf.float64])
self.assertAllClose(y.eval(), 1.0)
| tensorflow.py_func | 11,072 |
import tensorflow as tf
zero_debias=False)
n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)
| tensorflow.reduce_sum | 11,073 |
import tensorflow as tf
"train":{
"loss":loss,
"logits":logits,
"train_op":train_op,
"cross_entropy":label_loss,
"distillation_loss":distillation_loss["distillation_loss"],
"kd_num":tf.reduce_sum(features["distillation_ratio"]),
"ce_num":tf.reduce_sum(features["label_ratio"]),
"teacher_logit":teacher_logit,
"student_logit":student_logit,
"label_ratio":features["label_ratio"],
"distilaltion_logits_loss":distillation_loss["distillation_logits_loss"],
| tensorflow.reduce_sum | 11,074 |
import tensorflow as tf
return loss
class ValueEstimator_MountainCarContinuous():
def __init__(self, learning_rate=0.1, par_idx=0,scope="value_estimator"):
w_init = tf.random_normal_initializer(0.,.1);
with tf.variable_scope(scope+"_"+str(par_idx)):
# state and target
self.state = tf.placeholder(tf.float32, [None,400], "state")
self.target = tf.placeholder(tf.float32, [None,1], name="target")
# layers
self.value_estimate = tf.layers.dense(self.state, 1, kernel_initializer=w_init, name='v') # estimated value for state
# loss and optimizer
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.contrib.framework.get_global_step())
def predict(self, state, sess=None):
sess = sess or tf.get_default_session()
state=featurize_state(state);
return sess.run(self.value_estimate, { self.state: [state] })[0][0]
def update(self, state, target, sess=None):
sess = sess or tf.get_default_session()
for st_idx in range(len(state)):
state[st_idx]=featurize_state(state[st_idx]);
feed_dict = { self.state: state, self.target: target }
_, loss = sess.run([self.train_op, self.loss], feed_dict)
| tensorflow.train.AdamOptimizer | 11,075 |
import tensorflow as tf
private_samples = tf.nn.l2_normalize(private_samples, 1)
shared_samples = tf.nn.l2_normalize(shared_samples, 1)
correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)
cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
cost = tf.where(cost > 0, cost, 0, name='value')
assert_op = tf.Assert(tf.is_finite(cost), [cost])
with tf.control_dependencies([assert_op]):
barrier = tf.no_op(name)
return cost
def log_quaternion_loss_batch(predictions, labels, name='log_quaternion_batch_loss'):
"""A helper function to compute the error between quaternions.
| tensorflow.is_finite | 11,076 |
import tensorflow as tf
model.fit(
train_dataset,
epochs=20,
steps_per_epoch=steps_per_epoch,
validation_steps=val_steps,
validation_data=test_dataset,
callbacks=[])
model.save_weights('./test/segmentation')
print(create_mask(model(tf.ones((1, 512, 512, 3)), False)))
if __name__ == '__main__':
logging.set_verbosity(logging.WARNING)
app.run(main)
| tensorflow.ones | 11,077 |
import tensorflow as tf
for class_id in range(self.max_num_classes):
# Do the same for the ground truth and predictions
sdf_values = tf.zeros_like(samples_world)[:, 0:1]
for mtype, (classes, sdfs, poses) in enumerate([
(labeled_classes, labeled_sdfs, labeled_poses),
(predicted_classes, predicted_sdfs, predicted_poses)]):
for i in range(classes.shape[0]):
if class_id == classes[i]:
sdf = tf.expand_dims(sdfs[i], -1)
sdf = sdf * -1.0 # inside positive, outside zero
samples_object = centernet_utils.transform_pointcloud(
tf.reshape(samples_world, [1, 1, -1, 3]),
tf.reshape(poses[2][i], [1, 1, 3]),
tf.reshape(poses[0][i], [1, 1, 3, 3]),
tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
samples_object = \
(samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
samples = tf.squeeze(samples_object)
interpolated = trilinear.interpolate(sdf, samples)
sdf_values += tf.math.sign(tf.nn.relu(interpolated + self.tol))
status2 = False
if status2:
a = 2
| tensorflow.reshape | 11,078 |
import tensorflow as tf
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
| tensorflow.reshape | 11,079 |
import tensorflow.contrib.eager as tfe
dataset = random_dataset()
if defun:
model.call = tfe.defun(model.call)
with tf.device(device()):
| tensorflow.contrib.eager.defun | 11,080 |
import tensorflow as tf
v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,3])
t = tf.nn.conv2d_transpose(input_var,v_norm,
output_shape=shapes,
strides=self.strides,
padding='SAME',
data_format='NHWC')
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.reshape(self.g,[1,1,tf.shape(self.v)[2],1]) * tf.nn.l2_normalize(self.v,axis=[0,1,3])
return tf.nn.bias_add(
tf.nn.conv2d_transpose(input_var,w,
output_shape=shapes,
strides=self.strides,
padding='SAME',
data_format='NHWC'),
| tensorflow.cond | 11,081 |
import tensorflow as tf
else:
q = tf.nn.softmax(q_logits)
p = tf.nn.softmax(p_logits)
| tensorflow.nn.softmax | 11,082 |
from tensorflow.contrib.layers.python.layers import initializers
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
| tensorflow.contrib.layers.python.layers.initializers.xavier_initializer | 11,083 |
import tensorflow as tf
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
| tensorflow.variable_scope | 11,084 |
import tensorflow as tf
(summary_op, monitored_values) = self._add_monitoring_of_values()
# Add saver
tf_vars = tf.global_variables()
saver = tf.train.Saver(tf_vars)
| tensorflow.global_variables | 11,085 |
import tensorflow as tf
inf=work.inference(batch_image)
loss=work.softmax_loss(inf,batch_label)
opti=work.optimer(loss,learnrate)
test_image_batch,test_label_batch=get_test_batch(test_image,test_label,testnum)
test_inf=work.test_inference(test_image_batch)
test_labels=tf.one_hot(test_label_batch,classnum)
test_pre = tf.reshape(test_inf, [testnum, classnum])
correct_prediction=tf.equal(tf.argmax(test_inf,1),tf.argmax(test_labels,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
test_pre = tf.argmax(test_pre, 1)
test_true = tf.argmax(test_labels, 1)
valid_image_batch,valid_label_batch=get_valid_batch(valid_image,valid_label,validnum)
valid_inf=work.valid_inference(valid_image_batch)
valid_labels=tf.one_hot(valid_label_batch,classnum)
| tensorflow.argmax | 11,086 |
import tensorflow as tf
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
| tensorflow.constant | 11,087 |
import tensorflow as tf
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
| tensorflow.tensordot | 11,088 |
import tensorflow as tf
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
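    # Per-example cross-entropy against the one-hot labels; averaged into the batch loss below.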
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, do_serve):
| tensorflow.nn.softmax | 11,089 |
import tensorflow as tf
# initializer=tf.keras.initializers.lecun_normal(),
dtype=tf.float32)
V = tf.get_variable(name="attn_V", shape=[2 * self.config.hidden_size, 1],
initializer=tf.contrib.layers.xavier_initializer(),
# initializer=tf.truncated_normal_initializer(),
# initializer=tf.keras.initializers.lecun_normal(),
dtype=tf.float32)
U = tf.get_variable(name="attn_U",
shape=[2 * self.config.hidden_size, 2 * self.config.hidden_size],
initializer=tf.contrib.layers.xavier_initializer(),
# initializer=tf.truncated_normal_initializer(),
# initializer=tf.keras.initializers.lecun_normal(),
dtype=tf.float32)
self.position_emb = tf.reshape(self.position_emb, [-1, 2 * self.config.hidden_size])
shape = tf.shape(output)
output = tf.reshape(output, [-1, 2 * self.config.hidden_size])
atten_hidden = tf.tanh(
tf.add(
tf.matmul(self.position_emb, W),
tf.matmul(output, U)))
alpha = tf.nn.softmax(
tf.reshape(tf.matmul(atten_hidden, V), [-1, shape[1], 1]), axis=1)
output = tf.reshape(output, [-1, shape[1], 2 * self.config.hidden_size])
C = tf.multiply(alpha, output)
return tf.concat([output, C], axis=-1)
| tensorflow.reshape | 11,090 |
import tensorflow as tf
tft_layer = self.transform_features_layer()
if not drop_unused_features:
tf.compat.v1.logging.warning(
'Unused features are always dropped in the TF 2.x '
| tensorflow.compat.v1.logging.warning | 11,091 |
import tensorflow as tf
API."""
import inspect
from collections.abc import Iterable
from typing import Optional
try:
import tensorflow as tf
from tensorflow.keras.layers import Layer
CORRECT_TF_VERSION = int(tf.__version__.split(".")[0]) > 1
except ImportError:
# The following allows this module to be imported even if TensorFlow is not installed. Users
# will instead see an ImportError when instantiating the KerasLayer.
from abc import ABC
Layer = ABC
CORRECT_TF_VERSION = False
| tensorflow.__version__.split | 11,092 |
import tensorflow as tf
return X
def _add_fully_connected(self, X, in_shape, out_ch, no_reg=False):
ch = np.prod(in_shape)
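        # Flatten all input dimensions into a single feature axis before the fully-connected matmul.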
X = tf.reshape(X, (-1, ch))
W = self._make_var('W', (ch, out_ch), no_reg=no_reg)
X = tf.matmul(X, W)
X = tf.reshape(X, (-1, out_ch)) # Sanity shape check
return X
def _add_factorized_reduction(self, X, in_w, in_h, in_ch, out_ch, is_train=False):
'''
Output is of shape (in_w // 2, in_h // 2, out_ch)
'''
| tensorflow.reshape | 11,093 |
import tensorflow as tf
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if not reader:
reader = tf.TFRecordReader
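  # Spec for parsing each serialized TF-Example field out of the TFRecord files.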
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[32, 32, 3]),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
| tensorflow.FixedLenFeature | 11,094 |
import tensorflow as tf
)
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
| tensorflow.flags.DEFINE_string | 11,095 |
from tensorflow.contrib.learn.python.learn.estimators import composable_model
"""
super(_DNNLinearCombinedBaseEstimator, self).__init__(
model_dir=model_dir, config=config)
num_ps_replicas = config.num_ps_replicas if config else 0
self._linear_model = composable_model.LinearComposableModel(
num_label_columns=target_column.num_label_columns,
optimizer=linear_optimizer,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas)
| tensorflow.contrib.learn.python.learn.estimators.composable_model.LinearComposableModel | 11,096 |
from tensorflow.python.framework import ops
def _move_tensors(tensors, device):
"""Moves a list of tensors to a device by concatenating/splitting them."""
# Reset the device setting to avoid weird interactions with device merging
# logic.
with ops.device(None):
if all(tensor.shape == tensor_shape.scalar() for tensor in tensors):
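      # Scalars can be stacked into one tensor, moved in a single transfer, then unstacked
      # on the target device.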
with ops.device(tensors[0].device):
values = array_ops.stack(tensors)
with ops.device(device):
return array_ops.unstack(values)
else:
with ops.device(tensors[0].device):
| tensorflow.python.framework.ops.device | 11,097 |
import tensorflow as tf
tf.int64, "global_epoch")
tf.add_to_collection("global_epoch", self.global_epoch)
# this creates an operation to add to all trainable variables a white noise of param
# std = tf.sqrt(variance)/10
def create_random_update_op(self):
vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
update_opts = []
for var in vars:
_, variance = tf.nn.moments(tf.reshape(var,[-1]),axes=[0])
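            # The white-noise scale is one tenth of the variable's own standard deviation.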
normal = tf.distributions.Normal(loc=0.0, scale=tf.sqrt(variance)/10)
white_noise = normal.sample(var.get_shape())
update_opts.append(var.assign(var + white_noise))
self.random_update_op = tf.group(update_opts)
#apply clipping
def _clip_gradients(self, grads_and_vars, grad_clipping_tuple):
clipping_method, clipping_kwargs = grad_clipping_tuple
grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]
grads = [g for (g, v) in grads_and_vars_not_none]
variables = [v for (g, v) in grads_and_vars_not_none]
| tensorflow.sqrt | 11,098 |
import tensorflow as tf
# upsampling the logits followed by argmax, or (2) argmax followed by
# nearest neighbor upsampling. The second option may introduce the "blocking
# effect" but is computationally efficient.
if model_options.prediction_with_upsampled_logits:
logits = _resize_bilinear(logits,
#tf.shape(images)[1:3],
tf.TensorShape([512,512]),
scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
predictions[output] = tf.argmax(logits, 3, output_type=tf.dtypes.int32)
#predictions[output + PROB_SUFFIX] = tf.nn.softmax(logits)
else:
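      # Option (2): argmax at the logits resolution, then nearest-neighbor upsampling of the label map.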
argmax_results = tf.argmax(logits, 3, output_type=tf.dtypes.int32)
argmax_results = tf.image.resize_nearest_neighbor(
tf.expand_dims(argmax_results, 3),
tf.shape(images)[1:3],
align_corners=True,
name='resize_prediction')
predictions[output] = tf.squeeze(argmax_results, 3)
#predictions[output + PROB_SUFFIX] = tf.image.resize_bilinear(
# tf.nn.softmax(logits),
# tf.shape(images)[1:3],
# align_corners=True,
# name='resize_prob')
return predictions
def multi_scale_logits(images,
model_options,
| tensorflow.shape | 11,099 |