seed (string, length 25–2.89k) | seed_api (string, length 14–102) | index (int64, 0–14.8k)
---|---|---|
import tensorflow as tf
# 4. By default, TensorFlow grabs most of the available GPU memory. While this is often
#    desirable, we can be more careful about how GPU memory is allocated. Because TensorFlow
#    does not release GPU memory once it has been allocated, we can, if necessary, enable the
#    GPU memory growth option so that the allocation grows gradually up to the maximum limit
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess_grow = tf.Session(config=config)
# 5. To put a hard cap on the percentage of GPU memory TensorFlow may use, set per_process_gpu_memory_fraction on the config
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)
# 6. Sometimes we want the code to be robust enough to decide how many GPUs it is appropriate to run on. TensorFlow has built-in functions that can detect this. It is useful when we want the code to exploit GPU compute whenever suitable GPU memory is available and to assign specific operations to the GPU
if tf.test.is_built_with_cuda(): pass
# 7. We want to assign specific operations to GPUs. The example below performs some simple computations and assigns them to the main CPU and two auxiliary GPUs
with tf.device('/cpu:0'):
a = tf.constant([1.0, 3.0, 5.0], shape=[1,3])
b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])
with tf.device('/gpu:0'):
c = tf.matmul(a,b)
c = tf.reshape(c, [-1])
with tf.device('/gpu:1'):
d = tf.matmul(b, a)
flat_d = tf.reshape(d, [-1])
combined = tf.multiply(c, flat_d)
print(sess.run(combined))
| tensorflow.constant | 11,700 |
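A minimal, hedged sketch (not part of the dataset row above) of how the device-placement example can be run end to end under TF 1.x; `allow_soft_placement` is assumed so the graph still runs on machines that lack the requested GPUs.

```python
import tensorflow as tf

# allow_soft_placement lets TensorFlow fall back to an available device when a
# requested one (e.g. '/gpu:1') does not exist; log_device_placement prints
# where each op was actually placed.
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)

with tf.device('/cpu:0'):
    a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
    b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])
with tf.device('/gpu:0'):
    c = tf.matmul(a, b)

with tf.Session(config=config) as sess:
    print(sess.run(c))  # [[44.]]
```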
import tensorflow as tf
for i in xrange(200):
outputs.append(f(tf.fill([1, 5], i), tf.fill([1, 5], i)))
| tensorflow.fill | 11,701 |
from tensorflow.python.ops import variable_scope
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
Raises:
ValueError: If labels and predictions are of different sizes or if either
`metrics_collections` or `updates_collections` are not a list or tuple.
"""
with variable_scope.variable_scope(name, 'covariance', [predictions, labels]):
predictions, labels = tensor_util.remove_squeezable_dimensions(
predictions, labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
count = _create_local('count', [])
mean_prediction = _create_local('mean_prediction', [])
mean_label = _create_local('mean_label', [])
comoment = _create_local('comoment', []) # C_A in update equation
if weights is None:
batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
| tensorflow.python.ops.variable_scope.variable_scope | 11,702 |
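The docstring above defines the streamed quantity as `comoment` / (`count` - 1). A small hedged sketch, with assumed example values, of that unbiased sample covariance computed directly in one shot (illustrative only; it is not the metric's incremental update logic):

```python
import tensorflow as tf

# Assumed example values, not taken from the snippet above.
predictions = tf.constant([1.0, 2.0, 3.0, 4.0])
labels = tf.constant([2.0, 4.0, 6.0, 8.0])

count = tf.cast(tf.size(predictions), tf.float32)
comoment = tf.reduce_sum(
    (predictions - tf.reduce_mean(predictions)) * (labels - tf.reduce_mean(labels)))
covariance = comoment / (count - 1.0)  # `comoment` / (`count` - 1)

with tf.Session() as sess:
    print(sess.run(covariance))  # 10 / 3 ≈ 3.33
```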
import tensorflow as tf
masked_lm_positions, masked_lm_ids, masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
| tensorflow.trainable_variables | 11,703 |
import tensorflow as tf
[loss, opti, batch_label, batch_image, inf],feed_dict=feed_dict)
if i > 0 and i % report_step == 0:
accuracy_np = sess.run([accuracy],feed_dict=feed_dict)
print(i, accuracy_np, loss_np)
if i > 0 and i % save_step == 0:
tf.train.Saver().save(sess, path)
tf.train.Saver().save(sess, path)
coord.request_stop()
coord.join(threads)
| tensorflow.train.Saver | 11,704 |
import tensorflow as tf
tgtimg_h2 = lrelu(conv2d(tgtimg_h1, self.df_dim*4, name='h2_conv'))
tgtimg_h3 = lrelu(conv2d(tgtimg_h2, self.df_dim*8, name='h3_conv'))
tgtimg_h4 = lrelu(linear(tf.reshape(tgtimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
tgtimg_z = lrelu(linear(tgtimg_h4, featsize, 'hz_lin'))
with tf.variable_scope("translate") as scope:
trans_h0 = lrelu(linear(tf.concat([srcimg_z, tgtctx_z], 1), featsize, 'trans_h0'))
trans_z = linear(trans_h0, featsize, 'trans_z')
self.translated_z = trans_z
| tensorflow.variable_scope | 11,705 |
import tensorflow as tf
mean, variance = tf.nn.normalize_moments(counts,
shifted_sum_x,
shifted_sum_x2,
shift,
name="normalize_moments")
second_moment = variance + tf.square(mean)
return mean, variance, second_moment
def build_moving_stats():
return (
tf.identity(self._moving_mean),
tf.identity(self._moving_variance),
tf.identity(self._moving_second_moment),
)
mean, variance, second_moment = utils.smart_cond(
use_batch_stats,
build_batch_stats,
build_moving_stats,
)
| tensorflow.identity | 11,706 |
import tensorflow as tf
noise = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
| tensorflow.log | 11,707 |
import tensorflow as tf
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
| tensorflow.FixedLenFeature | 11,708 |
import tensorflow as tf
filter_shape = [3, embedding_size, 4, 64]
W = tf.get_variable(name='W_1', shape=filter_shape,
initializer=he_normal,
regularizer=regularizer)
paddings = [[0, 0], [1, 1], [0, 0], [0, 0]]
cnn_inputs = tf.pad(cnn_inputs, paddings, "CONSTANT")
#print("cnn_inputs shape:", cnn_inputs.shape)
inputs = tf.nn.conv2d(cnn_inputs, W, strides=[1, 1, 1, 1], padding="VALID", name="first_conv")
inputs = tf.layers.batch_normalization(inputs, axis=-1, training=self.is_training)
inputs = tf.nn.relu(inputs, name="first_relu")
#print("temp cnn output shape:", inputs.shape)
inputs = tf.squeeze(inputs, axis=2)
#print("squeeze shape", inputs.shape)
#inputs = tf.nn.relu(inputs)
| tensorflow.nn.conv2d | 11,709 |
import tensorflow as tf
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]
k = tf.to_int32(tf.floor(tf.to_float(tf.shape(context_outputs)[0]) * self.config["top_span_ratio"]))
top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
tf.expand_dims(candidate_starts, 0),
tf.expand_dims(candidate_ends, 0),
tf.expand_dims(k, 0),
util.shape(context_outputs, 0),
True) # [1, k]
top_span_indices.set_shape([1, None])
top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
| tensorflow.expand_dims | 11,710 |
import tensorflow as tf
Returns:
"""
with tf.variable_scope('anchor_generator'):
if offset is None:
offset = [stride[0]/2, stride[1]/2]
features_width = tf.cast(features_width, tf.int32)
| tensorflow.variable_scope | 11,711 |
import tensorflow as tf
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
| tensorflow.variable_scope | 11,712 |
import tensorflow as tf
def _softmax_layer(self, bottom, name):
if name == 'rpn_cls_prob_reshape':
input_shape = tf.shape(bottom)
# Using -1 in tf.reshape(): -1 means we do not know what number belongs in that dimension, so we let it be inferred from the size of the original array and the other dimensions
# Each row holds the foreground/background scores of one anchor: first the first anchor type produced at every location, then the second anchor type at every location, and so on
bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]])
reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)
return tf.reshape(reshaped_score, input_shape) # [1,none,none,2]
return tf.nn.softmax(bottom, name=name)
def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
with tf.variable_scope(name):
rois, rpn_scores = tf.py_func(proposal_top_layer,
[rpn_cls_prob, rpn_bbox_pred, self._im_info,
self._feat_stride, self._anchors, self._num_anchors],
[tf.float32, tf.float32])
rois.set_shape([cfg.FLAGS.rpn_top_n, 5])
rpn_scores.set_shape([cfg.FLAGS.rpn_top_n, 1])
return rois, rpn_scores
def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
with tf.variable_scope(name):
# The returned rois have an extra column of zeros prepended as the first column
rois, rpn_scores = tf.py_func(proposal_layer,
| tensorflow.py_func | 11,713 |
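A tiny hedged illustration of the -1 behaviour in tf.reshape described in the comment above: one dimension may be given as -1 and is inferred from the total number of elements.

```python
import tensorflow as tf

x = tf.range(6)             # 6 elements
y = tf.reshape(x, [-1, 2])  # -1 is inferred as 3, giving shape [3, 2]

with tf.Session() as sess:
    print(sess.run(y))      # [[0 1] [2 3] [4 5]]
```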
import tensorflow as tf
values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], axis=0)
grl = gradient_reverse(samples)
grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))
grl = fc(grl, 100, True, None, activation=relu, name='fc1')
logits = fc(grl, 1, True, None, activation=None, name='fc2')
domain_predictions = tf.sigmoid(logits)
domain_loss = tf.losses.log_loss(domain_selection_mask, domain_predictions, weights=weight)
domain_accuracy = util.accuracy_tf(domain_selection_mask, tf.round(domain_predictions))
assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
with tf.control_dependencies([assert_op]):
| tensorflow.sigmoid | 11,714 |
import tensorflow as tf
'warmup_steps', 100,
'The total steps to warm-up.')
# for learning rate piecewise_constant decay
tf.app.flags.DEFINE_string(
'decay_boundaries', '2, 3',
'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
'lr_decay_factors', '1, 0.5, 0.1',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', '',
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
#'blouse', 'dress', 'outwear', 'skirt', 'trousers', 'all'
'model_scope', None,
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
'When restoring a checkpoint would ignore missing variables.')
| tensorflow.app.flags.DEFINE_string | 11,715 |
import tensorflow as tf
mask0 = tf.constant([[1, 0],
[0, 1]], dtype=tf.float32)
mask1 = tf.constant([[1, 1],
[0, 1]], dtype=tf.float32)
mask2 = tf.constant([[1, 0],
[1, 1]], dtype=tf.float32)
mask3 = tf.constant([[1, 1],
[1, 1]], dtype=tf.float32)
mask4 = tf.constant([[0, 0],
[0, 0]], dtype=tf.float32)
mask5 = tf.constant([[1, 0],
[1, 0]], dtype=tf.float32)
masks = tf.stack([mask0, mask1, mask2, mask3, mask4, mask5])
scores = tf.constant([[0.05, 1.0, 0.2],
[0.9, 0.1, 0.3],
[0.95, 0.92, 0.1],
| tensorflow.constant | 11,716 |
import tensorflow as tf
tf.logging.info("*** Features ***")
| tensorflow.logging.info | 11,717 |
import tensorflow as tf
self.layers.append(conv_block)
# Extract the 8 largest features (k-max pooling), as described in the paper
self.k_pooled = tf.nn.top_k(tf.transpose(self.layers[-1], [0,2,1]), k=8, name='k_pool', sorted=False)[0]
print("8-maxpooling:", self.k_pooled.get_shape())
self.flatten = tf.reshape(self.k_pooled, (-1, 512*8))
# fc1
with tf.variable_scope('fc1'):
w = tf.get_variable('w', [self.flatten.get_shape()[1], 2048], initializer=he_normal,
| tensorflow.reshape | 11,718 |
import tensorflow as tf
def _body(layer_id, inputs, prev_c, prev_h, anchors, anchors_w_1, arc_seq,
entropy, log_prob):
indices = tf.range(0, layer_id, dtype=tf.int32)
start_id = 4 * (layer_id - 2)
prev_layers = []
for i in range(2): # index_1, index_2
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
prev_c, prev_h = next_c, next_h
query = anchors_w_1.gather(indices)
query = tf.reshape(query, [layer_id, self.lstm_size])
query = tf.tanh(query + tf.matmul(next_h[-1], self.w_attn_2))
query = tf.matmul(query, self.v_attn)
logits = tf.reshape(query, [1, layer_id])
if self.temperature is not None:
logits /= self.temperature
if self.tanh_constant is not None:
logits = self.tanh_constant * tf.tanh(logits)
index = tf.multinomial(logits, 1)
index = tf.to_int32(index)
| tensorflow.reshape | 11,719 |
import tensorflow as tf
top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb) # [k, c]
top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1]
top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb]
attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb]
with tf.variable_scope("f"):
f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb]
top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb]
top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1]
top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c]
top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c]
same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c]
non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1]
pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c]
dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1]
top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1]
loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k]
loss = tf.reduce_sum(loss) # []
return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss
| tensorflow.to_float | 11,720 |
from tensorflow.python.ops import init_ops
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
| tensorflow.python.ops.init_ops.constant_initializer | 11,721 |
import tensorflow as tf
kernel_size: a list of 2 ints
stride: a list of 2 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def max_pool3d(inputs,
| tensorflow.nn.avg_pool | 11,722 |
import tensorflow as tf
means that the corresponding input to this method should be a `Tensor` with
each element corresponding to a single output of an encoding. So this can be
a single element, in the one-to-many setting, or multiple elements, in the
many-to-one setting.
The `update_state` method thus can compute arbitrary function of the
relevant values. In this case, it maintains a rolling average of previous
states, where the weight to be used depends on the number of updates
received. Note that the specific implementation is not necessarily useful or
efficient; it rather serves as an illustration of what can be done.
"""
num_updates = state_update_tensors[
self.NORM_STATE_UPDATE_KEY].shape.num_elements()
norm_mean = tf.reduce_mean(state_update_tensors[self.NORM_STATE_UPDATE_KEY])
weight = 0.9**num_updates # Use a stronger weight for more updates.
new_factor = (
weight * state[self.FACTOR_STATE_KEY] + (1 - weight) / norm_mean)
return {self.FACTOR_STATE_KEY: new_factor}
def get_params(self, state):
"""See base class."""
params = {self.FACTOR_PARAM_KEY: state[self.FACTOR_STATE_KEY]}
return params, params
def encode(self, x, encode_params):
"""See base class."""
| tensorflow.reduce_mean | 11,723 |
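A hedged numeric illustration (plain Python, values assumed) of the weighting rule in the docstring above: the more updates already received, the more weight the previous state keeps.

```python
# Assumed example values, not taken from the snippet above.
old_factor, norm_mean, num_updates = 2.0, 4.0, 3

weight = 0.9 ** num_updates  # 0.729: a stronger weight for more updates
new_factor = weight * old_factor + (1 - weight) / norm_mean
print(new_factor)            # 1.52575
```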
import tensorflow as tf
update_mean_op = moving_averages.assign_moving_average(
variable=self._moving_mean,
value=mean,
decay=self._decay_rate,
name="update_moving_mean").op
update_variance_op = moving_averages.assign_moving_average(
variable=self._moving_variance,
value=variance,
decay=self._decay_rate,
name="update_moving_variance").op
return update_mean_op, update_variance_op
def build_no_ops():
return (tf.no_op(), tf.no_op())
# Only make the ops if we know that `is_training=True`, or the value of
# `is_training` is unknown.
is_training_const = utils.constant_value(is_training)
if is_training_const is None or is_training_const:
update_mean_op, update_variance_op = utils.smart_cond(
is_training,
build_update_ops,
build_no_ops,
)
# Every new connection creates a new op which adds its contribution
# to the running average when ran.
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op)
| tensorflow.no_op | 11,724 |
import tensorflow as tf
train_batch = self.config['batch_size']*self.n_gpus
d = d.repeat().batch(train_batch).prefetch(train_batch)
self.dataset_iterators[n] = d.make_one_shot_iterator()
else:
d = d.batch(self.config['eval_batch_size']*self.n_gpus)
self.dataset_iterators[n] = d.make_initializable_iterator()
output_types = d.output_types
output_shapes = d.output_shapes
self.datasets[n] = d
# Perform compatibility checks with the inputs of the child model
for i, spec in self.input_spec.items():
assert i in output_shapes
tf.TensorShape(output_shapes[i]).assert_is_compatible_with(
tf.TensorShape(spec['shape']))
# Used for input shapes of the prediction network
if self.data_shape is None:
self.data_shape = output_shapes
# Handle for the feedable iterator
self.handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
self.handle, output_types, output_shapes)
data = iterator.get_next()
# Build the actual training and evaluation models
self._train_graph(data)
| tensorflow.TensorShape | 11,725 |
import tensorflow as tf
# get one 'word' embedding for the full tweet
tweet_embedding = c2v.GetEmbeddings(self.x)[:,1,:]
logits = tf.nn.xw_plus_b(tweet_embedding, hidden, bias)
self.probs = tf.nn.softmax(logits)
self._xent = tf.nn.softmax_cross_entropy_with_logits(logits, self.y)
| tensorflow.nn.xw_plus_b | 11,726 |
import tensorflow as tf
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=[output_actions, q_values, s_value, a_values, update_eps_expr],
| tensorflow.cond | 11,727 |
import tensorflow as tf
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis = [2])
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# Create custom estimator's train and evaluate function
def train_and_evaluate(output_dir, use_keras):
if use_keras:
| tensorflow.estimator.export.ServingInputReceiver | 11,728 |
import tensorflow as tf
learning_rate = get_learning_rate_decay(params.learning_rate,
global_step, params)
learning_rate = tf.convert_to_tensor(learning_rate, dtype=tf.float32)
tf.summary.scalar("learning_rate", learning_rate)
# Create optimizer
| tensorflow.summary.scalar | 11,729 |
import tensorflow as tf
print(img_h3.get_shape())
img_h4 = lrelu(linear(tf.nn.dropout(tf.reshape(img_h3, [self.batch_size, -1]), keep_prob), featsize, 'h4_lin'))
img_z = lrelu(linear(tf.nn.dropout(img_h4, keep_prob), featsize, 'hz_lin'))
return img_h0, img_h1, img_h2, img_h3, img_h4, img_z
| tensorflow.nn.dropout | 11,730 |
import tensorflow as tf
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
| tensorflow.layers.dense | 11,731 |
import tensorflow as tf
class VariationalDense:
"""Variational Dense Layer Class"""
def __init__(self, n_in, n_out, dropout_mask_ph,
model_prob=0.9, model_lam=3e-4, activation=None, name="hidden"):
self.model_prob = model_prob # probability to keep units
self.model_lam = model_lam # l^2 / 2*tau: l=1e-2, tau=[0.1, 0.15, 0.2]
self.dropout_mask_ph = dropout_mask_ph # placeholder: p_s * i_s
self.p_s = tf.shape(self.dropout_mask_ph)[0] # post sample size
self.DM = tf.zeros(shape=[self.p_s, n_in, n_in]) # Dropout masks: p_s * i_s * i_s
self.DM = tf.linalg.set_diag(self.DM, self.dropout_mask_ph)
kernel_initializer = tf.initializers.truncated_normal(mean=0.0, stddev=0.01)
self.model_W = tf.get_variable("{}_W".format(name), initializer=kernel_initializer([n_in, n_out])) # variational parameters
self.model_b = tf.get_variable("{}_b".format(name), initializer=tf.zeros([n_out]))
self.model_DMW = tf.einsum('pij,jk->pik', self.DM, self.model_W) # Masked weight: p_s * i_s * o_s
self.model_tiled_b = tf.tile(tf.reshape(self.model_b, [1, n_out]), [self.p_s, 1])
if activation is None:
self.activation = tf.identity
else:
self.activation = activation
| tensorflow.initializers.truncated_normal | 11,732 |
import tensorflow as tf
def get_variable(name, shape):
return tf.get_variable(name, shape, tf.float32,
tf.initializers.truncated_normal(0,0.01))
def Qmix_mixer(agent_qs, state, state_dim, n_agents, n_h_mixer):
"""
Args:
agent_qs: shape [batch, n_agents]
state: shape [batch, state_dim]
state_dim: integer
n_agents: integer
n_h_mixer: integer
"""
agent_qs_reshaped = tf.reshape(agent_qs, [-1, 1, n_agents])
# n_h_mixer * n_agents because result will be reshaped into matrix
hyper_w_1 = get_variable('hyper_w_1', [state_dim, n_h_mixer*n_agents])
hyper_w_final = get_variable('hyper_w_final', [state_dim, n_h_mixer])
hyper_b_1 = tf.get_variable('hyper_b_1', [state_dim, n_h_mixer])
hyper_b_final_l1 = tf.layers.dense(inputs=state, units=n_h_mixer, activation=tf.nn.relu,
use_bias=False, name='hyper_b_final_l1')
hyper_b_final = tf.layers.dense(inputs=hyper_b_final_l1, units=1, activation=None,
use_bias=False, name='hyper_b_final')
# First layer
| tensorflow.reshape | 11,733 |
import tensorflow as tf
pred_x = pred_x * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (width / 2.)
pred_y = pred_y * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (height / 2.)
if config.PRED_DEBUG:
pred_indices_ = tf.squeeze(pred_indices)
image_ = tf.squeeze(image) * 255.
pred_heatmap = tf.one_hot(pred_indices_, heatmap_size*heatmap_size, on_value=1., off_value=0., axis=-1, dtype=tf.float32)
pred_heatmap = tf.reshape(pred_heatmap, [-1, heatmap_size, heatmap_size])
if data_format == 'channels_first':
image_ = tf.transpose(image_, perm=(1, 2, 0))
save_image_op = tf.py_func(save_image_with_heatmap,
[image_, height, width,
heatmap_size,
tf.reshape(pred_heatmap * 255., [-1, heatmap_size, heatmap_size]),
tf.reshape(predictions, [-1, heatmap_size, heatmap_size]),
config.left_right_group_map[category][0],
config.left_right_group_map[category][1],
config.left_right_group_map[category][2]],
tf.int64, stateful=True)
| tensorflow.transpose | 11,734 |
import tensorflow as tf
# Random initialization, or load weights from the weights path,
# for the initial state, weight matrices, and bias weights
# ------------------------------------------------
if self.load_weights_path is None:
# random initializations
init_state_initializer = tf.random_normal_initializer(mean=0.1, stddev=0.01)
W_in_initializer = tf.constant_initializer(
0.1 * np.random.uniform(-1, 1, size=(self.N_rec, self.N_in)))
W_rec_initializer = tf.constant_initializer(self.initial_W())
W_out_initializer = tf.constant_initializer(
| tensorflow.random_normal_initializer | 11,735 |
import tensorflow as tf
def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss,
loss):
values = [
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss",
simple_value=before_loss),
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss",
simple_value=after_loss),
| tensorflow.Summary.Value | 11,736 |
import tensorflow as tf
unhead_org_idx, sl_unhead, rep_unhead_mask,
dep_org_idx, sl_dep, rep_dep_mask,
rep_dep_tensor, direction
):
with tf.name_scope('pooling_for_un_head'):
undep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_unhead, 1]) # [bs, sluh, sld]
unhead_idxs = tf.tile(tf.expand_dims(unhead_org_idx, 2), [1, 1, sl_dep]) # [bs, sluh, sld]
if direction is None:
direct_mask_un = tf.not_equal(unhead_idxs, undep_idxs) # [bs, sluh, sld]
else:
| tensorflow.expand_dims | 11,737 |
import tensorflow as tf
def accuracy(logits, labels):
'''
Evaluate the quality of the logits at predicting the label
'''
# for summary
with tf.name_scope('accuracy') as scope:
correct = tf.equal(tf.arg_max(logits,1), tf.arg_max(labels,1))
correct = tf.cast(correct, tf.float32)
accuracy = tf.reduce_mean(correct)*100.0
tf.summary.scalar(scope+'accuracy',accuracy)
return accuracy
def num_correct_prediction(logits, labels):
'''
Evaluate the quality of the logits at predicting the label
'''
correct = tf.equal(tf.arg_max(logits,1), tf.arg_max(labels,1))
correct = tf.cast(correct, tf.int32)
| tensorflow.summary.scalar | 11,738 |
from tensorflow.python.platform import gfile
# previous checkpoints.
save3 = tf.train.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(gfile.Exists(s1))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
self.assertTrue(gfile.Exists(s3))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
| tensorflow.python.platform.gfile.Exists | 11,739 |
import tensorflow as tf
outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw, ))
out_bw = tf.reverse_sequence(
out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
| tensorflow.reverse_sequence | 11,740 |
import tensorflow as tf
y_centers = tf.cast(tf.range(features_height), tf.float32)
y_centers = y_centers * stride[0]
# x_centers = x_centers + offset[1]
# y_centers = y_centers + offset[0]
x_centers, y_centers = tf.meshgrid(x_centers, y_centers)
widths, x_centers = tf.meshgrid(widths, x_centers)
heights, y_centers = tf.meshgrid(heights, y_centers)
anchor_centers = tf.stack([x_centers, y_centers], axis=2)
anchor_centers = tf.reshape(anchor_centers, [-1, 2])
anchor_sizes = tf.stack([widths, heights], axis=2)
| tensorflow.meshgrid | 11,741 |
import tensorflow as tf
# 'Crop size must be specified for using image-level feature.')
if model_options.model_variant == 'mobilenet_v2':
if (model_options.atrous_rates is not None or
model_options.decoder_output_stride is not None):
# Output a warning and users should make sure if the setting is desired.
tf.logging.warning('Our provided mobilenet_v2 checkpoint does not '
'include ASPP and decoder modules.')
crop_height = (
model_options.crop_size[0]
if model_options.crop_size else tf.shape(images)[1])
crop_width = (
model_options.crop_size[1]
if model_options.crop_size else tf.shape(images)[2])
# Compute the height, width for the output logits.
logits_output_stride = (
model_options.decoder_output_stride or model_options.output_stride)
logits_height = scale_dimension(
| tensorflow.shape | 11,742 |
from tensorflow.python.ops import array_ops
from tensorflow.contrib.slim.python.slim.data import test_utils
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _resize_image(image, height, width):
image = array_ops.expand_dims(image, 0)
image = image_ops.resize_bilinear(image, [height, width])
return array_ops.squeeze(image, [0])
def _create_tfrecord_dataset(tmpdir):
if not gfile.Exists(tmpdir):
gfile.MakeDirs(tmpdir)
data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
keys_to_features = {
'image/encoded':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
| tensorflow.python.ops.array_ops.squeeze | 11,743 |
import tensorflow as tf
def __init__(self):
self.mse = tf.losses.MeanSquaredError()
def __call__(self, y_pred, target, target_weight):
batch_size = y_pred.shape[0]
num_of_joints = y_pred.shape[-1]
pred = tf.reshape(tensor=y_pred, shape=(batch_size, -1, num_of_joints))
heatmap_pred_list = tf.split(value=pred, num_or_size_splits=num_of_joints, axis=-1)
gt = tf.reshape(tensor=target, shape=(batch_size, -1, num_of_joints))
heatmap_gt_list = tf.split(value=gt, num_or_size_splits=num_of_joints, axis=-1)
loss = 0.0
for i in range(num_of_joints):
heatmap_pred = tf.squeeze(heatmap_pred_list[i])
heatmap_gt = tf.squeeze(heatmap_gt_list[i])
loss += 0.5 * self.mse(y_true=heatmap_pred * target_weight[:, i],
y_pred=heatmap_gt * target_weight[:, i])
return loss / num_of_joints
| tensorflow.squeeze | 11,744 |
import tensorflow as tf
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,3])
t = tf.nn.conv2d_transpose(input_var,v_norm,
output_shape=shapes,
strides=self.strides,
padding='SAME',
data_format='NHWC')
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.reshape(self.g,[1,1,tf.shape(self.v)[2],1]) * tf.nn.l2_normalize(self.v,axis=[0,1,3])
| tensorflow.sqrt | 11,745 |
import tensorflow as tf
self.dropout_input = args.dropout_input
self.clip_norm = args.clip_norm
self.embedding_init = embedding_init
self.x = tf.placeholder(tf.int32, [None, None], 'input')
self.y = tf.placeholder(tf.int32, [None, self.num_classes], 'labels')
self.seq_len = tf.placeholder(tf.int64, [None], 'input_length')
def inference(self, forward_only=None):
embed_inputs = tf.nn.embedding_lookup(self.embedding_init, self.x) ## (batch_size, seq_len, 100)
| tensorflow.placeholder | 11,746 |
import tensorflow as tf
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)
def add_train_stats(model, hparams):
with tf.variable_scope("stats") as scope:
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tf.summary.scalar("before_loss", model.before_loss)
tf.summary.scalar("after_loss", model.after_loss)
if hparams.predict_linear:
tf.summary.scalar("linear_loss", model.linear_loss)
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])
tf.summary.scalar("regularization_loss", model.regularization_loss)
tf.summary.scalar("stop_token_loss", model.stop_token_loss)
tf.summary.scalar("loss", model.loss)
tf.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed
if hparams.tacotron_teacher_forcing_mode == "scheduled":
tf.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing
# ratio decay when mode = "scheduled"
| tensorflow.summary.scalar | 11,747 |
import tensorflow as tf
export_feat_tensors = {}
# Input tensors
feats_audio_nunroll = tf.placeholder(dtype, shape=[batch_size, rnn_nunroll + zack_hack, audio_context_len, audio_nbands, audio_nchannels], name='feats_audio')
feats_other_nunroll = tf.placeholder(dtype, shape=[batch_size, rnn_nunroll, nfeats], name='feats_other')
print('feats_audio: {}'.format(feats_audio_nunroll.get_shape()))
print('feats_other: {}'.format(feats_other_nunroll.get_shape()))
if mode != 'gen':
targets_nunroll = tf.placeholder(dtype, shape=[batch_size, rnn_nunroll])
# TODO: tf.ones acts as an overridable placeholder but this is still awkward
target_weights_nunroll = tf.ones([batch_size, rnn_nunroll], dtype)
# Reshape input tensors to remove nunroll dim; will briefly restore later during RNN if necessary
if cnn_rnn_zack:
feats_audio = tf.reshape(feats_audio_nunroll, shape=[batch_size, rnn_nunroll + zack_hack, audio_nbands, audio_nchannels])
else:
feats_audio = tf.reshape(feats_audio_nunroll, shape=[batch_size * rnn_nunroll, audio_context_len, audio_nbands, audio_nchannels])
feats_other = tf.reshape(feats_other_nunroll, shape=[batch_size * rnn_nunroll, nfeats])
if mode != 'gen':
targets = tf.reshape(targets_nunroll, shape=[batch_size * rnn_nunroll])
target_weights = tf.reshape(target_weights_nunroll, shape=[batch_size * rnn_nunroll])
# CNN
cnn_output = feats_audio
if do_cnn:
layer_last = feats_audio
nfilt_last = audio_nchannels
for i, ((ntime, nband, nfilt), (ptime, pband)) in enumerate(zip(cnn_filter_shapes, cnn_pool)):
layer_name = 'cnn_{}'.format(i)
| tensorflow.reshape | 11,748 |
import tensorflow as tf
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
| tensorflow.data.TFRecordDataset | 11,749 |
import tensorflow as tf
References:
Kolda, Tamara G., and Brett W. Bader. "Tensor decompositions and
applications." SIAM review 51.3 (2009): 455-500.
'''
def _create_model(self, train_triples):
# Count unique items to determine embedding matrix sizes
head_cnt = len(set(train_triples[:,0]))
rel_cnt = len(set(train_triples[:,1]))
tail_cnt = len(set(train_triples[:,2]))
init_sd = 1.0 / np.sqrt(self.embedding_size)
# Embedding matrices for entities and relationship types
head_init = tf.truncated_normal([head_cnt, self.embedding_size], stddev=init_sd)
rel_init = tf.truncated_normal([rel_cnt, self.embedding_size], stddev=init_sd)
tail_init = tf.truncated_normal([tail_cnt, self.embedding_size], stddev=init_sd)
if self.maxnorm is not None:
# Ensure maxnorm constraints are initially satisfied
head_init = dense_maxnorm(head_init, self.maxnorm)
rel_init = dense_maxnorm(rel_init, self.maxnorm)
tail_init = dense_maxnorm(tail_init, self.maxnorm)
self.head_embedding_vars = tf.Variable(head_init)
self.rel_embedding_vars = tf.Variable(rel_init)
self.tail_embedding_vars = tf.Variable(tail_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input)
| tensorflow.truncated_normal | 11,750 |
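`dense_maxnorm` is referenced but not defined in the snippet above; a hedged sketch of one plausible row-wise max-norm projection (the behaviour is an assumption, not the repository's actual helper):

```python
import tensorflow as tf

def dense_maxnorm(var_matrix, maxnorm=1.0):
    # Rescale any embedding row whose L2 norm exceeds `maxnorm` back onto the
    # max-norm ball; rows already inside the ball are left unchanged.
    return tf.clip_by_norm(var_matrix, maxnorm, axes=[1])
```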
import tensorflow as tf
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
| tensorflow.nn.log_softmax | 11,751 |
import tensorflow as tf
for index, (vf, logits, ep_reward) in enumerate(zip(episode_vf, episode_logits, episode_rewards)):
summary = tf.Summary()
| tensorflow.Summary | 11,752 |
from tensorflow.python.platform import gfile
except OSError:
pass # Ignore
gfile.MakeDirs(save_dir)
| tensorflow.python.platform.gfile.MakeDirs | 11,753 |
import tensorflow as tf
save.restore(sess, save_path)
self.assertAllClose(123.45, v0_2.eval())
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with tf.Session("", graph=tf.Graph()) as sess:
one = tf.Variable(1.0)
twos = tf.Variable([2.0, 2.0, 2.0])
init = tf.initialize_all_variables()
save = tf.train.Saver(tf.all_variables())
init.run()
save.save(sess, save_path)
with tf.Session("", graph=tf.Graph()) as sess:
one = tf.Variable(0.0)
twos = tf.Variable([0.0, 0.0, 0.0])
# Saver with no arg, defaults to 'all variables'.
save = tf.train.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
def testSaveWithGlobalStep(self):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
| tensorflow.Variable | 11,754 |
from tensorflow.contrib.learn.python.learn.datasets import base
dnn_hidden_units=(3, 3),
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
input_fn = test_data.iris_input_logistic_fn
metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
input_fn=input_fn, steps=100)
self._assertSingleClassMetrics(metrics)
def benchmarkMultiClass(self):
iris = base.load_iris()
cont_feature = feature_column.real_valued_column('feature', dimension=4)
bucketized_feature = feature_column.bucketized_column(
cont_feature, test_data.get_quantile_based_buckets(iris.data, 10))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=(bucketized_feature,),
dnn_feature_columns=(cont_feature,),
| tensorflow.contrib.learn.python.learn.datasets.base.load_iris | 11,755 |
import tensorflow as tf
prediction_inspect = tf.reshape(prediction, [batch_size, rnn_nunroll])
prediction_final = tf.squeeze(tf.slice(prediction_inspect, [0, rnn_nunroll - 1], [-1, 1]), squeeze_dims=[1])
print('logit: {}'.format(logits.get_shape()))
# Compute loss
if mode != 'gen':
neg_log_lhoods = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets)
if target_weight_strategy == 'rect':
avg_neg_log_lhood = tf.reduce_mean(neg_log_lhoods)
else:
neg_log_lhoods = tf.multiply(neg_log_lhoods, target_weights)
# be careful to have at least one weight be nonzero
# should we be taking the mean elem-wise by batch? i think this is a big bug
avg_neg_log_lhood = tf.reduce_sum(neg_log_lhoods) / tf.reduce_sum(target_weights)
neg_log_lhoods_inspect = tf.reshape(neg_log_lhoods, [batch_size, rnn_nunroll])
| tensorflow.reduce_mean | 11,756 |
import tensorflow as tf
length_1 = tf.reshape(box1[3 + 0], [1])
height_1 = tf.reshape(box1[3 + 2], [1])
width_1 = tf.reshape(box1[3 + 1], [1])
| tensorflow.reshape | 11,757 |
from tensorflow.python.framework import ops
filter_in_depth = int(filter_shape[2])
filter_out_depth = int(filter_shape[3])
return ops.OpStats("weight_parameters", (filter_height * filter_width *
filter_in_depth * filter_out_depth))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@ops.RegisterStatistics("BiasAdd", "weight_parameters")
def _calc_bias_add_weight_params(graph, node):
"""Calculates the on-disk weight parameters for BiasAdd."""
bias_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
bias_shape.assert_is_fully_defined()
bias_count = np.prod(bias_shape.as_list())
return ops.OpStats("weight_parameters", bias_count)
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
| tensorflow.python.framework.ops.RegisterStatistics | 11,758 |
import tensorflow as tf
self.frame_size_y,
self.rgb_channels))
is_training = False
with tf.variable_scope('RGB'):
self.feature, _ = InceptionI3d(
num_classes=self.num_classes,
spatial_squeeze=True,
final_endpoint=self.output_layer,
name='inception_i3d'
)(self.rgb_images_placeholder, is_training)
init = tf.global_variables_initializer()
config = tf.ConfigProto(log_device_placement=False)
if self.on_gpu:
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.sess.run(init)
checkpoint_file = self.model_dir
meta_graph_location = self.model_dir + '.meta'
| tensorflow.global_variables_initializer | 11,759 |
import tensorflow as tf
conv = tf.nn.conv2d(layer_last, filters, [1, 1, 1, 1], padding=padding)
biased = tf.nn.bias_add(conv, biases)
convolved = tf.nn.relu(biased)
pool_shape = [1, ptime, pband, 1]
pooled = tf.nn.max_pool(convolved, ksize=pool_shape, strides=pool_shape, padding='SAME')
print('{}: {}'.format(layer_name, pooled.get_shape()))
export_feat_tensors[layer_name] = pooled
| tensorflow.nn.max_pool | 11,760 |
import tensorflow as tf
grid = tf.reshape(grid, [-1])
grid = tf.tile(grid, tf.stack([num_batch]))
grid = tf.reshape(grid, tf.stack([num_batch, 4, -1]))
# Transform A x (x_t', y_t', 1, d_t)^T -> (x_s, y_s, z_s, 1).
t_g = tf.matmul(theta, grid)
z_s = tf.slice(t_g, [0, 0, 0], [-1, 1, -1])
y_s = tf.slice(t_g, [0, 1, 0], [-1, 1, -1])
x_s = tf.slice(t_g, [0, 2, 0], [-1, 1, -1])
z_s_flat = tf.reshape(z_s, [-1])
y_s_flat = tf.reshape(y_s, [-1])
| tensorflow.slice | 11,761 |
import tensorflow as tf
print ("querry_size mismatch")
query = tf.concat(values = [
query,
query,
], axis=1)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
| tensorflow.shape | 11,762 |
import tensorflow as tf
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
| tensorflow.get_variable | 11,763 |
import tensorflow as tf
print(sess.run(tf.cholesky(identity_matrix)))
print('\nselfAdjointEig(D):')
print(sess.run(tf.self_adjoint_eig(D)))
print(sess.run(tf.div(13, 4)))
print(sess.run(tf.truediv(13, 4)))
print(sess.run(tf.floordiv(13, 4)))
print(sess.run(tf.mod(13.2, 4)))
print(sess.run(tf.cross([1, 0, 0], [0, 1, 0])))
print(sess.run(tf.square([1, 2, 3])))
| tensorflow.truediv | 11,764 |
import tensorflow as tf
actions_arguments = tf.gather(batch_action_arguments, self.batch_idx)
with tf.name_scope('model'):
encoder_embedding = embedding(
| tensorflow.name_scope | 11,765 |
import tensorflow as tf
def prelu(_x, scope=''):
"""parametric ReLU activation"""
with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
_alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
def calc_auc(raw_arr):
"""Summary
| tensorflow.minimum | 11,766 |
import tensorflow as tf
output = tf.reshape(
input_transformed,
tf.stack([num_batch, out_depth, out_height, out_width, num_channels]))
return output
with tf.variable_scope(name):
output = _transform(theta, voxels, out_size, z_near, z_far)
return output
| tensorflow.variable_scope | 11,767 |
import tensorflow as tf
def sample_pair(batch):
num_sam = tools.shape(batch)[0]
index = tf.range(num_sam)
tgt1 = tf.slice(batch, [0, 1], [num_sam, 1])
pred1 = tf.slice(batch, [0, 0], [num_sam, 1])
def uniform():
| tensorflow.slice | 11,768 |
import tensorflow as tf
# In eager mode, all shapes are known, so these tests do not need to
# execute.
if tf.executing_eagerly():
return
| tensorflow.executing_eagerly | 11,769 |
import tensorflow as tf
labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
| tensorflow.placeholder | 11,770 |
import tensorflow as tf
def bias_variable(shape):
return tf.get_variable('b', shape, initializer=tf.constant_initializer(0.))
def keep_prob(dropout, train):
return tf.cond(train, lambda: tf.constant(dropout), lambda: tf.constant(1.))
def softmax_ce_with_logits(logits, labels):
return tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
def sigmoid_ce_with_logits(logits, labels):
return tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
def sigmoid_kl_with_logits(logits, targets):
assert isinstance(targets, float)
if targets in [0., 1.]:
entropy = 0.
else:
entropy = - targets*tf.log(targets) - (1. - targets)*tf.log(1. - targets)
return sigmoid_ce_with_logits(logits, tf.ones_like(logits)*targets) - entropy
| tensorflow.nn.sigmoid_cross_entropy_with_logits | 11,771 |
import tensorflow as tf
y0_f = tf.to_float(y0)
y1_f = tf.to_float(y1)
z0_f = tf.to_float(z0)
z1_f = tf.to_float(z1)
| tensorflow.to_float | 11,772 |
import tensorflow as tf
n_batch_size = args.n_batch_size
reg_lambda = args.reg_lambda
keep_prob = args.keep_prob
cross_stitch_enabled = args.cross_stitch_enabled
with tf.variable_scope("placeholder"):
X = tf.placeholder(tf.float32, (None, 128), "X")
y_1 = tf.placeholder(tf.float32, (None, n_output_1), "y_1")
y_2 = tf.placeholder(tf.float32, (None, n_output_2), "y_2")
is_training = tf.placeholder(tf.bool, (), "is_training")
| tensorflow.variable_scope | 11,773 |
import tensorflow as tf
op_id = tf.reshape(op_id, [1])
arc_seq = arc_seq.write(start_id + 2 * i + 1, op_id)
curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=op_id)
log_prob += curr_log_prob
curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=tf.nn.softmax(logits)))
entropy += curr_ent
inputs = tf.nn.embedding_lookup(self.w_emb, op_id)
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
anchors = anchors.write(layer_id, next_h[-1])
anchors_w_1 = anchors_w_1.write(layer_id, tf.matmul(next_h[-1], self.w_attn_1))
inputs = self.g_emb
return (layer_id + 1, inputs, next_c, next_h, anchors, anchors_w_1,
| tensorflow.nn.embedding_lookup | 11,774 |
import tensorflow as tf
:return: [Tensor] Convolution output.
"""
with tf.variable_scope(name):
in_filters = ksize[2]
out_filters = ksize[3]
n = ksize[0] * ksize[1] * out_filters
init = tf.truncated_normal_initializer(
mean=0.0, stddev=np.sqrt(2.0 / n), seed=0, dtype=dtype)
def _reg(x):
if weight_decay is not None:
return tf.multiply(tf.nn.l2_loss(x), weight_decay)
else:
return None
if weight_decay is not None:
reg = _reg
else:
reg = None
kernel = tf.get_variable(
'w', ksize, initializer=init, regularizer=reg, dtype=dtype, trainable=True)
| tensorflow.nn.l2_loss | 11,775 |
import tensorflow as tf
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
def _build_net(self, s, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
net = tf.layers.dense(s, 500, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l1', trainable=trainable)
net = tf.layers.dense(net, 200, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('a'):
actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
bias_initializer=init_b, name='a', trainable=trainable)
scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a') # Scale output to -action_bound to action_bound
return scaled_a
def learn(self, s): # batch update
self.sess.run(self.train_op, feed_dict={S: s})
if self.t_replace_counter % self.t_replace_iter == 0:
self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
self.t_replace_counter += 1
def choose_action(self, s):
| tensorflow.layers.dense | 11,776 |
import tensorflow as tf
new_mean = tf.assign_sub(
mean,
tf.check_numerics(
decay * (mean - cur_mean), "NaN in moving mean."))
with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
with ops.colocate_with(var):
new_var = tf.assign_sub(
var,
tf.check_numerics(decay * (var - cur_var),
"NaN in moving variance."))
with tf.name_scope(name, "IncrementTime", [step]):
with ops.colocate_with(step):
new_step = tf.assign_add(step, 1.)
used_var += 0. * new_mean * new_var * new_step
used_var += epsilon
| tensorflow.check_numerics | 11,777 |
import tensorflow as tf
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
| tensorflow.concat | 11,778 |
from tensorflow.contrib import layers
def _define_vars(self, params):
pass
def inference_graph(self, data):
with ops.device(self.device_assigner):
# Compute activations for the neural network.
nn_activations = layers.fully_connected(data, self.params.layer_size)
for _ in range(1, self.params.num_layers):
# pylint: disable=W0106
nn_activations = layers.fully_connected(nn_activations,
self.params.layer_size)
return nn_activations
class ManyToOneLayer(hybrid_layer.HybridLayer):
def _define_vars(self, params):
pass
def inference_graph(self, data):
| tensorflow.contrib.layers.fully_connected | 11,779 |
import tensorflow as tf
if context is not None and encoder.use_context:
state = tf.concat([state, context], axis=1)
e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs)
mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32)
e *= mask
if encoder.attn_norm_fun == 'none':
weights = e
elif encoder.attn_norm_fun == 'sigmoid':
weights = tf.nn.sigmoid(e)
elif encoder.attn_norm_fun == 'max':
weights = tf.one_hot(tf.argmax(e, -1), depth=tf.shape(e)[1])
else:
e -= tf.reduce_max(e, axis=1, keep_dims=True)
T = encoder.attn_temperature or 1.0
exp = tf.exp(e / T) * mask
weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)
weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1)
return weighted_average, weights
def no_attention(state, hidden_states, *args, **kwargs):
batch_size = tf.shape(state)[0]
weighted_average = tf.zeros(shape=tf.stack([batch_size, 0]))
weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]])
| tensorflow.reduce_max | 11,780 |
import tensorflow as tf
else:
d_checkpoints[r] += dr
def _unsparsify(x):
if not isinstance(x, tf.IndexedSlices):
return x
assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape"
indices = x.indices
while indices.shape.ndims < x.values.shape.ndims:
indices = tf.expand_dims(indices, -1)
return tf.scatter_nd(indices, x.values, x.dense_shape)
# partial derivatives to xs (usually the params of the neural net)
d_xs_new = dv[len(checkpoints_other):]
for j in range(len(xs)):
if d_xs_new[j] is not None:
if d_xs[j] is None:
d_xs[j] = _unsparsify(d_xs_new[j])
else:
| tensorflow.scatter_nd | 11,781 |
import tensorflow as tf
tf.gfile.MakeDirs(output_dir)
summary_writer = tf.summary.FileWriter(output_dir)
| tensorflow.summary.FileWriter | 11,782 |
import tensorflow as tf
reward = np.reshape(reward, ((batch_size, trace_length)))
reward_buffer = np.zeros(((batch_size, trace_length+1)))
reward_buffer[:, :trace_length] = reward
discounted_reward = np.zeros(((batch_size, trace_length)))
for t in range(trace_length-1, -1, -1):
reward_buffer[:,t+1:] *= y
discounted_reward[:,t] = np.sum(reward_buffer[:,t:],1)
return np.reshape(discounted_reward,(batch_size *trace_length))
def make_cube(trace_length):
cube = tf.Variable(tf.zeros([trace_length, trace_length, trace_length]))
cube_ops = []
for i in range(trace_length):
cube_ops.append(cube[i, :(i+1), :(i+1)].assign(tf.ones([i+1, i+1])))
return cube, cube_ops
| tensorflow.zeros | 11,783 |
import tensorflow as tf
head_etd = tf.expand_dims(head, 2) # bs,slh,1,vec
logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0) # bs,slh,sld,vec
logits_masked = exp_mask_for_high_rank(logits, attn_mask) # bs,slh,sld,vec
attn_score = tf.nn.softmax(logits_masked, 2) # bs,slh,sld,vec
attn_score = mask_for_high_rank(attn_score, attn_mask)
attn_result = tf.reduce_sum(attn_score * rep_map_tile, 2) # bs,slh,vec -> head_org_idx
return attn_result
| tensorflow.nn.softmax | 11,784 |
import tensorflow as tf
logging.warning(
'Input layer does not contain zero weights, so apply CQAT instead.')
centroids_mask = None
centroids, lookup = get_unique(weights)
num_centroids = tf.size(centroids)
if self.preserve_sparsity:
sparsity_mask = tf.math.divide_no_nan(weights, weights)
| tensorflow.size | 11,785 |
import tensorflow as tf
time_step_spec=environment.time_step_spec(),
action_spec=environment.action_spec(),
reward_network=network,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=LR),
epsilon=EPSILON)
elif FLAGS.agent == 'Mix':
| tensorflow.compat.v1.train.AdamOptimizer | 11,786 |
import tensorflow as tf
def get_keypoint(image, targets, predictions, heatmap_size, height, width, category, clip_at_zero=True, data_format='channels_last', name=None):
predictions = tf.reshape(predictions, [1, -1, heatmap_size*heatmap_size])
| tensorflow.reshape | 11,787 |
import tensorflow as tf
replaced_list = var_list
if self._scale != 1.0:
loss = tf.scalar_mul(self._scale, loss)
gradvar = self._optimizer.compute_gradients(loss, replaced_list, *args, **kwargs)
final_gradvar = []
for orig_var, (grad, var) in zip(var_list, gradvar):
if var is not orig_var:
grad = tf.cast(grad, orig_var.dtype)
if self._scale != 1.0:
grad = tf.scalar_mul(1. / self._scale, grad)
final_gradvar.append((grad, orig_var))
return final_gradvar
def apply_gradients(self, *args, **kwargs):
return self._optimizer.apply_gradients(*args, **kwargs)
def main(argv=None):
start1 = time.time()
import os
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
| tensorflow.scalar_mul | 11,788 |
import tensorflow as tf
x = tf.layers.dense(x, size, activation=tf.nn.relu)
mean = tf.layers.dense(
x, action_space.shape[0], activation=tf.tanh,
kernel_initializer=mean_weights_initializer)
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
logstd = tf.tile(
logstd[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope("value"):
x = flat_observations
for size in config.value_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
value = tf.layers.dense(x, 1)[..., 0]
mean = tf.check_numerics(mean, "mean")
logstd = tf.check_numerics(logstd, "logstd")
| tensorflow.shape | 11,789 |
import tensorflow as tf
"""
logits, losses = self(features) # pylint: disable=not-callable
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses
def _shard_features(self, features): # pylint: disable=missing-docstring
sharded_features = dict()
for k, v in six.iteritems(features):
v = tf.convert_to_tensor(v)
v_shape = common_layers.shape_list(v)
| tensorflow.reshape | 11,790 |
import tensorflow as tf
scales_to_logits = outputs_to_scales_to_logits[output]
logits = scales_to_logits[MERGED_LOGITS_SCOPE]
# There are two ways to obtain the final prediction results: (1) bilinear
# upsampling the logits followed by argmax, or (2) argmax followed by
# nearest neighbor upsampling. The second option may introduce the "blocking
# effect" but is computationally efficient.
if model_options.prediction_with_upsampled_logits:
logits = _resize_bilinear(logits,
#tf.shape(images)[1:3],
tf.TensorShape([512,512]),
scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
predictions[output] = tf.argmax(logits, 3, output_type=tf.dtypes.int32)
#predictions[output + PROB_SUFFIX] = tf.nn.softmax(logits)
else:
argmax_results = tf.argmax(logits, 3, output_type=tf.dtypes.int32)
argmax_results = tf.image.resize_nearest_neighbor(
tf.expand_dims(argmax_results, 3),
tf.shape(images)[1:3],
align_corners=True,
name='resize_prediction')
predictions[output] = tf.squeeze(argmax_results, 3)
#predictions[output + PROB_SUFFIX] = tf.image.resize_bilinear(
| tensorflow.argmax | 11,791 |
import tensorflow as tf
def validation_mapper(byte):
image = tf.image.decode_jpeg(
tf.reshape(byte, shape=[]), 3, **JPEG_OPT)
image = resize_shortest_edge(image, tf.shape(image), 256)
image = center_crop(image, 224)
image = tf.reverse(image, axis=[2]) # to BGR
return image
| tensorflow.shape | 11,792 |
import tensorflow as tf
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
| tensorflow.train.start_queue_runners | 11,793 |
import tensorflow as tf
self.assertEqual(np.int64(15), v.eval())
def testSomeErrors(self):
with tf.Graph().as_default():
v0 = tf.Variable([10.0], name="v0")
v1 = tf.Variable([20.0], name="v1")
v2 = tf.Variable([20.0], name="v2")
v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# By default the name used for "v2" will be "v1" and raise an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
tf.train.Saver([v0, v1, v2])
| tensorflow.Variable | 11,794 |
import tensorflow as tf
labels = host_labels
with tf.device(self.devices[device_num]):
# Rescale to [0, 1)
images *= 1. / 256
# Rescale to [-1,1] instead of [0, 1)
images = tf.subtract(images, 0.5)
images = tf.multiply(images, 2.0)
if self.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
if input_data_type != data_type:
images = tf.cast(images, data_type)
network = ConvNetBuilder(
images, input_nchan, phase_train, self.data_format, data_type)
self.model_conf.add_inference(network)
# Add the final fully-connected class layer
logits = network.affine(nclass, activation='linear')
if not phase_train:
top_1_op = tf.reduce_sum(
tf.cast(tf.nn.in_top_k(logits, labels, 1), data_type))
top_5_op = tf.reduce_sum(
tf.cast(tf.nn.in_top_k(logits, labels, 5), data_type))
return (logits, top_1_op, top_5_op)
| tensorflow.cast | 11,795 |
import tensorflow as tf
validation_input_files.extend(tf.gfile.Glob(input_pattern))
if FLAGS.validation_input_dir is not None:
for filename in tf.gfile.ListDirectory(FLAGS.validation_input_dir):
validation_input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.validation_input_dir, filename)))
tf.logging.info("*** Input Validation Files ***")
for input_file in validation_input_files:
tf.logging.info(" %s" % input_file)
config = tf.ConfigProto()
if FLAGS.xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
if FLAGS.use_hvd:
config.gpu_options.visible_device_list = str(hvd.local_rank())
config.gpu_options.allow_growth=True
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir,
| tensorflow.ConfigProto | 11,796 |
import tensorflow as tf
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
if mask is not None:
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
| tensorflow.layers.dense | 11,797 |
import tensorflow as tf
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
# d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
# and we *don't* want to drop the remainder, otherwise we wont cover
# every sample.
d = d.apply(
| tensorflow.data.TFRecordDataset | 11,798 |
import tensorflow as tf
self.X = tf.placeholder(tf.int32, (None, None))
self.training = tf.placeholder(tf.bool, None)
| tensorflow.placeholder | 11,799 |