seed (string, lengths 25–2.89k) | seed_api (string, lengths 14–102) | index (int64, 0–14.8k) |
---|---|---|
import tensorflow as tf
initializer=tf.constant_initializer(b_init))
b_soft_no_learn = np.array(
[0.25, 0.25] + [-0.25] * (self.num_branches - 2), dtype=np.float32)
b_soft_no_learn = np.reshape(b_soft_no_learn, [1, self.num_branches])
self.b_soft_no_learn = tf.constant(b_soft_no_learn, dtype=tf.float32)
with tf.variable_scope("attention"):
self.w_attn_1 = tf.get_variable("w_1", [self.lstm_size, self.lstm_size])
self.w_attn_2 = tf.get_variable("w_2", [self.lstm_size, self.lstm_size])
self.v_attn = tf.get_variable("v", [self.lstm_size, 1])
def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False):
"""Build the sampler ops and the log_prob ops."""
print ("-" * 80)
| tensorflow.get_variable | 12,400 |
import tensorflow as tf
num_steps=2,
).prefetch(3)
self._iterator = iter(dataset)
experience, unused_info = next(self._iterator)
if self._relabel_type in ["soft", "random"]:
experience = self._soft_relabel(experience)
elif self._relabel_type in ["last", "future"]:
# Reassign the next_states to have the same goal as the current states
_, tasks = self._task_distribution.split(experience.observation[:, 0])
next_states, _ = self._task_distribution.split(experience.observation[:,
1])
next_states_and_tasks = self._task_distribution.combine(
next_states, tasks)
new_observation = tf.concat(
[
experience.observation[:, 0][:, None], next_states_and_tasks[:,
None]
],
axis=1,
)
assert new_observation.shape == experience.observation.shape
experience = experience.replace(observation=new_observation)
if self._relabel_type is not None:
# Recompute rewards and done flags
states, tasks = self._task_distribution.split(experience.observation[:,
0])
| tensorflow.concat | 12,401 |
import tensorflow as tf
uniques:
([1.0, 2.0, 3.0])
output final index:
([[0, 1],
[1, 2],
[2, 2],
[0, 1]]
)
"""
t_flatten = tf.reshape(t, shape=(-1,))
uniques, index = tf.unique(t_flatten)
return uniques, tf.reshape(index, shape=tf.shape(t))
class _ClusterPreserveInfo(object):
"""ClusterPreserveInfo."""
def __init__(self, weight_attrs, quantize_config_attrs):
| tensorflow.reshape | 12,402 |
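A minimal sketch (TensorFlow 1.x assumed) of the flatten–unique–reshape pattern above, reproducing the docstring's example output; the enclosing function's name is not shown in the snippet, so the pattern is inlined here:

```python
import tensorflow as tf

# Flatten, find unique values, then reshape the index map back to t's shape.
t = tf.constant([[1.0, 2.0], [2.0, 3.0], [3.0, 3.0], [1.0, 2.0]])
t_flatten = tf.reshape(t, shape=(-1,))
uniques, index = tf.unique(t_flatten)
index = tf.reshape(index, shape=tf.shape(t))

with tf.Session() as sess:
    print(sess.run(uniques))  # [1. 2. 3.]
    print(sess.run(index))    # [[0 1] [1 2] [2 2] [0 1]]
```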
import tensorflow as tf
def f(a):
return a
f(tf.constant([1]))
# Intentionally using tf.Session() instead of self.test_session() to have
# control over closing the session. test_session() is a cached session.
with tf.Session():
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
time.sleep(_SLEEP_TIME)
coord.request_stop() # Calls close operation.
coord.join()
# Session closed.
| tensorflow.Session | 12,403 |
import tensorflow as tf
logits) = classifier.classifier(model_config,
model.get_pooled_output(),
num_labels,
label_ids,
dropout_prob)
label_loss = tf.reduce_sum(per_example_loss * features["label_ratio"]) / (1e-10 + tf.reduce_sum(features["label_ratio"]))
tf.get_variable_scope().reuse_variables()
(tgt_loss,
tgt_per_example_loss,
| tensorflow.reduce_sum | 12,404 |
import tensorflow as tf
use_xavier: bool, whether to use xavier initializer
Returns:
Variable Tensor
"""
if use_xavier:
initializer = tf.contrib.layers.xavier_initializer()
var = _variable_on_cpu(name, shape, initializer)
else:
# initializer = tf.truncated_normal_initializer(stddev=stddev)
with tf.device('/cpu:0'):
var = tf.truncated_normal(shape, stddev=np.sqrt(2 / shape[-1]))
var = tf.round(var * tf.constant(1000, dtype=tf.float32)) / tf.constant(1000, dtype=tf.float32)
var = tf.Variable(var, name='weights')
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
| tensorflow.device | 12,405 |
import tensorflow as tf
# saved model restoring
if args.restore:
# Restore saved model if the user requested it, default = True
try:
checkpoint_state = tf.train.get_checkpoint_state(save_dir)
if checkpoint_state and checkpoint_state.model_checkpoint_path:
log("Loading checkpoint {}".format(checkpoint_state.model_checkpoint_path),
slack=True)
| tensorflow.train.get_checkpoint_state | 12,406 |
import tensorflow as tf
initializer=tf.random_normal_initializer(stddev=stddev))
# print("w", w.get_shape())
try:
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
strides=[1, d_h, d_w, 1])
| tensorflow.nn.conv2d_transpose | 12,407 |
import tensorflow as tf
class StackedBilstmCrfModel(Model):
@classmethod
def default_params(cls):
default_params = {
'stacked_layers': 2
}
return default_params
def bilstm_layer(self, embeddings, nwords):
t = tf.transpose(embeddings, perm=[1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
output_fw, _ = lstm_cell_fw(t, dtype=tf.float32,
sequence_length=nwords)
output_bw, _ = lstm_cell_bw(t, dtype=tf.float32,
sequence_length=nwords)
output = tf.concat([output_fw, output_bw], axis=-1)
# transpose it back
output = tf.transpose(output, perm=[1, 0, 2])
return output
def call(self, embeddings, nwords):
| tensorflow.contrib.rnn.LSTMBlockFusedCell | 12,408 |
import tensorflow as tf
l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)
x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
| tensorflow.shape | 12,409 |
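The snippet draws logistic samples by inverse-CDF: since the logistic CDF is sigmoid((x - mean) / scale), its inverse is mean + scale * (log u - log(1 - u)). A minimal sketch, assuming TensorFlow 1.x:

```python
import tensorflow as tf

means = tf.zeros([4])
log_scales = tf.fill([4], -1.0)  # scale = exp(-1)

# u is clipped away from {0, 1} so the logit below stays finite.
u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)
x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))

with tf.Session() as sess:
    print(sess.run(x))  # four samples from Logistic(0, exp(-1))
```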
import tensorflow as tf
init_fw, init_bw = self.inits[layer]
mask_fw, mask_bw = self.dropout_mask[layer]
with tf.variable_scope("fw_{}".format(layer)):
out_fw, _ = tf.nn.dynamic_rnn(
gru_fw, outputs[-1] * mask_fw, seq_len, initial_state=init_fw, dtype=tf.float32)
with tf.variable_scope("bw_{}".format(layer)):
inputs_bw = tf.reverse_sequence(
outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
out_bw, _ = tf.nn.dynamic_rnn(
gru_bw, inputs_bw, seq_len, initial_state=init_bw, dtype=tf.float32)
out_bw = tf.reverse_sequence(
out_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
outputs.append(tf.concat([out_fw, out_bw], axis=2))
if concat_layers:
res = tf.concat(outputs[1:], axis=2)
else:
res = outputs[-1]
return res
class ptr_net:
def __init__(self, batch, hidden, keep_prob=1.0, is_train=None, scope="ptr_net"):
self.gru = tf.contrib.rnn.GRUCell(hidden)
self.batch = batch
| tensorflow.concat | 12,410 |
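The backward direction above is a forward RNN run over inputs flipped with tf.reverse_sequence, which reverses each row only up to its true length. A minimal sketch of that op, assuming TensorFlow 1.x:

```python
import tensorflow as tf

x = tf.constant([[1, 2, 3, 0],
                 [4, 5, 0, 0]])
seq_len = tf.constant([3, 2])

# Only the first seq_len[i] entries of row i are reversed; padding stays put.
rev = tf.reverse_sequence(x, seq_lengths=seq_len, seq_dim=1, batch_dim=0)

with tf.Session() as sess:
    print(sess.run(rev))  # [[3 2 1 0], [5 4 0 0]]
```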
import tensorflow as tf
self.end_label = tf.placeholder(tf.int32, [None], "answer_label2")
else:
self.c = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len],
"context")
self.q = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len],
"question")
self.ch = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len,
self.config.max_ch_len], "context_char")
self.qh = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len,
self.config.max_ch_len], "question_char")
self.start_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label1")
self.end_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label2")
| tensorflow.placeholder | 12,411 |
import tensorflow as tf
self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
self.rewards = tf.placeholder(tf.float32, [None, 1], 'rewards')
self.keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
# Dataset with experience replay
self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
'rewards': self.rewards, 'advantage': self.advantage})
self.dataset = self.dataset.batch(self.MINIBATCH, drop_remainder=True)
self.data_iter = self.dataset.make_initializable_iterator()
batch = self.data_iter.get_next()
# Call ppo net
| tensorflow.data.Dataset.from_tensor_slices | 12,412 |
import tensorflow as tf
x_blend_np = sess.run(x_blend)
x_blend_expected_np = 0.8 * sess.run(
layers.upscale(layers.downscale(x, 2), 2)) + 0.2 * x_np
self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)
def test_num_filters(self):
self.assertEqual(networks.num_filters(1, 4096, 1, 256), 256)
self.assertEqual(networks.num_filters(5, 4096, 1, 256), 128)
def test_generator_grad_norm_progress(self):
stable_stage_num_images = 2
transition_stage_num_images = 3
current_image_id_ph = tf.placeholder(tf.int32, [])
progress = networks.compute_progress(
current_image_id_ph,
stable_stage_num_images,
transition_stage_num_images,
num_blocks=3)
z = tf.random_normal([2, 10], dtype=tf.float32)
x, _ = networks.generator(
z, progress, _num_filters_stub,
networks.ResolutionSchedule(
start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
fake_loss = tf.reduce_sum(tf.square(x))
grad_norms = [
| tensorflow.placeholder | 12,413 |
import tensorflow as tf
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model_bak.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
| tensorflow.zeros_initializer | 12,414 |
import tensorflow as tf
Args:
xs: 4-D `tensor` [batch_size, height, width, channels], input
Returns:
a `float` decov loss
"""
with tf.name_scope(name):
x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
m = tf.reduce_mean(x, 0, True)
z = tf.expand_dims(x - m, 2)
corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)
corr_frob_sqr = tf.reduce_sum(tf.square(corr))
corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
loss = 0.5 * (corr_frob_sqr - corr_diag_sqr)
return loss
| tensorflow.reduce_mean | 12,415 |
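This is a DeCov-style penalty: it builds the batch covariance of the flattened activations and keeps only its off-diagonal mass, since the squared diagonal is subtracted from the squared Frobenius norm. A minimal usage sketch, assuming TensorFlow 1.x; the def line is not shown above, so the name `decov_loss` is hypothetical:

```python
import tensorflow as tf

tf.set_random_seed(0)
xs = tf.random_normal([8, 4, 4, 3])  # [batch, height, width, channels]
# decov_loss is a hypothetical name for the function whose body appears above.
loss = decov_loss(xs, name='decov_loss')

with tf.Session() as sess:
    print(sess.run(loss))  # non-negative scalar penalty
```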
import tensorflow as tf
else:
raise ValueError('Number of scales must stay constant or decrease, got {}'.format(FLAGS.pm))
out = tf.nn.max_pool3d(bottom, ksize=[1,kernel_size,1,1,1], strides=[1,1,1,1,1], padding='VALID')
shape = out.get_shape()
print('scale{}'.format(l + 1))
print('\t{} --> {}'.format(bottom.name, out.name))
print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape()))
with tf.variable_scope('fully_connected'):
bottom = out
bottom_shape = bottom.get_shape().as_list()
reshape = tf.reshape(
bottom,
[-1, bottom_shape[1] * bottom_shape[2] * bottom_shape[3] * bottom_shape[4]])
W_fc1 = weight_variable([bottom_shape[1] * bottom_shape[2] * bottom_shape[3] * bottom_shape[4], NUM_CLASSES()])
| tensorflow.variable_scope | 12,416 |
import tensorflow as tf
pos = beam_search.resize_like(pos, symbol)
max_pos = beam_search.resize_like(max_pos, symbol)
pos += tf.to_float(is_not_ins)
if max_pos is not None:
pos = tf.minimum(pos, tf.to_float(max_pos))
return pos
def generate(state, input_, context):
if decoder.pred_use_lstm_state is False:  # for backward compatibility
| tensorflow.to_float | 12,417 |
import tensorflow as tf
"""Decorator to capture ops created in the block.
with capture_ops() as ops:
# create some ops
print(ops) # => prints ops created.
"""
micros = int(time.time()*10**6)
scope_name = str(micros)
op_list = []
with tf.name_scope(scope_name):
yield op_list
g = tf.get_default_graph()
op_list.extend(ge.select_ops(scope_name+"/.*", graph=g))
def _to_op(tensor_or_op):
if hasattr(tensor_or_op, "op"):
return tensor_or_op.op
| tensorflow.name_scope | 12,418 |
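A self-contained version of the capture_ops context manager whose body appears above: ops created inside the block land under a unique name scope and are collected afterwards with the graph editor. A minimal sketch, assuming TensorFlow 1.x with tf.contrib.graph_editor:

```python
import contextlib
import time

import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

@contextlib.contextmanager
def capture_ops():
    """Yields a list that is filled with the ops created in the block."""
    micros = int(time.time() * 10**6)
    scope_name = str(micros)  # unique scope per invocation
    op_list = []
    with tf.name_scope(scope_name):
        yield op_list
    g = tf.get_default_graph()
    op_list.extend(ge.select_ops(scope_name + "/.*", graph=g))

with capture_ops() as ops:
    _ = tf.constant(1.0) + tf.constant(2.0)
print(ops)  # => the Const/Add ops created inside the block
```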
import tensorflow as tf
network = resnet_model.imagenet_resnet_v2(
resnet_size=18, num_classes=class_num, mode='se', data_format=None)
inputs= network(inputs=inputs, is_training=training)
feat = tf.nn.l2_normalize(inputs, 1, 1e-10, name='feat')
inputs = tf.layers.dense(inputs=inputs, units=class_num)
# inputs = tf.layers.dense(inputs=feat, units=class_num)
inputs = tf.identity(inputs, 'final_dense')
return inputs, feat
# image_size = 32, img_channels = 3, class_num = 10 in cifar10
x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])
label = tf.placeholder(tf.float32, shape=[None,])
one_hot_labels = tf.one_hot(indices=tf.cast(label, tf.int32), depth=class_num)
training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
logits, feat = resnet_model_fn(x, training=training_flag)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits))
Focal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5))
l2_loss = weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
Center_loss, Centers = center_loss(feat, tf.cast(label, dtype=tf.int32), 0.95, class_num)
Total_loss = cost + l2_loss
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
| tensorflow.placeholder | 12,419 |
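The control-dependency wiring at the end of the snippet is the standard batch-norm training pattern: the moving-average update ops collected under tf.GraphKeys.UPDATE_OPS must run alongside the train op. A minimal sketch, assuming TensorFlow 1.x:

```python
import tensorflow as tf

x = tf.random_normal([8, 4])
h = tf.layers.batch_normalization(x, training=True)  # registers UPDATE_OPS
loss = tf.reduce_mean(tf.square(h))

optimizer = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9,
                                       use_nesterov=True)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)
```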
import tensorflow as tf
# Book keeping
step = 0
time_window = ValueWindow(100)
loss_window = ValueWindow(100)
saver = tf.train.Saver(max_to_keep=5)
log("Tacotron training set to a maximum of {} steps".format(args.tacotron_train_steps))
# Memory allocation on the GPU as needed
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
# Train
with tf.Session(config=config) as sess:
try:
summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
sess.run(tf.global_variables_initializer())
# saved model restoring
if args.restore:
# Restore saved model if the user requested it, default = True
try:
checkpoint_state = tf.train.get_checkpoint_state(save_dir)
if checkpoint_state and checkpoint_state.model_checkpoint_path:
log("Loading checkpoint {}".format(checkpoint_state.model_checkpoint_path),
slack=True)
| tensorflow.Session | 12,420 |
import tensorflow as tf
simple_value=before_loss),
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss",
simple_value=after_loss),
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss",
simple_value=stop_token_loss),
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss", simple_value=loss),
| tensorflow.Summary.Value | 12,421 |
import tensorflow as tf
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
| tensorflow.global_variables_initializer | 12,422 |
import tensorflow as tf
return blk_indices_crop
def _strides_one():
# Calculate output indices when strides = 1.
return blk_indices[:, :q_shape[1], :q_shape[2], :]
strides_gt_one = tf.logical_or(tf.greater(strides[1], 1), tf.greater(strides[2], 1))
blk_indices_crop = tf.cond(strides_gt_one, _strides_gt_one, _strides_one)
y = tf.scatter_nd(blk_indices_crop, q, out_shape)
return y
return tf.cond(
tf.equal(tf.size(blk_indices_), 0), lambda: tf.zeros(out_shape, dtype=x.dtype),
_conv_nonzero)
# returns an int64 start timer handle that should be passed to cuda_timer_end_op
def cuda_timer_start_op():
return sbnet_module.cuda_timer_start()
# returns a float
def cuda_timer_end_op(start_timer):
return sbnet_module.cuda_timer_end(start_timer)
| tensorflow.zeros | 12,423 |
import tensorflow as tf
return x
def lstm():
'''
Build LSTM cell
'''
pass
def loss(logits, labels):
'''
Compute loss
'''
with tf.name_scope('loss') as scope:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross-entropy')
loss = tf.reduce_mean(cross_entropy, name='loss')
tf.summary.scalar(scope+'/loss', loss)
return loss
def accuracy(logits, labels):
'''
Evaluate the quality of the logits at predicting the label
'''
# for summary
with tf.name_scope('accuracy') as scope:
| tensorflow.nn.softmax_cross_entropy_with_logits | 12,424 |
import tensorflow as tf
output_dims = get_deconv2d_output_dims(input_dims,
filter_dims,
stride_dims,
padding)
with tf.variable_scope(scope):
deconv_weight = tf.Variable(
tf.random_normal([filter_h, filter_w, num_channels_out, num_channels_in], stddev=0.1, dtype=tf.float32))
deconv_bias = tf.Variable(tf.zeros([num_channels_out], dtype=tf.float32))
map = tf.nn.conv2d_transpose(input_data, deconv_weight, output_dims, strides=[1, stride_h, stride_w, 1],
padding=padding)
map = tf.nn.bias_add(map, deconv_bias)
activation = non_linear_fn(map)
| tensorflow.zeros | 12,425 |
import tensorflow as tf
LTA = tf.matmul(L, A_tiled, transpose_a=True) # R x M x N
else: # pragma: no cover
raise ValueError("Bad dimension for q_sqrt: %s" %
str(q_sqrt.get_shape().ndims))
if full_cov:
fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True) # R x N x N
else:
fvar = fvar + tf.reduce_sum(tf.square(LTA), 1) # R x N
if not full_cov:
fvar = tf.transpose(fvar) # N x R
return fmean, fvar # N x R, R x N x N or N x R
| tensorflow.square | 12,426 |
import tensorflow as tf
def correlation(x, y):
x = x - tf.reduce_mean(x, axis=-1, keepdims=True)
y = y - tf.reduce_mean(y, axis=-1, keepdims=True)
x = tf.nn.l2_normalize(x, -1)
y = tf.nn.l2_normalize(y, -1)
return -tf.reduce_sum(x*y, axis=-1) # higher the better
def kd(x, y):
x_prob = tf.nn.softmax(x)
print(x_prob.get_shape(), y.get_shape(), tf.reduce_sum(x_prob * y, axis=-1).get_shape())
return -tf.reduce_sum(x_prob * y, axis=-1) # higher the better
def mse(x, y):
x = x - tf.reduce_mean(x, axis=-1, keepdims=True)
y = y - tf.reduce_mean(y, axis=-1, keepdims=True)
return tf.reduce_sum((x-y)**2, axis=-1) # lower the better
def kd_distance(x, y, dist_type):
| tensorflow.reduce_sum | 12,427 |
import tensorflow as tf
# we don't actually round to the nearest 8bit value when sampling
u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)
x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
x1 = tf.minimum(tf.maximum(
x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
x2 = tf.minimum(tf.maximum(
x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
| tensorflow.maximum | 12,428 |
import tensorflow as tf
true_positives_lower_bound(labels, logits, weights, surrogate_type),
'false_positives_upper_bound':
false_positives_upper_bound(labels, logits, weights, surrogate_type)
}
return loss, other_outputs
def precision_at_recall_loss(labels,
logits,
target_recall,
weights=1.0,
dual_rate_factor=0.1,
label_priors=None,
surrogate_type='xent',
lambdas_initializer=tf.constant_initializer(1.0),
reuse=None,
variables_collections=None,
trainable=True,
scope=None):
"""Computes precision at recall loss.
The loss is based on a surrogate of the form
wt * loss(-) + lambdas * (pi * (b - 1) + wt * loss(+))
where:
- loss(-) is the cross-entropy loss on the negative examples
- loss(+) is the cross-entropy loss on the positive examples
- wt is a scalar or tensor of per-example weights
- b is the target recall
- pi is the label_priors.
The per-example weights change not only the coefficients of individual
| tensorflow.constant_initializer | 12,429 |
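A hedged usage sketch of the function whose signature and docstring appear above: the Lagrange multipliers (lambdas) are created as internal variables, so the graph needs a variable initializer before the loss is evaluated. Shapes and values below are illustrative only:

```python
import tensorflow as tf

labels = tf.constant([[1.0], [0.0], [1.0], [0.0]])
logits = tf.constant([[2.0], [1.5], [-0.5], [-2.0]])

# precision_at_recall_loss is the function defined above; it returns a
# component-wise loss plus auxiliary outputs.
loss, other_outputs = precision_at_recall_loss(
    labels, logits, target_recall=0.9, dual_rate_factor=0.1)
train_loss = tf.reduce_mean(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(train_loss))
```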
import tensorflow as tf
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
| tensorflow.placeholder | 12,430 |
import tensorflow as tf
global_step=self.global_step)
ranker_updates = opt_ranker.apply_gradients(zip(ranking_model_gradients, ranking_model_params))
self.updates = tf.group(denoise_updates, ranker_updates)
def DenoisingNet(self, list_size, forward_only=False, scope=None):
with tf.variable_scope(scope or "denoising_model"):
# If we are in testing, do not compute propensity
if forward_only:
return tf.ones_like(self.output)#, tf.ones_like(self.output)
input_vec_size = list_size*4
def propensity_network(input_data, index):
reuse = None if index < 1 else True
propensity_initializer = tf.constant_initializer(0.001) if self.hparams.constant_propensity_initialization else None
with tf.variable_scope("propensity_network", initializer=propensity_initializer,
reuse=reuse):
output_data = input_data
current_size = input_vec_size
output_sizes = [
int((list_size+1)/2) + 1,
int((list_size+1)/4) + 1,
1
]
for i in range(len(output_sizes)):
expand_W = tf.get_variable("W_%d" % i, [current_size, output_sizes[i]])
expand_b = tf.get_variable("b_%d" % i, [output_sizes[i]])
output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
| tensorflow.constant_initializer | 12,431 |
import tensorflow as tf
# Build the processing and model for the worker.
with tf.device(self.cpu_device):
nclass, images_splits, labels_splits = add_image_preprocessing(
self.dataset, input_nchan, image_size, self.batch_size,
len(self.devices), input_data_type, self.resize_method,
not FLAGS.eval)
update_ops = None
staging_delta_ops = []
for device_num in range(len(self.devices)):
with self.variable_mgr.create_outer_variable_scope(
device_num), tf.name_scope('tower_%i' % device_num) as name_scope:
results = self.add_forward_pass_and_gradients(
images_splits[device_num], labels_splits[device_num], nclass,
phase_train, device_num, input_data_type, data_type, input_nchan,
use_synthetic_gpu_images, gpu_copy_stage_ops, gpu_compute_stage_ops,
gpu_grad_stage_ops)
if phase_train:
losses.append(results[0])
device_grads.append(results[1])
else:
all_logits.append(results[0])
all_top_1_ops.append(results[1])
| tensorflow.name_scope | 12,432 |
import tensorflow as tf
print("episodes %d" % len(episode_rewards))
print("exploration %f" % exploration.value(t))
print("learning_rate %f" % optimizer_spec.lr_schedule.value(t))
mean_rew_summ = tf.Summary(value=[tf.Summary.Value(tag='mean_rew',simple_value=mean_episode_reward)])
best_mean_rew_summ = tf.Summary(value=[tf.Summary.Value(tag='best_mean_rew',simple_value=best_mean_episode_reward)])
writer.add_summary(mean_rew_summ, global_step=t)
| tensorflow.Summary.Value | 12,433 |
import tensorflow as tf
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
| tensorflow.nn.sigmoid | 12,434 |
import tensorflow as tf
}
_EVAL_FEATURE_MAP = {
movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
rconst.DUPLICATE_MASK: tf.FixedLenFeature([], dtype=tf.string)
}
class DatasetManager(object):
"""Helper class for handling TensorFlow specific data tasks.
| tensorflow.FixedLenFeature | 12,435 |
import tensorflow as tf
num_classes = self._hparams.num_classes
is_binary = num_classes == 1
is_binary = is_binary or (num_classes <= 0 and logits.shape[1] == 1)
if is_binary:
pred = tf.greater(logits, 0)
logits = tf.reshape(logits, [-1])
else:
pred = tf.argmax(logits, 1)
pred = tf.cast(tf.reshape(pred, [-1]), tf.int64)
| tensorflow.greater | 12,436 |
import tensorflow as tf
z_t = 1 / z_t
d_t = 1 / z_t
x_t /= z_t
y_t /= z_t
x_t_flat = tf.reshape(x_t, (1, -1))
y_t_flat = tf.reshape(y_t, (1, -1))
d_t_flat = tf.reshape(d_t, (1, -1))
ones = tf.ones_like(x_t_flat)
grid = tf.concat([d_t_flat, y_t_flat, x_t_flat, ones], 0)
return grid
def _transform(theta, input_dim, out_size, z_near, z_far):
with tf.variable_scope('_transform'):
num_batch = input_dim.get_shape().as_list()[0]
num_channels = input_dim.get_shape().as_list()[4]
theta = tf.reshape(theta, (-1, 4, 4))
theta = tf.cast(theta, 'float32')
out_depth = out_size[0]
| tensorflow.concat | 12,437 |
import tensorflow.contrib.slim as slim
with tf.variable_scope(tf.get_variable_scope()):
for i in range(num_gpu):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i):
with slim.arg_scope(
[slim.model_variable, slim.variable],
device='/device:CPU:0'):
with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,
slim.conv2d_transpose, slim.separable_conv2d,
| tensorflow.contrib.slim.arg_scope | 12,438 |
import tensorflow as tf
with config.unlocked:
config.logdir = logdir
message = 'Start a new run and write summaries and checkpoints to {}.'
tf.logging.info(message.format(config.logdir))
tf.gfile.MakeDirs(config.logdir)
config_path = os.path.join(config.logdir, 'config.yaml')
with tf.gfile.GFile(config_path, 'w') as file_:
yaml.dump(
config, file_, yaml.Dumper,
allow_unicode=True,
default_flow_style=False)
else:
| tensorflow.gfile.GFile | 12,439 |
import tensorflow as tf
else:
initializer = tf.random_normal_initializer(stddev=weight_scale)
with tf.device('/cpu:0'):
embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape, initializer=initializer)
input_shape = tf.shape(decoder_inputs)
batch_size = input_shape[0]
time_steps = input_shape[1]
scope_name = 'decoder_{}'.format(decoder.name)
scope_name += '/' + '_'.join(encoder.name for encoder in encoders)
| tensorflow.shape | 12,440 |
import tensorflow as tf
if encoder.conv_lstm_size:
cell = BasicConvLSTMCell([feature_size, channels], encoder.conv_lstm_size, 1)
encoder_inputs_, _ = tf.nn.bidirectional_dynamic_rnn(
cell, cell, encoder_inputs_,
dtype=tf.float32
| tensorflow.nn.bidirectional_dynamic_rnn | 12,441 |
import tensorflow as tf
follows:[x, y, z, length, width, height, yaw].
box2: Input tensor with shape [B, 7] where the inner dimensions are as
follows:[x, y, z, length, width, height, yaw].
Returns:
The IoU between the two bounding boxes.
"""
box1 = box1.numpy() if isinstance(box1, tf.Tensor) else box1
box2 = box2.numpy() if isinstance(box2, tf.Tensor) else box2
box1 = box1.astype(np.float32)
box2 = box2.astype(np.float32)
# np_box_ops rotates around z, while we rotate around y, so the axes need to be swapped
center_1 = tf.reshape(box1[0:3][[0, 2, 1]], [1, 3])
center_2 = tf.reshape(box2[0:3][[0, 2, 1]], [1, 3])
rotation_z_1 = tf.reshape(box1[-1], [1])
rotation_z_2 = tf.reshape(box2[-1], [1])
length_1 = tf.reshape(box1[3 + 0], [1])
height_1 = tf.reshape(box1[3 + 2], [1])
width_1 = tf.reshape(box1[3 + 1], [1])
length_2 = tf.reshape(box2[3 + 0], [1])
height_2 = tf.reshape(box2[3 + 2], [1])
width_2 = tf.reshape(box2[3 + 1], [1])
iou = np.squeeze(np_box_ops.iou3d_7dof_box(
length_1, height_1, width_1, center_1, rotation_z_1,
| tensorflow.reshape | 12,442 |
import tensorflow as tf
os.mkdir(weights_dir + '/best_models')
# Create a saver.
saver = tf.train.Saver(max_to_keep=None)
if self.is_summary:
training_batch_summary_op = tf.merge_all_summaries(key=TRAINING_BATCH_SUMMARIES)
training_epoch_summary_op = tf.merge_all_summaries(key=TRAINING_EPOCH_SUMMARIES)
validation_batch_summary_op = tf.merge_all_summaries(key=VALIDATION_BATCH_SUMMARIES)
validation_epoch_summary_op = tf.merge_all_summaries(key=VALIDATION_EPOCH_SUMMARIES)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
| tensorflow.merge_all_summaries | 12,443 |
import tensorflow as tf
clf_h = tf.reshape(h, [-1, n_embd])
pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32)
clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx)
clf_h = tf.reshape(clf_h, [-1, 2, n_embd])
if train and clf_pdrop > 0:
shape = shape_list(clf_h)
shape[1] = 1
clf_h = tf.nn.dropout(clf_h, 1-clf_pdrop, shape)
clf_h = tf.reshape(clf_h, [-1, n_embd])
clf_logits = clf(clf_h, 1, train=train)
clf_logits = tf.reshape(clf_logits, [-1, 2])
clf_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=clf_logits, labels=Y)
return clf_logits, clf_losses, lm_losses
def mgpu_train(*xs):
gpu_ops = []
| tensorflow.reshape | 12,444 |
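The pooling above flattens [batch, n_ctx, n_embd] to [batch * n_ctx, n_embd] and gathers row b * n_ctx + pool_idx[b], i.e. the hidden state at each sequence's classifier token. A minimal sketch, assuming TensorFlow 1.x:

```python
import tensorflow as tf

batch, n_ctx, n_embd = 2, 4, 3
h = tf.reshape(tf.range(batch * n_ctx * n_embd, dtype=tf.float32),
               [batch, n_ctx, n_embd])
pool_idx = tf.constant([1, 3])  # position of the clf token per sequence

clf_h = tf.reshape(h, [-1, n_embd])
clf_h = tf.gather(clf_h, tf.range(batch, dtype=tf.int32) * n_ctx + pool_idx)

with tf.Session() as sess:
    print(sess.run(clf_h))  # rows h[0, 1] and h[1, 3]
```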
import tensorflow as tf
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.name_scope("Train"):
train_input = PTBInput(config=config, data=train_data, name="TrainInput")
with tf.variable_scope("Model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config, input_=train_input)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
tf.summary.scalar("Validation Loss", mvalid.cost)
with tf.name_scope("Test"):
test_input = PTBInput(
config=eval_config, data=test_data, name="TestInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mtest = PTBModel(is_training=False, config=eval_config,
input_=test_input)
| tensorflow.summary.scalar | 12,445 |
import tensorflow as tf
import tensorflow as tf
import numpy as np
try:
import cPickle
except:
import _pickle as cPickle
def relu(x, name, alpha):
if alpha > 0:
return tf.maximum(alpha * x, x, name=name)
else:
return tf.nn.relu(x, name=name)
def get_variable(name, shape, dtype, initializer, trainable=True, regularizer=None):
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.GLOBAL_VARIABLES])
| tensorflow.maximum | 12,446 |
import tensorflow as tf
z = tf.expand_dims(z, 0)
loc, scale = hyper_decoder(z)
return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])
locs, scales = tf.map_fn(loop_hyper_deocder, z_hats, dtype=(tf.float32, tf.float32),
parallel_iterations=1, back_prop=False)
lower_bound = 1e-9  # TODO
scales = tf.maximum(scales, lower_bound)
print("Hyper Decoder")
z_strings, z_min_v, z_max_v = entropy_bottleneck.compress(zs)
z_shape = tf.shape(zs)[:]
print("Entropy Encode (Hyper)")
| tensorflow.maximum | 12,447 |
import tensorflow as tf
return tf.matmul(hidden, w)
def build_loss(self):
cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1])
dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1)
gcut = tf.stop_gradient(self.g)
mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001
dcos = dot / mag
manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)
| tensorflow.stop_gradient | 12,448 |
import tensorflow as tf
loss: A `Tensor` of the same shape as `logits` with the component-wise loss.
other_outputs: An empty dictionary, for consistency.
Raises:
ValueError: If `surrogate_type` is not `xent` or `hinge`.
"""
with tf.name_scope(scope, 'roc_auc', [labels, logits, weights]):
# Convert inputs to tensors and standardize dtypes.
labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
# Create tensors of pairwise differences for logits and labels, and
# pairwise products of weights. These have shape
# [batch_size, batch_size, num_labels].
logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)
labels_difference = tf.expand_dims(labels, 0) - tf.expand_dims(labels, 1)
weights_product = tf.expand_dims(weights, 0) * tf.expand_dims(weights, 1)
signed_logits_difference = labels_difference * logits_difference
raw_loss = losses_utils.weighted_surrogate_loss(
labels=tf.ones_like(signed_logits_difference),
logits=signed_logits_difference,
surrogate_type=surrogate_type)
weighted_loss = weights_product * raw_loss
# Zero out entries of the loss where labels_difference is zero (so loss is only
| tensorflow.expand_dims | 12,449 |
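The comment above describes the pairwise-difference broadcast: expanding along axes 0 and 1 turns a [batch] vector into a [batch, batch] matrix of all pairwise differences. A minimal sketch, assuming TensorFlow 1.x:

```python
import tensorflow as tf

logits = tf.constant([0.2, 1.0, -0.5])
# Shapes [1, 3] and [3, 1] broadcast to [3, 3].
pairwise = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)

with tf.Session() as sess:
    print(sess.run(pairwise))
    # pairwise[i, j] == logits[j] - logits[i]
```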
import tensorflow as tf
# Re-initialize from the checkpoint so that you have the latest model weights.
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_0/': 'main_level/agent/online/network_0'})
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_1/': 'main_level/agent/online/network_1'})
# Create a new session with a new tf graph.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer()) # initialize the checkpoint.
# This is the node that will accept the input.
input_nodes = tf.get_default_graph().get_tensor_by_name('main_level/agent/main/online/' + \
'network_0/observation/observation:0')
# This is the node that will produce the output.
output_nodes = tf.get_default_graph().get_operation_by_name('main_level/agent/main/online/' + \
'network_1/ppo_head_0/policy')
# Save the model as a servable model.
tf.saved_model.simple_save(session=sess,
export_dir='model',
inputs={"observation": input_nodes},
outputs={"policy": output_nodes.outputs[0]})
# Move to the appropriate folder. Don't mind the directory, this just works.
# rl-cart-pole is the name of the model. Remember it.
shutil.move('model/', model_dir + '/model/tf-model/00000001/')
# EASE will pick it up and upload to the right path.
print("Success")
def _save_onnx_model(self):
| tensorflow.get_default_graph | 12,450 |
import tensorflow as tf
if FLAGS.write_to_disk:
image_write_ops = tf.write_file(
'%s/%s'% (FLAGS.eval_dir, 'conditional_gan.png'),
tf.image.encode_png(data_provider.float_image_to_uint8(
reshaped_img[0])))
# For unit testing, use `run_eval_loop=False`.
if not run_eval_loop: return
tf.contrib.training.evaluate_repeatedly(
FLAGS.checkpoint_dir,
hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
tf.contrib.training.StopAfterNEvalsHook(1)],
eval_ops=image_write_ops,
max_number_of_evaluations=FLAGS.max_number_of_evaluations)
def _get_generator_inputs(num_images_per_class, num_classes, noise_dims):
# Since we want a grid of numbers for the conditional generator, manually
# construct the desired class labels.
num_images_generated = num_images_per_class * num_classes
noise = tf.random_normal([num_images_generated, noise_dims])
labels = [lbl for lbl in range(num_classes) for _
| tensorflow.contrib.training.StopAfterNEvalsHook | 12,451 |
import tensorflow as tf
self.compute_shape(l2_shape[3], self.ff_pool_strides[1][2]),
final_dim]
else:
l2_shape = tf.identity(x_shape)
# Initialize hidden layer activities
| tensorflow.identity | 12,452 |
import tensorflow as tf
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1])
print(mtype, fig_obj_count, 1)
if mtype == 1:
values = sdf_values
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 4].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 4])
print(mtype, fig_obj_count, 2)
fig_obj_count += 1
| tensorflow.reshape | 12,453 |
import tensorflow as tf
self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
def _compute_loss(self):
def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2):
logits = tf.nn.sigmoid(logits)
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros)
neg_p_sub = array_ops.where(labels > zeros, zeros, logits)
cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(logits, 1e-8, 1.0)) \
- (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - logits, 1e-8, 1.0))
return tf.reduce_sum(cross_ent, 1)
start_label = tf.one_hot(self.start_label, tf.shape(self.logits1)[1], axis=1)
end_label = tf.one_hot(self.end_label, tf.shape(self.logits2)[1], axis=1)
if self.config.loss_type == 'cross_entropy':
start_loss = tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits1, labels=start_label)
end_loss = tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits2, labels=end_label)
self.loss = tf.reduce_mean(start_loss + end_loss)
else:
start_loss = focal_loss(tf.nn.softmax(self.logits1, -1), start_label)
end_loss = focal_loss(tf.nn.softmax(self.logits2, -1), end_label)
self.loss = tf.reduce_mean(start_loss + end_loss)
self.logger.info("loss type %s" % self.config.loss_type)
| tensorflow.shape | 12,454 |
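A minimal usage sketch (TensorFlow 1.x assumed) of the focal loss nested inside _compute_loss above; the (1 - p)^gamma factor down-weights well-classified examples relative to plain cross-entropy. This assumes focal_loss is available at call time:

```python
import tensorflow as tf

logits = tf.constant([[4.0, -4.0], [0.1, -0.1]])  # confident vs. uncertain row
labels = tf.constant([[1.0, 0.0], [1.0, 0.0]])

# focal_loss is the function defined above (sigmoid applied internally).
per_example = focal_loss(logits, labels, alpha=0.25, gamma=2)

with tf.Session() as sess:
    print(sess.run(per_example))  # the confident row contributes far less
```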
import tensorflow as tf
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
| tensorflow.FixedLenFeature | 12,455 |
import tensorflow as tf
meta_graph_def = slice_saver.export_meta_graph(filename)
with tf.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = tf.train.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
def _testGraphExtensionSave(self):
test_dir = self._TestDir("graph_extension")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Creates an inference graph.
# Hidden 1
images = tf.constant(1.2, tf.float32, shape=[100, 28])
with tf.name_scope("hidden1"):
weights = tf.Variable(
tf.truncated_normal([28, 128],
stddev=1.0 / math.sqrt(float(28))),
name="weights")
biases = tf.Variable(tf.zeros([128]),
name="biases")
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope("hidden2"):
| tensorflow.Graph | 12,456 |
import tensorflow as tf
with tf.variable_scope('soft_replacement'):
self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/' + current_time
self.summary_writer = tf.summary.FileWriter(train_log_dir, self.sess.graph)
def _build_net(self): # we use parameter sharing among agents
with tf.variable_scope(self.name):
# ------------------ all inputs ------------------------
self.S = tf.placeholder(tf.float32, [None, self.num_global_s], name='S') # input Global State
self.s = tf.placeholder(tf.float32, [None, self.num_s], name='s1') # input state for agent1
self.S_ = tf.placeholder(tf.float32, [None, self.num_global_s], name='S_') # input Next Global State
self.s_ = tf.placeholder(tf.float32, [None, self.num_s], name='s1_') # input next state for agent1
self.R = tf.placeholder(tf.float32, [None, ], name='R') # input Reward
self.a = tf.placeholder(tf.float32, [None, self.num_a], name='a') # input Action onehot for agent1
self.done = tf.placeholder(tf.float32, [None, ], name='done') # input Done info ???
self.q_m_ = tf.placeholder(tf.float32, [None, ], name='q_value_next_max')
| tensorflow.variable_scope | 12,457 |
from tensorflow.contrib import tpu as contrib_tpu
# Compute accuracy
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions,
weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss,
weights=is_real_example)
return {"matthew_corr": (mcc, tf.group(tp_op, tn_op, fp_op, fn_op)),
"eval_accuracy": accuracy, "eval_loss": loss,}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
predictions={
"probabilities": probabilities,
"predictions": predictions
},
scaffold_fn=scaffold_fn)
return output_spec
| tensorflow.contrib.tpu.TPUEstimatorSpec | 12,458 |
import tensorflow as tf
if i == num_gpu - 1:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
# weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
total_losses = total_losses + tf.add_n(regularization_losses)
tf.get_variable_scope().reuse_variables()
grads = optimizer.compute_gradients(total_losses)
if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:
grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)
tower_grads.append(grads)
self.log_printer(fcos, optimizer, global_step, tower_grads, total_loss_dict, num_gpu*cfgs.BATCH_SIZE, graph)
| tensorflow.get_variable_scope | 12,459 |
import tensorflow as tf
rf,
stride,
pad='VALID',
init_scale=1.0,
data_format='NHWC',
one_dim_bias=False):
channel_ax = 3
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, nf]
bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
nin = x.get_shape()[channel_ax].value
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable(
"w", wshape, initializer=self.ortho_init(init_scale))
b = tf.get_variable(
"b",
bias_var_shape,
initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(
x,
w,
| tensorflow.variable_scope | 12,460 |
import tensorflow as tf
input_eval = tf.expand_dims(input_eval, 0)
predictions, hidden = model(input_eval, hidden)
predicted_id = tf.argmax(predictions[-1]).numpy()
start_string += " " + idx2word[predicted_id]
out_string += " " + idx2word[predicted_id]
| tensorflow.argmax | 12,461 |
import tensorflow as tf
# We do not allow the loss to become negative.
cost = tf.where(cost > 0, cost, tf.zeros_like(cost), name='value')
return cost
| tensorflow.where | 12,462 |
import tensorflow as tf
pred = tf.argmax(logits, 1)
pred = tf.cast(tf.reshape(pred, [-1]), tf.int64)
| tensorflow.reshape | 12,463 |
import tensorflow as tf
utils.add_gradient_summary(grad, var)
return optimizer.apply_gradients(grads)
def main(argv=None):
keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
#debug
annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation")
# annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")
| tensorflow.placeholder | 12,464 |
import tensorflow as tf
minval=0,
maxval=self.config.n_classes,
dtype=tf.int32)
global_step = tf.Variable(0., trainable=False)
model = revnet.RevNet(config=config)
_, saved_hidden = model(x)
| tensorflow.Variable | 12,465 |
import tensorflow as tf
model_io_fn = model_io.ModelIO(model_io_config)
tvars = model_io_fn.get_params(model_config.scope,
not_storage_params=not_storage_params)
print(tvars)
if load_pretrained == "yes":
model_io_fn.load_pretrained(tvars,
init_checkpoint,
exclude_scope=exclude_scope)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer_fn = optimizer.Optimizer(opt_config)
model_io_fn.print_params(tvars, string=", trainable params")
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer_fn.get_train_op(loss, tvars,
opt_config.init_lr,
opt_config.num_train_steps,
**kargs)
model_io_fn.set_saver()
if kargs.get("task_index", 1) == 0 and kargs.get("run_config", None):
training_hooks = []
elif kargs.get("task_index", 1) == 0:
model_io_fn.get_hooks(kargs.get("checkpoint_dir", None),
kargs.get("num_storage_steps", 1000))
| tensorflow.get_collection | 12,466 |
import tensorflow as tf
def evaluate(defun=False):
model = mnist.create_model(data_format())
dataset = random_dataset()
if defun:
model.call = tfe.defun(model.call)
with tf.device(device()):
mnist_eager.test(model, dataset)
class MNISTTest(tf.test.TestCase):
"""Run tests for MNIST eager loop."""
def setUp(self):
if not keras_utils.is_v2_0():
tf.compat.v1.enable_v2_behavior()
super(MNISTTest, self).setUp()
def test_train(self):
train(defun=False)
def test_evaluate(self):
evaluate(defun=False)
def test_train_with_defun(self):
train(defun=True)
def test_evaluate_with_defun(self):
evaluate(defun=True)
| tensorflow.compat.v1.enable_v2_behavior | 12,467 |
import tensorflow as tf
def export_params(output_dir, name, params):
if not tf.gfile.Exists(output_dir):
tf.gfile.MkDir(output_dir)
| tensorflow.gfile.MkDir | 12,468 |
import tensorflow as tf
dtype=tf.float32)
self.assertAllClose(ious.numpy(), expected_ious.numpy())
def test_instance_non_maximum_suppression_1d_scores(self):
mask0 = tf.constant([[1, 0],
[0, 1]], dtype=tf.float32)
mask1 = tf.constant([[1, 1],
[0, 1]], dtype=tf.float32)
| tensorflow.constant | 12,469 |
import tensorflow as tf
broadcast_mean = tf.reshape(mean, target_shape)
broadcast_var = tf.reshape(var, target_shape)
broadcast_gamma = tf.reshape(gamma, target_shape)
broadcast_beta = tf.reshape(beta, target_shape)
normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
| tensorflow.reshape | 12,470 |
from tensorflow.python.ops import math_ops
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
x = self._assert_valid_sample(x, check_integer=False)
return math_ops.igammac(math_ops.floor(x + 1), self.rate)
def _log_normalization(self):
return self.rate
def _log_unnormalized_prob(self, x):
x = self._assert_valid_sample(x, check_integer=True)
return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)
def _mean(self):
return array_ops.identity(self.rate)
def _variance(self):
return array_ops.identity(self.rate)
@distribution_util.AppendDocstring(
"""Note: when `rate` is an integer, there are actually two modes: `rate`
and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
return math_ops.floor(self.rate)
| tensorflow.python.ops.math_ops.log | 12,471 |
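Putting the pieces above together, the Poisson log-pmf is log p(x) = x * log(rate) - lgamma(x + 1) - rate, i.e. the unnormalized term minus the log normalizer. A minimal numeric check, assuming TensorFlow 1.x:

```python
import tensorflow as tf

rate = tf.constant(3.0)
x = tf.constant([0.0, 1.0, 2.0])
log_prob = x * tf.log(rate) - tf.lgamma(x + 1.0) - rate

with tf.Session() as sess:
    print(sess.run(tf.exp(log_prob)))  # [~0.0498, ~0.1494, ~0.2240]
```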
import tensorflow as tf
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
| tensorflow.contrib.tpu.TPUEstimator | 12,472 |
from tensorflow.python.framework import ops
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a`.
"""
with ops.op_scope([a, b], name, "MatMul") as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
if a.dtype == types.float32 and (a_is_sparse or b_is_sparse):
return sparse_matmul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
| tensorflow.python.framework.ops.convert_to_tensor | 12,473 |
import tensorflow as tf
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def conv_to_fc(x):
nh = np.prod([v.value for v in x.get_shape()[1:]])
x = tf.reshape(x, [-1, nh])
return x
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
| tensorflow.tanh | 12,474 |
import tensorflow as tf
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
| tensorflow.cond | 12,475 |
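The snippet implements epsilon-greedy action selection: tf.where picks per-row between random and greedy actions, while tf.cond switches the whole batch between stochastic and deterministic modes. A minimal self-contained sketch, assuming TensorFlow 1.x:

```python
import tensorflow as tf

batch_size, n_actions, eps = 4, 3, 0.1
q_values = tf.random_normal([batch_size, n_actions])
deterministic_actions = tf.argmax(q_values, axis=1)  # int64
random_actions = tf.random_uniform([batch_size], minval=0,
                                   maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform([batch_size], minval=0, maxval=1,
                                 dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions,
                              deterministic_actions)

stochastic_ph = tf.placeholder(tf.bool, [])
output_actions = tf.cond(stochastic_ph,
                         lambda: stochastic_actions,
                         lambda: deterministic_actions)
```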
import tensorflow as tf
'parameter_server, replicated, distributed_replicated, independent'))
tf.flags.DEFINE_boolean(
'use_nccl', True,
'Whether to use nccl all-reduce primitives where possible')
# Distributed training flags.
tf.flags.DEFINE_string('job_name', '',
'One of "ps", "worker", "". Empty for local training')
tf.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts')
tf.flags.DEFINE_string('worker_hosts', '',
'Comma-separated list of target hosts')
tf.flags.DEFINE_integer('task_index', 0, 'Index of task within the job')
tf.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers')
tf.flags.DEFINE_boolean('cross_replica_sync', True, '')
# Summary and Save & load checkpoints.
| tensorflow.flags.DEFINE_string | 12,476 |
import tensorflow as tf
with tf.name_scope(clone.scope):
clone_loss, clone_grad = _optimize_clone(
optimizer, clone, num_clones, regularization_losses, **kwargs)
if clone_loss is not None:
clones_losses.append(clone_loss)
grads_and_vars.append(clone_grad)
# Only use regularization_losses for the first clone
regularization_losses = None
# Compute the total_loss summing all the clones_losses.
total_loss = tf.add_n(clones_losses, name='total_loss')
# Sum the gradients accross clones.
grads_and_vars = _sum_clones_gradients(grads_and_vars)
return total_loss, grads_and_vars
def deploy(config,
model_fn,
args=None,
| tensorflow.add_n | 12,477 |
import tensorflow as tf
print(pred_Y)
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=pred_Y, labels=Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
| tensorflow.train.AdamOptimizer | 12,478 |
import tensorflow as tf
def loss_function(logits, labels):
# global cross_entropy # HACK TESTING
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
| tensorflow.reduce_mean | 12,479 |
import tensorflow as tf
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
| tensorflow.matmul | 12,480 |
import tensorflow as tf
TensorFlow works with the (y_min, x_min, y_max, x_max) order, while we work
with (x_min, y_min, x_max, y_max).
While both encodings have their advantages and disadvantages, we decided to
use (x_min, y_min, x_max, y_max), forcing us to switch to TensorFlow's
order every time we want to use a standard function that handles bounding
boxes.
Args:
bboxes: A Tensor of shape (total_bboxes, 4)
Returns:
bboxes: A Tensor of shape (total_bboxes, 4) with the order swapped.
"""
with tf.name_scope('BoundingBoxTransform/change_order'):
first_min, second_min, first_max, second_max = tf.unstack(
bboxes, axis=1
)
bboxes = tf.stack(
[second_min, first_min, second_max, first_max], axis=1
)
return bboxes
if __name__ == '__main__':
import numpy as np
bboxes = tf.placeholder(tf.float32)
bboxes_val = [[10, 10, 20, 22]]
| tensorflow.unstack | 12,481 |
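A minimal usage sketch (TensorFlow 1.x assumed) of the function whose body appears above; its def line is not shown, so the name `change_order` is taken from its name scope and is hypothetical here. Applying it twice is a no-op:

```python
import tensorflow as tf

bboxes = tf.placeholder(tf.float32, [None, 4])
swapped = change_order(bboxes)  # hypothetical name, from the name scope above

with tf.Session() as sess:
    print(sess.run(swapped, feed_dict={bboxes: [[10, 10, 20, 22]]}))
    # [[10. 10. 22. 20.]] -- x and y swapped in both the min and max pairs
```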
import tensorflow as tf
| tensorflow.shape | 12,482 |
import tensorflow as tf
Returns:
loss: Loss tensor of type float.
"""
with tf.name_scope('segment_loss'):
# logits = tf.reshape(logits, (-1, num_classes))
epsilon = tf.constant(value=1e-7)
labels = tf.to_float(labels)
# labels = tf.to_float(tf.reshape(labels, (-1, num_classes)))
softmax = tf.nn.softmax(logits) + epsilon
| tensorflow.constant | 12,483 |
import tensorflow as tf
def generate_seq2seq_mask(attention_mask, mask_sequence, seq_type, **kargs):
if seq_type == 'seq2seq':
if mask_sequence is not None:
seq_shape = get_shape_list(mask_sequence, expected_rank=2)
seq_len = seq_shape[1]
ones = tf.ones((1, seq_len, seq_len))
a_mask = tf.matrix_band_part(ones, -1, 0)
s_ex12 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 2)
s_ex13 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 3)
a_mask = (1 - s_ex13) * (1 - s_ex12) + s_ex13 * a_mask
# generate mask of batch x seq_len x seq_len
a_mask = tf.reshape(a_mask, (-1, seq_len, seq_len))
out_mask = attention_mask * a_mask
else:
ones = tf.ones_like(attention_mask[:1])
mask = (tf.matrix_band_part(ones, -1, 0))
| tensorflow.expand_dims | 12,484 |
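In the seq2seq branch above, tf.matrix_band_part(ones, -1, 0) keeps the lower triangle, i.e. a causal mask where position i may attend only to positions <= i. A minimal sketch, assuming TensorFlow 1.x:

```python
import tensorflow as tf

seq_len = 4
ones = tf.ones((1, seq_len, seq_len))
# num_lower=-1 keeps everything below the diagonal; num_upper=0 drops
# everything above it.
a_mask = tf.matrix_band_part(ones, -1, 0)

with tf.Session() as sess:
    print(sess.run(a_mask)[0])
    # [[1. 0. 0. 0.]
    #  [1. 1. 0. 0.]
    #  [1. 1. 1. 0.]
    #  [1. 1. 1. 1.]]
```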
import tensorflow as tf
mvn = tfd.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
self.assertFalse(tf.contrib.util.constant_value(mvn.is_scalar_event()))
self.assertFalse(tf.contrib.util.constant_value(mvn.is_scalar_batch()))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = tf.placeholder_with_default(input=1, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.shape, lambda: None))
self.assertTrue(
normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x)))
x = tf.placeholder_with_default(input=[1], shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.shape, lambda: None))
self.assertFalse(
normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x)))
# There's no notion of partially known shapes in eager mode, so exit
# early.
if tf.executing_eagerly():
return
# Test case 3.
x = tf.placeholder_with_default(input=1, shape=None)
is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x))
| tensorflow.placeholder_with_default | 12,485 |
import tensorflow as tf
[inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]), m2, m3
],
axis=3)
centered_inputs = inputs - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_inputs + 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
min_in = inv_stdv * (centered_inputs - 1. / 255.)
cdf_min = tf.nn.sigmoid(min_in)
log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
log_one_minus_cdf_min = -tf.nn.softplus(min_in)
cdf_delta = cdf_plus - cdf_min
mid_in = inv_stdv * centered_inputs
log_pdf_mid = mid_in - log_scales - 2. * tf.nn.softplus(mid_in)
log_probs = tf.select(
inputs < -0.999, log_cdf_plus,
tf.select(
inputs > 0.999, log_one_minus_cdf_min,
tf.select(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)),
log_pdf_mid - np.log(127.5))))
| tensorflow.nn.softplus | 12,486 |
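The log_cdf_plus and log_one_minus_cdf_min lines above rely on the numerically stable identity log(sigmoid(x)) = x - softplus(x) = -softplus(-x). A minimal check, assuming TensorFlow 1.x:

```python
import tensorflow as tf

x = tf.constant([-10.0, 0.0, 10.0])
naive = tf.log(tf.nn.sigmoid(x))   # underflows for large negative x
stable = x - tf.nn.softplus(x)     # equivalent, but numerically safe

with tf.Session() as sess:
    print(sess.run([naive, stable]))  # agree wherever the naive form is finite
```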
from tensorflow.python.platform import gfile
self.assertTrue(gfile.Exists(s3))
# Create a second helper, identical to the first.
save2 = tf.train.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = tf.train.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(gfile.Exists(s1))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
self.assertTrue(gfile.Exists(s3))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertTrue(gfile.Exists(s1))
| tensorflow.python.platform.gfile.Exists | 12,487 |
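A minimal sketch of the gfile existence checks the test performs; the path is illustrative:

from tensorflow.python.platform import gfile

path = "/tmp/gfile_demo.txt"
with gfile.GFile(path, "w") as f:
    f.write("hello")
print(gfile.Exists(path))   # True once the file is written
gfile.Remove(path)
print(gfile.Exists(path))   # False after removal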
import tensorflow as tf
if (rightmost_transposed_ndims is None) == (perm is None):
  raise ValueError('Must specify exactly one of '
                   '`rightmost_transposed_ndims` and `perm`.')
if rightmost_transposed_ndims is not None:
  rightmost_transposed_ndims = tf.convert_to_tensor(
      value=rightmost_transposed_ndims,
      dtype=np.int32,
      name='rightmost_transposed_ndims')
  rightmost_transposed_ndims_ = tf.get_static_value(
      rightmost_transposed_ndims)
  with tf.control_dependencies(_maybe_validate_rightmost_transposed_ndims(
      rightmost_transposed_ndims, validate_args)):
    rightmost_transposed_ndims = tf.identity(rightmost_transposed_ndims)
  perm = tf.range(
      start=rightmost_transposed_ndims - 1,
      limit=-1,
| tensorflow.get_static_value | 12,488 |
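A small sketch of tf.get_static_value (assuming TF >= 1.14), which returns a tensor's value when it is computable at graph-construction time and None otherwise:

import tensorflow as tf

a = tf.constant(3)
b = tf.placeholder(tf.int32, [])
print(tf.get_static_value(a))   # 3
print(tf.get_static_value(b))   # None -- only known at run time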
import tensorflow as tf
@layer
def embedding_layer(tensor, vocab_size=None, embedding_dim=None, embedding_matrix=None, **opts):
    if embedding_matrix is None:
        initializer = tf.contrib.layers.xavier_initializer(uniform=True)
        embedding_matrix = tf.get_variable("embedding_matrix", initializer=initializer(shape=(vocab_size, embedding_dim)))
    out = tf.nn.embedding_lookup(embedding_matrix, tensor)
    return out


@layer
def recurrent_layer(tensor, cell=None, hidden_dims=128, sequence_length=None, decoder_fn=None,
                    activation=tf.nn.tanh, initializer=tf.orthogonal_initializer(), initial_state=None,
                    keep_prob=1.0,
                    return_final_state=False, return_next_cell_input=True, **opts):
    if cell is None:
        cell = tf.contrib.rnn.BasicRNNCell(hidden_dims, activation=activation)
        # cell = tf.contrib.rnn.LSTMCell(hidden_dims, activation=activation)
    if keep_prob < 1.0:
        keep_prob = _global_keep_prob(keep_prob)
        cell = tf.contrib.rnn.DropoutWrapper(cell, keep_prob, keep_prob)
    if opts.get("name"):
        tf.add_to_collection(opts.get("name"), cell)
| tensorflow.orthogonal_initializer | 12,489 |
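A minimal sketch (TF 1.x) of tf.orthogonal_initializer, used above for RNN weights; an orthogonally initialized square matrix times its transpose is approximately the identity:

import tensorflow as tf

w = tf.get_variable("w", shape=[4, 4], initializer=tf.orthogonal_initializer())
check = tf.matmul(w, w, transpose_b=True)   # ~ identity matrix
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(check))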
import tensorflow as tf
tf.set_random_seed(random_seed)  # make results reproducible
input_size_x += input_size_y

"""Define the graph inputs"""
batch_size = tf.placeholder(tf.int32, [], name='batch_size')
x = tf.placeholder(tf.float32, [None, num_steps, input_size_x], name='x')
y = tf.placeholder(tf.float32, [None, num_steps, input_size_y], name='y')
input_prob = tf.placeholder(tf.float32, name='input_prob')
state_prob = tf.placeholder(tf.float32, name='state_prob')
output_prob = tf.placeholder(tf.float32, name='output_prob')
rnn_inputs = x

"""Define a single cell with variational dropout"""
def get_a_cell(state_size, input_prob, state_prob, num_input):
    if cell_type == 'LSTM':
| tensorflow.placeholder | 12,490 |
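A minimal sketch (TF 1.x) of feeding placeholders like the ones defined above; the shapes are illustrative:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 3, 2], name='x')
total = tf.reduce_sum(x)
with tf.Session() as sess:
    batch = np.ones((4, 3, 2), dtype=np.float32)
    print(sess.run(total, feed_dict={x: batch}))   # 24.0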
import tensorflow as tf
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
                        name='x')
| tensorflow.placeholder | 12,491 |
from tensorflow.python.ops import parsing_ops
  image = image_ops.resize_bilinear(image, [height, width])
  return array_ops.squeeze(image, [0])


def _create_tfrecord_dataset(tmpdir):
  if not gfile.Exists(tmpdir):
    gfile.MakeDirs(tmpdir)

  data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)

  keys_to_features = {
      'image/encoded':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value=''),
      'image/format':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value='jpeg'),
      'image/class/label':
          parsing_ops.FixedLenFeature(
              shape=[1],
              dtype=dtypes.int64,
              default_value=array_ops.zeros(
                  [1], dtype=dtypes.int64))
  }

  items_to_handlers = {
      'image': tfexample_decoder.Image(),
      'label': tfexample_decoder.Tensor('image/class/label'),
  }
| tensorflow.python.ops.parsing_ops.FixedLenFeature | 12,492 |
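A minimal sketch (TF 1.x) of parsing a serialized tf.train.Example with FixedLenFeature specs like those above; the feature name is illustrative:

import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
}))
features = tf.parse_single_example(
    example.SerializeToString(),
    {'label': tf.FixedLenFeature(shape=[1], dtype=tf.int64, default_value=[0])})
with tf.Session() as sess:
    print(sess.run(features['label']))   # [7]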
import tensorflow as tf
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)

res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)


# pylint: disable=unused-variable,invalid-name
def testDynamicAttentionDecoderStateIsTuple(self):
  with self.test_session() as sess:
    with tf.variable_scope(
        "root", initializer=tf.constant_initializer(0.5)):
      cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
      cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
                                         state_is_tuple=True)
      inp = tf.constant(0.5, shape=[2, 2, 2])
      enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
      attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
                                  for e in enc_outputs])
      dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
      dec, mem = tf.nn.seq2seq.attention_decoder(
          dec_inp, enc_state,
          attn_states, cell, output_size=4)
      sess.run([tf.global_variables_initializer()])
      res = sess.run(dec)
| tensorflow.constant_initializer | 12,493 |
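A minimal sketch (TF 1.x) of tf.constant_initializer, used above to make the test weights deterministic:

import tensorflow as tf

with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
    w = tf.get_variable("w", shape=[2, 2])   # every entry starts at 0.5
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))   # [[0.5 0.5] [0.5 0.5]]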
from tensorflow.python.ops import state_ops
    predictions_idx=predictions_idx, labels=labels, class_id=class_id,
    weights=weights)
batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))

var = contrib_variables.local_variable(
    array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fn, name='update')


def streaming_mean_absolute_error(predictions, labels, weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
| tensorflow.python.ops.state_ops.assign_add | 12,494 |
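A minimal sketch (TF 1.x) of the value/update pattern above, where a non-trainable variable accumulates a running total through assign_add:

import tensorflow as tf

total = tf.Variable(0.0, trainable=False, name='total')
batch_value = tf.placeholder(tf.float32, [])
update_op = tf.assign_add(total, batch_value)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_op, feed_dict={batch_value: 3.0})
    sess.run(update_op, feed_dict={batch_value: 4.0})
    print(sess.run(total))   # 7.0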
from tensorflow.contrib.layers.python.layers import utils
          shifted_sum_x,
          shifted_sum_x2,
          shift,
          name="normalize_moments")
      second_moment = variance + tf.square(mean)
      return mean, variance, second_moment

    def build_moving_stats():
      return (
          tf.identity(self._moving_mean),
          tf.identity(self._moving_variance),
          tf.identity(self._moving_second_moment),
      )

    mean, variance, second_moment = utils.smart_cond(
        use_batch_stats,
        build_batch_stats,
        build_moving_stats,
    )

    return mean, variance, second_moment

  def _build_update_ops_variance(self, mean, variance, is_training):
    """Builds the moving average update ops when using moving variance.

    Args:
      mean: The mean value to update with.
      variance: The variance value to update with.
      is_training: Boolean Tensor to indicate if we're currently in
| tensorflow.contrib.layers.python.layers.utils.smart_cond | 12,495 |
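A minimal sketch (TF 1.x, tf.contrib assumed available) of smart_cond: it picks a branch statically when the predicate is a compile-time constant and falls back to tf.cond otherwise:

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import utils

pred = tf.placeholder(tf.bool, [])
out = utils.smart_cond(pred,
                       lambda: tf.constant(1.0),    # taken when pred is True
                       lambda: tf.constant(-1.0))   # taken when pred is False
with tf.Session() as sess:
    print(sess.run(out, feed_dict={pred: True}))    # 1.0
    print(sess.run(out, feed_dict={pred: False}))   # -1.0

When the predicate is a plain Python bool, only the chosen branch is ever built, which is why the batch-norm code above passes builder functions rather than tensors.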
from tensorflow.python.framework import ops
  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.op_scope([value, bias], name, "BiasAdd") as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name)


ops.RegisterShape("BiasAdd")(common_shapes.bias_add_shape)
| tensorflow.python.framework.ops.convert_to_tensor | 12,496 |
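A minimal sketch of the convert_to_tensor coercion the op wrapper above performs; tf.convert_to_tensor is the public alias (TF 1.x):

import numpy as np
import tensorflow as tf

value = tf.convert_to_tensor([[1.0, 2.0]], name="input")
bias = tf.convert_to_tensor(np.float32(0.5), dtype=value.dtype, name="bias")
with tf.Session() as sess:
    print(sess.run(value + bias))   # [[1.5 2.5]]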
from tensorflow.python.ops import array_ops
  nn_activations.append(
      layers.fully_connected(
          nn_activations[-1],
          self.params.layer_size))

nn_activations_tensor = array_ops.concat(
    nn_activations, 1, name="flattened_nn_activations")

return nn_activations_tensor
| tensorflow.python.ops.array_ops.concat | 12,497 |
import tensorflow as tf
self.assertEqual(model.tasks[0], model.GetTask())
self.assertEqual(model.tasks[0], model.SampleTask(None))


def testExponentialMovingAverage(self):
  p = base_model.SingleTaskModel.Params()
  p.task = BaseTaskTest.TestParams()
  p.task.input = base_input_generator.BaseSequenceInputGenerator.Params()
  p.train.ema_decay = 0.9
  model = p.cls(p)
  model._task.CreateChild('a',
                          layers.BatchNormLayer.Params().Set(name='a', dim=1))
  model._task._train_op = tf.no_op()
  model._task.ApplyExponentialMovingAverage(model.ema)
  with tf.variable_scope('', reuse=True):
    beta = tf.get_variable('a/beta/var')
    mean = tf.get_variable('a/moving_mean/var')
  self.assertIsNotNone(model.ema.average(beta))
  self.assertIsNone(model.ema.average(mean))


class MultiTaskModelTest(tf.test.TestCase):
| tensorflow.no_op | 12,498 |
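A minimal sketch (TF 1.x) of tf.no_op, used above as a do-nothing train op; it also serves as a control-dependency anchor:

import tensorflow as tf

a = tf.Variable(1.0)
update = tf.assign_add(a, 1.0)
with tf.control_dependencies([update]):
    train_op = tf.no_op(name='train')   # running it forces the update first
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(a))   # 2.0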
import tensorflow as tf
logit_probs = predictions[:, :, :, :nr_mix]
predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3])
means = predictions[:, :, :, :, :nr_mix]
log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.)
coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix])
inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix])
m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :],
                [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
m3 = tf.reshape(
    means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] +
    coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :],
    [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
| tensorflow.reshape | 12,499 |
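A small sketch (TF 1.x) of the reshape-plus-zeros idiom above, which tiles a tensor along a new mixture axis via broadcasting:

import tensorflow as tf

x = tf.constant([[1.0, 2.0]])   # shape [1, 2]
nr_mix = 3
tiled = tf.reshape(x, [1, 2, 1]) + tf.zeros([1, 2, nr_mix])
with tf.Session() as sess:
    print(sess.run(tiled).shape)   # (1, 2, 3)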