seed | seed_api | index
---|---|---
from tensorflow.python.ops import gen_math_ops
\(y = |x|\).
See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex
number.
Args:
x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same size and type as `x` with absolute values.
"""
with ops.op_scope([x], name, "Abs") as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype == types.complex64:
return gen_math_ops.complex_abs(x, name=name)
return gen_math_ops._abs(x, name=name)
def pow(x, y, name=None):
"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \(x^y\) for
corresponding elements in `x` and `y`. For example:
```
# tensor 'x' is [[2, 2], [3, 3]]
# tensor 'y' is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
|
tensorflow.python.ops.gen_math_ops.complex_abs
| 3,400 |
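The row above targets the low-level `gen_math_ops` wrappers behind `tf.abs` and `tf.pow`. A minimal usage sketch, assuming a TF 1.x session and mirroring the docstring values:
```
import tensorflow as tf  # TF 1.x assumed

x = tf.constant([-1.5, 2.0, -3.0])
y = tf.constant([[2, 2], [3, 3]])
p = tf.constant([[8, 16], [2, 3]])
with tf.Session() as sess:
    print(sess.run(tf.abs(x)))     # [1.5 2.  3. ]
    print(sess.run(tf.pow(y, p)))  # [[  256 65536] [    9    27]]
```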
from tensorflow.python.platform import gfile
# Check that s1 is still here, but s2 is gone.
self.assertTrue(gfile.Exists(s1))
self.assertFalse(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s3))
self.assertTrue(gfile.Exists(s4))
|
tensorflow.python.platform.gfile.Exists
| 3,401 |
import tensorflow as tf
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tf.summary.scalar("before_loss", model.before_loss)
tf.summary.scalar("after_loss", model.after_loss)
if hparams.predict_linear:
tf.summary.scalar("linear_loss", model.linear_loss)
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])
tf.summary.scalar("regularization_loss", model.regularization_loss)
tf.summary.scalar("stop_token_loss", model.stop_token_loss)
|
tensorflow.summary.scalar
| 3,402 |
from tensorflow.contrib.framework.python.framework import checkpoint_utils
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
|
tensorflow.contrib.framework.python.framework.checkpoint_utils.load_variable
| 3,403 |
import tensorflow as tf
stitch3_1, stitch3_2 = apply_cross_stitch(fc3_1, fc3_2)
else:
stitch3_1, stitch3_2 = fc3_1, fc3_2
dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training,
scope="dropout3_1")
dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob, is_training=is_training,
scope="dropout3_2")
output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1, activation_fn=None, scope="output_1")
output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2, activation_fn=None, scope="output_2")
with tf.variable_scope("loss"):
loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1))
loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses)
with tf.variable_scope("evaluation"):
accuracy_1 = tf.reduce_mean(tf.cast(tf.equal(
tf.argmax(output_1, axis=-1),
tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1")
accuracy_2 = tf.reduce_mean(tf.cast(tf.equal(
tf.argmax(output_2, axis=-1),
|
tensorflow.variable_scope
| 3,404 |
import tensorflow as tf
if full_output_cov:
fvar = (
tf.matrix_diag(tf.tile((eKff - tf.trace(Li_eKuffu_Lit))[:, None], [1, num_func])) +
tf.matrix_diag(tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov)) +
|
tensorflow.trace
| 3,405 |
import tensorflow as tf
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
sequence_length=self.seq_lens)
|
tensorflow.variable_scope
| 3,406 |
import tensorflow as tf
self.summary_tags += [tag]
self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag)
self.summary_ops[tag] = tf.summary.scalar(tag, self.summary_placeholders[tag])
for tag, shape in self.images_summary_tags:
self.summary_tags += [tag]
self.summary_placeholders[tag] = tf.placeholder('float32', shape, name=tag)
self.summary_ops[tag] = tf.summary.image(tag, self.summary_placeholders[tag], max_outputs=10)
def add_summary(self, step, summaries_dict=None, summaries_merged=None):
"""
|
tensorflow.placeholder
| 3,407 |
import tensorflow as tf
click_feature = [tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(4*list_size)]
click_feature[i] = tf.expand_dims(tf.ones_like(self.labels[i]) , -1)
# click_feature[list_size:]=[tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(3*list_size)]
click_feature[list_size:list_size+i] =[tf.expand_dims(self.labels[k] , -1) for k in range(i-1,-1,-1)]
click_feature[2*list_size:2*list_size+i+1]=[tf.expand_dims(self.types[k] , -1) for k in range(i,-1,-1)]
click_feature[3*list_size:3*list_size+list_size-i-1]=[tf.expand_dims(self.types[k] , -1) for k in range(i+1,list_size)]
# Predict propensity with a simple network
output_propensity_list.append(propensity_network(tf.concat(click_feature, 1), i))
|
tensorflow.expand_dims
| 3,408 |
import tensorflow as tf
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
tf.flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
|
tensorflow.flags.DEFINE_string
| 3,409 |
import tensorflow as tf
normalizer_params={"is_training": self.train}):
self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"])
self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None)
# Compute loss
with tf.name_scope("loss"):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))
# Optimizer
with tf.name_scope("training_op"):
self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Perf metrics
with tf.name_scope("accuracy"):
prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
|
tensorflow.name_scope
| 3,410 |
import tensorflow as tf
# self.imitation_loss = self.bc_loss #+ self.next_loc_loss_il
# Get gradients from local network using local losses and
# normalize the gradients using clipping
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope+'/qvalues')
self.gradients = tf.gradients(self.loss, local_vars)
self.var_norms = tf.global_norm(local_vars)
grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, GRAD_CLIP)
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GLOBAL_NET_SCOPE+'/qvalues')
self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
|
tensorflow.global_norm
| 3,411 |
import tensorflow as tf
# BGR to RGB (OpenCV uses BGR)
channels = tf.unstack(image, axis=-1)
image = tf.stack([channels[2], channels[1], channels[0]], axis=-1)
# dims for normalization
width = tf.to_float(tf.shape(image)[2])
height = tf.to_float(tf.shape(image)[1])
# from [x1, y1, x2, y2, cls] to normalized [y1, x1, y2, x2]
cols = tf.unstack(boxes, axis=1)
boxes = tf.stack([cols[1] / height,
cols[0] / width,
cols[3] / height,
cols[2] / width], axis=1)
# add batch dimension (assume batch_size==1)
#assert image.get_shape()[0] == 1
|
tensorflow.unstack
| 3,412 |
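The snippet above swaps BGR to RGB by unstacking the channel axis and restacking in reverse order. A self-contained sketch of the same trick (TF 1.x assumed):
```
import tensorflow as tf  # TF 1.x assumed

image = tf.placeholder(tf.float32, [1, None, None, 3])  # BGR image batch
channels = tf.unstack(image, axis=-1)
rgb = tf.stack([channels[2], channels[1], channels[0]], axis=-1)
# tf.reverse(image, axis=[-1]) achieves the same channel flip in one op.
```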
import tensorflow as tf
if cell is None:
cell = tf.contrib.rnn.BasicRNNCell(hidden_dims, activation=activation)
# cell = tf.contrib.rnn.LSTMCell(hidden_dims, activation=activation)
if keep_prob < 1.0:
keep_prob = _global_keep_prob(keep_prob)
cell = tf.contrib.rnn.DropoutWrapper(cell, keep_prob, keep_prob)
if opts.get("name"):
tf.add_to_collection(opts.get("name"), cell)
if decoder_fn is None:
outputs, final_state = tf.nn.dynamic_rnn(cell, tensor,
sequence_length=sequence_length, initial_state=initial_state, dtype=tf.float32)
final_context_state = None
else:
# TODO: turn off sequence_length?
outputs, final_state, final_context_state = seq2seq.dynamic_rnn_decoder(
cell, decoder_fn, inputs=None, sequence_length=sequence_length)
if return_final_state:
return final_state
else:
return outputs
|
tensorflow.nn.dynamic_rnn
| 3,413 |
import tensorflow as tf
# Optimizers
autoencoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(autoencoder_loss)
discriminator_g_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(dc_g_loss, var_list=dc_g_var)
discriminator_c_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(dc_c_loss, var_list=dc_c_var)
generator_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(generator_loss, var_list=en_var)
supervised_encoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(supervised_encoder_loss,
var_list=en_var)
init = tf.global_variables_initializer()
# Reshape images to display them
input_images = tf.reshape(x_input, [-1, 28, 28, 1])
generated_images = tf.reshape(decoder_output, [-1, 28, 28, 1])
# Tensorboard visualization
tf.summary.scalar(name='Autoencoder Loss', tensor=autoencoder_loss)
tf.summary.scalar(name='Discriminator gauss Loss', tensor=dc_g_loss)
tf.summary.scalar(name='Discriminator categorical Loss', tensor=dc_c_loss)
tf.summary.scalar(name='Generator Loss', tensor=generator_loss)
tf.summary.scalar(name='Supervised Encoder Loss', tensor=supervised_encoder_loss)
tf.summary.histogram(name='Encoder Gauss Distribution', values=encoder_output_latent)
|
tensorflow.global_variables_initializer
| 3,414 |
import tensorflow as tf
Returns:
A dictionary of tensors obtained by applying data augmentation ops to the
input tensor dictionary.
"""
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tf.cast(tensor_dict[fields.InputDataFields.image], dtype=tf.float32), 0)
include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
in tensor_dict)
include_keypoints = (fields.InputDataFields.groundtruth_keypoints
|
tensorflow.cast
| 3,415 |
import tensorflow as tf
dynamic_bstride=block_params.bstrides,
dynamic_boffset=block_params.boffset,
transpose=transpose)
# Convolution on patches.
if transpose:
q = tf.nn.conv2d(p, w, strides, 'VALID', data_format='NCHW', use_cudnn_on_gpu=True)
else:
q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True)
# Allocate output tensor.
if use_var:
y = sbnet_module.sparse_scatter_var(
q,
indices.bin_counts,
indices.active_block_indices,
|
tensorflow.nn.conv2d
| 3,416 |
import tensorflow as tf
for i in range(max_train_step):
batch_x, batch_gt = mnist.train.next_batch(batch_size)
sess.run(train_op, feed_dict={x: batch_x, gt: batch_gt})
if i % 100 == 0:
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(gt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('=> accuracy: {}'.format(sess.run(accuracy, feed_dict={x: mnist.test.images, gt: mnist.test.labels})))
saver.save(sess, 'mnist/mnist_{:02d}.ckpt'.format(int(i / 100) + 1))
|
tensorflow.argmax
| 3,417 |
import tensorflow as tf
return_dict["cls_logits"] = cls_logits
return return_dict
def get_race_loss(FLAGS, features, is_training):
"""Loss for downstream multi-choice QA tasks such as RACE."""
bsz_per_core = tf.shape(features["input_ids"])[0]
def _transform_features(feature):
out = tf.reshape(feature, [bsz_per_core, 4, -1])
out = tf.transpose(out, [2, 0, 1])
out = tf.reshape(out, [-1, bsz_per_core * 4])
return out
|
tensorflow.shape
| 3,418 |
import tensorflow as tf
y_permute_dim = list(range(get_ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = tf.reshape(x, [-1, x_shape[-1]])
yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return tf.reshape(
tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
out = tf.matmul(x, y)
return out
|
tensorflow.matmul
| 3,419 |
from tensorflow.contrib.framework import deprecated_arg_values
feed_fn=feed_fn,
batch_size=batch_size,
steps=steps,
metrics=custom_metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Returns predicted scores for given features.
Args:
|
tensorflow.contrib.framework.deprecated_arg_values
| 3,420 |
import tensorflow as tf
edge_type2dim=edge_type2dim,
placeholders=placeholders,
batch_size=FLAGS.batch_size,
margin=FLAGS.max_margin
)
print("Initialize session")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
feed_dict = {}
###########################################################
#
# Train model
#
###########################################################
|
tensorflow.global_variables_initializer
| 3,421 |
import tensorflow as tf
# Increment episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign_add(ref=self.episode_count, value=num_episodes)
# Increment memory index.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(
ref=self.episode_indices[-1],
value=tf.where(self.memory_index + num_instances > self.capacity,
self.episode_indices[self.episode_count - 1], self.capacity - 1)
)
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(ref=self.memory_index, value=((self.memory_index + num_instances) % self.capacity))
with tf.control_dependencies(control_inputs=(assignment,)):
return tf.no_op()
def tf_retrieve_indices(self, indices):
"""
Fetches experiences for given indices.
Args:
indices: Index tensor
|
tensorflow.control_dependencies
| 3,422 |
import tensorflow as tf
pool = tf.transpose(pool, [0,2,1])
# Linear
elif downsampling_type=='linear':
pool = tf.layers.conv1d(inputs=inputs, filters=inputs.get_shape()[2], kernel_size=3,
strides=2, padding='same', use_bias=False)
# Maxpooling
else:
pool = tf.layers.max_pooling1d(inputs=inputs, pool_size=3, strides=2, padding='same', name=name)
if optional_shortcut:
shortcut = tf.layers.conv1d(inputs=shortcut, filters=shortcut.get_shape()[2], kernel_size=1,
strides=2, padding='same', use_bias=False)
print("-"*5)
print("Optional Downsampling Shortcut:", shortcut.get_shape())
print("-"*5)
|
tensorflow.layers.max_pooling1d
| 3,423 |
from tensorflow.python.ops import nn_ops
return None, self._loss, sampled_words
def calculate_encoder_features(self, encoder_states, encoder_dim):
options = self.options
input_shape = tf.shape(encoder_states)
batch_size = input_shape[0]
passage_len = input_shape[1]
with variable_scope.variable_scope("attention_decoder"):
encoder_features = tf.expand_dims(encoder_states, axis=2) # now is shape [batch_size, passage_len, 1, encoder_dim]
W_h = variable_scope.get_variable("W_h", [1, 1, encoder_dim, options.attention_vec_size])
self.W_h = W_h
encoder_features = nn_ops.conv2d(encoder_features, W_h, [1, 1, 1, 1], "SAME") # [batch_size, passage_len, 1, attention_vec_size]
encoder_features = tf.reshape(encoder_features, [batch_size, passage_len, options.attention_vec_size])
return encoder_features
def decode_mode(self, word_vocab, beam_size, state_t_1, context_t_1, coverage_t_1, word_t,
encoder_states, encoder_features, passage_word_idx, passage_mask):
options = self.options
with variable_scope.variable_scope("attention_decoder"):
v = variable_scope.get_variable("v", [options.attention_vec_size])
v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
w_c = None
|
tensorflow.python.ops.nn_ops.conv2d
| 3,424 |
import tensorflow as tf
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
|
tensorflow.clip_by_norm
| 3,425 |
import tensorflow as tf
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = [x for ix, x in enumerate(target) if ix in train_indices]
target_test = [x for ix, x in enumerate(target) if ix in test_indices]
# Setup Index Matrix for one-hot-encoding
identity_mat = tf.diag(tf.ones(shape=[embedding_size]))
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[embedding_size,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
y_target = tf.placeholder(shape=[1, 1], dtype=tf.float32)
# Text-Vocab Embedding
x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
x_col_sums = tf.reduce_sum(x_embed, 0)
# Declare model operations
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)
# Declare loss function (Cross Entropy loss)
|
tensorflow.placeholder
| 3,426 |
from tensorflow.python.ops import math_ops
def _run_metrics(predictions, targets, metrics, weights):
result = {}
targets = math_ops.cast(targets, predictions.dtype)
for name, metric in six.iteritems(metrics or {}):
if "weights" in inspect.getargspec(metric)[0]:
result[name] = metric(predictions, targets, weights=weights)
|
tensorflow.python.ops.math_ops.cast
| 3,427 |
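`_run_metrics` uses `inspect.getargspec` to decide whether a metric accepts example weights. A hedged sketch of a metric signature that would take the weighted branch (the function name and body are hypothetical):
```
import inspect  # getargspec matches the snippet; it is removed in Python 3.11+

def weighted_accuracy(predictions, targets, weights=None):
    """Hypothetical metric; declaring `weights` makes _run_metrics forward them."""
    return None

print("weights" in inspect.getargspec(weighted_accuracy)[0])  # True
```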
import tensorflow as tf
new_mems = {mem_name: xlnet_model.get_new_memory()}
lookup_table = xlnet_model.get_embedding_table()
initializer = xlnet_model.get_initializer()
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
# LM loss
lm_loss = modeling.lm_loss(
hidden=output,
target=tgt,
|
tensorflow.variable_scope
| 3,428 |
import tensorflow as tf
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
|
tensorflow.train.Scaffold
| 3,429 |
import tensorflow as tf
A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)
A = tf.reduce_sum(phi_sampling(A0/(4*y), D))
B0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(Y, 0), tf.expand_dims(Y, 1)), axis=2)
B = tf.reduce_sum(phi_sampling(B0/(4*y), D))
C0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(Y, 1)), axis=2)
C = tf.reduce_sum(phi_sampling(C0/(4*y), D))
return T*(A + B - 2*C)
|
tensorflow.expand_dims
| 3,430 |
import tensorflow as tf
# we don't actually round to the nearest 8bit value when sampling
u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)
x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
x1 = tf.minimum(tf.maximum(
x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
x2 = tf.minimum(tf.maximum(
x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
return tf.concat([tf.reshape(x0, xs[:-1] + [1]), tf.reshape(x1, xs[:-1] + [1]), tf.reshape(x2, xs[:-1] + [1])], 3)
|
tensorflow.reshape
| 3,431 |
import tensorflow as tf
[batch_size],
minval=1,
maxval=nclass,
dtype=tf.int32,
name='synthetic_labels')
# Note: This results in a H2D copy, but no computation
# Note: This avoids recomputation of the random values, but still
# results in a H2D copy.
images = tf.contrib.framework.local_variable(images, name='images')
labels = tf.contrib.framework.local_variable(labels, name='labels')
# Change to 0-based (don't use background class like Inception does)
labels -= 1
if num_compute_devices == 1:
images_splits = [images]
labels_splits = [labels]
else:
images_splits = tf.split(images, num_compute_devices, 0)
labels_splits = tf.split(labels, num_compute_devices, 0)
|
tensorflow.contrib.framework.local_variable
| 3,432 |
import tensorflow as tf
st = tf.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
st_serialized = tf.serialize_many_sparse(st)
st_deserialized = tf.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
tf.global_variables_initializer().run()
st_roundtrip_values = sess.run(st_roundtrip)
st_deserialized_values = sess.run(st_deserialized)
|
tensorflow.deserialize_many_sparse
| 3,433 |
import tensorflow as tf
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
|
tensorflow.variable_scope
| 3,434 |
import tensorflow as tf
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
|
tensorflow.train.init_from_checkpoint
| 3,435 |
import tensorflow as tf
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')
with tf.variable_scope('target_q'):
self.target_q = R + self.gamma * self.q_
with tf.variable_scope('abs_TD'):
self.abs_td = tf.abs(self.target_q - self.q)
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('TD_error'):
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))
with tf.variable_scope('C_train'):
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)
with tf.variable_scope('a_grad'):
self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)
def _build_net(self, s, a, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
with tf.variable_scope('l1'):
n_l1 = 700
# combine the action and states together in this way
|
tensorflow.train.AdamOptimizer
| 3,436 |
import tensorflow as tf
opt = tf.train.GradientDescentOptimizer(learning_rate)
elif FLAGS.optimizer == 'rmsprop':
opt = tf.train.RMSPropOptimizer(learning_rate, FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.rmsprop_epsilon)
else:
raise ValueError('Optimizer "%s" was not recognized' % FLAGS.optimizer)
self.variable_mgr.append_apply_gradients_ops(
gradient_state, opt, clipped_grads, training_ops)
train_op = tf.group(*(training_ops + update_ops + extra_nccl_ops))
with tf.device(self.cpu_device):
if self.task_index == 0 and FLAGS.summary_verbosity > 0:
tf.summary.scalar('learning_rate', learning_rate)
tf.summary.scalar('total_loss', total_loss)
for grad, var in avg_grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
fetches = [train_op, total_loss] + enqueue_ops
return (enqueue_ops, fetches)
def add_forward_pass_and_gradients(
self, host_images, host_labels, nclass, phase_train, device_num,
input_data_type, data_type, input_nchan, use_synthetic_gpu_images,
gpu_copy_stage_ops, gpu_compute_stage_ops, gpu_grad_stage_ops):
|
tensorflow.summary.scalar
| 3,437 |
import tensorflow as tf
if FLAGS.ckpt_no is not None and not tf.gfile.Exists(path_ckpt):
with tf.gfile.GFile(path_ckpt, "w") as writer:
writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no)))
writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no)))
if FLAGS.ckpt_no_input is not None and not tf.gfile.Exists(path_ckpt_input):
with tf.gfile.GFile(path_ckpt_input, "w") as writer:
writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input)))
writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input)))
if FLAGS.use_hvd and hvd.rank() == 0 and (FLAGS.do_train or FLAGS.do_train_eval):
|
tensorflow.gfile.Exists
| 3,438 |
from tensorflow.python.framework import ops
]
cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units) # pylint: disable=cell-var-from-loop
multi_cell = rnn_cell.MultiRNNCell(
[cell() for _ in range(num_layers)])
outputs, final_state = core_rnn.static_rnn(
multi_cell, inputs, dtype=dtypes.float32)
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
training_op = control_flow_ops.group(*gradients)
self._BenchmarkOp(training_op, "tf_rnn_lstm_block_cell %s %s" %
(config_name, self._GetConfigDesc(config)))
|
tensorflow.python.framework.ops.get_collection
| 3,439 |
import tensorflow as tf
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
|
tensorflow.concat
| 3,440 |
import tensorflow as tf
if opt:
# we have to hardcode the max batch size here! use the batch size from the generator as this will be used for PG
N, CL = config.batch_size if not self.demo else config.batch_size, config.char_limit
self.c_maxlen = tf.reduce_max(self.c_len)
self.q_maxlen = tf.reduce_max(self.q_len)
self.c = tf.slice(self.c, [0, 0], [N, self.c_maxlen])
self.q = tf.slice(self.q, [0, 0], [N, self.q_maxlen])
self.c_mask = tf.slice(self.c_mask, [0, 0], [N, self.c_maxlen])
self.q_mask = tf.slice(self.q_mask, [0, 0], [N, self.q_maxlen])
self.ch = tf.slice(self.ch, [0, 0, 0], [N, self.c_maxlen, CL])
self.qh = tf.slice(self.qh, [0, 0, 0], [N, self.q_maxlen, CL])
self.y1 = tf.argmax(tf.slice(self.y1, [0, 0], [N, self.c_maxlen]),axis=-1)
self.y2 = tf.argmax(tf.slice(self.y2, [0, 0], [N, self.c_maxlen]),axis=-1)
else:
self.c_maxlen, self.q_maxlen = config.para_limit, config.ques_limit
self.ch_len = tf.reshape(tf.reduce_sum(
tf.cast(tf.cast(self.ch, tf.bool), tf.int32), axis=2), [-1])
self.qh_len = tf.reshape(tf.reduce_sum(
tf.cast(tf.cast(self.qh, tf.bool), tf.int32), axis=2), [-1])
self.forward()
|
tensorflow.slice
| 3,441 |
import tensorflow as tf
.FullyConnected('fct', out_dim=6, nl=tf.identity,
W_init=tf.constant_initializer(),
b_init=tf.constant_initializer([1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))())
# output 6 parameters for affine transformation
stn = tf.reshape(stn, [-1, 2, 3], name='affine') # bx2x3
stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1]) # 3 x (bx2)
coor = tf.reshape(tf.matmul(xys, stn),
[WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2])
|
tensorflow.reshape
| 3,442 |
import tensorflow as tf
eval_config.num_steps = 1
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale,
|
tensorflow.Graph
| 3,443 |
import tensorflow as tf
kl_sum = tf.reduce_sum(kl_sum)
assert_almost_equal(kl_sum.eval(), kl_batch.eval())
def tf_kl_1d(q_mu, q_sigma, p_var=1.0):
p_var = tf.ones_like(q_sigma) if p_var is None else p_var
q_var = tf.square(q_sigma)
kl = 0.5 * (q_var / p_var + tf.square(q_mu) / p_var - 1 + tf.log(p_var / q_var))
return tf.reduce_sum(kl)
@pytest.mark.parametrize('white', [True, False])
def test_oned(session_tf, white, mu, sqrt, K_batch):
"""
Check that the KL divergence matches a 1D by-hand calculation.
"""
|
tensorflow.reduce_sum
| 3,444 |
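`tf_kl_1d` implements the closed-form KL divergence between univariate Gaussians, \(KL(q\|p) = \tfrac{1}{2}(\sigma_q^2/\sigma_p^2 + \mu_q^2/\sigma_p^2 - 1 + \log(\sigma_p^2/\sigma_q^2))\). A NumPy restatement makes the by-hand check easy:
```
import numpy as np

def kl_1d(q_mu, q_sigma, p_var=1.0):
    q_var = q_sigma ** 2
    return 0.5 * (q_var / p_var + q_mu ** 2 / p_var - 1 + np.log(p_var / q_var))

print(kl_1d(0.0, 1.0))  # 0.0: q equals the standard-normal prior
print(kl_1d(1.0, 1.0))  # 0.5: shifting the mean by one std dev costs 1/2 nat
```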
import tensorflow as tf
self.labels = kwargs.pop("labels", "labels")
@classmethod
def from_tfrecord_files(cls, input_files, **kwargs) -> tf.data.Dataset:
dataset = utils.read_tfrecord_files(input_files, **kwargs)
d = cls(examples=None, **kwargs)
# parse example
features = {
d.input_ids: tf.io.VarLenFeature(tf.int64),
d.token_type_ids: tf.io.VarLenFeature(tf.int64),
d.attention_mask: tf.io.VarLenFeature(tf.int64),
d.labels: tf.io.VarLenFeature(tf.int64),
}
dataset = dataset.map(
lambda x: tf.io.parse_example(x, features),
num_parallel_calls=utils.AUTOTUNE,
|
tensorflow.io.VarLenFeature
| 3,445 |
import tensorflow as tf
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
def input_fn_builder(input_files,
max_seq_length,
|
tensorflow.range
| 3,446 |
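`gather_indexes` flattens the batch so a single `tf.gather` fetches all requested positions; each row's positions are offset by `row * seq_length`. A toy-sized sketch (TF 1.x assumed):
```
import tensorflow as tf  # TF 1.x assumed

sequence = tf.reshape(tf.range(2 * 4 * 3), [2, 4, 3])       # [batch=2, seq=4, width=3]
positions = tf.constant([[0, 2], [1, 3]])                    # per-example positions
flat_offsets = tf.reshape(tf.range(0, 2) * 4, [-1, 1])       # [[0], [4]]
flat_positions = tf.reshape(positions + flat_offsets, [-1])  # [0, 2, 5, 7]
gathered = tf.gather(tf.reshape(sequence, [8, 3]), flat_positions)  # shape [4, 3]
```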
import tensorflow as tf
types = (types, np.bool)
shapes = (shapes, tf.TensorShape([batch_size]))
|
tensorflow.TensorShape
| 3,447 |
import tensorflow as tf
# Get the indices of foreground and background anchors
rpn_select = tf.where(tf.not_equal(rpn_label, -1))
rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
rpn_cross_entropy = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))
# RPN, bbox loss
rpn_bbox_pred = self._predictions['rpn_bbox_pred']
rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']
|
tensorflow.nn.sparse_softmax_cross_entropy_with_logits
| 3,448 |
import tensorflow as tf
Returns:
l2_out: L2-normalized output tensor of shape [batch_size, 192]
Hint: Parameter reuse indicates whether the inference graph should use
parameter sharing or not. You can study how to implement parameter sharing
in TensorFlow from the following sources:
https://www.tensorflow.org/versions/r0.11/how_tos/variable_scope/index.html
"""
with tf.variable_scope('Siamese', reuse=reuse):
########################
# PUT YOUR CODE HERE #
########################
########################
logits = self.__forward_pass(x, reuse)
l2_out = tf.nn.l2_normalize(logits, dim=1)
########################
|
tensorflow.variable_scope
| 3,449 |
import tensorflow as tf
pi_dropout_mask_generator = DropoutMaskGenerator(pi_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
pi_dropout_mask_phs = pi_dropout_mask_generator.generate_dropout_mask_placeholders()
pi, pi_reg = mlp_variational(x, pi_dropout_mask_phs, list(hidden_sizes) + [act_dim], activation, output_activation, dropout_rate)
pi = act_limit * pi
with tf.variable_scope('q1'):
q1_in_ph = tf.concat([x, a], axis=-1)
q1_in_dim = q1_in_ph.shape.as_list()[1]
q1_dropout_mask_generator = DropoutMaskGenerator(q1_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
q1_dropout_mask_phs = q1_dropout_mask_generator.generate_dropout_mask_placeholders()
|
tensorflow.variable_scope
| 3,450 |
import tensorflow as tf
dilations=(1, dilation, dilation, 1))
x = x * mask_ratio
if use_bias:
bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
return x * update_mask
x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding=padding, name='zero-conv_' + id,
|
tensorflow.nn.bias_add
| 3,451 |
import tensorflow as tf
'use_ohkm', True,
'Whether to use OHKM for hard keypoints.')
tf.app.flags.DEFINE_string(
'data_format', 'channels_first', # 'channels_first' or 'channels_last'
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
# optimizer related configuration
tf.app.flags.DEFINE_integer(
'tf_random_seed', 20180417, 'Random seed for TensorFlow initializers.')
tf.app.flags.DEFINE_float(
'weight_decay', 1e-5, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'mse_weight', 1., 'The weight of the MSE loss.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')#1e-3
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.000001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'warmup_learning_rate', 0.00001,
'The start warm-up learning rate to avoid NAN.')
tf.app.flags.DEFINE_integer(
|
tensorflow.app.flags.DEFINE_float
| 3,452 |
import tensorflow as tf
repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
weights = repeat_op_sq / tf.to_float((num_ratings - 1)**2)
pred_ = predictions**y_pow
try:
pred_norm = pred_ / \
(eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
except Exception:
pred_norm = pred_ / \
(eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))
hist_rater_a = tf.reduce_sum(pred_norm, 0)
hist_rater_b = tf.reduce_sum(labels, 0)
conf_mat = tf.matmul(tf.transpose(pred_norm), labels)
nom = tf.reduce_sum(weights * conf_mat)
denom = tf.reduce_sum(weights * tf.matmul(
tf.reshape(hist_rater_a, [num_ratings, 1]), tf.reshape(hist_rater_b, [1, num_ratings])) /
tf.to_float(batch_size))
try:
|
tensorflow.reduce_sum
| 3,453 |
import tensorflow as tf
>>> m.optimize(X, Y, 'L-BFGS-B', disp=False, ftol=.0001)
message: 'SciPy optimizer completed successfully.'
success: True
x: array([...])
>>> print("m.a: {:.3f}".format(np.asscalar(m.a.value)))
m.a: 0.001
>>> print("m.b: {:.3f}".format(np.asscalar(m.b.value)))
m.b: 1.000
"""
X_key = X if isinstance(X, tf.Tensor) else None
Y_key = Y if isinstance(Y, tf.Tensor) else None
key = ("_Model__loss", X_key, Y_key)
if key not in self.cache:
X_tensor = (X if isinstance(X, tf.Tensor) else
tf.placeholder(tf.as_dtype(X.dtype)))
Y_tensor = (Y if isinstance(Y, tf.Tensor) else
tf.placeholder(tf.as_dtype(Y.dtype)))
self.cache[key] = (self._compile_loss(X_tensor, Y_tensor),
X_tensor, Y_tensor)
loss, X_tensor, Y_tensor = self.cache[key]
feed_dict = self.feed_dict
if not isinstance(X, tf.Tensor): feed_dict[X_tensor] = X
if not isinstance(Y, tf.Tensor): feed_dict[Y_tensor] = Y
variables = [p.free_state for p in self.params if not p.fixed]
variables = utils.unique(variables)
free_state = tf.concat(0, [tf.reshape(v, [-1]) for v in variables])
|
tensorflow.as_dtype
| 3,454 |
from tensorflow.python.ops import variables
burn_in_steps = 10
benchmark_steps = 40
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for i in xrange(burn_in_steps + benchmark_steps):
if i == burn_in_steps:
|
tensorflow.python.ops.variables.global_variables_initializer
| 3,455 |
import tensorflow as tf
tf.summary.histogram(v.name[:-2] + '_hist', v)
tf.summary.histogram(v.name[:-2] + '_grad_hist', g)
with tf.control_dependencies([train_op]), tf.name_scope('ema'):
ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
train_op = ema.apply(tf.trainable_variables())
return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)
def add_weight_decay(weight_decay):
"""Add L2 regularization to all (or some) trainable kernel weights."""
weight_decay = tf.constant(
weight_decay, tf.float32,
[], 'weight_decay'
)
trainable_vars = tf.trainable_variables()
kernels = [
v for v in trainable_vars
if ('weights' in v.name or 'kernel' in v.name) and 'depthwise_weights' not in v.name
]
for K in kernels:
x = tf.multiply(weight_decay, tf.nn.l2_loss(K))
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, x)
|
tensorflow.constant
| 3,456 |
from tensorflow.contrib.learn.python.learn.datasets import base
validation = DataSet(validation_images,
validation_labels,
dtype=dtype,
reshape=reshape)
test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape)
return base.Datasets(train=train, validation=validation, test=test)
def load_mnist():
return read_data_sets('MNIST_data')
|
tensorflow.contrib.learn.python.learn.datasets.base.Datasets
| 3,457 |
import tensorflow as tf
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
|
tensorflow.equal
| 3,458 |
import tensorflow as tf
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.test_session(graph=tf.Graph()) as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
save = tf.train.Saver([v0, v1])
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v0" in e.message):
|
tensorflow.Graph
| 3,459 |
from tensorflow.python.ops import math_ops
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
|
tensorflow.python.ops.math_ops.cast
| 3,460 |
import tensorflow as tf
def testEmbeddingTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
|
tensorflow.global_variables_initializer
| 3,461 |
import tensorflow as tf
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
unorm_w = tf.exp((tgt_flat1 + tgt_flat2)/temp)
loss = unorm_w * loss / (tf.reduce_sum(unorm_w))
a = tf.print(tf.reduce_sum(unorm_w))
with tf.control_dependencies([a]):
final_loss = tf.reduce_sum(loss)
return final_loss, cstr_pct
def contra_traj_lossV8(pred, tgt, horizon=12):
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0)
horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0)
pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])
|
tensorflow.reduce_sum
| 3,462 |
import tensorflow as tf
def get_variables(self):
return {'w':self.w,'b':self.b}
class DilatedConv3D(object) :
def __init__(self,name,input_dim,output_dim,k_t=2,k_h=3,k_w=3,d_t=2,d_h=1,d_w=1,
stddev=0.02, data_format='NDHWC') :
with tf.variable_scope(name) :
assert(data_format == 'NDHWC')
self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
self.strides = [1,1,1]
|
tensorflow.variable_scope
| 3,463 |
from tensorflow.python.framework import ops
TypeError: If `x` and `y` have different dtypes.
"""
with ops.op_scope([x, y], name, "truediv") as name:
x = ops.convert_to_tensor(x, name="x")
|
tensorflow.python.framework.ops.op_scope
| 3,464 |
import tensorflow as tf
if __name__ == "__main__":
tf.test.main()
|
tensorflow.test.main
| 3,465 |
import tensorflow as tf
Returns:
A transformed tensor (tf.float32).
"""
with tf.variable_scope('_interpolate'):
num_batch = im.get_shape().as_list()[0]
depth = im.get_shape().as_list()[1]
height = im.get_shape().as_list()[2]
width = im.get_shape().as_list()[3]
channels = im.get_shape().as_list()[4]
x = tf.to_float(x)
y = tf.to_float(y)
z = tf.to_float(z)
depth_f = tf.to_float(depth)
height_f = tf.to_float(height)
width_f = tf.to_float(width)
# Number of interpolated disparities.
out_depth = out_size[0]
out_height = out_size[1]
out_width = out_size[2]
zero = tf.zeros([], dtype='int32')
# 0 <= z < depth, 0 <= y < height & 0 <= x < width.
|
tensorflow.to_float
| 3,466 |
import tensorflow as tf
G = tf.Print(
self.end_points_G['softmax'],
[tf.reduce_mean(G_means), tf.reduce_mean(G_vars)],
"generator mean and average var",
|
tensorflow.reduce_mean
| 3,467 |
import tensorflow as tf
y_ = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
# Whether model is training
train = tf.placeholder(tf.bool, [])
# Build the graph for the deep net
y_conv, img_summary = deepnn(x, train)
# Define your loss function - softmax_cross_entropy
with tf.variable_scope('x_entropy'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
# Define your AdamOptimiser, using FLAGS.learning_rate to minimise the loss function
decayed_learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, tf.Variable(0, trainable=False), 1000, 0.8)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimiser = tf.train.AdamOptimizer(decayed_learning_rate, name="Adam").minimize(cross_entropy)
# calculate the prediction and the accuracy
accuracy, acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1), predictions=tf.argmax(y_conv, axis=1))
loss_summary = tf.summary.scalar('Loss', cross_entropy)
acc_summary = tf.summary.scalar('Accuracy', accuracy)
# summaries for TensorBoard visualisation
validation_summary = tf.summary.merge([img_summary, acc_summary])
training_summary = tf.summary.merge([img_summary, loss_summary])
test_summary = tf.summary.merge([img_summary, acc_summary])
|
tensorflow.control_dependencies
| 3,468 |
import tensorflow as tf
+ self.b_out
else:
new_output = tf.matmul(tf.nn.relu(new_state),
self.W_out * self.output_Connectivity, transpose_b=True, name="3") \
+ self.b_out
return new_output
def rnn_step_scan(self, state, rnn_in):
if self.dale_ratio:
new_state = (1-self.alpha) * state \
+ self.alpha * (
tf.matmul(
tf.nn.relu(state),
tf.matmul(
tf.abs(self.W_rec) * self.rec_Connectivity,
self.Dale_rec, name="in_1"),
transpose_b=True, name="1")
+ tf.matmul(
rnn_in,
tf.abs(self.W_in) * self.input_Connectivity,
transpose_b=True, name="2")
+ self.b_rec) \
+ np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise)\
* tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
else:
new_state = ((1 - self.alpha) * state) \
|
tensorflow.nn.relu
| 3,469 |
import tensorflow as tf
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.reshape(self.g,[1,1,tf.shape(self.v)[2],1]) * tf.nn.l2_normalize(self.v,axis=[0,1,3])
return tf.nn.bias_add(
tf.nn.conv2d_transpose(input_var,w,
output_shape=shapes,
strides=self.strides,
padding='SAME',
data_format='NHWC'),
self.b,data_format='NHWC',name=name)
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
|
tensorflow.nn.conv2d_transpose
| 3,470 |
import tensorflow as tf
input_shape = [batch_size, image_size, image_size, input_nchan]
images = tf.truncated_normal(
input_shape,
dtype=input_data_type,
stddev=1e-1,
name='synthetic_images')
labels = tf.random_uniform(
[batch_size],
minval=1,
maxval=nclass,
dtype=tf.int32,
name='synthetic_labels')
# Note: This results in a H2D copy, but no computation
# Note: This avoids recomputation of the random values, but still
# results in a H2D copy.
images = tf.contrib.framework.local_variable(images, name='images')
labels = tf.contrib.framework.local_variable(labels, name='labels')
# Change to 0-based (don't use background class like Inception does)
labels -= 1
if num_compute_devices == 1:
images_splits = [images]
labels_splits = [labels]
else:
images_splits = tf.split(images, num_compute_devices, 0)
labels_splits = tf.split(labels, num_compute_devices, 0)
return nclass, images_splits, labels_splits
def create_config_proto():
config = tf.ConfigProto()
|
tensorflow.contrib.framework.local_variable
| 3,471 |
import tensorflow as tf
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
|
tensorflow.constant_initializer
| 3,472 |
import tensorflow as tf
geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
|
tensorflow.where
| 3,473 |
import tensorflow as tf
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
|
tensorflow.FixedLenFeature
| 3,474 |
import tensorflow as tf
learning_rate = 0.0001
logging.info(" lambda_l2_w: %f" % lambda_l2_w)
logging.info(" learning_rate: %f" % learning_rate)
# Mean-square-error i.e. quadratic-cost
mse = tf.reduce_sum(tf.squared_difference(y, x_recon), 1)
mse = tf.reduce_mean(mse) # in theano: mse = ((y - x) ** 2 ).sum(axis=1).mean()
# mse = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(y, x_recon)), 1))
# mse = tf.reduce_mean(tf.squared_difference(y, x_recon)) # <haodong>: Error
# mse = tf.sqrt(tf.reduce_mean(tf.square(y - x_recon))) # <haodong>: Error
# Cross-entropy
# ce = cost.cross_entropy(y, x_recon) # <haodong>: list , list , Error (only be used for softmax output)
# ce = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, x_recon)) # <haodong>: list , list , Error (only be used for softmax output)
# ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, x_recon)) # <haodong>: list , index , Error (only be used for softmax output)
L2_w = tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[0]) \
+ tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2]) # faster than the code below
# L2_w = lambda_l2_w * tf.reduce_mean(tf.square(self.train_params[0])) + lambda_l2_w * tf.reduce_mean( tf.square(self.train_params[2]))
# DropNeuro
# P_o = cost.lo_regularizer(0.03)(
# self.train_params[0]) # + cost.lo_regularizer(0.5)(self.train_params[2]) # <haodong>: if add lo on decoder, no neuron will be broken
# P_i = cost.li_regularizer(0.03)(self.train_params[0]) # + cost.li_regularizer(0.001)(self.train_params[2])
# L1 of activation outputs
activation_out = self.all_layers[-2]
L1_a = 0.001 * tf.reduce_mean(activation_out) # <haodong>: theano: T.mean( self.a[i] ) # some neuron are broken, white and black
# L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, 0) ) # <haodong>: some neuron are broken, white and black
# L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, 1) ) # <haodong>: some neuron are broken, white and black
|
tensorflow.contrib.layers.l2_regularizer
| 3,475 |
import tensorflow as tf
@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def wmt_concat_preprocess(dataset, training, max_length=-1, max_eval_length=-1):
"""Preprocessing for WMT: filter exceeding maximum length and concatenate."""
dataset = wmt_preprocess(dataset, training, max_length, max_eval_length)
def concat_and_add_mask(features, targets):
inp = features['inputs']
pad = tf.expand_dims(tf.zeros_like(inp[0]), axis=0)
concat = tf.concat([inp, pad, targets], axis=0)
mask = tf.concat([tf.zeros_like(inp), pad, tf.ones_like(targets)], axis=0)
features['inputs'] = concat
features['mask'] = mask
return features, concat
dataset = dataset.map(concat_and_add_mask)
return dataset
|
tensorflow.zeros_like
| 3,476 |
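`concat_and_add_mask` joins source and target around a single pad token and builds a mask that is 1 only over target positions. A toy check of the values:
```
import tensorflow as tf

inp = tf.constant([3, 4])
targets = tf.constant([7, 8, 9])
pad = tf.expand_dims(tf.zeros_like(inp[0]), axis=0)
concat = tf.concat([inp, pad, targets], axis=0)                              # [3 4 0 7 8 9]
mask = tf.concat([tf.zeros_like(inp), pad, tf.ones_like(targets)], axis=0)  # [0 0 0 1 1 1]
```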
import tensorflow as tf
s = tf.shape(x)
sh = x.get_shape().as_list()
x = tf.reshape(x, [tf.reduce_prod(s[:-1]), sh[-1]])
x = dense(x, num_units, **kwargs)
return tf.reshape(x, [-1] + sh[1:-1] + [num_units])
def dense(x, num_units, scope="dense", training=True, ema=None, init=False, bias_initializer=tf.constant_initializer(0.)):
with tf.variable_scope(scope):
V = tf.get_variable('V', shape=[int(x.get_shape()[1]), num_units], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
g = tf.get_variable('g', shape=[num_units], dtype=tf.float32,
initializer=tf.constant_initializer(1.), trainable=True)
b = tf.get_variable('b', shape=[num_units], dtype=tf.float32,
initializer=bias_initializer, trainable=True)
def maybe_avg(v):
if ema is not None and not init:
v = tf.cond(training, lambda: v, lambda: ema.average(v))
return v
if init:
x = tf.matmul(x, tf.nn.l2_normalize(V.initialized_value(), 0))
|
tensorflow.constant_initializer
| 3,477 |
import tensorflow as tf
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
|
tensorflow.contrib.tpu.TPUEstimator
| 3,478 |
import tensorflow as tf
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
def get_session():
tf.reset_default_graph()
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
session = tf.Session(config=tf_config)
print("AVAILABLE GPUS: ", get_available_gpus())
return session
def get_env(env_id, seed):
env = gym.make(env_id)
set_global_seeds(seed)
env.seed(seed)
|
tensorflow.Session
| 3,479 |
from tensorflow.python.ops import math_ops
def _event_shape(self):
return constant_op.constant([], dtype=dtypes.int32)
def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
"""See the documentation for tf.random_gamma for more details."""
return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
dtype=self.dtype, seed=seed)
def _log_prob(self, x):
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.validate_args else [], x)
return (self.alpha * math_ops.log(self.beta) -
math_ops.lgamma(self.alpha) -
(self.alpha + 1.) * math_ops.log(x) - self.beta / x)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return math_ops.log(self._cdf(x))
def _cdf(self, x):
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.validate_args else [], x)
# Note that igammac returns the upper regularized incomplete gamma
# function Q(a, x), which is what we want for the CDF.
|
tensorflow.python.ops.math_ops.log
| 3,480 |
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, device_count={'GPU': gpu})
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.5
# Placeholders
self.sess = tf.Session(config=config)
self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
self.a_bound = (env.action_space.high - env.action_space.low) / 2
self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
# Dataset with experience replay
self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
'rewards': self.rewards, 'advantage': self.advantage})
self.dataset = self.dataset.shuffle(buffer_size=10000)
self.dataset = self.dataset.batch(self.MINIBATCH)
self.dataset = self.dataset.cache()
self.dataset = self.dataset.repeat(self.EPOCHS)
self.data_iter = self.dataset.make_initializable_iterator()
batch = self.data_iter.get_next()
# Call ppo net
pi_old, pi_old_params = self.build_anet(batch['state'], 'oldpi')
pi, pi_params = self.build_anet(batch['state'], 'pi')
pi_eval, _ = self.build_anet(self.state, 'pi', reuse=True)
|
tensorflow.data.Dataset.from_tensor_slices
| 3,481 |
import tensorflow as tf
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
return loss
|
tensorflow.maximum
| 3,482 |
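The snippet above is the margin-based triplet loss with its `pos_dist` line truncated. A sketch reconstructing the full function under that assumption (TF 1.x assumed):
```
import tensorflow as tf  # TF 1.x assumed

def triplet_loss(anchor, positive, negative, alpha):
    # Squared L2 distances between the anchor and the positive/negative examples.
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
    # Hinge: only penalize triplets where the positive is not closer by margin alpha.
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    return tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
```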
import tensorflow as tf
self.grads_and_vars = self._optimizer.compute_gradients(total_loss)
# clip gradients
clipped_grads_and_vars = self._clip_gradients(self.grads_and_vars, self._grad_clipping_tuple)
# compute norms in case they need to be logged
self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars]
self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars]
# check that gradients are finite
grads = [tf.check_numerics(g, "grads is not finite") for (g, v) in clipped_grads_and_vars]
variables = [tf.check_numerics(v, "variables is not finite") for (g, v) in clipped_grads_and_vars]
self.gradient_weight_global_norms = [tf.global_norm(grads), tf.global_norm(variables)]
# 2nd part of minimize: apply_gradient
optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step)
update_ops = tf.group(*self.update_ops)
self.training_op = tf.group(update_ops, optimizer_step)
def set_check_ops(self):
self._check_ops = 1
|
tensorflow.global_norm
| 3,483 |
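`tf.global_norm` returns the square root of the summed squared norms of every tensor in the list, which is why a single scalar can summarize all gradients at once; a minimal check with hypothetical tensors:
```
import tensorflow as tf

g1 = tf.constant([[3.0, 4.0]])
g2 = tf.constant([12.0])
norm = tf.global_norm([g1, g2])  # sqrt(3^2 + 4^2 + 12^2) = 13.0
```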
import tensorflow as tf
def main():
if not tf.io.gfile.exists(a.output_dir):
tf.io.gfile.makedirs(a.output_dir)
|
tensorflow.io.gfile.exists
| 3,484 |
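The make-dir-if-missing guard in isolation (the path is hypothetical):
```
import tensorflow as tf

output_dir = '/tmp/example_output'  # hypothetical path
if not tf.io.gfile.exists(output_dir):
    tf.io.gfile.makedirs(output_dir)
```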
import tensorflow as tf
"""
sess = tf.get_default_session()
if variables is None:
variables = tf.global_variables()
else:
variables = list(variables)
if len(variables) == 0:
return []
if semver.match(tf.__version__, '<1.0.0'):
init_flag = sess.run(
tf.pack([tf.is_variable_initialized(v) for v in variables]))
else:
init_flag = sess.run(
tf.stack([tf.is_variable_initialized(v) for v in variables]))
return [v for v, f in zip(variables, init_flag) if not f]
def get_hard_target_model_updates(target, source):
"""Return list of target model update ops.
These are hard target updates. The source weights are copied
directly to the target network.
Parameters
----------
target: keras.models.Model
The target model. Should have same architecture as source model.
|
tensorflow.is_variable_initialized
| 3,485 |
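A TF1-era sketch of the uninitialized-variable check above, followed by initializing only what is missing; the variable here is hypothetical:
```
import tensorflow as tf

v = tf.Variable(tf.zeros([2]), name='v')  # hypothetical variable
with tf.Session() as sess:
    variables = tf.global_variables()
    init_flag = sess.run(tf.stack([tf.is_variable_initialized(x) for x in variables]))
    uninitialized = [x for x, f in zip(variables, init_flag) if not f]
    sess.run(tf.variables_initializer(uninitialized))
```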
import tensorflow as tf
metrics = self.evaluate('validation', mute=True)
tf.logging.info(
'Iter {:4d}: loss {:.4f}'.format(i, loss) +
''.join([', {} {:.4f}'.format(m, metrics[m]) for m in metrics]))
if output_dir is not None:
train_writer.add_summary(summaries, i)
metrics_summaries = tf.Summary(value=[
tf.Summary.Value(tag=m, simple_value=v)
for m, v in metrics.items()])
train_writer.add_summary(metrics_summaries, i)
tf.logging.info('Training finished')
def predict(self, data, keys='*', batch=False):
assert set(data.keys()) >= set(self.data_shape.keys())
if isinstance(keys, str):
if keys == '*':
op = self.pred_out # just gather all outputs
else:
op = self.pred_out[keys]
else:
op = {k: self.pred_out[k] for k in keys}
|
tensorflow.logging.info
| 3,486 |
import tensorflow as tf
# approximately as (ignoring boundary issues and initial_value):
#
# cumsum(decay_prods * sequence) / decay_prods
# where decay_prods = reverse_cumprod(decay)
#
# One reason this hasn't been done is that multiplying then dividing again by
# products of decays isn't ideal numerically, in particular if any of the
# decays are zero it results in NaNs.
with tf.name_scope(name, values=[sequence, decay, initial_value]):
if sequence_lengths is not None:
# Zero out sequence and decay beyond sequence_lengths.
with tf.control_dependencies(
[tf.assert_equal(sequence.shape[0], decay.shape[0])]):
mask = tf.sequence_mask(sequence_lengths, maxlen=sequence.shape[0],
dtype=sequence.dtype)
mask = tf.transpose(mask)
|
tensorflow.name_scope
| 3,487 |
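The masking step inside that name scope in isolation: `tf.sequence_mask` yields a `[batch, max_time]` mask, so it is transposed to match a time-major sequence. Shapes are hypothetical.
```
import tensorflow as tf

sequence = tf.ones([5, 2])               # [max_time, batch], hypothetical
sequence_lengths = tf.constant([3, 5])   # valid steps per batch element
mask = tf.sequence_mask(sequence_lengths, maxlen=5, dtype=sequence.dtype)
masked = sequence * tf.transpose(mask)   # zeros out steps past each length
```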
import tensorflow as tf
multiple sorted values. Default to 10 intervals over the 0-1 range, or
find the min/max if an int is provided (not recommended because
multi-phase analysis is inefficient).
categorical: (Optional) A `bool` that treats `x` as discrete values if true.
name: (Optional) A name for this operation.
Returns:
counts: The histogram, as counts per bin.
boundaries: A `Tensor` used to build the histogram representing boundaries.
"""
with tf.compat.v1.name_scope(name, 'histogram'):
x = tf.reshape(tf_utils.get_values(x), [-1])
if categorical:
x_dtype = x.dtype
x = x if x_dtype == tf.string else tf.strings.as_string(x)
elements, counts = count_per_key(x)
if x_dtype != elements.dtype:
elements = tf.strings.to_number(elements, tf.int64)
return counts, elements
if boundaries is None:
|
tensorflow.compat.v1.name_scope
| 3,488 |
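`count_per_key` is a tf.Transform analyzer; as a rough stand-in for the categorical branch, core TensorFlow's `tf.unique_with_counts` produces the same per-key counts for a single tensor:
```
import tensorflow as tf

x = tf.constant([1, 2, 2, 3, 3, 3])
elements, _, counts = tf.unique_with_counts(x)  # elements=[1,2,3], counts=[1,2,3]
```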
import tensorflow as tf
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
if mask is not None:
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
|
tensorflow.ones_like
| 3,489 |
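The `tf.ones_like` padding idiom in isolation: fill masked-out positions with a huge negative number so softmax drives them to ~0. Shapes are hypothetical.
```
import tensorflow as tf

scores = tf.random_normal([2, 1, 4])               # [B, 1, T], hypothetical
mask = tf.constant([[True, True, False, False],
                    [True, True, True, False]])    # [B, T]
key_masks = tf.expand_dims(mask, 1)                # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)   # effectively -inf pre-softmax
masked = tf.where(key_masks, scores, paddings)
probs = tf.nn.softmax(masked)                      # ~0 at masked positions
```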
import tensorflow as tf
Args:
config: A `DeploymentConfig` object.
model_fn: A callable. Called as `model_fn(*args, **kwargs)`
args: Optional list of arguments to pass to `model_fn`.
kwargs: Optional list of keyword arguments to pass to `model_fn`.
optimizer: Optional `Optimizer` object. If passed the model is deployed
for training with that optimizer.
summarize_gradients: Whether or not add summaries to the gradients.
Returns:
A `DeployedModel` namedtuple.
"""
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Create Clones.
clones = create_clones(config, model_fn, args, kwargs)
first_clone = clones[0]
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone.scope)
train_op = None
total_loss = None
with tf.device(config.optimizer_device()):
if optimizer:
# Place the global step on the device storing the variables.
|
tensorflow.get_collection
| 3,490 |
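The `UPDATE_OPS` gathered above (batch-norm moving averages and similar) are usually wired in as control dependencies of the train op; a minimal TF1 sketch with a hypothetical model:
```
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8])
h = tf.layers.batch_normalization(tf.layers.dense(x, 4), training=True)
loss = tf.reduce_mean(tf.square(h))

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # moving-stat updates
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
```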
import tensorflow as tf
self.compute_shape(l2_shape[3], self.ff_pool_strides[1][2]),
final_dim]
else:
l2_shape = tf.identity(x_shape)
# Initialize hidden layer activities
if self.hidden_init == 'identity':
l1_h2 = tf.identity(x)
l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
elif self.hidden_init == 'random':
l1_h2 = tf.random_normal(x_shape, dtype=self.dtype)
l2_h2 = tf.random_normal(l2_shape, dtype=self.dtype)
l3_h2 = tf.random_normal(l3_shape, dtype=self.dtype)
elif self.hidden_init == 'zeros':
l1_h2 = tf.zeros(x_shape, dtype=self.dtype)
l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
else:
raise RuntimeError
# While loop
elems = [
i0,
x,
l1_h2,
|
tensorflow.random_normal
| 3,491 |
import tensorflow as tf
keepdims: A boolean, whether to keep the dimensions or not.
If keepdims is False, the rank of the tensor is reduced
by 1 for each entry in axis. If keepdims is True,
the reduced dimensions are retained with length 1.
Returns
-------
A tensor with the mean of elements of x.
"""
axis = _normalize_axis(axis, get_ndim(x))
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, tf.float32)
return tf.reduce_mean(x, axis=axis, keep_dims=keepdims)
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a *tensor*.
When attempting to multiply a ND tensor
with a ND tensor, it reproduces the Theano behavior.
(e.g. (2, 3).(4, 3, 5) = (2, 4, 5))
Parameters
----------
x: Tensor or variable.
|
tensorflow.reduce_mean
| 3,492 |
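The bool-to-float cast above exists because `reduce_mean` has no kernel for `tf.bool`; casting first turns the mean into the fraction of `True` entries:
```
import tensorflow as tf

x = tf.constant([True, False, True, True])
mean = tf.reduce_mean(tf.cast(x, tf.float32))  # 0.75
```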
import tensorflow as tf
grads = optimizer.compute_gradients(loss_val, var_list=var_list)
if FLAGS.debug:
# print(len(var_list))
for grad, var in grads:
utils.add_gradient_summary(grad, var)
return optimizer.apply_gradients(grads)
def train_z(loss,Z):
return tf.gradients(ys = loss, xs = Z)
def main(argv=None):
keep_probability = tf.placeholder(tf.float32, name="keep_probability")
image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")
z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name="z")
# pred_annotation, logits = inference(image, keep_probability,z)
# tf.summary.image("input_image", image, max_outputs=2)
# tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
# tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
# labels=tf.squeeze(annotation, squeeze_dims=[3]),
# name="entropy")))
|
tensorflow.placeholder
| 3,493 |
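Placeholders are only graph inputs; each one must be fed at `session.run` time. A minimal TF1 round trip with hypothetical shapes:
```
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
y = tf.reduce_sum(x, axis=1)

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: np.ones((2, 3), np.float32)}))  # [3. 3.]
```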
import tensorflow as tf
# Initialize session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
|
tensorflow.Session
| 3,494 |
import tensorflow as tf
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = U.ensure_tf_input(make_obs_ph("obs_t"))
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = U.ensure_tf_input(make_obs_ph("obs_tp1"))
|
tensorflow.variable_scope
| 3,495 |
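The `reuse=True` call above shares one set of weights across both networks; the pattern in isolation (layer sizes hypothetical):
```
import tensorflow as tf

def net(x, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        return tf.layers.dense(x, 4, name="fc")

x = tf.placeholder(tf.float32, [None, 8])
out_a = net(x, "pi")              # creates pi/fc/kernel and pi/fc/bias
out_b = net(x, "pi", reuse=True)  # reuses those same variables
```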
import tensorflow as tf
reg += self.L1_in * tf.reduce_mean(tf.abs(self.W_in) * self.input_Connectivity)
reg += self.L1_rec * tf.reduce_mean(tf.abs(self.W_rec) * self.rec_Connectivity)
if self.dale_ratio:
reg += self.L1_out * tf.reduce_mean(tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out))
else:
reg += self.L1_out * tf.reduce_mean(tf.abs(self.W_out) * self.output_Connectivity)
# L2 weight regularization
reg += self.L2_in * tf.reduce_mean(tf.square(tf.abs(self.W_in) * self.input_Connectivity))
reg += self.L2_rec * tf.reduce_mean(tf.square(tf.abs(self.W_rec) * self.rec_Connectivity))
|
tensorflow.abs
| 3,496 |
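Each regularizer above is an ordinary tensor expression; one L1 and one L2 term over a masked weight matrix look like this (names and coefficients hypothetical):
```
import tensorflow as tf

W = tf.get_variable("W", [16, 16])
connectivity = tf.ones([16, 16])  # hypothetical 0/1 connectivity mask

l1 = 0.01 * tf.reduce_mean(tf.abs(W) * connectivity)
l2 = 0.01 * tf.reduce_mean(tf.square(tf.abs(W) * connectivity))
reg = l1 + l2
```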
import tensorflow as tf
labels=gtboxes_and_label_h[0, :, -1],
method=0)
tf.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h)
if cfgs.ADD_BOX_IN_TENSORBOARD:
detections_in_img = self.drawer.draw_boxes_with_categories_and_scores(
img_batch=tf.expand_dims(img[0, :, :, :], axis=0),
boxes=outputs[0],
scores=outputs[1],
labels=outputs[2],
method=2)
tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img)
loss_dict = outputs[-1]
total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu)
if i == num_gpu - 1:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
# weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
total_losses = total_losses + tf.add_n(regularization_losses)
|
tensorflow.summary.image
| 3,497 |
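An image summary on its own only registers the op; it still has to be merged and written. A minimal sketch with a hypothetical image batch:
```
import tensorflow as tf

img = tf.placeholder(tf.float32, [None, 64, 64, 3])  # hypothetical batch
tf.summary.image("inputs", img, max_outputs=2)
merged = tf.summary.merge_all()  # run and pass to a tf.summary.FileWriter
```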
import tensorflow as tf
seq_len=self.q_len,
scope="Encoder_Residual_Block",
reuse=True, # Share the weights between passage and question
bias=False,
dropout=self.dropout)
def _fuse(self):
with tf.variable_scope("Context_to_Query_Attention_Layer"):
C = tf.tile(tf.expand_dims(self.c_embed_encoding, 2), [1, 1, self.max_q_len, 1])
Q = tf.tile(tf.expand_dims(self.q_embed_encoding, 1), [1, self.max_p_len, 1, 1])
S = trilinear([C, Q, C * Q], input_keep_prob=1.0 - self.dropout)
mask_q = tf.expand_dims(self.q_mask, 1)
S_ = tf.nn.softmax(mask_logits(S, mask=mask_q))
mask_c = tf.expand_dims(self.c_mask, 2)
S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask=mask_c), dim=1), (0, 2, 1))
self.c2q = tf.matmul(S_, self.q_embed_encoding)
self.q2c = tf.matmul(tf.matmul(S_, S_T), self.c_embed_encoding)
self.attention_outputs = [self.c_embed_encoding, self.c2q, self.c_embed_encoding * self.c2q,
self.c_embed_encoding * self.q2c]
|
tensorflow.expand_dims
| 3,498 |
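`trilinear` above is project-specific; the `expand_dims`/`tile` broadcast it relies on can be shown with a plain dot-product similarity instead (shapes hypothetical):
```
import tensorflow as tf

C = tf.random_normal([2, 5, 8])                       # context  [B, Lc, d]
Q = tf.random_normal([2, 3, 8])                       # question [B, Lq, d]
C_exp = tf.tile(tf.expand_dims(C, 2), [1, 1, 3, 1])   # [B, Lc, Lq, d]
Q_exp = tf.tile(tf.expand_dims(Q, 1), [1, 5, 1, 1])   # [B, Lc, Lq, d]
S = tf.reduce_sum(C_exp * Q_exp, axis=-1)             # similarity [B, Lc, Lq]
c2q = tf.matmul(tf.nn.softmax(S), Q)                  # context-to-query [B, Lc, d]
```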
import tensorflow as tf
The output tensor of the block; shape should match inputs.
"""
shortcut = inputs
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
|
tensorflow.nn.relu
| 3,499 |
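The `batch_norm` helper in that snippet is project-local; the pre-activation ordering (BN, then ReLU, then conv) can be sketched with `tf.layers` instead. Note `filters` must equal the input channel count for the residual add to type-check.
```
import tensorflow as tf

def preact_unit(inputs, filters, training):
    shortcut = inputs
    x = tf.layers.batch_normalization(inputs, training=training)
    x = tf.nn.relu(x)  # BN -> ReLU before the conv: pre-activation ordering
    x = tf.layers.conv2d(x, filters, 3, padding='same')
    return x + shortcut
```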