seed (string, 25-2.89k chars) | seed_api (string, 14-102 chars) | index (int64, 0-14.8k)
---|---|---|
import tensorflow as tf
if stride > 1:
X = self._calibrate(X, w, h, ch, w // stride, h // stride, ch, is_train=is_train)
X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check
return X
def _add_max_pool_3x3_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train):
filter_size = 3
stride = 2 if is_reduction else 1
with tf.variable_scope('max_pool_3x3_op'):
X = tf.nn.max_pool(X, ksize=(1, filter_size, filter_size, 1), strides=[1, stride, stride, 1], padding='SAME')
X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check
return X
def _add_separable_conv_3x3_op(self, *args, **kwargs):
return self._add_separable_conv_op(*args, **kwargs, filter_size=3)
def _add_separable_conv_5x5_op(self, *args, **kwargs):
return self._add_separable_conv_op(*args, **kwargs, filter_size=5)
| tensorflow.nn.max_pool | 14,100 |
import tensorflow as tf
pool_center = AvgPooling('mappool', gmap, 9, stride=8, padding='VALID')
with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu,
W_init=tf.random_normal_initializer(stddev=0.01)):
shared = (LinearWrap(image)
| tensorflow.random_normal_initializer | 14,101 |
import tensorflow as tf
'Whether to evaluate the model only once.')
tf.app.flags.DEFINE_string('log_root', '',
'Directory to keep the checkpoints. Should be a '
'parent directory of FLAGS.train_dir/eval_dir.')
tf.app.flags.DEFINE_integer('num_gpus', 0,
'Number of gpus used for training. (0 or 1)')
tf.app.flags.DEFINE_integer('num_residual_units', 5,
'num of residual units')
tf.app.flags.DEFINE_string('Optimizer', 'mom',
'The optimizer used to train the model.')
tf.app.flags.DEFINE_bool('RCE_train', False,
'Whether to use RCE to train the model.')
tf.app.flags.DEFINE_string('attack_method', 'fgsm',
'The attacking method used')
tf.app.flags.DEFINE_float('eps', 0.01,
'The eps in attacking methods.')
tf.app.flags.DEFINE_string('save_pwd', None,
'')
epoch_jsma = 100
num_classes = 10
if FLAGS.dataset == 'cifar10':
| tensorflow.app.flags.DEFINE_string | 14,102 |
import tensorflow as tf
ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
ch_emb = conv(ch_emb, d,
bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=None)
qh_emb = conv(qh_emb, d,
bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=True)
ch_emb = tf.reduce_max(ch_emb, axis=1)
qh_emb = tf.reduce_max(qh_emb, axis=1)
ch_emb = tf.reshape(ch_emb, [N * self.max_p_num, PL, -1])
qh_emb = tf.reshape(qh_emb, [N * self.max_p_num, QL, -1])
c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout)
q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout)
c_emb = tf.concat([c_emb, ch_emb], axis=2)
q_emb = tf.concat([q_emb, qh_emb], axis=2)
self.c_emb = highway(c_emb, size=d, scope="highway", dropout=self.dropout, reuse=None)
self.q_emb = highway(q_emb, size=d, scope="highway", dropout=self.dropout, reuse=True)
def _encode(self):
| tensorflow.reshape | 14,103 |
import tensorflow as tf
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0)
outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
# outputs = batch_norm_for_conv2d(outputs, is_training,
# bn_decay=bn_decay, scope='bn')
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
| tensorflow.constant_initializer | 14,104 |
import tensorflow as tf
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
| tensorflow.matmul | 14,105 |
import tensorflow as tf
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4,
num_heads=2)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
state_is_tuple=True)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
| tensorflow.constant_initializer | 14,106 |
import tensorflow as tf
return {
"train":{
"loss":loss,
"logits":logits,
"train_op":train_op,
"cross_entropy":label_loss,
"distillation_loss":distillation_loss["distillation_loss"],
"kd_num":tf.reduce_sum(features["distillation_ratio"]),
"ce_num":tf.reduce_sum(features["label_ratio"]),
"teacher_logit":teacher_logit,
"student_logit":student_logit,
"label_ratio":features["label_ratio"],
"distilaltion_logits_loss":distillation_loss["distillation_logits_loss"],
"distilaltion_feature_loss":distillation_loss["distillation_feature_loss"],
"distillation_loss":distillation_loss["distillation_loss"],
"st_accuracy":st_accuracy,
"te_accuracy":te_accuracy,
| tensorflow.reduce_sum | 14,107 |
import tensorflow as tf
tf.nn.rnn_cell.LSTMCell(
size_layers, initializer=tf.orthogonal_initializer(), reuse=reuse
| tensorflow.orthogonal_initializer | 14,108 |
import tensorflow as tf
dtype=LayersConfig.tf_dtype,
)
sum_word_embeddings = tf.reduce_sum(word_embeddings, axis=1)
| tensorflow.reduce_sum | 14,109 |
from tensorflow.python.ops import array_ops
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `ignore_mask` is not `None` and its shape doesn't match
`predictions`, or if `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
ValueError: If `top_k_predictions` has rank < 2.
"""
default_name = _at_k_name('precision', class_id=class_id)
with ops.name_scope(
name, default_name,
(top_k_predictions, labels, ignore_mask, weights)) as scope:
rank = array_ops.rank(top_k_predictions)
check_rank_op = control_flow_ops.Assert(
math_ops.greater_equal(rank, 2),
['top_k_predictions must have rank 2 or higher, e.g. [batch_size, k].'])
with ops.control_dependencies([check_rank_op]):
return _streaming_sparse_precision_at_k(
top_k_idx=top_k_predictions,
labels=labels,
class_id=class_id,
ignore_mask=ignore_mask,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
| tensorflow.python.ops.array_ops.rank | 14,110 |
import tensorflow as tf
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d", len(predict_examples))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
if FLAGS.use_tpu:
# Warning: According to tpu_estimator.py Prediction on TPU is an
# experimental feature and hence not supported here
raise ValueError("Prediction in TPU not supported")
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
| tensorflow.logging.info | 14,111 |
import tensorflow as tf
#print(label.shape)
images,labels=tf.train.shuffle_batch([image,label],
batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=200)
return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
def get_test_batch(image,label,batch_size):
| tensorflow.reshape | 14,112 |
import tensorflow as tf
ids_placeholder = tf.placeholder('int32',
| tensorflow.placeholder | 14,113 |
import tensorflow as tf
t_vars['d_vars'], self.d_losses[-1], d_optimizer, gradient_noise_scale=0.0)
self.capped_g_grads = self._clip_grad_global_norms(
t_vars['g_vars'], self.g_losses[-1], g_optimizer, gradient_noise_scale=0.0)
else:
self.capped_d_grads = self._clip_grad_norms(
d_optimizer.compute_gradients(self.d_losses[-1], t_vars['d_vars']))
self.capped_g_grads = self._clip_grad_norms(
g_optimizer.compute_gradients(self.g_losses[-1], t_vars['g_vars']))
global_step = tf.get_variable(
'global_step', [], initializer=tf.constant_initializer(0), trainable=False)
if self.gradient_multipliers is not None:
with tf.name_scope('multiply_grads'):
self.capped_d_grads = self._multiply_gradients(self.capped_d_grads,
self.gradient_multipliers)
apply_d_gradient_op = d_optimizer.apply_gradients(self.capped_d_grads, global_step=global_step)
apply_g_gradient_op = g_optimizer.apply_gradients(self.capped_g_grads, global_step=global_step)
self.train_op_d = control_flow_ops.with_dependencies([apply_d_gradient_op], self.d_losses[-1])
self.train_op_g = control_flow_ops.with_dependencies([apply_g_gradient_op], self.g_losses[-1])
| tensorflow.constant_initializer | 14,114 |
import tensorflow as tf
input_batch,
reduction_indices,
keep_dims=True,
shift=shift,
name="batch_norm_ss")
mean, variance = tf.nn.normalize_moments(counts,
shifted_sum_x,
shifted_sum_x2,
shift,
name="normalize_moments")
second_moment = variance + tf.square(mean)
| tensorflow.nn.normalize_moments | 14,115 |
import tensorflow as tf
std = [0.229, 0.224, 0.225]
if self.image_bgr:
mean = mean[::-1]
std = std[::-1]
image_mean = tf.constant(mean, dtype=tf.float32) * 255.
image_std = tf.constant(std, dtype=tf.float32) * 255.
image = (image - image_mean) / image_std
return image
@staticmethod
def compute_loss_and_error(logits, label, label_smoothing=0.):
if label_smoothing != 0.:
nclass = logits.shape[-1]
label = tf.one_hot(label, nclass) if label.shape.ndims == 1 else label
if label.shape.ndims == 1:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
else:
loss = tf.losses.softmax_cross_entropy(
label, logits, label_smoothing=label_smoothing,
reduction=tf.losses.Reduction.NONE)
loss = tf.reduce_mean(loss, name='xentropy-loss')
def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
with tf.name_scope('prediction_incorrect'):
x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
| tensorflow.one_hot | 14,116 |
import tensorflow as tf
hops. Specify node set of each hop, including the root.
adjcents: A list of N `tf.SparseTensor` of `int64`. Specify adjacent
matrix between hops.
"""
nodes = tf.reshape(nodes, [-1])
nodes_list = [nodes]
adj_list = []
for hop_edge_types in edge_types:
neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
next_values = weight.values
next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
next_shape = tf.cast(next_shape, tf.int64)
next_adj = tf.SparseTensor(next_indices, next_values, next_shape)
next_adj = tf.sparse_reorder(next_adj)
nodes_list.append(next_nodes)
adj_list.append(next_adj)
nodes = next_nodes
return nodes_list, adj_list
| tensorflow.stack | 14,117 |
import tensorflow as tf
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
if clip:
log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6))
else:
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
| tensorflow.nn.softmax | 14,118 |
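The comment above explains that `label_weights` is 1.0 for real predictions and 0.0 for padding; the snippet ends just before those weights are applied. A minimal sketch of how such a weighted average can be taken (the dummy values and the epsilon guard are assumptions for illustration):

```python
import tensorflow as tf

# Dummy values for illustration: two real predictions and one padded slot.
per_example_loss = tf.constant([2.3, 1.7, 0.9])
label_weights = tf.constant([1.0, 1.0, 0.0])  # 0.0 marks the padded prediction

numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5  # epsilon guard is an assumption
loss = numerator / denominator  # averages over real predictions only
```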
import tensorflow as tf
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
max_offset_height = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_height - crop_height + 1, []))
max_offset_width = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_width - crop_width + 1, []))
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
def _central_crop(image_list, crop_height, crop_width):
"""Performs central crops of the given image list.
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
| tensorflow.random_uniform | 14,119 |
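The comment in the snippet above notes why `tf.random_uniform` is preferred over `numpy.random.rand`: the former draws a new value at graph evaluation time, while the latter is baked in at graph definition time. A minimal TF1-style sketch of that difference (illustrative only):

```python
import numpy as np
import tensorflow as tf

# Value fixed when the graph is defined: every sess.run() sees the same number.
np_offset = tf.constant(np.random.rand(), dtype=tf.float32)
# Value drawn when the graph is evaluated: every sess.run() sees a fresh number.
tf_offset = tf.random_uniform([], maxval=1.0)

with tf.Session() as sess:
    print(sess.run([np_offset, tf_offset]))
    print(sess.run([np_offset, tf_offset]))  # np_offset repeats, tf_offset changes
```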
import tensorflow as tf
@registry.register_model
class FeedForwardCategoricalPolicy(PolicyBase):
"""Feed-forward categorical."""
def body(self, features):
observations = features["inputs_raw"]
observations = tf.cast(observations, tf.float32)
flat_observations = tf.layers.flatten(observations)
with tf.variable_scope("policy"):
x = flat_observations
for size in self.hparams.policy_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
logits = tf.layers.dense(x, self.hparams.problem.num_actions)
logits = tf.expand_dims(logits, axis=1)
with tf.variable_scope("value"):
x = flat_observations
for size in self.hparams.value_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
value = tf.layers.dense(x, 1)
logits = clip_logits(logits, self.hparams)
return {"target_policy": logits, "target_value": value}
@registry.register_model
class FeedForwardCnnSmallCategoricalPolicy(PolicyBase):
"""Small cnn network with categorical output."""
def body(self, features):
| tensorflow.expand_dims | 14,120 |
import tensorflow as tf
model_options.crop_size[1]
if model_options.crop_size else tf.shape(images)[2])
| tensorflow.shape | 14,121 |
import tensorflow as tf
classifier_utils.file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer,
train_file, task_name)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", FLAGS.train_step)
train_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
| tensorflow.logging.info | 14,122 |
import tensorflow as tf
num_actions_arguments = data.batch_actions_arguments.shape[2]
actions_arguments_vocabulary_length = len(data.idx2word_action_arguments)
with tf.name_scope('data'):
batch_histories = tf.Variable(data.batch_histories, name='histories',
trainable=False)
batch_actions_template = tf.Variable(data.batch_actions_template, name='actions',
trainable=False)
batch_action_arguments = tf.Variable(data.batch_actions_arguments, name='actions_arguments',
trainable=False)
histories = tf.gather(batch_histories, self.batch_idx)
actions_template = tf.gather(batch_actions_template, self.batch_idx)
actions_arguments = tf.gather(batch_action_arguments, self.batch_idx)
with tf.name_scope('model'):
encoder_embedding = embedding(
input=histories,
length=histories_vocabulary_length,
size=histories_embedding_size,
name='encoder_embedding'
)
with tf.name_scope("UtterancesEncoder"):
| tensorflow.gather | 14,123 |
import tensorflow as tf
`[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
"""
neighbors_list = [tf.reshape(nodes, [-1])]
weights_list = []
type_list = []
for hop_edge_types, count in zip(edge_types, counts):
neighbors, weights, types = sample_neighbor(
neighbors_list[-1], hop_edge_types, count, default_node=default_node)
neighbors_list.append(tf.reshape(neighbors, [-1]))
weights_list.append(tf.reshape(weights, [-1]))
type_list.append(tf.reshape(types, [-1]))
return neighbors_list, weights_list, type_list
def get_multi_hop_neighbor(nodes, edge_types):
"""
Get multi-hop neighbors with adjacent matrix.
| tensorflow.reshape | 14,124 |
from tensorflow.python.ops import array_ops
def get_weight_tensor(self, features):
if not self._weight_column_name:
return None
else:
return array_ops.reshape(
math_ops.to_float(features[self._weight_column_name]), shape=(-1,))
@property
def problem_type(self):
return self._problem_type
def _weighted_loss(self, loss, weight_tensor):
"""Returns cumulative weighted loss."""
unweighted_loss = array_ops.reshape(loss, shape=(-1,))
weighted_loss = math_ops.multiply(unweighted_loss,
array_ops.reshape(
weight_tensor, shape=(-1,)))
return weighted_loss
def training_loss(self, logits, target, features, name="training_loss"):
"""Returns training loss tensor for this head.
Training loss is different from the loss reported on the tensorboard as we
should respect the example weights when computing the gradient.
L = sum_{i} w_{i} * l_{i} / B
| tensorflow.python.ops.array_ops.reshape | 14,125 |
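The docstring above gives the training loss as L = sum_{i} w_{i} * l_{i} / B, i.e. example weights are respected when computing the gradient. A small numeric sketch of that formula (the values are made up for illustration):

```python
import tensorflow as tf

loss = tf.constant([0.5, 1.0, 2.0])     # per-example losses l_i
weights = tf.constant([1.0, 0.0, 2.0])  # example weights w_i
batch_size = 3.0                        # B

# L = sum_i w_i * l_i / B, matching the docstring above.
training_loss = tf.reduce_sum(weights * loss) / batch_size  # (0.5 + 0 + 4.0) / 3
```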
import tensorflow as tf
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_thres_expr = param_noise_threshold.assign(
tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph,
lambda: param_noise_threshold))
# Put everything together.
perturbed_deterministic_actions = tf.argmax(perturbable_policy.q_values, axis=1)
deterministic_actions = tf.argmax(policy.q_values, axis=1)
batch_size = tf.shape(policy.obs_ph)[0]
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
perturbed_stochastic_actions = tf.where(chose_random, random_actions, perturbed_deterministic_actions)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
perturbed_output_actions = tf.cond(stochastic_ph, lambda: perturbed_stochastic_actions,
lambda: deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="model", perturbed_scope="perturbed_model/model"),
lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_thres_expr,
| tensorflow.stack | 14,126 |
import tensorflow as tf
control_inputs = [restore_op[-1]] if restore_op else None
with tf.control_dependencies(control_inputs):
if args.restore_params:
filepath = r"./embedding_variables"
op = sok_saver.restore_from_file(embedding_layer.embedding_variable, filepath)
else:
op = sok_saver.load_embedding_values(embedding_layer.embedding_variable, init_tensors[i])
restore_op.append(op)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction="none")
def _replica_loss(labels, logits):
loss = loss_fn(labels, logits)
return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)
def _train_step(inputs, labels, training):
def _step_fn(inputs, labels):
logit, embedding_vector = sok_sparse_demo(inputs, training=training)
loss = _replica_loss(labels, logit)
| tensorflow.keras.losses.BinaryCrossentropy | 14,127 |
import tensorflow as tf
self.local_condition_features = variables[idx]
self.local_condition_features.set_shape(self._placeholders[idx].shape)
idx += 1
#If global conditioning disabled override g inputs with None
if hparams.gin_channels < 0:
self.global_condition_features = None
else:
self.global_condition_features = variables[idx]
self.global_condition_features.set_shape(self._placeholders[idx].shape)
# Create queue for buffering eval data
eval_queue = tf.FIFOQueue(1, queue_types, name='eval_queue')
self._eval_enqueue_op = eval_queue.enqueue(self._placeholders)
eval_variables = eval_queue.dequeue()
self.eval_inputs = eval_variables[0]
self.eval_inputs.set_shape(self._placeholders[0].shape)
self.eval_targets = eval_variables[1]
self.eval_targets.set_shape(self._placeholders[1].shape)
self.eval_input_lengths = eval_variables[2]
self.eval_input_lengths.set_shape(self._placeholders[2].shape)
eval_idx = 3
| tensorflow.FIFOQueue | 14,128 |
import tensorflow as tf
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)
| tensorflow.Session | 14,129 |
import tensorflow as tf
x0_valid = tf.to_float(
tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))
x1_valid = tf.to_float(
tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0))
y0_valid = tf.to_float(
tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0))
y1_valid = tf.to_float(
tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))
z0_valid = tf.to_float(
tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))
z1_valid = tf.to_float(
tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0))
w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *
(z1_f - z) * x1_valid * y1_valid * z1_valid),
1)
w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) *
(z1_f - z) * x0_valid * y1_valid * z1_valid),
1)
w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) *
(z1_f - z) * x1_valid * y0_valid * z1_valid),
1)
w_z0_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) *
(z1_f - z) * x0_valid * y0_valid * z1_valid),
1)
w_z1_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *
| tensorflow.expand_dims | 14,130 |
import tensorflow as tf
NUMBER_OF_CLASSES = 2
def get_input_function():
"""A function to get test inputs. Returns an image with one box."""
image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
key = tf.constant('image_000000')
class_label = tf.random_uniform(
[1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)
box_label = tf.random_uniform(
[1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)
return {
fields.InputDataFields.image: image,
| tensorflow.constant | 14,131 |
import tensorflow as tf
return avg_loss
def compute_contra_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0):
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
| tensorflow.cast | 14,132 |
import tensorflow as tf
return X
def _count_model_parameters(self):
tf_trainable_vars = tf.trainable_variables()
num_params = 0
# utils.logger.log('Model parameters:')
| tensorflow.trainable_variables | 14,133 |
import tensorflow as tf
# Restores from MetaGraphDef.
new_saver = tf.train.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.test_session():
# Creates a graph.
v0 = tf.Variable(10.0, name="v0")
# Creates a saver.
save = tf.train.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Verifies that collection with unsupported key will not be added.
tf.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
# Verifies that collection where item type does not match expected
# type will not be added.
tf.add_to_collection("int_collection", 3)
| tensorflow.train.Saver | 14,134 |
import tensorflow as tf
mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)
]
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 56, 56, 512])
| tensorflow.random_uniform | 14,135 |
import tensorflow as tf
Subclasses can override this function in order to preprocess, and can
yield any number of strings.
Args:
filepath: a string
Yields:
unicode strings.
"""
f = tf.gfile.Open(filepath)
b = f.read()
yield text_encoder.to_unicode_ignore_errors(b)
def file_generator(self,
filepaths,
max_chars_per_file=None,
max_chars_total=None):
"""Read complete text of input files and yield unicode strings.
| tensorflow.gfile.Open | 14,136 |
import tensorflow as tf
def update_model(self, *grads):
params = [self.w0, self.b0, self.w1, self.b1]
grads = [tf.cast(grad, tf.float32) for grad in grads]
with tf.name_scope('update'):
update_op = tf.group(*[
param.assign(param - grad * self.LEARNING_RATE)
| tensorflow.name_scope | 14,137 |
import tensorflow as tf
images,
model_options,
weight_decay=weight_decay,
reuse=reuse,
is_training=is_training,
fine_tune_batch_norm=fine_tune_batch_norm,
nas_training_hyper_parameters=nas_training_hyper_parameters)
if model_options.decoder_output_stride:
crop_size = model_options.crop_size
if crop_size is None:
crop_size = [tf.shape(images)[1], tf.shape(images)[2]]
features = refine_by_decoder(
features,
end_points,
crop_size=crop_size,
decoder_output_stride=model_options.decoder_output_stride,
decoder_use_separable_conv=model_options.decoder_use_separable_conv,
decoder_use_sum_merge=model_options.decoder_use_sum_merge,
decoder_filters=model_options.decoder_filters,
decoder_output_is_logits=model_options.decoder_output_is_logits,
weight_decay=weight_decay,
| tensorflow.shape | 14,138 |
import tensorflow as tf
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0)
outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
# outputs = batch_norm_for_conv2d(outputs, is_training,
# bn_decay=bn_decay, scope='bn')
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
# outputs = activation_fn(outputs)
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
def conv3d(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1, 1],
padding='SAME',
| tensorflow.layers.batch_normalization | 14,139 |
import tensorflow as tf
image = self._do_cutout(image, w, h, cutout_size)
return (image, clazz)
(images, classes) = _prepare(images, classes)
dataset = tf.data.Dataset.from_tensor_slices((images, classes)).repeat()
if is_train:
dataset = dataset.apply(tf.data.experimental.map_and_batch(map_func=_preprocess_train, batch_size=batch_size))
else:
dataset = dataset.batch(batch_size)
dataset_itr = dataset.make_initializable_iterator()
(images_batch, classes_batch) = dataset_itr.get_next()
dataset_init_op = dataset_itr.initializer
| tensorflow.data.experimental.map_and_batch | 14,140 |
import tensorflow as tf
print(f'a * tf_v = {sess.run(a * tf_v)}')
weights = tf.constant([[1.0, -2], [-3, 4]]);
regular_l1 = tf.contrib.layers.l1_regularizer(0.5)(weights)
regular_l2 = tf.contrib.layers.l2_regularizer(0.5)(weights)
print(f'\nregular_l1={sess.run(regular_l1)} regular_l2={sess.run(regular_l2)}')
val_val = sess.run(val)
print('\nval=' + str(val_val))
print(f'\nargmax_0={val_val.argmax(0)} argmax_1={val_val.argmax(1)}')
print('\ntf.argmax(val, 0)=' + str(sess.run(tf.argmax(val, 0))))
print('tf.argmax(val, 1)=' + str(sess.run(tf.argmax(val, 1))))
values, indices = sess.run(top_k)
print(f'\ntop_k: values={values}\nindices={indices}')
print(f'in_top_k = {sess.run(in_top_k)}')
sess.close()
| tensorflow.argmax | 14,141 |
import tensorflow as tf
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
| tensorflow.split | 14,142 |
import tensorflow as tf
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
| tensorflow.to_int32 | 14,143 |
import tensorflow as tf
if bias:
b = bias_variable([shape[-2]])
h = h + b
return h
def phase_shift_3d(x, r):
batch_size, d, h, w, c = x.get_shape().as_list()
x = tf.reshape(x, (batch_size, d, h, w, r, r, r))
for ns in [d, h, w]:
x = tf.split(x, ns, 1)
x = tf.concat([tf.squeeze(v, 1) for v in x], 3)
return tf.reshape(x, (batch_size, d*r, h*r, w*r, 1))
def subpixel_conv3d(x, r, out_channels):
x = tf.split(x, out_channels, 4)
x = tf.concat([phase_shift_3d(v, r) for v in x], 4)
return x
def pixel_shuffler_3d(x, r, k, out_channels, name):
in_channels = x.get_shape().as_list()[4]
| tensorflow.squeeze | 14,144 |
import tensorflow as tf
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
| tensorflow.flags.DEFINE_string | 14,145 |
from tensorflow.contrib.metrics.python.ops import set_ops
Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
ValueError: if inputs have invalid dtypes or values.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
# For SparseTensor, calculate separate count for each row.
if isinstance(labels, (ops.SparseTensor, ops.SparseTensorValue)):
labels_sizes = set_ops.set_size(labels)
return math_ops.minimum(labels_sizes, k, name=scope)
# For dense Tensor, calculate scalar count based on last dimension, and
# tile across labels shape.
labels_shape = array_ops.shape(labels)
labels_size = labels_shape[-1]
num_relevant_scalar = math_ops.minimum(labels_size, k)
return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
| tensorflow.contrib.metrics.python.ops.set_ops.set_size | 14,146 |
import tensorflow as tf
tf.app.flags.DEFINE_integer(
'save_checkpoints_secs', 3600,
'The frequency with which the model is saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
'train_image_size', 384,
'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
| tensorflow.app.flags.DEFINE_integer | 14,147 |
import tensorflow as tf
head_selection = tf.logical_and(rep_mask, head_selection)
rep_dep_tensor, rep_dep_mask, dep_org_idx = reduce_data_rep_max_len(rep_map, dep_selection)
rep_head_tensor,rep_head_mask, head_org_idx = reduce_data_rep_max_len(rep_map, head_selection)
sl_dep, sl_head = tf.shape(rep_dep_tensor)[1], tf.shape(rep_head_tensor)[1]
if keep_unselected:
| tensorflow.shape | 14,148 |
import tensorflow as tf
embed_inputs = tf.nn.embedding_lookup(self.embedding_init, self.x) ## (batch_size, seq_len, 100)
with tf.variable_scope('hidden', reuse=forward_only):
with tf.variable_scope('lstm_cell'):
lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self.num_hidden, use_peepholes=False,
# forget_bias=0.0,
activation=tf.nn.relu,
# initializer=tf.truncated_normal_initializer(stddev=0.1),
# initializer=tf.random_uniform_initializer(-0.003, 0.003),
initializer=tf.contrib.layers.xavier_initializer(),
state_is_tuple=True)
if not forward_only:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(cell=lstm_cell, output_keep_prob=self.dropout_output)
# lstm_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell] * 4, state_is_tuple=True)
if not forward_only:
embed_inputs = tf.nn.dropout(embed_inputs, keep_prob=self.dropout_input)
rnn_outputs, output_states = tf.nn.dynamic_rnn(
cell=lstm_cell,
inputs=embed_inputs,
dtype=tf.float32,
sequence_length=self.seq_len,
) ## (batch_size, seq_len, num_hidden)
| tensorflow.nn.rnn_cell.DropoutWrapper | 14,149 |
from tensorflow.python.framework import tensor_shape
logits_shape = op.inputs[0].get_shape()
input_shape = logits_shape.with_rank(2)
batch_size = input_shape[0]
# labels_shape
op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))
return [tensor_shape.vector(batch_size.value), input_shape]
@ops.RegisterShape("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsShape(op):
| tensorflow.python.framework.tensor_shape.vector | 14,150 |
import tensorflow as tf
if not mute:
pbar.update(1)
if i == max_iterations:
break
if not mute:
tf.logging.info('Finished evaluation')
if max_iterations:
pbar.close()
# List of dicts to dict of lists
| tensorflow.logging.info | 14,151 |
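The trailing comment above ("List of dicts to dict of lists") names the aggregation step that follows the evaluation loop but is cut off in the snippet. A hedged sketch of that transformation; `metrics_list` is a hypothetical name for the accumulated per-iteration results:

```python
# Hypothetical per-iteration results accumulated during evaluation.
metrics_list = [{'loss': 0.5, 'acc': 0.90}, {'loss': 0.4, 'acc': 0.92}]

# List of dicts -> dict of lists.
metrics = {k: [m[k] for m in metrics_list] for k in metrics_list[0]}
# {'loss': [0.5, 0.4], 'acc': [0.90, 0.92]}
```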
import tensorflow as tf
x_aug = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')
x_aug = tf.squeeze(x_aug)
return tf.concat([x, x_aug],axis=2)
def high_low_pass(self,x):
x_low = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')
x_low = tf.squeeze(x_low)
x_high = x - x_low
return tf.concat([x, x_high, x_low],axis=2)
def no_op(self,x):
return x
| tensorflow.squeeze | 14,152 |
from tensorflow.python.client import device_lib
replay_buffer_size=1000000,
batch_size=32,
gamma=0.99,
learning_starts=50000,
learning_freq=4,
frame_history_len=4,
target_update_freq=10000,
grad_norm_clipping=10
)
env.close()
def get_available_gpus():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
| tensorflow.python.client.device_lib.list_local_devices | 14,153 |
import tensorflow as tf
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEqual(output.shape, (batch_size,))
| tensorflow.global_variables_initializer | 14,154 |
from tensorflow.python.ops import nn
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(
None, 'average_precision', (predictions, labels, k)) as scope:
# Calculate top k indices to produce [D1, ... DN, k] tensor.
_, predictions_idx = nn.top_k(predictions, k)
predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
# Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
# prediction for each k, so we can calculate separate true positive values
# for each k.
| tensorflow.python.ops.nn.top_k | 14,155 |
import tensorflow as tf
If predict_keypoints is True the dictionary also contains:
keypoints: [batch_size, 1, num_keypoints, 2]
Raises:
ValueError: if num_predictions_per_location is not 1.
"""
if num_predictions_per_location != 1:
raise ValueError('Currently FullyConnectedBoxPredictor only supports '
'predicting a single box per class per location.')
spatial_averaged_image_features = tf.reduce_mean(image_features, [1, 2],
keep_dims=True,
name='AvgPool')
flattened_image_features = slim.flatten(spatial_averaged_image_features)
if self._use_dropout:
flattened_image_features = slim.dropout(flattened_image_features,
keep_prob=self._dropout_keep_prob,
is_training=self._is_training)
with slim.arg_scope(self._fc_hyperparams):
| tensorflow.reduce_mean | 14,156 |
from tensorflow.python.ops import math_ops
weights, array_ops.ones_like(average_precision),
name='broadcast_weights')
batch_max = math_ops.reduce_sum(broadcast_weights, name='batch_max')
max_update = state_ops.assign_add(max_var, batch_max, name='update')
| tensorflow.python.ops.math_ops.reduce_sum | 14,157 |
import tensorflow as tf
layer2, weights2 = new_conv_layer(input=layer1, name="conv2", num_input_channels=64, num_filters=64, filter_size=5,
ac_fun=tf.nn.relu, pool_ksize=[1, 3, 3, 1])
with tf.name_scope('flatten'):
layer3, num_features = flatten_layer(layer2)
# fully connected layers
with tf.variable_scope('fc1'):
layer4, weights4 = new_fc_layer(input=layer3, name="fc1", num_inputs=num_features, num_outputs=fc_size1)
# print(layer4)
with tf.variable_scope('fc2'):
logits, weights5 = new_fc_layer(input=layer4, name="fc2", num_inputs=fc_size1, num_outputs=fc_size2)
| tensorflow.variable_scope | 14,158 |
import tensorflow as tf
pred_dif = pred_flat1 - pred_flat2
geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
final_loss = tf.reduce_mean(loss)
return final_loss, cstr_pct
def contra_traj_lossV9(pred, tgt, horizon=12, margin=1):
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
| tensorflow.reduce_mean | 14,159 |
import tensorflow as tf
samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(tf.to_int64(tf.transpose(decoder_inputs)))
states = tf.TensorArray(dtype=tf.float32, size=time_steps)
weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
attns = tf.TensorArray(dtype=tf.float32, size=time_steps)
| tensorflow.TensorArray | 14,160 |
import tensorflow as tf
if filename is not None and prefix is not None:
raise ValueError('Only one of filename or prefix can be specified.')
if filename is None:
filename = prefix + tf.compat.v1.get_default_graph().get_name_scope()
# Replace non-alpha characters (excluding whitespaces) with '_'.
filename = re.sub(r'[^\w\s-]', '_', filename).strip()
# Replace whitespaces with '-'.
return re.sub(r'[-\s]+', '-', filename)
| tensorflow.compat.v1.get_default_graph | 14,161 |
import tensorflow as tf
if bias != -1:
bias = tf.get_variable('biases', [num_filters], initializer=tf.constant_initializer(bias))
variable_summaries(bias)
conv = tf.nn.bias_add(conv, bias)
tf.add_to_collection('debug_layers', conv)
return conv
@staticmethod
def _relu(name, x):
| tensorflow.add_to_collection | 14,162 |
import tensorflow as tf
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = metric_fn(
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels
)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metrics)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
| tensorflow.estimator.EstimatorSpec | 14,163 |
import tensorflow as tf
.AvgPooling('downsample', 2)
.Conv2D('conv0', 20, 5, padding='VALID')
.MaxPooling('pool0', 2)
.Conv2D('conv1', 20, 5, padding='VALID')
.FullyConnected('fc1', out_dim=32)
.FullyConnected('fct', out_dim=6, nl=tf.identity,
W_init=tf.constant_initializer(),
b_init=tf.constant_initializer([1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))())
# output 6 parameters for affine transformation
stn = tf.reshape(stn, [-1, 2, 3], name='affine') # bx2x3
stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1]) # 3 x (bx2)
coor = tf.reshape(tf.matmul(xys, stn),
[WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2])
coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords') # b h w 2
| tensorflow.constant_initializer | 14,164 |
from tensorflow.python.framework import ops
@ops.RegisterShape("CountUpTo")
def _CountUpToShape(op):
"""Shape function for the CountUpTo op."""
return [op.inputs[0].get_shape().merge_with(tensor_shape.scalar())]
@ops.RegisterShape("ScatterAdd")
@ops.RegisterShape("ScatterSub")
@ops.RegisterShape("ScatterUpdate")
def _ScatterUpdateShape(op):
"""Shape function for the sparse update ops."""
var_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
unused_updates_shape = op.inputs[2].get_shape().merge_with(
indices_shape.concatenate(var_shape[1:]))
return [var_shape]
| tensorflow.python.framework.ops.RegisterShape | 14,165 |
import tensorflow as tf
else:
# matrix vector -> tile concat
sequence_max_length = hidden.shape[1]
multipliers = tf.concat(
[[1], [sequence_max_length], [1]],
0
)
tiled_representation = tf.tile(
tf.expand_dims(dependency_final_hidden, 1),
multipliers
)
# todo future: maybe modify this with TF2 mask mechanics
sequence_length = sequence_length_3D(hidden)
mask = tf.sequence_mask(
sequence_length,
| tensorflow.expand_dims | 14,166 |
import tensorflow as tf
loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4])
landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 10])
| tensorflow.reshape | 14,167 |
import tensorflow as tf
mean_dist, mean_pred_error = tf.reduce_mean(dists), tf.reduce_mean(pred_error)
improvement = (mean_dist-mean_pred_error)/mean_dist
pairwise_improvement = tf.nn.relu(dists[1:] - pred_error)
pairwise_improvement_bool = tf.cast(pairwise_improvement > 0, pairwise_improvement.dtype)
self.pairwise_improvement_bool = pairwise_improvement_bool
metrics.append(tf.summary.scalar('training/avg_dist', mean_dist))
metrics.append(tf.summary.scalar('training/pred_dist', mean_pred_error))
metrics.append(tf.summary.scalar('training/improvement', improvement))
metrics.append(tf.summary.scalar('training/improvement_abs', tf.nn.relu(improvement)))
metrics.append(tf.summary.histogram('training/improvement_abs_hist', nut.nan_to_zero(improvement)))
metrics.append(tf.summary.scalar('training/improvement_pairwise', tf.reduce_mean(pairwise_improvement_bool)))
metrics.append(tf.summary.histogram('training/improvement_pairwise_hist', pairwise_improvement_bool))
self.eval_summs = tf.summary.merge(metrics)
def _build_embedding_saver(self, sess):
"""To use embedding visualizer data has to be stored in variable
since we would like to visualize TEST_SET, this variable should not affect
common checkpoint of the model.
Hence, we build a separate variable with a separate saver."""
embedding_shape = [int(len(self.test_set) / FLAGS.batch_size) * FLAGS.batch_size,
self.encode.get_shape().as_list()[1]]
tsv_path = os.path.join(FLAGS.logdir, 'metadata.tsv')
| tensorflow.reduce_mean | 14,168 |
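The docstring above explains that the visualization embedding is stored in its own variable with its own saver so it does not affect the model checkpoint. A minimal TF1-style sketch of that pattern (the shape and names are assumptions):

```python
import tensorflow as tf

# Separate, non-trainable variable holding the embeddings to visualize.
embedding_var = tf.Variable(tf.zeros([1024, 128]), name='test_embedding', trainable=False)
# Separate saver that checkpoints only this variable, independent of the model saver.
embedding_saver = tf.train.Saver([embedding_var], max_to_keep=1)
```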
import tensorflow as tf
def testIntegratedGradientsGetMask(self):
with tf.Graph().as_default() as graph:
x = tf.placeholder(shape=[None, 3], dtype=tf.float32)
y = 5 * x[:, 0] + x[:, 0] * x[:, 1] + tf.sin(x[:, 2])
with tf.Session() as sess:
# Calculate the value of `y` at the baseline.
x_baseline_val = np.array([[0.5, 0.8, 1.0]], dtype=np.float)
| tensorflow.sin | 14,169 |
import tensorflow as tf
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
| tensorflow.FixedLenFeature | 14,170 |
import tensorflow as tf
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
| tensorflow.FixedLenFeature | 14,171 |
import tensorflow as tf
weights=is_real_example)
return {"pred": concat1, "label_ids": concat2, "pearson": pearson,
"MSE": mse, "eval_loss": loss,}
elif task_name == "cola":
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Matthew's correlations for STS-B."""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
tp, tp_op = tf.metrics.true_positives(
predictions, label_ids, weights=is_real_example)
tn, tn_op = tf.metrics.true_negatives(
predictions, label_ids, weights=is_real_example)
fp, fp_op = tf.metrics.false_positives(
predictions, label_ids, weights=is_real_example)
fn, fn_op = tf.metrics.false_negatives(
predictions, label_ids, weights=is_real_example)
# Compute Matthew's correlation
mcc = tf.div_no_nan(
tp * tn - fp * fn,
tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5))
# Compute accuracy
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions,
weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss,
| tensorflow.metrics.false_negatives | 14,172 |
import tensorflow as tf
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
| tensorflow.expand_dims | 14,173 |
import tensorflow as tf
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
| tensorflow.concat | 14,174 |
from tensorflow.python.ops import math_ops
def _log_prob(self, x):
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.validate_args else [], x)
return (self.alpha * math_ops.log(self.beta) -
math_ops.lgamma(self.alpha) -
(self.alpha + 1.) * math_ops.log(x) - self.beta / x)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
| tensorflow.python.ops.math_ops.log | 14,175 |
import tensorflow as tf
)
# Wrap logits in a control dependency for the build discriminator
# tensors to ensure discriminator internals are built.
with tf.control_dependencies(
control_inputs=[self.build_discriminator_tensors]):
logits = tf.identity(
input=logits, name="{}_logits_identity".format(self.name)
| tensorflow.control_dependencies | 14,176 |
import tensorflow as tf
h = tf.nn.dropout(h, 0.5)
h_logits = tf.matmul(h, w_h) + b_h
| tensorflow.matmul | 14,177 |
import tensorflow as tf
tf.config.experimental.set_memory_growth(gpu, True)
else:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
def pytest_generate_tests(metafunc):
# This function generates the list of tests for pytest, based
| tensorflow.Session | 14,178 |
import tensorflow as tf
loss = cross_entropy + loc_loss + params['weight_decay'] * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
if 'batch_normalization' not in v.name])
total_loss = tf.identity(loss, name='total_loss')
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
[int(_) for _ in params['decay_boundaries']],
lr_values)
truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype))
# Create a tensor named learning_rate for logging purposes.
tf.identity(truncated_learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', truncated_learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
momentum=params['momentum'])
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
else:
train_op = None
cls_accuracy = tf.metrics.accuracy(glabels, predictions['classes'])
metrics = {'cls_accuracy': cls_accuracy}
| tensorflow.summary.scalar | 14,179 |
import tensorflow as tf
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
| tensorflow.matmul | 14,180 |
import tensorflow as tf
x_centers, y_centers = tf.meshgrid(x_centers, y_centers)
widths, x_centers = tf.meshgrid(widths, x_centers)
heights, y_centers = tf.meshgrid(heights, y_centers)
anchor_centers = tf.stack([x_centers, y_centers], axis=2)
anchor_centers = tf.reshape(anchor_centers, [-1, 2])
anchor_sizes = tf.stack([widths, heights], axis=2)
anchor_sizes = tf.reshape(anchor_sizes, [-1, 2])
anchors = tf.concat([anchor_centers - .5 * anchor_sizes,
anchor_centers + .5 * anchor_sizes], 1)
# anchors = box_utils.convert_yxyx_to_xyxy_format(anchors)
return anchors
if __name__ == '__main__':
anchor_size = [128, 128]
anchor_stride = [8, 8]
anchor_offset = [0, 0]
| tensorflow.concat | 14,181 |
import tensorflow as tf
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
| tensorflow.expand_dims | 14,182 |
import tensorflow as tf
self.IRK_times = tmp[q**2+q:]
# tf placeholders and graph
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))
self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))
self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))
self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))
self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients
self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients
self.U0_pred = self.net_U0(self.x0_tf) # N0 x q
self.U1_pred = self.net_U1(self.x1_tf) # N1 x q
self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \
tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred))
| tensorflow.placeholder | 14,183 |
import tensorflow as tf
v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
w_c = None
if options.use_coverage:
with variable_scope.variable_scope("coverage"):
w_c = variable_scope.get_variable("w_c", [options.attention_vec_size])
w_c = tf.expand_dims(tf.expand_dims(w_c, axis=0), axis=0)
word_t_representation = self.embedding_lookup(word_t)
(state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t) = self.one_step_decoder(
state_t_1, context_t_1, coverage_t_1, word_t_representation, encoder_states, encoder_features,
passage_word_idx, passage_mask, v, w_c, word_vocab)
vocab_scores = tf.log(output_t)
greedy_prediction = tf.reshape(tf.argmax(output_t, 1),[-1]) # calculate greedy
multinomial_prediction = tf.reshape(tf.multinomial(vocab_scores, 1),[-1]) # calculate multinomial
topk_log_probs, topk_ids = tf.nn.top_k(vocab_scores, beam_size) # calculate topK
return (state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t, topk_log_probs, topk_ids,
greedy_prediction, multinomial_prediction)
def merge_prob_dist_for_one_step(self, vocab_dist, attn_dist, p_gen, passage_word_idx, passage_mask=None):
'''
max_phrase_size: an input placeholder indicating the maximum phrase size inside this batch
vocab_dist: [batch_size, vsize]
attn_dist: [batch_size, passage_length]
p_gen: [batch_size, 1]
| tensorflow.multinomial | 14,184 |
import tensorflow as tf
def conv2d(self,name, x, W, stride, bias):
with tf.variable_scope(name) as scope:
| tensorflow.variable_scope | 14,185 |
from tensorflow.python.framework import ops
ops.RegisterShape("L2Loss")(common_shapes.scalar_shape)
ops.RegisterShape("LRN")(common_shapes.unchanged_shape_with_rank(4))
@ops.RegisterShape("LRNGrad")
def _LRNGradShape(op):
| tensorflow.python.framework.ops.RegisterShape | 14,186 |
import tensorflow as tf
import tensorflow as tf
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Runs the op.
| tensorflow.constant | 14,187 |
import tensorflow as tf
# -----------------------------------------------------------------------------
# ATTRIBUTION METHODS
# -----------------------------------------------------------------------------
"""
Returns zero attributions. For testing only.
"""
class DummyZero(GradientBasedMethod):
def get_symbolic_attribution(self,):
return tf.gradients(ys=self.T, xs=self.X)
@classmethod
def nonlinearity_grad_override(cls, op, grad):
input = op.inputs[0]
return tf.zeros_like(input)
"""
Saliency maps
| tensorflow.gradients | 14,188 |
import tensorflow as tf
dataset = utils.read_tfrecord_files(input_files, **kwargs)
d = cls(examples=None, **kwargs)
# parse example
features = {
d.input_ids: tf.io.VarLenFeature(tf.int64),
d.token_type_ids: tf.io.VarLenFeature(tf.int64),
d.attention_mask: tf.io.VarLenFeature(tf.int64),
d.labels: tf.io.VarLenFeature(tf.int64),
}
dataset = dataset.map(
lambda x: tf.io.parse_example(x, features),
num_parallel_calls=utils.AUTOTUNE,
).prefetch(utils.AUTOTUNE)
dataset = dataset.map(
lambda x: (
tf.cast(tf.sparse.to_dense(x[d.input_ids]), tf.int32),
tf.cast(tf.sparse.to_dense(x[d.token_type_ids]), tf.int32),
tf.cast(tf.sparse.to_dense(x[d.attention_mask]), tf.int32),
tf.cast(tf.sparse.to_dense(x[d.labels]), tf.int32),
),
num_parallel_calls=utils.AUTOTUNE,
).prefetch(utils.AUTOTUNE)
# do transformation
return d(dataset, **kwargs)
def parse_examples_to_dataset(self):
if not self.examples:
logging.info("self.examples is empty or None, skipped.")
return None
input_ids, token_type_ids, attention_mask, labels = [], [], [], []
| tensorflow.sparse.to_dense | 14,189 |
import tensorflow as tf
import argparse
import numpy as np
import tensorflow as tf
INPUT_DIM = 5
OUTPUT_DIM = 3
def generator_fn(generator_inputs):
outputs = tf.layers.dense(generator_inputs, OUTPUT_DIM)
return outputs
def discriminator_fn(data, generator_inputs):
outputs = tf.layers.dense(data, 1)
return outputs
| tensorflow.layers.dense | 14,190 |
import tensorflow as tf
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Prediction operation
prediction = tf.sigmoid(model_output)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
# Intitialize Variables
init = tf.global_variables_initializer()
sess.run(init)
# Start Logistic Regression
print('Starting Training Over {} Sentences.'.format(len(texts_train)))
loss_vec = []
train_acc_all = []
train_acc_avg = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_train)):
y_data = [[target_train[ix]]]
| tensorflow.global_variables_initializer | 14,191 |
import tensorflow as tf
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
output_projection=(w, b))
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.global_variables_initializer()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
| tensorflow.variable_scope | 14,192 |
from tensorflow.python.framework import tensor_util
@ops.RegisterShape("DepthwiseConv2dNativeBackpropFilter")
def _DepthwiseConv2dNativeBackpropFilterShape(op):
"""Shape function for the DepthwiseConv2dNativeBackpropFilter op."""
filter_shape = tensor_util.constant_value(op.inputs[1])
if filter_shape is not None:
return [tensor_shape.TensorShape(filter_shape.tolist())]
else:
return [tensor_shape.unknown_shape(ndims=4)]
| tensorflow.python.framework.tensor_util.constant_value | 14,193 |
import tensorflow as tf
num_filters = d,
num_heads = nh,
seq_len = self.c_len,
scope = "Model_Encoder",
bias = False,
reuse = True if i > 0 else None,
dropout = self.dropout)
)
with tf.variable_scope("Output_Layer"):
start_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[2]],axis = -1),1, bias = False, name = "start_pointer"),-1)
end_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[3]],axis = -1),1, bias = False, name = "end_pointer"), -1)
self.logits = [mask_logits(start_logits, mask = self.c_mask),
mask_logits(end_logits, mask = self.c_mask)]
logits1, logits2 = [l for l in self.logits]
outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
| tensorflow.variable_scope | 14,194 |
from tensorflow.python.framework import ops as _ops
def graph_def_version(name=None):
r"""TODO: add doc.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
result = _op_def_lib.apply_op("GraphDefVersion", name=name)
return result
_ops.RegisterShape("GraphDefVersion")(None)
_kernel_label_outputs = ["result"]
def kernel_label(name=None):
r"""TODO: add doc.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `string`.
"""
result = _op_def_lib.apply_op("KernelLabel", name=name)
return result
| tensorflow.python.framework.ops.RegisterShape | 14,195 |
from tensorflow.python.ops import math_ops
labels, labels, weights=weights, name='variance_labels')
pearson_r = _safe_div(
cov,
math_ops.mul(math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
'pearson_r')
with ops.control_dependencies(
[update_cov, update_var_predictions, update_var_labels]):
| tensorflow.python.ops.math_ops.sqrt | 14,196 |
import tensorflow as tf
h1 -> conv -> h2 -> conv -> h3 -> fb -> h2 h2 -> fb -> h1 h1 h1
"""
# LAYER 1
_, l1_h2 = self.hgru_ops(
i0=i0,
x=x,
h2=l1_h2,
layer='h1',
layer_idx=0)
# Intermediate FF
if self.batch_norm:
with tf.variable_scope(
'l1_h2_bn',
reuse=self.scope_reuse) as scope:
l1_h2 = tf.contrib.layers.batch_norm(
inputs=l1_h2,
scale=True,
center=True,
fused=True,
renorm=False,
param_initializers=self.param_initializer,
updates_collections=None,
scope=scope,
reuse=self.reuse,
is_training=self.train)
| tensorflow.variable_scope | 14,197 |
import tensorflow as tf
self.generator = generator
self.model_pred = importlib.import_module(self.flags.model) # import network module
self.use_trans_loss = self.flags.use_transformation_loss
self.use_input_trans = self.flags.use_input_transform
self.use_feature_trans = self.flags.use_feature_transform
self.is_training_pl = tf.placeholder(tf.bool, shape=(), name='is_training_pl')
self.bn_decay = train_rotation_prediction.get_bn_decay(batch)
self.get_pred = partial(self.model_pred.get_model,
is_training=self.is_training_pl,
bn_decay=self.bn_decay,
| tensorflow.placeholder | 14,198 |
import tensorflow as tf
imshape: Tensor with shape (2, )
where the first value is height and the next is width.
Returns
Tensor with same shape as bboxes but making sure that none
of the bboxes are outside the image.
"""
with tf.name_scope('BoundingBoxTransform/clip_bboxes'):
bboxes = tf.cast(bboxes, dtype=tf.float32)
imshape = tf.cast(imshape, dtype=tf.float32)
x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
width = imshape[1]
height = imshape[0]
x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)
x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)
y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)
y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)
| tensorflow.cast | 14,199 |