seed (stringlengths 25–2.89k) | seed_api (stringlengths 14–102) | index (int64 0–14.8k) |
---|---|---|
import tensorflow as tf
tf.global_variables_initializer().run()
| tensorflow.global_variables_initializer | 13,200 |
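For context, `tf.global_variables_initializer().run()` only works while a default session is active; a minimal sketch of the equivalent `sess.run(...)` pattern, assuming TF 1.x (or `tf.compat.v1`):

```python
import tensorflow as tf  # TF 1.x graph-mode API

w = tf.Variable(tf.zeros([2, 2]), name="w")
init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)   # same effect as init_op.run() inside a default session
    print(sess.run(w))
```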
import tensorflow as tf
attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask') # bs,bn,bl,bl
# attention
f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
dependent_head = linear(
rep_map, 2 * ivec, False, 0., 'linear_dependent_head', False, wd, keep_prob, is_train) # bs,bn,bl,2vec
dependent, head = tf.split(dependent_head, 2, 3)
dependent_etd = tf.expand_dims(dependent, 2) # bs,bn,1,bl,vec
head_etd = tf.expand_dims(head, 3) # bs,bn,bl,1,vec
logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0) # bs,bn,bl,bl,vec
logits_masked = exp_mask_for_high_rank(logits, attn_mask)
attn_score = tf.nn.softmax(logits_masked, 3) # bs,bn,bl,bl,vec
attn_score = mask_for_high_rank(attn_score, attn_mask) # bs,bn,bl,bl,vec
self_attn_result = tf.reduce_sum(attn_score * rep_map_tile, 3) # bs,bn,bl,vec
with tf.variable_scope('source2token_self_attn'):
inter_block_logits = bn_dense_layer(self_attn_result, ivec, True, 0., 'bn_dense_map', 'linear',
False, wd, keep_prob, is_train) # bs,bn,bl,vec
inter_block_logits_masked = exp_mask_for_high_rank(inter_block_logits, rep_mask_split) # bs,bn,bl,vec
inter_block_soft = tf.nn.softmax(inter_block_logits_masked, 2) # bs,bn,bl,vec
inter_block_attn_output = tf.reduce_sum(self_attn_result * inter_block_soft, 2) # bs,bn,vec
with tf.variable_scope('self_attn_inter_block'):
inter_block_attn_output_mask = tf.cast(tf.ones([bs, bn], tf.int32), tf.bool)
block_ct_res = directional_attention_with_dense(
inter_block_attn_output, inter_block_attn_output_mask, direction, 'disa',
| tensorflow.reduce_sum | 13,201 |
import tensorflow as tf
num_cpu_threads=4,
batch_size=None,
use_hvd=True):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
# batch_size = params["batch_size"]
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
| tensorflow.FixedLenFeature | 13,202 |
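The `name_to_features` dictionary above is typically handed to `tf.parse_single_example` when decoding TFRecords; a minimal sketch assuming TF 1.x, with a hypothetical sequence length and file name:

```python
import tensorflow as tf  # TF 1.x-style API, as in the snippet above

max_seq_length = 128  # hypothetical value, for illustration only

name_to_features = {
    "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
    "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
}

def _decode_record(record):
    # Parse one serialized tf.Example into a dict of fixed-length int64 tensors.
    return tf.parse_single_example(record, name_to_features)

dataset = tf.data.TFRecordDataset(["train.tfrecord"])  # hypothetical file
dataset = dataset.map(_decode_record).batch(32)
```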
from tensorflow.python.ops import math_ops
dimensions of `predictions_idx` and `labels`.
name: Name of operation.
Returns:
A [D1, ... DN] `Tensor` of true positive counts.
"""
with ops.name_scope(name, 'true_positives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(
labels, predictions_idx, class_id)
tp = set_ops.set_size(set_ops.set_intersection(predictions_idx, labels))
tp = math_ops.to_double(tp)
if weights is not None:
weights = math_ops.to_double(weights)
tp = math_ops.mul(tp, weights)
return tp
def _streaming_sparse_true_positive_at_k(predictions_idx,
labels,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step true positives for recall@k and precision@k.
| tensorflow.python.ops.math_ops.to_double | 13,203 |
from tensorflow.python.ops import math_ops
return math_ops.select(
math_ops.greater(true_positives + false_positives, 0),
| tensorflow.python.ops.math_ops.greater | 13,204 |
import tensorflow as tf
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
| tensorflow.logging.info | 13,205 |
import tensorflow as tf
noise_shape = [1, size] if decoder.pervasive_dropout else [tf.shape(input_)[0], size]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.embedding_keep_prob,
| tensorflow.nn.dropout | 13,206 |
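The `noise_shape` argument above controls how the dropout mask is broadcast; a minimal sketch of the effect, assuming TF 1.x `keep_prob` semantics:

```python
import tensorflow as tf

x = tf.ones([4, 6])
# noise_shape=[1, 6]: a single mask is sampled and shared across the batch
# dimension ("pervasive" dropout); the default samples a mask per element.
y = tf.nn.dropout(x, keep_prob=0.5, noise_shape=[1, 6])

with tf.Session() as sess:
    print(sess.run(y))  # every row has the same columns zeroed out
```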
import tensorflow as tf
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
| tensorflow.nn.rnn | 13,207 |
import tensorflow as tf
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim],
initializer=tf.constant_initializer(0.0))
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
return tf.matmul(tf.reshape(input_var,[-1,dims]),w) + b
else :
return tf.matmul(input_var,w)+b
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormLinear(object):
def __init__(self,name,input_dim,output_dim,stddev=0.02,epsilon=1e-10) :
with tf.variable_scope(name) :
self.v = tf.get_variable('v',[input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.b = tf.get_variable('b',[output_dim],
| tensorflow.matmul | 13,208 |
import tensorflow as tf
# channels dim
im_flat = tf.reshape(im, tf.stack([-1, channels]))
im_flat = tf.to_float(im_flat)
i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0)
i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1)
i_z0_y1_x0 = tf.gather(im_flat, idx_z0_y1_x0)
i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1)
i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0)
i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1)
i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0)
i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1)
# Finally calculate interpolated values.
x0_f = tf.to_float(x0)
x1_f = tf.to_float(x1)
y0_f = tf.to_float(y0)
y1_f = tf.to_float(y1)
z0_f = tf.to_float(z0)
z1_f = tf.to_float(z1)
# Check the out-of-boundary case.
x0_valid = tf.to_float(
tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))
x1_valid = tf.to_float(
tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0))
y0_valid = tf.to_float(
tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0))
y1_valid = tf.to_float(
tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))
z0_valid = tf.to_float(
tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))
| tensorflow.to_float | 13,209 |
import tensorflow as tf
for filename in tf.gfile.ListDirectory(FLAGS.validation_input_dir):
validation_input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.validation_input_dir, filename)))
tf.logging.info("*** Input Validation Files ***")
for input_file in validation_input_files:
tf.logging.info(" %s" % input_file)
config = tf.ConfigProto()
if FLAGS.xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
| tensorflow.logging.info | 13,210 |
import tensorflow as tf
"""Benchmark Graph performance."""
hparams = get_default_hparams()
tf.enable_resource_variables()
for sample_size in [10, 25, 50, 100, 200]:
hparams.n_samples = sample_size
tf.reset_default_graph()
with tf.Graph().as_default():
energy_fn, _, _ = l2hmc.get_scg_energy_fn()
x = tf.random_normal([hparams.n_samples, hparams.x_dim],
dtype=tf.float32)
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
loss, _, _ = l2hmc.compute_loss(dynamics, x)
| tensorflow.random_normal | 13,211 |
import tensorflow as tf
x = tf.image.random_contrast(x, lower=lower, upper=upper)
x = tf.image.random_saturation(x, lower=lower, upper=upper)
| tensorflow.image.random_saturation | 13,212 |
import tensorflow as tf
padding = padding.upper()
if isinstance(kernel_size, list):
filter_shape = [kernel_size[0], kernel_size[1]] + [in_channel / split, out_channel]
else:
filter_shape = [kernel_size, kernel_size] + [in_channel / split, out_channel]
if isinstance(stride, list):
strides = [1, stride[0], stride[1], 1] if data_format == 'NHWC' \
else [1, 1, stride[0], stride[1]]
else:
strides = [1, stride, stride, 1] if data_format == 'NHWC' \
else [1, 1, stride, stride]
if w_init is None:
w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
w = tf.get_variable('W', filter_shape, initializer=w_init)
b = None
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=b_init)
if split == 1:
conv = tf.nn.conv2d(inputdata, w, strides, padding, data_format=data_format)
else:
inputs = tf.split(inputdata, split, channel_axis)
kernels = tf.split(w, split, 3)
| tensorflow.contrib.layers.variance_scaling_initializer | 13,213 |
from tensorflow.contrib import layers
parent_scope = "dnn"
input_layer_partitioner = (partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas, min_slice_size=64 << 20))
input_layer_scope = parent_scope + "/input_from_feature_columns"
with variable_scope.variable_scope(
input_layer_scope,
values=list(six.itervalues(features)),
partitioner=input_layer_partitioner) as scope:
net = layers.input_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
weight_collections=[parent_scope],
scope=scope)
hidden_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas))
| tensorflow.contrib.layers.input_from_feature_columns | 13,214 |
import tensorflow as tf
#output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
output = facts * tf.expand_dims(alphas, -1)
output = tf.reshape(output, tf.shape(facts))
# output = output / (facts.get_shape().as_list()[-1] ** 0.5)
if not return_alphas:
return output
else:
return output, alphas
def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
| tensorflow.concat | 13,215 |
import tensorflow as tf
x = tf.layers.conv2d(
x, 64, (4, 4), strides=(2, 2), name="conv2",
activation=common_layers.belu, padding="SAME")
x = tf.nn.dropout(x, rate=dropout)
x = tf.layers.conv2d(
x, 128, (4, 4), strides=(2, 2), name="conv3",
activation=common_layers.belu, padding="SAME")
flat_x = tf.layers.flatten(x)
flat_x = tf.nn.dropout(flat_x, rate=dropout)
x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name="dense1")
logits = tf.layers.dense(
x, self.hparams.problem.num_actions, name="dense2"
)
logits = tf.expand_dims(logits, axis=1)
logits = clip_logits(logits, self.hparams)
value = tf.layers.dense(x, 1, name="value")
return {"target_policy": logits, "target_value": value}
| tensorflow.layers.dense | 13,216 |
import tensorflow as tf
with tf.compat.v1.Session() as sess:
sess_graph = tf.compat.v1.Session(graph=graph, config=config)
while num_remaining_images >= batch_size:
# Reads and preprocess data
np_images, np_labels = sess.run([images[0], labels[0]])
np_labels -= 1
#print(np_labels.shape)
num_processed_images += batch_size
num_remaining_images -= batch_size
# Compute inference on the preprocessed data
predictions1 = sess_graph.run(output_tensor,
{input_tensor: np_images})
#predictions = predictions +1
#print(predictions1)
predictions2 = tf.argmax(input=predictions1, axis=1)
predictions = sess.run(predictions2)
top1 += batch_size - (np.count_nonzero(predictions - np_labels))
#print(top1/num_processed_images)
#print(num_processed_images)
#print(predictions)
#accuracy1 = tf.reduce_sum(
# tf.nn.in_top_k(tf.cast(tf.Variable(predictions2), tf.float32),
# tf.cast((tf.constant(np_labels), 1), tf.float32)))
accuracy1 = tf.reduce_sum(
input_tensor=tf.cast(tf.nn.in_top_k(predictions=tf.constant(predictions1),
targets=tf.constant(np_labels), k=1), tf.float32))
accuracy5 = tf.reduce_sum(
input_tensor=tf.cast(tf.nn.in_top_k(predictions=tf.constant(predictions1),
| tensorflow.argmax | 13,217 |
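For reference, `tf.nn.in_top_k` compares a batch of prediction scores against integer labels; a minimal sketch with made-up data, assuming TF 1.x graph mode:

```python
import numpy as np
import tensorflow as tf

predictions = tf.constant(np.random.rand(8, 1000).astype(np.float32))  # hypothetical scores
labels = tf.constant(np.random.randint(0, 1000, size=8).astype(np.int32))

top1_hits = tf.reduce_sum(tf.cast(tf.nn.in_top_k(predictions, labels, k=1), tf.float32))
top5_hits = tf.reduce_sum(tf.cast(tf.nn.in_top_k(predictions, labels, k=5), tf.float32))

with tf.Session() as sess:
    print(sess.run([top1_hits, top5_hits]))
```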
import tensorflow as tf
def func1():
# execute at training time
batch_mean, batch_var = tf.nn.moments(x, range(len(shape) - 1))
update_mean = tf.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean))
update_var = tf.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var))
with tf.control_dependencies([update_mean, update_var]):
return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)
def func2():
# execute at test time
| tensorflow.control_dependencies | 13,218 |
import tensorflow as tf
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)
################################################################################
| tensorflow.variance_scaling_initializer | 13,219 |
import tensorflow as tf
# Dividing the rois by w and h gives their positions on the feature map
x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
# Won't be backpropagated to rois anyway, but to save time
bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
# 'roi_pooling_size', 7
pre_pool_size = cfg.FLAGS.roi_pooling_size * 2
# Crop the parts of the feature map covered by the rois, then resize them to 14*14
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")
return slim.max_pool2d(crops, [2, 2], padding='SAME')
def _dropout_layer(self, bottom, name, ratio=0.5):
return tf.nn.dropout(bottom, ratio, name=name)
def _anchor_target_layer(self, rpn_cls_score, name):
with tf.variable_scope(name):
# The indices here are with respect to all anchors
# (1, 1, A * height, width)
# (1, height, width, A * 4)
# (1, height, width, A * 4)
# (1, height, width, A * 4)
rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func(
anchor_target_layer,
[rpn_cls_score, self._gt_boxes, self._im_info, self._feat_stride, self._anchors, self._num_anchors],
[tf.float32, tf.float32, tf.float32, tf.float32])
#self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5]) # gt_boxes: coordinates after scaling, plus the class label
| tensorflow.nn.dropout | 13,220 |
import tensorflow as tf
class TrainR3DetGWD(Train):
def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects):
return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \
gtboxes_and_label_r[:int(num_objects), :].astype(np.float32)
def main(self):
with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
global_step = slim.get_or_create_global_step()
lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu)
tf.summary.scalar('lr', lr)
optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
r3det_gwd = build_whole_network.DetectionNetworkR3DetGWD(cfgs=self.cfgs,
is_training=True)
with tf.name_scope('get_batch'):
if cfgs.IMAGE_PYRAMID:
shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
shortside_len = tf.random_shuffle(shortside_len_list)[0]
else:
shortside_len = cfgs.IMG_SHORT_SIDE_LEN
img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \
self.reader.next_batch(dataset_name=cfgs.DATASET_NAME,
batch_size=cfgs.BATCH_SIZE * num_gpu,
| tensorflow.train.MomentumOptimizer | 13,221 |
import tensorflow as tf
input_size = inputs.get_shape()[2].value
dtype = inputs.dtype
x = inputs
x0 = tf.transpose(x, perm=[1, 2,0]) # (num_nodes, total_arg_size, batch_size)
x0 = tf.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
x = tf.expand_dims(x0, axis=0)
scope = tf.get_variable_scope()
with tf.variable_scope(scope):
if self._max_diffusion_step == 0:
pass
else:
for support in self._supports:
x1 = tf.sparse_tensor_dense_matmul(support, x0)
x = self._concat(x, x1)
| tensorflow.get_variable_scope | 13,222 |
import tensorflow as tf
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs):
weights = tf.one_hot(encoder_input_length - 1, tf.shape(hidden_states)[1])
weights = tf.to_float(weights)
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None, context=None, **kwargs):
batch_size = tf.shape(state)[0]
attn_length = tf.shape(hidden_states)[1]
if context is not None and encoder.use_context:
state = tf.concat([state, context], axis=1)
state_size = state.get_shape()[1].value
with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
encoder_input_length = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
if pos is not None:
pos = tf.reshape(pos, [-1, 1])
| tensorflow.shape | 13,223 |
import tensorflow as tf
tt_cores = list(tt_cores)
if convert_to_tensors:
with tf.name_scope(name):
for i in range(len(tt_cores)):
name = "core%d" % i
tt_cores[i] = tf.convert_to_tensor(tt_cores[i], name=name)
if not _are_tt_cores_valid(tt_cores, shape, tt_ranks):
raise ValueError('The tt_cores provided to TensorTrain constructor are '
'not valid, have different dtypes, or are inconsistent '
| tensorflow.convert_to_tensor | 13,224 |
import tensorflow as tf
return outputs
cpn_backbone = cpn.cascaded_pyramid_net
if 'seresnext50' in FLAGS.backbone:
cpn_backbone = cpn.xt_cascaded_pyramid_net
def keypoint_model_fn(features, labels, mode, params):
targets = labels['targets']
shape = labels['shape']
classid = labels['classid']
key_v = labels['key_v']
isvalid = labels['isvalid']
norm_value = labels['norm_value']
cur_batch_size = tf.shape(features)[0]
#features= tf.ones_like(features)
with tf.variable_scope(params['model_scope'], default_name=None, values=[features], reuse=tf.AUTO_REUSE):
pred_outputs = cpn_backbone(features, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], params['heatmap_size'], (mode == tf.estimator.ModeKeys.TRAIN), params['data_format'])
if params['data_format'] == 'channels_last':
pred_outputs = [tf.transpose(pred_outputs[ind], [0, 3, 1, 2], name='outputs_trans_{}'.format(ind)) for ind in list(range(len(pred_outputs)))]
score_map = pred_outputs[-1]
pred_x, pred_y = get_keypoint(features, targets, score_map, params['heatmap_size'], params['train_image_size'], params['train_image_size'], (params['model_scope'] if 'all' not in params['model_scope'] else '*'), clip_at_zero=True, data_format=params['data_format'])
# this is important!!!
| tensorflow.shape | 13,225 |
import tensorflow as tf
if not white:
A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False)
# construct the conditional mean
fmean = tf.matmul(A, f, transpose_a=True)
if q_sqrt is not None:
if q_sqrt.get_shape().ndims == 2:
LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # R x M x N
elif q_sqrt.get_shape().ndims == 3:
L = tf.matrix_band_part(q_sqrt, -1, 0) # R x M x M
A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1]))
LTA = tf.matmul(L, A_tiled, transpose_a=True) # R x M x N
else: # pragma: no cover
raise ValueError("Bad dimension for q_sqrt: %s" %
str(q_sqrt.get_shape().ndims))
| tensorflow.transpose | 13,226 |
import tensorflow as tf
total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu)
if i == num_gpu - 1:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
# weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
total_losses = total_losses + tf.add_n(regularization_losses)
tf.get_variable_scope().reuse_variables()
grads = optimizer.compute_gradients(total_losses)
if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:
grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)
| tensorflow.add_n | 13,227 |
import tensorflow as tf
labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
# Create tensors of pairwise differences for logits and labels, and
# pairwise products of weights. These have shape
# [batch_size, batch_size, num_labels].
logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)
labels_difference = tf.expand_dims(labels, 0) - tf.expand_dims(labels, 1)
weights_product = tf.expand_dims(weights, 0) * tf.expand_dims(weights, 1)
signed_logits_difference = labels_difference * logits_difference
raw_loss = losses_utils.weighted_surrogate_loss(
labels=tf.ones_like(signed_logits_difference),
logits=signed_logits_difference,
surrogate_type=surrogate_type)
| tensorflow.expand_dims | 13,228 |
import tensorflow as tf
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", "Model_zoo/", "Path to vgg model mat")
| tensorflow.flags.DEFINE_string | 13,229 |
import tensorflow as tf
loss += self.helper_dst.calc_loss(logits, logits_dst)
tf.summary.scalar('loss', loss)
for key, value in metrics.items():
tf.summary.scalar(key, value)
# learning rate schedule
self.global_step = tf.train.get_or_create_global_step()
lrn_rate, self.nb_iters_train = self.setup_lrn_rate(self.global_step)
# overall pruning ratios of trainable & maskable variables
pr_trainable = calc_prune_ratio(self.trainable_vars)
pr_maskable = calc_prune_ratio(self.maskable_vars)
| tensorflow.train.get_or_create_global_step | 13,230 |
import tensorflow as tf
reg_loss = reg_decay * tf.add_n(reg_losses)
self._mark_for_monitoring('reg_loss', reg_loss)
# Add loss from auxiliary logits
aux_loss = tf.constant(0, dtype=tf.float32)
for aux_logits in aux_logits_list:
log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=aux_logits, labels=classes)
aux_loss += aux_loss_mul * tf.reduce_mean(log_probs)
total_loss = loss + reg_loss + aux_loss
return total_loss
def _add_global_avg_pool(self, X, in_w, in_h, in_ch):
X = tf.nn.relu(X)
X = tf.reduce_mean(X, (1, 2))
X = tf.reshape(X, (-1, in_ch)) # Sanity shape check
return X
def _count_model_parameters(self):
tf_trainable_vars = tf.trainable_variables()
num_params = 0
# utils.logger.log('Model parameters:')
for var in tf_trainable_vars:
# utils.logger.log(str(var))
num_params += np.prod([dim.value for dim in var.get_shape()])
utils.logger.log('Model has {} parameters'.format(num_params))
return num_params
| tensorflow.reduce_mean | 13,231 |
import tensorflow as tf
def testNoInput(self):
with self.test_session():
x, = tf.py_func(lambda: 42.0, [], [tf.float64])
self.assertAllClose(x.eval(), 42.0)
def testCleanup(self):
for _ in xrange(1000):
g = tf.Graph()
with g.as_default():
c = tf.constant([1.], tf.float32)
_ = tf.py_func(lambda x: x + 1, [c], [tf.float32])
self.assertTrue(script_ops._py_funcs.size() < 100)
def testError(self):
| tensorflow.Graph | 13,232 |
import tensorflow as tf
InputExample(unique_id, line))
unique_id += 1
return examples
def model_fn_builder(bert_config, init_checkpoint, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
model = modeling.BertModel(
| tensorflow.logging.info | 13,233 |
import tensorflow as tf
print("*" * 80)
print(f"Step: {step}, loss: {loss_v}, embedding_vector:\n{emb_vector_v}")
sok_results.append(emb_vector_v)
sess.run(save_op)
name = list()
for embedding_layer in sok_sparse_demo.embedding_layers:
name.append(embedding_layer.embedding_variable.m_var_name)
return sok_results, name
def get_tf_results(args, init_tensors, *random_samples):
graph = tf.Graph()
with graph.as_default():
tf_sparse_demo = TFDemo(vocabulary_size=args.max_vocabulary_size_per_gpu * args.gpu_num,
embedding_vec_size=args.embedding_vec_size,
combiner=args.combiner,
slot_num=args.slot_num,
max_nnz=args.max_nnz,
use_hashtable=args.use_hashtable,
num_of_dense_layers=0)
optimizer = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
| tensorflow.Graph | 13,234 |
import tensorflow as tf
)
run_config = tf.estimator.RunConfig(
| tensorflow.estimator.RunConfig | 13,235 |
import tensorflow as tf
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
# Now the computation.
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
if len(args) == 1:
res = tf.matmul(args[0], matrix)
else:
res = tf.matmul(tf.concat(values=args, axis=1), matrix)
if not bias:
return res
bias_term = tf.get_variable("Bias", [output_size], initializer=tf.constant_initializer(bias_start))
return res + bias_term
def _clip_and_normalize(word_probs, epsilon):
| tensorflow.variable_scope | 13,236 |
import tensorflow as tf
inputs = tf.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = mobilenet_v1.mobilenet_v1(
inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = mobilenet_v1.mobilenet_v1(
inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
| tensorflow.random_uniform | 13,237 |
import tensorflow as tf
with tf.variable_scope('anchor_generator'):
if offset is None:
offset = [stride[0]/2, stride[1]/2]
features_width = tf.cast(features_width, tf.int32)
features_height = tf.cast(features_height, tf.int32)
scales = tf.convert_to_tensor(scales, dtype=tf.float32)
ratios = tf.convert_to_tensor(ratios, dtype=tf.float32)
offset = tf.convert_to_tensor(offset, dtype=tf.float32)
scales_grid, ratios_grid = tf.meshgrid(scales,
ratios)
scales_grid = tf.reshape(scales_grid, [-1, 1])
ratios_grid = tf.reshape(ratios_grid, [-1, 1])
ratio_sqrts = tf.sqrt(ratios_grid)
heights = scales_grid / ratio_sqrts * base_size[1]
widths = scales_grid * ratio_sqrts * base_size[0]
| tensorflow.meshgrid | 13,238 |
import tensorflow as tf
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
| tensorflow.gfile.Open | 13,239 |
import tensorflow as tf
# def feature_encoders(self):
# return {"inputs": text_encoder.TextEncoder(), "targets": text_encoder.TextEncoder()}
def example_reading_spec(self):
data_fields = {"inputs": tf.VarLenFeature(tf.int64), "targets": tf.VarLenFeature(tf.int64)}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
| tensorflow.VarLenFeature | 13,240 |
import tensorflow as tf
sample_func = sample_pair(batch)
def sample_compute(_):
pairs = sample_func()
loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio)
pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32)
p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
lambda: tf.print('csrt acc ', [pct]),
lambda: tf.no_op())
with tf.control_dependencies([p]):
return tf.reduce_mean(loss)
loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample), dtype=tf.float32,
| tensorflow.random_uniform | 13,241 |
import tensorflow as tf
def testRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
_, enc_state = tf.nn.rnn(
| tensorflow.constant_initializer | 13,242 |
import tensorflow as tf
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
| tensorflow.to_int32 | 13,243 |
import tensorflow as tf
self.c_mask = tf.cast(self.c, tf.bool)
self.q_mask = tf.cast(self.q, tf.bool)
self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
| tensorflow.cast | 13,244 |
import tensorflow as tf
rel_embedding_shape = [rel_cnt, self.embedding_size * self.embedding_size]
entity_init = tf.truncated_normal(entity_embedding_shape, stddev=init_sd)
rel_init = tf.truncated_normal(rel_embedding_shape, stddev=init_sd)
if self.maxnorm is not None:
# Ensure maxnorm constraints are initially satisfied
entity_init = dense_maxnorm(entity_init, self.maxnorm)
rel_init = dense_maxnorm(rel_init, self.maxnorm)
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
# Reshape rel_embed into square D x D matrices
rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))
# Reshape head_embed and tail_embed to be suitable for the matrix multiplication
| tensorflow.Variable | 13,245 |
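The lookups above index rows of the embedding variables by id; a minimal, self-contained sketch of `tf.nn.embedding_lookup` with a hypothetical vocabulary, assuming TF 1.x:

```python
import tensorflow as tf

embedding_vars = tf.Variable(tf.truncated_normal([100, 8], stddev=0.1))  # hypothetical 100-entry vocab
ids = tf.constant([3, 7, 7, 42])
embedded = tf.nn.embedding_lookup(embedding_vars, ids)  # gathers rows -> shape [4, 8]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(embedded).shape)  # (4, 8)
```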
import tensorflow as tf
def test_default_encoding_stage(self):
"""Tests the correctness of `default_encoding_stage`."""
stage = self.default_encoding_stage()
self.assertIsInstance(stage,
(encoding_stage.EncodingStageInterface,
encoding_stage.AdaptiveEncodingStageInterface))
# Calling the method again should create a new instance.
new_stage = self.default_encoding_stage()
self.assertIsNot(stage, new_stage)
def test_encoding_stage_constructor_does_not_modify_graph(self):
"""Tests that the constructor of encoding stage does not modify graph."""
graph_def = tf.get_default_graph().as_graph_def()
self.default_encoding_stage()
new_graph_def = tf.get_default_graph().as_graph_def()
tf.test.assert_equal_graph_def(graph_def, new_graph_def)
def test_encoding_stage_name(self):
"""Tests that the `name` property returns a string."""
stage = self.default_encoding_stage()
self.assertIsInstance(stage.name, str)
def test_default_input_is_tensor_with_fully_defined_shape(self):
"""Tests that `default_input` returns a `Tensor` of fully defined shape."""
x = self.default_input()
self.assertIsInstance(x, tf.Tensor)
self.assertTrue(x.shape.is_fully_defined())
| tensorflow.get_default_graph | 13,246 |
import tensorflow as tf
# @2.self-attention in block
# mask generation
sl_indices = tf.range(block_len, dtype=tf.int32)
sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices)
if direction == 'forward':
direct_mask = tf.greater(sl_row, sl_col) # bl,bl
else:
direct_mask = tf.greater(sl_col, sl_row) # bl,bl
direct_mask_tile = tf.tile(
tf.expand_dims(tf.expand_dims(direct_mask, 0), 0), [bs, bn, 1, 1]) # bs,bn,bl,bl
rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1]) # bs,bn,bl,bl
rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl]) # bs,bn,bl,bl
rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2)
attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask') # bs,bn,bl,bl
| tensorflow.greater | 13,247 |
from tensorflow.python.framework import constant_op
iris.data[:, i], dtype=dtypes.float32), (-1, 1))
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=('en', 'fr', 'zh'),
indices=((0, 0), (0, 1), (60, 0)),
dense_shape=(len(iris.target), 2))
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), (-1, 1))
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
| tensorflow.python.framework.constant_op.constant | 13,248 |
import tensorflow as tf
horizon_tgt = horizon_sumV1(tgt, horizon)
pred1, pred2 = tf.split(horizon_pred, 2, axis=0)
tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0)
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small)))
loss = tf.reduce_mean(loss)
return loss
# randrom horizon
def contra_traj_lossV3(pred, tgt, horizon=12):
# Step-wise contrastive loss
horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
# pred1, pred2 = tf.split(horizon_pred, 2, axis=0)
# tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0)
| tensorflow.reduce_mean | 13,249 |
import tensorflow as tf
"""
if split_name not in split_to_sizes:
raise ValueError('split name %s was not recognized.' % split_name)
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
# Features in Pascal VOC TFRecords.
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature([1], tf.int64),
'image/width': tf.FixedLenFeature([1], tf.int64),
'image/channels': tf.FixedLenFeature([1], tf.int64),
'image/shape': tf.FixedLenFeature([3], tf.int64),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'shape': slim.tfexample_decoder.Tensor('image/shape'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
| tensorflow.FixedLenFeature | 13,250 |
import tensorflow as tf
cases = ['ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH',
'AB4DEF.GH', 'ABDEF.GH', 'XYZ']
files = [tempfile.NamedTemporaryFile(prefix=c) for c in cases]
with self.test_session():
# Test exact match without wildcards.
for f in files:
self.assertEqual(tf.matching_files(f.name).eval(),
tf.compat.as_bytes(f.name))
# We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
pos = files[0].name.find(cases[0])
pattern = files[0].name[:pos] + 'AB%sDEF.GH*'
| tensorflow.matching_files | 13,251 |
import tensorflow as tf
feed_previous=tf.constant(True))
sess.run([tf.global_variables_initializer()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
| tensorflow.nn.seq2seq.embedding_attention_seq2seq | 13,252 |
import tensorflow as tf
embedded_text_expand = tf.expand_dims(self.embedded_characters, -1)
with tf.device('/cpu:0'), tf.name_scope("embedding_tags"):
W_tags = tf.get_variable("embed_W_tags", [tags_vocab_size, embedding_size], initializer=initializer)
| tensorflow.device | 13,253 |
import tensorflow as tf
serialized=serialized_example, features=self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse.to_dense(
| tensorflow.sparse.to_dense | 13,254 |
import tensorflow as tf
# Loss and train op
self.loss = -self.normal_dist.log_prob(self.a_his) * self.target
# Add cross entropy cost to encourage exploration
self.loss -= entropy_beta * self.normal_dist.entropy()
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.grads=[];
self.vars=[];
for i in range(len(self.grads_and_vars)):
self.grads.append(self.grads_and_vars[i][0]);
self.vars.append(self.grads_and_vars[i][1]);
self.grads=self.grads[-1*NUM_VARS:];
self.vars=self.vars[-1*NUM_VARS:];
self.train_op = self.optimizer.apply_gradients(
self.grads_and_vars, global_step=tf.contrib.framework.get_global_step())
def predict(self, state, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.action, { self.state: [state] })[0]
def update(self, state, target, a_his, sess=None):
sess = sess or tf.get_default_session()
feed_dict = { self.state: state, self.target: target, self.a_his: a_his }
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
class ValueEstimator_Pendulum():
def __init__(self, learning_rate=0.1, par_idx=0,scope="value_estimator"):
w_init = tf.random_normal_initializer(0.,.1);
| tensorflow.contrib.framework.get_global_step | 13,255 |
import tensorflow as tf
strides=2,
name='pool2'
)
v = tf.reshape(pool2, [-1, 4096])
fc1 = tf.layers.dense(
inputs=v,
| tensorflow.reshape | 13,256 |
import tensorflow as tf
self._testMultiSaverCollectionSave()
self._testMultiSaverCollectionRestore()
def testBinaryAndTextFormat(self):
test_dir = self._TestDir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.test_session(graph=tf.Graph()):
# Creates a graph.
tf.Variable(10.0, name="v0")
# Exports the graph as binary format.
tf.train.export_meta_graph(filename, as_text=False)
with self.test_session(graph=tf.Graph()):
# Imports the binary format graph.
saver = tf.train.import_meta_graph(filename)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.test_session(graph=tf.Graph()):
# Imports the text format graph.
tf.train.import_meta_graph(filename)
# Writes wrong contents to the file.
tf.train.write_graph(saver.as_saver_def(), os.path.dirname(filename),
os.path.basename(filename))
with self.test_session(graph=tf.Graph()):
# Import should fail.
with self.assertRaisesWithPredicateMatch(
IOError, lambda e: "Cannot parse file"):
| tensorflow.train.import_meta_graph | 13,257 |
import tensorflow as tf
box_xy = tf.sigmoid(box_xy)
obj = tf.sigmoid(obj)
cls = tf.sigmoid(cls)
anchors = anchors.astype(np.float32)
grid_shape = x_shape[1:3]
# print(grid_shape)
grid_h, grid_w = grid_shape[0], grid_shape[1]
# print(grid_h,tf.range(grid_h))
grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, dtype)) * stride
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2.
box_x2y2 = box_xy + box_wh / 2.
box = tf.concat([box_x1y1, box_x2y2], axis=-1)
| tensorflow.range | 13,258 |
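The `meshgrid`/`stack` pair above builds the per-cell grid offsets used to decode the boxes; a minimal sketch with a made-up grid size, assuming TF 1.x:

```python
import tensorflow as tf

grid_h, grid_w = 3, 4  # hypothetical feature-map size
grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h))   # two [grid_h, grid_w] index tensors
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)   # [grid_h, grid_w, 1, 2]

with tf.Session() as sess:
    print(sess.run(grid).shape)  # (3, 4, 1, 2)
```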
from tensorflow.python.framework import tensor_shape
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
num = tensor_util.ConstantValue(op.inputs[2])
return [tensor_shape.vector(num)]
| tensorflow.python.framework.tensor_shape.vector | 13,259 |
from tensorflow.contrib import tpu as contrib_tpu
optimizer=FLAGS.optimizer)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
| tensorflow.contrib.tpu.TPUEstimator | 13,260 |
import tensorflow as tf
print(sess.run(zero_var))
print(sess.run(ones_var))
zero_similar = tf.Variable(tf.zeros_like(zero_var))
ones_similar = tf.Variable(tf.ones_like(ones_var))
sess.run(ones_similar.initializer)
sess.run(zero_similar.initializer)
print(sess.run(ones_similar))
print(sess.run(zero_similar))
fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1))
sess.run(fill_var.initializer)
print(sess.run(fill_var))
const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9]))
const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim]))
sess.run(const_var.initializer)
sess.run(const_fill_var.initializer)
print(sess.run(const_var))
print(sess.run(const_fill_var))
linear_var = tf.Variable(tf.linspace(start=0.0, stop=1.0, num=3)) # Generates [0.0, 0.5, 1.0]; the endpoint is included
sequence_var = tf.Variable(tf.range(start=6, limit=15, delta=3)) # Generates [6, 9, 12]; the limit is not included
sess.run(linear_var.initializer)
sess.run(sequence_var.initializer)
print(sess.run(linear_var))
print(sess.run(sequence_var))
| tensorflow.constant | 13,261 |
import tensorflow as tf
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
# apply highways layers
def high(x, ww_carry, bb_carry, ww_tr, bb_tr):
carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry)
transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr)
return carry_gate * transform_gate + (1.0 - carry_gate) * x
if use_highway:
highway_dim = n_filters
| tensorflow.matmul | 13,262 |
import tensorflow as tf
# Now we construct the copy model.
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses1 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=False)
# Now check that we did not accidentally set reuse.
self.assertEqual(False, tf.get_variable_scope().reuse)
# Construct one more model with per-example loss.
tf.get_variable_scope().reuse_variables()
_, losses2 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=True)
# The first loss is a scalar; the second one is a 1-dimensional tensor.
self.assertEqual([], losses1[0].get_shape().as_list())
self.assertEqual([None], losses2[0].get_shape().as_list())
| tensorflow.get_variable_scope | 13,263 |
import tensorflow as tf
if compat.is_tf_tensor(text):
text = tf.cond(
tf.less_equal(tf.size(text), max_len), lambda: text,
lambda: tf.concat([text[:(max_len - 1)], text[-1:]], axis=0))
elif len(text) > max_len:
text = text[:(max_len - 1)] + text[-1:]
| tensorflow.concat | 13,264 |
import tensorflow as tf
'The start warm-up learning rate to avoid NAN.')
tf.app.flags.DEFINE_integer(
'warmup_steps', 100,
'The total steps to warm-up.')
# for learning rate piecewise_constant decay
tf.app.flags.DEFINE_string(
'decay_boundaries', '2, 3',
'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
'lr_decay_factors', '1, 0.5, 0.1',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
| tensorflow.app.flags.DEFINE_string | 13,265 |
import tensorflow as tf
Total_loss = cost + l2_loss
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
# Batch norm requires update_ops to be added as a train_op dependency.
| tensorflow.train.MomentumOptimizer | 13,266 |
from tensorflow.python.platform import gfile
# Created by the first helper.
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
| tensorflow.python.platform.gfile.Exists | 13,267 |
import tensorflow as tf
with tf.variable_scope("placeholder"):
X = tf.placeholder(tf.float32, (None, 128), "X")
y_1 = tf.placeholder(tf.float32, (None, n_output_1), "y_1")
y_2 = tf.placeholder(tf.float32, (None, n_output_2), "y_2")
is_training = tf.placeholder(tf.bool, (), "is_training")
with tf.variable_scope("network"):
with contrib.framework.arg_scope(
[contrib.layers.fully_connected],
# he initialization
weights_initializer=contrib.layers.variance_scaling_initializer(),
# l2 regularization
| tensorflow.variable_scope | 13,268 |
import tensorflow as tf
# y_true [batch_size, num_anchor, num_classes+1]
# y_pred [batch_size, num_anchor, num_classes]
labels = y_true
anchor_state = y_true[:, :, -1] # -1 means ignore, 0 is background, 1 means an object is present
classification = y_pred
# Find the anchor (prior) boxes that contain an object
indices_for_object = tf.where(keras.backend.equal(anchor_state, 1))
labels_for_object = tf.gather_nd(labels, indices_for_object)
classification_for_object = tf.gather_nd(classification, indices_for_object)
cls_loss_for_object = keras.backend.binary_crossentropy(labels_for_object, classification_for_object)
# Find the anchor (prior) boxes that are actually background
indices_for_back = tf.where(keras.backend.equal(anchor_state, 0))
labels_for_back = tf.gather_nd(labels, indices_for_back)
| tensorflow.gather_nd | 13,269 |
import tensorflow as tf
# tensor tile
rep_map_tile = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1]) # bs,slh,sld,vec
with tf.variable_scope('attention'): # bs,sl,sl,vec
f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
dependent = linear(rep_dep_tensor_dp, ivec, False, scope='linear_dependent') # bs,sld,vec
dependent_etd = tf.expand_dims(dependent, 1) # bs,1,sld,vec
head = linear(rep_head_tensor_dp, ivec, False, scope='linear_head') # bs,slh,vec
| tensorflow.constant_initializer | 13,270 |
import tensorflow as tf
tf.initialize_all_variables().run()
sess.run(logits)
# Creates a saver.
saver0 = tf.train.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
| tensorflow.train.Saver | 13,271 |
import tensorflow as tf
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in all_grads.items():
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
self.loss = tf.reduce_mean(tower_losses)
tf.summary.scalar('loss', self.loss)
# Create optimizer ops
self.global_step = tf.Variable(0, trainable=False, name='global_step')
opt = tf.train.RMSPropOptimizer(self.config['learning_rate'])
with tf.control_dependencies(update_ops):
self.trainer = opt.apply_gradients(
gradvars, global_step=self.global_step)
def _eval_graph(self, data):
tower_metrics = self._gpu_tower(data, Mode.EVAL)
with tf.device('/cpu:0'):
self.metrics = {m: tf.reduce_mean(tf.stack([t[m] for t in tower_metrics]))
for m in tower_metrics[0]}
def _pred_graph(self, data):
| tensorflow.Variable | 13,272 |
from tensorflow.python.ops import math_ops
def _create_slots(self, var_list):
# Create slots for the global solution.
for v in var_list:
self._zeros_slot(v, "vstar", self._name)
self._zeros_slot(v, "gold", self._name)
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)
vstar = self.get_slot(var, "vstar")
gold = self.get_slot(var, "gold")
var_update = state_ops.assign_sub(var, lr_t*(grad + gold + mu_t*(var-vstar))) # Update 'ref' by subtracting 'value'
#Create an op that groups multiple operations.
#When this op finishes, all ops in input have finished
return control_flow_ops.group(*[var_update,])
| tensorflow.python.ops.math_ops.cast | 13,273 |
import tensorflow as tf
x = x + shortcut
x = self._relu('relu_2', x)
print('residual-unit-%s-shape: ' % name + str(x.shape.as_list()))
return x
@staticmethod
def _conv(name, x, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, dilation=1.0, bias=-1):
with tf.variable_scope(name):
stride = [1, stride[0], stride[1], 1]
kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], num_filters]
w = variable_with_weight_decay(kernel_shape, initializer, l2_strength)
variable_summaries(w)
if dilation > 1:
conv = tf.nn.atrous_conv2d(x, w, dilation, padding)
else:
if type(padding)==type(''):
| tensorflow.variable_scope | 13,274 |
import tensorflow as tf
l3=tf.matmul(l2, self.w3)+self.b3
l3=tf.nn.relu(l3)
out=tf.matmul(l3, self.w4)+self.b4
return out
def test_inference(self,images):
images=tf.cast(images,tf.float32)/255.0
l1 = tf.matmul(images, self.w1)+self.b1
l1=tf.nn.relu(l1)
l2 = tf.matmul(l1, self.w2)+self.b2
l2=tf.nn.relu(l2)
l3=tf.matmul(l2, self.w3)+self.b3
l3=tf.nn.relu(l3)
out=tf.matmul(l3, self.w4)+self.b4
| tensorflow.matmul | 13,275 |
import tensorflow as tf
out.set_shape([batch_size, res1, res2, out_channels])
else:
out = tf.add(tf.nn.conv2d(inp, W, strides=strides, padding=padding), b, name='convolution')
| tensorflow.nn.conv2d | 13,276 |
import tensorflow as tf
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
| tensorflow.constant | 13,277 |
from tensorflow.python.ops import math_ops
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics = {("accuracy", "classes"): metric_ops.streaming_accuracy}
predictions = math_ops.sigmoid(logits)
labels_float = math_ops.to_float(labels)
| tensorflow.python.ops.math_ops.sigmoid | 13,278 |
import tensorflow as tf
# Initialize the mean
mean = tf.get_variable(name + "_mean", shape, dtype=dtype)
# Initialize the standard deviation
pre_sigma = tf.get_variable(name + "_standard_deviation",
shape,
initializer=std_init,
dtype=dtype)
| tensorflow.get_variable | 13,279 |
import tensorflow as tf
monotonicity_dist = monotonicity_dist or 1.0
batch_size = tf.shape(attention_weights)[0]
src_len = tf.shape(attention_weights)[2]
trg_len = tf.shape(attention_weights)[1]
src_indices = tf.tile(tf.reshape(tf.range(src_len), shape=[1, 1, src_len]), [batch_size, trg_len, 1])
trg_indices = tf.tile(tf.reshape(tf.range(trg_len), shape=[1, trg_len, 1]), [batch_size, 1, src_len])
source_length = encoder_input_length[0]
target_length = tf.to_int32(tf.reduce_sum(trg_mask, axis=1))
true_src_len = tf.reshape(source_length, shape=[batch_size, 1, 1]) - 1
true_trg_len = tf.reshape(target_length, shape=[batch_size, 1, 1]) - 1
src_mask = tf.to_float(tf.sequence_mask(source_length, maxlen=src_len))
mask = tf.matmul(tf.expand_dims(trg_mask, axis=2), tf.expand_dims(src_mask, axis=1))
monotonous = tf.sqrt(((true_trg_len * src_indices - true_src_len * trg_indices) ** 2)
/ (true_trg_len**2 + true_src_len**2))
monotonous = tf.to_float(monotonous < monotonicity_dist)
non_monotonous = (1 - monotonous) * mask
attn_loss = tf.reduce_sum(attention_weights * tf.stop_gradient(non_monotonous)) / tf.to_float(batch_size)
if monotonicity_decay:
| tensorflow.reshape | 13,280 |
import tensorflow as tf
def triplet_loss(anchor, positive, negative, alpha=0.2, name='triplet_loss'):
"""Calculate the triplet loss according to the FaceNet paper.
Args:
anchor: 2-D `tensor` [batch_size, embedding_size], the embeddings for the anchor images.
positive: 2-D `tensor` [batch_size, embedding_size], the embeddings for the positive images.
negative: 2-D `tensor` [batch_size, embedding_size], the embeddings for the negative images.
alpha: positive to negative triplet distance margin
Returns:
the triplet loss.
"""
with tf.name_scope(name):
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
return loss
def decov_loss(xs, name='decov_loss'):
"""Decov loss as described in https://arxiv.org/pdf/1511.06068.pdf 'Reducing
Overfitting In Deep Networks by Decorrelating Representation'.
Args:
xs: 4-D `tensor` [batch_size, height, width, channels], input
| tensorflow.name_scope | 13,281 |
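A short usage sketch for the `triplet_loss` helper defined above, with random embeddings as stand-ins (assuming TF 1.x graph mode):

```python
import numpy as np
import tensorflow as tf

# Hypothetical [batch_size, embedding_size] embeddings, only to exercise the loss.
anchor = tf.constant(np.random.rand(8, 128), dtype=tf.float32)
positive = tf.constant(np.random.rand(8, 128), dtype=tf.float32)
negative = tf.constant(np.random.rand(8, 128), dtype=tf.float32)

loss = triplet_loss(anchor, positive, negative, alpha=0.2)
with tf.Session() as sess:
    print(sess.run(loss))
```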
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
use_core_libs=True,
feature_columns=[
core_feature_column.numeric_column("f1"),
core_feature_column.numeric_column("f2")
],
ranking_model_pair_keys=("a", "b"))
model.fit(input_fn=_ranking_train_input_fn, steps=1000)
model.evaluate(input_fn=_ranking_train_input_fn, steps=1)
model.predict(input_fn=_infer_ranking_train_input_fn)
| tensorflow.python.feature_column.feature_column_lib.numeric_column | 13,282 |
import tensorflow as tf
def train_acregnet_model(config):
tf.reset_default_graph()
tf_config = tf.ConfigProto()
if RUN_IN_GPU:
| tensorflow.ConfigProto | 13,283 |
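`tf.ConfigProto` is where session-level GPU options live; a minimal sketch of the common memory settings, assuming TF 1.x:

```python
import tensorflow as tf

config = tf.ConfigProto()
config.gpu_options.allow_growth = True                    # allocate GPU memory on demand
config.gpu_options.per_process_gpu_memory_fraction = 0.4  # or hard-cap the fraction
sess = tf.Session(config=config)
```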
import tensorflow as tf
inputs_vars = tf.reduce_mean(tf.square(inputs - inputs_means), 0, keep_dims=True)
inputs = tf.Print(
inputs,
[tf.reduce_mean(inputs_means), tf.reduce_mean(inputs_vars)],
"image mean and average var",
first_n=1)
| tensorflow.reduce_mean | 13,284 |
import tensorflow as tf
self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)]
self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)]
self.sess.run(tf.global_variables_initializer())
# Tensorboard
if summary_dir is not None:
self.writer = tf.summary.FileWriter(summary_dir)
tf.summary.scalar('Loss/Policy', loss_pg)
tf.summary.scalar('Loss/Value', loss_vf)
tf.summary.scalar('Loss/Entropy', loss_entropy)
tf.summary.scalar('Loss/Total', loss)
tf.summary.scalar('Var/Epsilon', epsilon_decay)
tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
# AC net
def build_anet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
| tensorflow.summary.scalar | 13,285 |
import tensorflow as tf
gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32,
initializer=tf.ones_initializer())
# BN when training
update = 1.0 - decay
update_mu = mu.assign_sub(update * (mu - batch_mean))
update_sigma = sigma.assign_sub(update * (sigma - batch_var))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_sigma)
mean, var = tf.cond(self.train_flag, lambda: (batch_mean, batch_var), lambda: (mu, sigma))
bn = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
tf.add_to_collection('debug_layers', bn)
return bn
| tensorflow.add_to_collection | 13,286 |
import tensorflow as tf
tf.FIFOQueue(num_workers, [tf.bool], shapes=[[]],
shared_name='%s%s' % (name_prefix, i))
for i in range(num_workers)]
queue_ops = []
# For each other worker, add an entry in a queue, signaling that it can
# finish this step.
token = tf.constant(False)
with tf.control_dependencies(enqueue_after_list):
for i, q in enumerate(sync_queues):
if i == self.task_index:
queue_ops.append(tf.no_op())
else:
queue_ops.append(q.enqueue(token))
| tensorflow.control_dependencies | 13,287 |
from tensorflow.python.ops import math_ops
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return math_ops.select(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
| tensorflow.python.ops.math_ops.greater | 13,288 |
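The same safe-division idea written against the public API (tf.where rather than the internal math_ops.select, which was removed in later releases); a sketch only:

import tensorflow as tf

def safe_div(numerator, denominator, name='safe_div'):
    # Returns 0 where the denominator is <= 0, numerator / denominator elsewhere.
    return tf.where(
        tf.greater(denominator, 0),
        tf.truediv(numerator, denominator),
        tf.zeros_like(numerator),
        name=name)

with tf.Session() as sess:
    print(sess.run(safe_div(tf.constant([1.0, 2.0]), tf.constant([0.0, 4.0]))))
    # [0.  0.5]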
import tensorflow as tf
raise NotImplementedError("other model is not supported")
def sentence_to_index(self, sentence):
list_of_index = [self._model.wv.vocab[
word].index for word in tokenize(sentence)]
return list_of_index
def get_embedding_matrix(self):
return self._model.syn0
def create_queue(sess = None, coord = None, encode_data = None,
decode_data = None, capacity = 1024, batch_size = 32, scope = None):
encode = tf.placeholder(tf.int32, shape=[None], name="encode")
decode = tf.placeholder(tf.int32, shape=[decode_max_length + 2], name="decode")
weight = tf.placeholder(tf.float32, shape=[decode_max_length + 1], name="weight")
queue = tf.PaddingFIFOQueue(capacity = capacity,
dtypes = [tf.int32, tf.int32, tf.float32],
shapes = [[None], [decode_max_length + 2], [decode_max_length + 1]],
name = 'FIFOQueue')
enqueue_op = queue.enqueue([encode, decode, weight])
def _iterator():
assert len(encode_data) == len(decode_data)
data = list(zip(encode_data, decode_data))
| tensorflow.placeholder | 13,289 |
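A reduced sketch of the placeholder-plus-queue feeding pattern (TF 1.x; PaddingFIFOQueue pads variable-length sequences to the longest element in each dequeued batch, which is what the create_queue helper above relies on):

import tensorflow as tf

encode = tf.placeholder(tf.int32, shape=[None], name='encode')

# Variable-length sequences are padded to the longest element in each batch.
queue = tf.PaddingFIFOQueue(capacity=32, dtypes=[tf.int32], shapes=[[None]])
enqueue_op = queue.enqueue([encode])
batch = queue.dequeue_many(2)

with tf.Session() as sess:
    sess.run(enqueue_op, feed_dict={encode: [1, 2, 3]})
    sess.run(enqueue_op, feed_dict={encode: [4, 5]})
    print(sess.run(batch))  # [[1 2 3] [4 5 0]]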
import tensorflow as tf
decay=.1,
axes=[0],
reuse=None,
bn_lag=DEFAULT_BN_LAG):
"""Batch normalization with corresponding log determinant Jacobian."""
if reuse is None:
reuse = not train
# create variables
with tf.variable_scope(name) as scope:
if reuse:
scope.reuse_variables()
var = variable_on_cpu(
"var", [dim], tf.constant_initializer(1.), trainable=False)
mean = variable_on_cpu(
"mean", [dim], tf.constant_initializer(0.), trainable=False)
step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False)
# choose the appropriate moments
if train:
used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm")
cur_mean, cur_var = used_mean, used_var
if bn_lag > 0.:
used_var = stable_var(input_=input_, mean=used_mean, axes=axes)
cur_var = used_var
used_mean -= (1 - bn_lag) * (used_mean - tf.stop_gradient(mean))
used_mean /= (1. - bn_lag**(step + 1))
used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var))
used_var /= (1. - bn_lag**(step + 1))
else:
used_mean, used_var = mean, var
cur_mean, cur_var = used_mean, used_var
| tensorflow.constant_initializer | 13,290 |
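A small sketch of the non-trainable statistics variables created above with tf.constant_initializer; the variable_on_cpu helper is not shown in the snippet, so the reimplementation here (a thin wrapper over tf.get_variable pinned to /cpu:0) is an assumption:

import tensorflow as tf

def variable_on_cpu(name, shape, initializer, trainable=True):
    # Hypothetical reimplementation of the helper used in the snippet above.
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer,
                               trainable=trainable)

dim = 16
var = variable_on_cpu('var', [dim], tf.constant_initializer(1.), trainable=False)
mean = variable_on_cpu('mean', [dim], tf.constant_initializer(0.), trainable=False)
step = variable_on_cpu('step', [], tf.constant_initializer(0.), trainable=False)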
import tensorflow as tf
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
| tensorflow.train.Scaffold | 13,291 |
import tensorflow as tf
# 5. To hard-cap the fraction of GPU memory TensorFlow may use, set per_process_gpu_memory_fraction on the config
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)
# 6. Sometimes we want the code to be robust enough to decide how many GPUs are appropriate; TensorFlow has a built-in function to detect this. That is useful when we want the code to exploit GPU compute whenever memory allows and to place specific operations on the GPU
if tf.test.is_built_with_cuda(): pass
# 7. To place specific operations on GPUs: the example below runs a few simple computations and assigns them to the main CPU and two secondary GPUs
with tf.device('/cpu:0'):
a = tf.constant([1.0, 3.0, 5.0], shape=[1,3])
b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])
with tf.device('/gpu:0'):
c = tf.matmul(a,b)
c = tf.reshape(c, [-1])
with tf.device('/gpu:1'):
d = tf.matmul(b, a)
flat_d = tf.reshape(d, [-1])
combined = tf.multiply(c, flat_d)
print(sess.run(combined))
| tensorflow.device | 13,292 |
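A trimmed variant of the device-placement example that also tolerates machines without two GPUs; allow_soft_placement is a standard ConfigProto option that lets TensorFlow fall back to an available device when the requested one is missing:

import tensorflow as tf

with tf.device('/cpu:0'):
    a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
    b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])
with tf.device('/gpu:0'):
    c = tf.reshape(tf.matmul(a, b), [-1])

# Soft placement falls back to the CPU when /gpu:0 does not exist here.
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
with tf.Session(config=config) as sess:
    print(sess.run(c))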
import tensorflow as tf
self.assertEqual(schema, expected_schema)
def test_infer_feature_schema_bad_rank(self):
with tf.compat.v1.Graph().as_default() as graph:
tensors = {
'a': tf.compat.v1.placeholder(tf.float32, ()),
| tensorflow.compat.v1.Graph | 13,293 |
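A sketch of building a graph explicitly via tf.compat.v1.Graph, as the schema-inference test above does; the scalar placeholder is the rank-0 shape that the test flags as invalid:

import tensorflow as tf

with tf.compat.v1.Graph().as_default() as graph:
    tensors = {
        # A rank-0 (scalar) placeholder.
        'a': tf.compat.v1.placeholder(tf.float32, ()),
    }
    assert tensors['a'].graph is graph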
from tensorflow.python.ops import math_ops
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
if var.dtype.base_dtype == tf.float16:
| tensorflow.python.ops.math_ops.cast | 13,294 |
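The casts above follow the usual optimizer pattern of matching hyperparameter dtype to the variable's base dtype; a sketch with the public tf.cast (the variable and learning rate here are toy values):

import tensorflow as tf

var = tf.get_variable('w', [3], dtype=tf.float16, initializer=tf.zeros_initializer())
lr = tf.constant(0.001, dtype=tf.float32)
# Hyperparameters are stored once in float32 and cast to each variable's
# base dtype before being combined with it.
lr_t = tf.cast(lr, var.dtype.base_dtype)
update = var.assign_sub(lr_t * tf.ones_like(var))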
import tensorflow as tf
with tf.control_dependencies([p]):
return tf.reduce_mean(loss)
loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample), dtype=tf.float32,
parallel_iterations=32)
final_loss = tf.reduce_mean(loss)
return final_loss
def contra_traj_lossV6(pred, tgt, horizon=12):
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2
geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
final_loss = tf.reduce_mean(loss)
return final_loss, cstr_pct
def contra_traj_lossV7(pred, tgt, horizon=12, temp=100):
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
| tensorflow.reshape | 13,295 |
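The reshape-to-column/row trick above builds an all-pairs difference matrix by broadcasting; a small sketch of just that step, with a toy input:

import tensorflow as tf

horizon_pred = tf.constant([1.0, 3.0, 2.0])
# Column vector minus row vector broadcasts to an NxN matrix of pairwise diffs.
pred_flat1 = tf.reshape(horizon_pred, [-1, 1])   # shape [N, 1]
pred_flat2 = tf.reshape(horizon_pred, [1, -1])   # shape [1, N]
pred_dif = pred_flat1 - pred_flat2               # shape [N, N]

with tf.Session() as sess:
    print(sess.run(pred_dif))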
import tensorflow as tf
inputs_list[i][2],
inputs_list[i][3]],
Tout=[tf.float32, tf.float32])
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])
img = inputs_list[i][0]
img_shape = inputs_list[i][-2:]
| tensorflow.reshape | 13,296 |
import tensorflow as tf
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
final_loss = tf.reduce_mean(loss)
return final_loss, cstr_pct
def contra_traj_lossV9(pred, tgt, horizon=12, margin=1):
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2
geq = tf.cast(tgt_dif > 0, tf.bool)
# tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., margin - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
final_loss = tf.reduce_mean(loss)
| tensorflow.reshape | 13,297 |
import tensorflow as tf
specified.
Default value: `tf.size(perm)`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if both or neither `perm` and `rightmost_transposed_ndims` are
specified.
NotImplementedError: if `rightmost_transposed_ndims` is not known prior to
graph execution.
"""
with tf.name_scope(name, values=[perm, rightmost_transposed_ndims]):
if (rightmost_transposed_ndims is None) == (perm is None):
raise ValueError('Must specify exactly one of '
'`rightmost_transposed_ndims` and `perm`.')
if rightmost_transposed_ndims is not None:
rightmost_transposed_ndims = tf.convert_to_tensor(
value=rightmost_transposed_ndims,
dtype=np.int32,
name='rightmost_transposed_ndims')
rightmost_transposed_ndims_ = tf.get_static_value(
rightmost_transposed_ndims)
with tf.control_dependencies(_maybe_validate_rightmost_transposed_ndims(
| tensorflow.name_scope | 13,298 |
import tensorflow as tf
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
with tf.variable_scope('l2'):
net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w,
bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('q'):
q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable) # Q(s,a)
return q
| tensorflow.variable_scope | 13,299 |
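A reduced sketch of the critic pattern above: tf.variable_scope namespaces the layers so that separate eval/target networks can be built from one definition (TF 1.x; the layer sizes, initializers, and helper name are illustrative):

import tensorflow as tf

def build_critic(s, a, scope, trainable=True, reuse=False):
    init_w = tf.random_normal_initializer(0., 0.1)
    init_b = tf.constant_initializer(0.1)
    with tf.variable_scope(scope, reuse=reuse):
        net = tf.layers.dense(s, 20, activation=tf.nn.relu,
                              kernel_initializer=init_w,
                              bias_initializer=init_b, trainable=trainable)
        net = tf.concat([net, a], axis=1)
        # Scalar state-action value Q(s, a).
        q = tf.layers.dense(net, 1, kernel_initializer=init_w,
                            bias_initializer=init_b, trainable=trainable)
    return q

s = tf.placeholder(tf.float32, [None, 3])
a = tf.placeholder(tf.float32, [None, 1])
q_eval = build_critic(s, a, 'critic_eval')
q_target = build_critic(s, a, 'critic_target', trainable=False)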