Columns:
  seed      string (lengths 25 – 2.89k)
  seed_api  string (lengths 14 – 102)
  index     int64 (0 – 14.8k)
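Each row below pairs a code excerpt ("seed") with the fully qualified TensorFlow API it exercises ("seed_api") and an integer row id ("index"). As a minimal sketch of how one such record could be inspected — assuming the dump comes from a Hugging Face-style dataset, and using a placeholder dataset identifier rather than the real one:

from datasets import load_dataset

# Placeholder identifier; substitute the actual dataset path.
ds = load_dataset("org/tf-seed-api", split="train")

row = ds[0]                # one record; positional order may differ from the "index" field
print(row["seed_api"])     # e.g. "tensorflow.zeros"
print(row["index"])        # integer row id, e.g. 2400
print(row["seed"])         # the multi-line code excerpt that uses that API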
import tensorflow as tf

  if len_map is None:
    return dataset

  def pad_to_len(x):
    for key, max_len in len_map.items():
      x_shape = tf.shape(x[key])
      x_len = x_shape[0]
      if x_len < max_len:
        pad_shape = [
            max_len - x_len,
        ]
        zeros = tf.zeros(pad_shape, dtype=x[key].dtype)
        x[key] = tf.concat([x[key], zeros], 0)
    return x
  return dataset.map(pad_to_len)


@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def add_eos_to_output_features(dataset, training, output_features='targets',
tensorflow.zeros
2,400
import tensorflow as tf

if __name__ == '__main__':
  tf.test.main()
tensorflow.test.main
2,401
from tensorflow.python.framework import ops ops.RegisterShape("Abs")(common_shapes.unchanged_shape) ops.RegisterShape("Ceil")(common_shapes.unchanged_shape) ops.RegisterShape("Conj")(common_shapes.unchanged_shape) ops.RegisterShape("Cos")(common_shapes.unchanged_shape) ops.RegisterShape("Exp")(common_shapes.unchanged_shape) ops.RegisterShape("Floor")(common_shapes.unchanged_shape) ops.RegisterShape("Imag")(common_shapes.unchanged_shape) ops.RegisterShape("Inv")(common_shapes.unchanged_shape) ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape) ops.RegisterShape("IsInf")(common_shapes.unchanged_shape) ops.RegisterShape("IsNan")(common_shapes.unchanged_shape) ops.RegisterShape("Log")(common_shapes.unchanged_shape) ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape) ops.RegisterShape("Neg")(common_shapes.unchanged_shape) ops.RegisterShape("Real")(common_shapes.unchanged_shape) ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape) ops.RegisterShape("Sign")(common_shapes.unchanged_shape) ops.RegisterShape("Sin")(common_shapes.unchanged_shape) ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape) ops.RegisterShape("Square")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.RegisterShape
2,402
import tensorflow as tf weights = tf.nn.sigmoid(e) elif encoder.attn_norm_fun == 'max': weights = tf.one_hot(tf.argmax(e, -1), depth=tf.shape(e)[1]) else: e -= tf.reduce_max(e, axis=1, keep_dims=True) T = encoder.attn_temperature or 1.0 exp = tf.exp(e / T) * mask weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True) weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1) return weighted_average, weights def no_attention(state, hidden_states, *args, **kwargs): batch_size = tf.shape(state)[0] weighted_average = tf.zeros(shape=tf.stack([batch_size, 0])) weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]])
tensorflow.expand_dims
2,403
import tensorflow as tf # initializer=tf.keras.initializers.lecun_normal(), dtype=tf.float32) V = tf.get_variable(name="attn_V", shape=[2 * self.config.hidden_size, 1], initializer=tf.contrib.layers.xavier_initializer(), # initializer=tf.truncated_normal_initializer(), # initializer=tf.keras.initializers.lecun_normal(),
tensorflow.contrib.layers.xavier_initializer
2,404
import tensorflow as tf rank = _rank(tensor) assert rank == 3, "Use embedding lookup layer" binary_mask = _apply_dropout_mask(tf.shape(tensor)[:2], keep_prob, normalize=False) binary_mask = tf.expand_dims(binary_mask, axis=-1) # proper broadcasting to zero out entire word vectors out = tensor * binary_mask return out @layer def relu_layer(tensor): out = tf.nn.relu(tensor) return out @layer def tanh_layer(tensor): out = tf.nn.tanh(tensor) return out @layer def softmax_layer(tensor, softmax_func=None, **opts): if softmax_func is None:
tensorflow.nn.relu
2,405
import tensorflow as tf int(s_h/ns0), int(s_h/ns0/ns1), int(s_h/ns0/ns1/ns2), int(s_h/ns0/ns1/ns2/ns3) s_w0, s_w1, s_w2, s_w3 = \ int(s_w/ns0), int(s_w/ns0/ns1), int(s_w/ns0/ns1/ns2), int(s_w/ns0/ns1/ns2/ns3) def decode(z, skip_h3, skip_h2, skip_h1, skip_h0): z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3*s_h3*s_w3, 'd_h0_lin')) h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob) h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3), [self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3)) h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3), [self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2)) h3 = lrelu(deconv2d(tf.concat([h2, skip_h1], 3), [self.batch_size, s_h0, s_w0, nf0], name='d_h3', d_h=ns1, d_w=ns1)) print(h3.get_shape()) h4 = deconv2d(tf.concat([h3, skip_h0], 3), [self.batch_size, s_h, s_w, self.c_dim], name='d_h4', d_h=ns0, d_w=ns0) return h4 with tf.variable_scope("deconv") as scope: output_h4 = decode(trans_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0) scope.reuse_variables() truthoutput_h4 = decode(tgtimg_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0) self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3 print(tgtimg_z.get_shape()) self.out = output_h4 self.out2 = truthoutput_h4 print(self.out.get_shape()) self.recon1 = tf.nn.l2_loss(tgtimg - self.out)
tensorflow.concat
2,406
import tensorflow as tf def get_fc_var(self, in_size, out_size, name): """ in_size : number of input feature size out_size : number of output feature size name : block_layer name """ initial_value = tf.truncated_normal([in_size, out_size], 0.0, stddev = 1 / math.sqrt(float(in_size))) weights = self.get_var(initial_value, name, 0, name + "_weights") initial_value = tf.truncated_normal([out_size], 0.0, 1.0) biases = self.get_var(initial_value, name, 1, name + "_biases") return weights, biases def get_var(self, initial_value, name, idx, var_name): if self.data_dict is not None and idx in self.data_dict[name]: value = self.data_dict[name][idx]
tensorflow.truncated_normal
2,407
import tensorflow as tf

loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
final_loss = tf.reduce_mean(loss)
tensorflow.shape
2,408
import tensorflow as tf

if linear_loss is not None:
    values.append(tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss",
                                   simple_value=linear_loss))
test_summary = tf.Summary(value=values)
summary_writer.add_summary(test_summary, step)
tensorflow.Summary
2,409
import tensorflow as tf """Verbosity level for summary ops. Pass 0 to disable both summaries and checkpoints.""") tf.flags.DEFINE_integer('save_summaries_steps', 0, """How often to save summaries for trained models. Pass 0 to disable summaries.""") tf.flags.DEFINE_integer('save_model_secs', 0, """How often to save trained models. Pass 0 to disable checkpoints""") tf.flags.DEFINE_string('train_dir', None, """Path to session checkpoints.""")
tensorflow.flags.DEFINE_integer
2,410
import tensorflow as tf # We use the moving mean as an estimate of the mean in order to perform # a more numerically stable calculation of the batch mean. # Copy for better stability. shift = tf.add(self._moving_mean, 0) counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics( input_batch, reduction_indices, keep_dims=True, shift=shift, name="batch_norm_ss") mean, variance = tf.nn.normalize_moments(counts, shifted_sum_x, shifted_sum_x2, shift, name="normalize_moments") return mean, variance def build_moving_stats(): return ( tf.identity(self._moving_mean), tf.identity(self._moving_variance),
tensorflow.nn.normalize_moments
2,411
import tensorflow as tf filter_h, filter_w, num_channels_out = filter_dims stride_h, stride_w = stride_dims output_dims = get_deconv2d_output_dims(input_dims, filter_dims, stride_dims, padding) with tf.variable_scope(scope): deconv_weight = tf.Variable( tf.random_normal([filter_h, filter_w, num_channels_out, num_channels_in], stddev=0.1, dtype=tf.float32)) deconv_bias = tf.Variable(tf.zeros([num_channels_out], dtype=tf.float32)) map = tf.nn.conv2d_transpose(input_data, deconv_weight, output_dims, strides=[1, stride_h, stride_w, 1], padding=padding) map = tf.nn.bias_add(map, deconv_bias) activation = non_linear_fn(map) # print(scope, 'out', activation.get_shape().as_list()) return activation def self_attention(x, channels, act_func=tf.nn.relu, scope='attention'): with tf.variable_scope(scope): batch_size, height, width, num_channels = x.get_shape().as_list()
tensorflow.nn.conv2d_transpose
2,412
import tensorflow as tf output = tf.nn.bias_add(tf.matmul(x, w), bias) return output def _bn(self, name, x): with tf.variable_scope(name): moving_average_decay = 0.9 decay = moving_average_decay batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2]) mu = tf.get_variable('mu', batch_mean.shape, dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=False) tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, mu) tf.add_to_collection('mu_sigma_bn', mu) sigma = tf.get_variable('sigma', batch_var.shape, dtype=tf.float32, initializer=tf.ones_initializer(), trainable=False) tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, sigma) tf.add_to_collection('mu_sigma_bn', sigma) beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32, initializer=tf.zeros_initializer()) gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32, initializer=tf.ones_initializer()) # BN when training update = 1.0 - decay
tensorflow.add_to_collection
2,413
import tensorflow as tf

num_filters, filter_size, stride = dim
with tf.variable_scope(scope):
tensorflow.variable_scope
2,414
import tensorflow as tf

def cw_1d(X, y=None):

    def N0(mean, variance):
        return 1.0/(tf.sqrt(2.0 * m.pi * variance)) * tf.exp((-(mean**2))/(2*variance))

    N = tf.cast(tf.shape(X)[0], tf.float32)

    if y is None:
tensorflow.sqrt
2,415
import tensorflow as tf def create_architecture(self, sess, mode, num_classes, tag=None, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)): self._image = tf.placeholder(tf.float32, shape=[self._batch_size, None, None, 3]) self._im_info = tf.placeholder(tf.float32, shape=[self._batch_size, 3]) #缩放之后的图片尺寸和缩放的比例 self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5]) #gt_boxes缩放之后的坐标以及所属类别的标号
tensorflow.placeholder
2,416
import tensorflow as tf if self.maxnorm is not None: # Ensure maxnorm constraints are initially satisfied head_init = dense_maxnorm(head_init, self.maxnorm) rel_init = dense_maxnorm(rel_init, self.maxnorm) tail_init = dense_maxnorm(tail_init, self.maxnorm) self.head_embedding_vars = tf.Variable(head_init) self.rel_embedding_vars = tf.Variable(rel_init) self.tail_embedding_vars = tf.Variable(tail_init) # Embedding layer for each (head, rel, tail) triple being fed in as input head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input) rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input) tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input) # Model output raw_output = tf.reduce_sum(tf.mul(tf.mul(head_embed, rel_embed), tail_embed), 1) self.output, self.loss = self._create_output_and_loss(raw_output) # Optimization self.train_step = self.opt.minimize(self.loss) if self.maxnorm is not None: # Post-processing to limit embedding vars to L2 ball head_constraint = self._norm_constraint_op(self.head_embedding_vars, tf.unique(self.head_input)[0], self.maxnorm) rel_constraint = self._norm_constraint_op(self.rel_embedding_vars, tf.unique(self.rel_input)[0], self.maxnorm) tail_constraint = self._norm_constraint_op(self.tail_embedding_vars,
tensorflow.mul
2,417
import tensorflow as tf all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32),
tensorflow.constant
2,418
import tensorflow as tf # define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore) # landm_valid = 1 (w landm), 0 (w/o landm) mask_pos = tf.equal(class_true, 1) mask_neg = tf.equal(class_true, 0) mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos) # landm loss (smooth L1) mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true)) loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b), tf.boolean_mask(landm_pred, mask_landm_b)) loss_landm = tf.reduce_mean(loss_landm) # localization loss (smooth L1) mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true)) loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b), tf.boolean_mask(loc_pred, mask_pos_b)) loss_loc = tf.reduce_mean(loss_loc) # classification loss (crossentropy) # 1. compute max conf across batch for hard negative mining loss_class = tf.where(mask_neg, 1 - class_pred[:, 0][..., tf.newaxis], 0) # 2. hard negative mining loss_class = tf.reshape(loss_class, [num_batch, num_prior]) loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING') loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
tensorflow.shape
2,419
import tensorflow as tf

    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    return tf.train.Scaffold()
tensorflow.train.Scaffold
2,420
import tensorflow as tf assert var.name == var_name_n_prune_ratio[0], \ 'unmatched variable names: %s vs. %s' % (var.name, var_name_n_prune_ratio[0]) prune_ratio = self.__calc_prune_ratio_dyn(var_name_n_prune_ratio[1]) # create a mask and non-masked backup for each variable name = var.name.replace(':0', '_mask') mask = tf.get_variable(name, initializer=tf.ones(var.shape), trainable=False) name = var.name.replace(':0', '_var_bkup') var_bkup = tf.get_variable(name, initializer=var.initialized_value(), trainable=False) # create update operations var_bkup_update_op = var_bkup.assign(tf.where(mask > 0.5, var, var_bkup)) with tf.control_dependencies([var_bkup_update_op]): mask_thres = tf.contrib.distributions.percentile(tf.abs(var_bkup), prune_ratio * 100) mask_update_op = mask.assign(tf.cast(tf.abs(var_bkup) > mask_thres, tf.float32)) with tf.control_dependencies([mask_update_op]): prune_op = var.assign(var_bkup * mask) # record pruning masks & operations masks += [mask] prune_ops += [prune_op] return masks, tf.group(prune_ops) def __calc_prune_ratio_dyn(self, prune_ratio_fnl): """Calculate the dynamic pruning ratio. Args: * prune_ratio_fnl: final pruning ratio Returns: * prune_ratio_dyn: dynamic pruning ratio
tensorflow.control_dependencies
2,421
import tensorflow as tf tf.constant(0, tf.int32, shape=[2]) for _ in range(4)] with tf.variable_scope("other"): outputs_dict3, _ = tf.nn.seq2seq.one2many_rnn_seq2seq( enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict, embedding_size=2, feed_previous=tf.constant(True)) sess.run([tf.global_variables_initializer()]) tf.get_variable_scope().reuse_variables() outputs_dict1, _ = tf.nn.seq2seq.one2many_rnn_seq2seq( enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2, feed_previous=True) outputs_dict2, _ = tf.nn.seq2seq.one2many_rnn_seq2seq( enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict, embedding_size=2, feed_previous=True) res1 = sess.run(outputs_dict1["0"]) res2 = sess.run(outputs_dict2["0"]) res3 = sess.run(outputs_dict3["0"]) self.assertAllClose(res1, res2) self.assertAllClose(res1, res3) def testSequenceLoss(self):
tensorflow.nn.seq2seq.one2many_rnn_seq2seq
2,422
from tensorflow.python.eager import test if __name__ == "__main__": test.main()
tensorflow.python.eager.test.main
2,423
import tensorflow as tf offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) image = tf.image.decode_and_crop_jpeg( byte, crop_window, channels=3, **JPEG_OPT) image = uint8_resize_bicubic(image, [224, 224]) return image def bad(): image = tf.image.decode_jpeg( tf.reshape(byte, shape=[]), 3, **JPEG_OPT) image = resize_shortest_edge(image, jpeg_shape, 224) image = center_crop(image, 224) return image image = tf.cond(is_bad, bad, good) # TODO other imgproc image = lighting(image, 0.1, eigval=np.array([0.2175, 0.0188, 0.0045], dtype='float32') * 255.0, eigvec=np.array([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
tensorflow.reshape
2,424
import tensorflow as tf

y1 = y0 + 1
z0 = tf.to_int32(tf.floor(z))
z1 = z0 + 1
x0_clip = tf.clip_by_value(x0, zero, max_x)
x1_clip = tf.clip_by_value(x1, zero, max_x)
y0_clip = tf.clip_by_value(y0, zero, max_y)
y1_clip = tf.clip_by_value(y1, zero, max_y)
tensorflow.clip_by_value
2,425
import tensorflow as tf X = tf.reshape(X, [-1, n_ctx, 2]) M = tf.reshape(M, [-1, n_ctx]) h = embed(X, we) #h=[-1,n_ctx,emb] for layer in range(n_layer): h = block(h, 'h%d'%layer, train=train, scale=True) #h=[-1,n_ctx,emb] lm_h [-1,emb] lm_h = tf.reshape(h[:, :-1], [-1, n_embd]) lm_logits = tf.matmul(lm_h, we, transpose_b=True) lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1])) lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1]-1]) lm_losses = tf.reduce_sum(lm_losses*M[:, 1:], 1)/tf.reduce_sum(M[:, 1:], 1) clf_h = tf.reshape(h, [-1, n_embd]) pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32) clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx) clf_h = tf.reshape(clf_h, [-1, 2, n_embd]) if train and clf_pdrop > 0: shape = shape_list(clf_h) shape[1] = 1 clf_h = tf.nn.dropout(clf_h, 1-clf_pdrop, shape) clf_h = tf.reshape(clf_h, [-1, n_embd]) clf_logits = clf(clf_h, 1, train=train)
tensorflow.reduce_sum
2,426
import tensorflow as tf internals[name] = tf.gather(params=self.internals_memory[name], indices=indices) actions = dict() for name in sorted(self.actions_memory): actions[name] = tf.gather(params=self.actions_memory[name], indices=indices) terminal = tf.gather(params=self.terminal_memory, indices=indices) reward = tf.gather(params=self.reward_memory, indices=indices)
tensorflow.gather
2,427
import tensorflow as tf def _fake_image_resizer_fn(image, mask): return (image, mask, tf.shape(image)) class DataTransformationFnTest(test_case.TestCase): def test_combine_additional_channels_if_present(self): image = np.random.rand(4, 4, 3).astype(np.float32) additional_channels = np.random.rand(4, 4, 2).astype(np.float32) tensor_dict = { fields.InputDataFields.image: tf.constant(image), fields.InputDataFields.image_additional_channels: tf.constant(additional_channels), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1, 1], np.int32)) } input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn,
tensorflow.constant
2,428
from tensorflow.python.client import session def test_hashed_output_v1_has_collision(self): """Tests the old version of the fingerprint concatenation has collisions. """ # The last 10 bits of 359 and 1024+359 are identical. # As a result, all the crosses collide. t1 = constant_op.constant([[359], [359 + 1024]]) t2 = constant_op.constant([list(range(10)), list(range(10))]) cross = sparse_feature_cross_op.sparse_feature_cross( [t2, t1], hashed_output=True, num_buckets=1024) cross_dense = sparse_ops.sparse_tensor_to_dense(cross) with session.Session(): values = cross_dense.eval() self.assertTrue(numpy.equal(values[0], values[1]).all()) def test_hashed_output_v2_has_no_collision(self): """Tests the new version of the fingerprint concatenation has no collisions. """ # Although the last 10 bits of 359 and 1024+359 are identical. # As a result, all the crosses shouldn't collide. t1 = constant_op.constant([[359], [359 + 1024]]) t2 = constant_op.constant([list(range(10)), list(range(10))])
tensorflow.python.client.session.Session
2,429
import tensorflow as tf

with tf.variable_scope(scope):
    if output_length == 1:
        pool = tf.nn.avg_pool(input_data, [1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
        pool = tf.reduce_mean(pool, axis=[1, 2])
        pool = tf.squeeze(pool, axis=[1, 2])
        return pool
tensorflow.reduce_mean
2,430
import tensorflow as tf use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, ) else: estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params={ "batch_size": FLAGS.train_batch_size if FLAGS.do_train else FLAGS.eval_batch_size,
tensorflow.estimator.Estimator
2,431
import tensorflow as tf raise ValueError("Height not divisible by 2.") if width % 2 != 0: raise ValueError("Width not divisible by 2.") weights = numpy.zeros((2, 2, channels, 4 * channels)) for idx_ch in xrange(channels): slice_2 = slice(idx_ch, (idx_ch + 1)) slice_3 = slice((idx_ch * 4), ((idx_ch + 1) * 4)) weights[:, :, slice_2, slice_3] = SQUEEZE_MATRIX shuffle_channels = [idx_ch * 4 for idx_ch in xrange(channels)] shuffle_channels += [idx_ch * 4 + 1 for idx_ch in xrange(channels)] shuffle_channels += [idx_ch * 4 + 2 for idx_ch in xrange(channels)] shuffle_channels += [idx_ch * 4 + 3 for idx_ch in xrange(channels)] shuffle_channels = numpy.array(shuffle_channels) weights = weights[:, :, :, shuffle_channels].astype("float32") if reverse: res = tf.nn.conv2d_transpose( value=input_, filter=weights, output_shape=[batch_size, height * 2, width * 2, channels], strides=[1, 2, 2, 1], padding="SAME", name="unsqueeze_2x2") else: res = tf.nn.conv2d( input=input_, filter=weights, strides=[1, 2, 2, 1], padding="SAME", name="squeeze_2x2")
tensorflow.nn.conv2d_transpose
2,432
import tensorflow as tf rf=3, stride=1, init_scale=np.sqrt(2))) nh = np.prod([v.value for v in c3.get_shape()[1:]]) h3 = tf.reshape(c3, [-1, nh]) pre_s = tf.nn.relu(self.fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) # Critic # 定義變數 # self.tfs = tf.placeholder(tf.float32, [None, image_features], 'state') self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r') # 建立網路層 l1 = tf.layers.dense( inputs=pre_s, units=100, # number of hidden units activation=tf.nn.relu, name='l1' ) self.v = tf.layers.dense( inputs=l1, units=1, # output units activation=None, name='V'
tensorflow.layers.dense
2,433
import tensorflow as tf { 'pred_label':pred_label, "max_prob":max_prob } ) } ) return estimator_spec elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, logits, label_ids): """Computes the loss and accuracy of the model.""" sentence_log_probs = tf.reshape( logits, [-1, logits.shape[-1]]) sentence_predictions = tf.argmax( logits, axis=-1, output_type=tf.int32) sentence_labels = tf.reshape(label_ids, [-1]) sentence_accuracy = tf.metrics.accuracy( labels=label_ids, predictions=sentence_predictions) sentence_mean_loss = tf.metrics.mean( values=per_example_loss) sentence_f = tf_metrics.f1(label_ids, sentence_predictions, num_labels, label_lst, average="macro")
tensorflow.reshape
2,434
import tensorflow as tf flattened_filters = policy_utils.flatten(conv2) self.z = tf.layers.dense(inputs=flattened_filters, units=256, activation=tf.nn.elu) def build_manager(self): with tf.variable_scope('manager'): # Calculate manager internal state self.s = tf.layers.dense(inputs=self.z, units=self.g_dim, activation=tf.nn.elu) # Calculate manager output g x = tf.expand_dims(self.s, [0]) self.manager_lstm = SingleStepLSTM(x, self.g_dim,
tensorflow.layers.dense
2,435
import tensorflow as tf kl_sum = [] for n in range(Datum.N): kl_sum.append(gauss_kl(mu[:, n][:,None], # M x 1 sqrt_diag[:, n][:, None] if diag else sqrt[n, :, :][None, :, :], # 1 x M x M or M x 1 K if shared_k else K_batch[n, :, :][None,:,:])) # 1 x M x M or M x M kl_sum =tf.reduce_sum(kl_sum) assert_almost_equal(kl_sum.eval(), kl_batch.eval()) def tf_kl_1d(q_mu, q_sigma, p_var=1.0): p_var = tf.ones_like(q_sigma) if p_var is None else p_var q_var = tf.square(q_sigma) kl = 0.5 * (q_var / p_var + tf.square(q_mu) / p_var - 1 + tf.log(p_var / q_var)) return tf.reduce_sum(kl) @pytest.mark.parametrize('white', [True, False]) def test_oned(session_tf, white, mu, sqrt, K_batch): """ Check that the KL divergence matches a 1D by-hand calculation. """ m = 0
tensorflow.square
2,436
import tensorflow as tf

indices = tf.random.shuffle(indices)
indices = tf.reshape(indices, [-1])
tensorflow.reshape
2,437
import tensorflow as tf # scale preds so that the class probas of each sample sum to 1 output /= tf.reduce_sum( output, axis=len(output.get_shape()) - 1, keep_dims=True) # manual computation of crossentropy epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype) output = tf.clip_by_value(output, epsilon, 1. - epsilon) return -tf.reduce_sum( target * tf.log(output), axis=len(output.get_shape()) - 1) else: try: return tf.nn.softmax_cross_entropy_with_logits( labels=target, logits=output) except TypeError: return tf.nn.softmax_cross_entropy_with_logits( logits=output, labels=target) def sparse_categorical_crossentropy(output, target, from_logits=False): """Categorical crossentropy between an output tensor and a target tensor, where the target is an integer tensor. """ # Note: tf.nn.softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype) output = tf.clip_by_value(output, epsilon, 1 - epsilon)
tensorflow.nn.softmax_cross_entropy_with_logits
2,438
import tensorflow as tf "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), }
tensorflow.FixedLenFeature
2,439
import tensorflow as tf def test(model, eval_data): """Computes the average loss on eval_data, which should be a Dataset.""" avg_loss = tfe.metrics.Mean("loss") for (labels, chars, sequence_length) in tfe.Iterator(eval_data): predictions = model((chars, sequence_length), training=False) avg_loss(loss(labels, predictions)) print("eval/loss: %.6f\n" % avg_loss.result()) with tf.contrib.summary.always_record_summaries(): tf.contrib.summary.scalar("loss", avg_loss.result()) def train_one_epoch(model, optimizer, train_data, log_interval=10): """Trains model on train_data using optimizer.""" tf.train.get_or_create_global_step()
tensorflow.contrib.summary.always_record_summaries
2,440
import tensorflow as tf logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32 ) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights",
tensorflow.reduce_sum
2,441
from tensorflow.python.platform import gfile tf.initialize_all_variables().run() self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertEqual(2, len(gfile.Glob(s1))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertEqual(2, len(gfile.Glob(s1))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) self.assertEqual(2, len(gfile.Glob(s2))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertEqual(0, len(gfile.Glob(s1))) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1))) self.assertEqual(2, len(gfile.Glob(s2))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertEqual(2, len(gfile.Glob(s3))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
tensorflow.python.platform.gfile.Glob
2,442
import tensorflow as tf locs, scales = tf.map_fn(loop_hyper_deocder, zs, dtype=(tf.float32, tf.float32), parallel_iterations=1, back_prop=False) lower_bound = 1e-9# TODO scales = tf.maximum(scales, lower_bound) print("Hyper Decoder") ys = conditional_entropy_model.decompress(y_strings, locs, scales, y_min_v, y_max_v, y_shape) print("Entropy Decoder") def loop_synthesis(element): y = tf.expand_dims(element[0], 0) x_coori = tf.expand_dims(element[1], 0) x_coori= tf.cast(x_coori,tf.float32) x = synthesis_transform(x_coori,y) return tf.squeeze(x, [0]) element=[ys,x_coori] xs = tf.map_fn(loop_synthesis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False) print("Synthesis Transform") return xs ###################################### write & read binary files. ######################################
tensorflow.expand_dims
2,443
import tensorflow as tf def testSPINN(self): with tf.device(self._test_device): embedding_dims = 10 d_tracker = 8 sequence_length = 15 num_transitions = 27 config_tuple = collections.namedtuple( "Config", ["d_hidden", "d_proj", "d_tracker", "predict"]) config = config_tuple( embedding_dims, embedding_dims * 2, d_tracker, False) s = spinn.SPINN(config) # Create some fake data. buffers = tf.random_normal((sequence_length, 1, config.d_proj)) transitions = tf.constant( [[3], [3], [2], [3], [3], [3], [2], [2], [2], [3], [3], [3], [2], [3], [3], [2], [2], [3], [3], [3], [2], [2], [2], [2], [3], [2], [2]], dtype=tf.int64) self.assertEqual(tf.int64, transitions.dtype) self.assertEqual((num_transitions, 1), transitions.shape) out = s(buffers, transitions, training=True) self.assertEqual(tf.float32, out.dtype) self.assertEqual((1, embedding_dims), out.shape) def testSNLIClassifierAndTrainer(self): with tf.device(self._test_device):
tensorflow.random_normal
2,444
import tensorflow as tf device='/device:CPU:0'): with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], weights_regularizer=weights_regularizer, biases_regularizer=biases_regularizer, biases_initializer=tf.constant_initializer(0.0)): gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(self.get_gtboxes_and_label, inp=[inputs_list[i][1], inputs_list[i][2], inputs_list[i][3]],
tensorflow.constant_initializer
2,445
import tensorflow as tf tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64:
tensorflow.FixedLenFeature
2,446
from tensorflow.python.framework import ops default_optimizer="Ftrl", default_learning_rate=1. / math.sqrt(len( self._get_linear_feature_columns()))) return [ self._linear_optimizer.apply_gradients(zip(linear_grads, linear_vars)) ] return [] def _get_dnn_vars(self): if self._get_dnn_feature_columns(): return ops.get_collection(self._dnn_weight_collection) return [] def _get_dnn_training_ops(self, dnn_grads, dnn_vars): if self._get_dnn_feature_columns(): self._dnn_optimizer = self._get_optimizer(self._dnn_optimizer, default_optimizer="Adagrad", default_learning_rate=0.05) return [self._dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))] return []
tensorflow.python.framework.ops.get_collection
2,447
from tensorflow.contrib.framework import tensor_util either `metrics_collections` or `updates_collections` are not a list or tuple. """ predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape())
tensorflow.contrib.framework.tensor_util.remove_squeezable_dimensions
2,448
from tensorflow.python.framework import tensor_util self.assertEqual(base_params, copy_params) def testIsScalar(self): with self.test_session(): mu = 1. sigma = 2. normal = dists.Normal(mu, sigma, validate_args=True) self.assertTrue(tensor_util.constant_value(normal.is_scalar_event)) self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch)) normal = dists.Normal([mu], [sigma], validate_args=True) self.assertTrue(tensor_util.constant_value(normal.is_scalar_event)) self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch)) mvn = dists.MultivariateNormalDiag([mu], [sigma], validate_args=True) self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event)) self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch)) mvn = dists.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True) self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event)) self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch)) # We now test every codepath within the underlying is_scalar_helper
tensorflow.python.framework.tensor_util.constant_value
2,449
import tensorflow as tf

    weighted_average = dense(weighted_average, encoder.attn_size)
elif pos is not None:
    weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
    weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
tensorflow.squeeze
2,450
import tensorflow as tf if other_inputs is not None and chaining_stop_gradient: other_inputs = tf.stop_gradient(other_inputs) parameters = dict(encoders=encoders[:1], decoder=decoder, encoder_inputs=encoder_inputs[:1], other_inputs=other_inputs, training=training) attention_states, encoder_state, encoder_input_length[:1] = multi_encoder( encoder_input_length=encoder_input_length[:1], **parameters) if chaining_stop_gradient: attns = tf.stop_gradient(attns) states = tf.stop_gradient(states) decoder_outputs = tf.stop_gradient(decoder_outputs) if chaining_strategy == 'concat_attns': attention_states[0] = tf.concat([attention_states[0], attns], axis=2) elif chaining_strategy == 'concat_states': attention_states[0] = tf.concat([attention_states[0], states], axis=2) elif chaining_strategy == 'sum_attns': attention_states[0] += attns elif chaining_strategy in ('map_attns', 'map_states', 'map_outputs'):
tensorflow.stop_gradient
2,451
import tensorflow as tf

    assert tf.get_variable_scope().reuse is False
    d = tf.contrib.layers.conv2d(layer_input, filters, kernel_size=f_size, stride=2, padding='SAME')
    if norm:
        d = tf.contrib.layers.batch_norm(d)
    d = lrelu(d, alpha=0.2)
    return d
tensorflow.contrib.layers.batch_norm
2,452
import tensorflow as tf self.assertEqual(logits_.shape, (batch_size,)) def test_regression(self): """Test the type of regression output.""" batch_size = 8 hparams = { "pretrained_model_name": None, "regr_strategy": "cls_time" } inputs = tf.placeholder(tf.int32, shape=[batch_size, 6]) regressor = XLNetRegressor(hparams=hparams) logits = regressor(inputs) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) logits_ = sess.run( logits, feed_dict={inputs: np.random.randint(30521, size=(batch_size, 6))}) self.assertEqual(logits_.dtype, np.float32) if __name__ == "__main__": tf.test.main()
tensorflow.global_variables_initializer
2,453
import tensorflow as tf

logits = tf.log([1.0 - self._relabel_prob, self._relabel_prob])
mask = tf.squeeze(
    tf.random.categorical(
        logits[None], num_samples=self._sample_batch_size))
tensorflow.random.categorical
2,454
import tensorflow as tf res = (input_ - used_mean) / tf.sqrt(used_var + epsilon) # de-normalize if scale: res *= gamma res += beta # update variables if train: with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]): with ops.colocate_with(mean): new_mean = tf.assign_sub( mean, tf.check_numerics(decay * (mean - cur_mean), "NaN in moving mean.")) with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]): with ops.colocate_with(var): new_var = tf.assign_sub(
tensorflow.name_scope
2,455
from tensorflow.python.training import saver as saver_lib # Currently we are not syncronized with saving checkpoints, which leads to # runtime errors when we are calling export on the same global step. # Exports depend on saved checkpoints for constructing the graph and # getting the global step from the graph instance saved in the checkpoint. # If the checkpoint is stale with respect to current step, the global step # is taken to be the last saved checkpoint's global step and exporter # doesn't export the same checkpoint again with the following error. logging.info("Skipping exporting because the existing checkpoint has " "already been exported. " "Consider exporting less frequently.") def end(self, session=None): super(ExportMonitor, self).end(session=session) latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir) if latest_path is None: logging.info("Skipping export at the end since model has not been saved " "yet.") return try: self._last_export_dir = self._estimator.export( self.export_dir, exports_to_keep=self.exports_to_keep, signature_fn=self.signature_fn, input_fn=self._input_fn, default_batch_size=self._default_batch_size, input_feature_key=self._input_feature_key,
tensorflow.python.training.saver.latest_checkpoint
2,456
import tensorflow as tf initializer=tf.truncated_normal_initializer(stddev=0.1)) self.nnweights.append(weights) biases = tf.get_variable('biases', [hidden_layers_node[i]], initializer=tf.constant_initializer(0.0)) layer_out = tf.nn.dropout(tf.matmul(prev_x, weights) + biases, dropout_keep_prob) if activation == 'relu': layer_out = tf.nn.relu(layer_out) elif activation == 'sigmoid': layer_out = tf.nn.sigmoid(layer_out) elif activation == 'tanh': layer_out = tf.nn.tanh(layer_out) else: raise NotImplementedError('activation not recognized') prev_node = hidden_layers_node[i]
tensorflow.nn.relu
2,457
from tensorflow.python.util import compat for k in kwargs_attr: self._definition.attr[k].CopyFrom(kwargs_attr[k]) # Hash the definition and its dependencies. hasher = hashlib.sha1() def _hash_func_def(): """Hash the function definition agnostic to node/map ordering.""" def update_num(n): hasher.update(compat.as_bytes("%x" % n)) def update_str(s): update_num(len(s)) hasher.update(compat.as_bytes(s)) def update_strs(slist): update_num(len(slist)) for s in slist: update_str(s) for adef in self._definition.signature.input_arg: update_str(adef.SerializeToString()) for adef in self._definition.signature.output_arg: update_str(adef.SerializeToString()) for n in sorted(self._definition.node_def, key=lambda n: n.name):
tensorflow.python.util.compat.as_bytes
2,458
import tensorflow as tf new_shape = [1, 1, 1, channnel] if ndims == 2: new_shape = [1, channnel] if use_bias: beta = tf.get_variable('beta', [channnel], initializer=tf.constant_initializer()) beta = tf.reshape(beta, new_shape) else: beta = tf.zeros([1] * ndims, name='beta') if use_scale: gamma = tf.get_variable('gamma', [channnel], initializer=tf.constant_initializer(1.0)) gamma = tf.reshape(gamma, new_shape) else: gamma = tf.ones([1] * ndims, name='gamma') return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name) @staticmethod def instancenorm(inputdata, epsilon=1e-5, data_format='NHWC', use_affine=True, name=None): """ :param name: :param inputdata: :param epsilon: :param data_format: :param use_affine:
tensorflow.ones
2,459
import tensorflow as tf

    tf.range(ids_ta.size(), ids_ta.size() + tf.size(encoded_ids)),
    encoded_ids)
tensorflow.size
2,460
import tensorflow as tf 1) w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) * (z1_f - z) * x0_valid * y1_valid * z1_valid), 1) w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) * (z1_f - z) * x1_valid * y0_valid * z1_valid), 1) w_z0_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) * (z1_f - z) * x0_valid * y0_valid * z1_valid), 1) w_z1_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) * (z - z0_f) * x1_valid * y1_valid * z0_valid), 1) w_z1_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) * (z - z0_f) * x0_valid * y1_valid * z0_valid), 1) w_z1_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) * (z - z0_f) * x1_valid * y0_valid * z0_valid), 1) w_z1_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) * (z - z0_f) * x0_valid * y0_valid * z0_valid), 1) output = tf.add_n([ w_z0_y0_x0 * i_z0_y0_x0, w_z0_y0_x1 * i_z0_y0_x1, w_z0_y1_x0 * i_z0_y1_x0, w_z0_y1_x1 * i_z0_y1_x1,
tensorflow.expand_dims
2,461
import tensorflow as tf return model_fn def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids, label_weights, clip): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act),
tensorflow.variable_scope
2,462
import tensorflow as tf self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testDynamicAttentionDecoder2(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): cell = tf.nn.rnn_cell.GRUCell(2) inp = tf.constant(0.5, shape=[2, 2, 2]) enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32) attn_states = enc_outputs dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 dec, mem = tf.nn.seq2seq.attention_decoder( dec_inp, enc_state, attn_states, cell, output_size=4, num_heads=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape)
tensorflow.constant
2,463
import tensorflow as tf # Split the series because the rnn cell needs time_steps features, each of shape: hidden = tf.split(0, config.n_steps/4, hidden) # (0, 128, [128*batch_size, 32]) # New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden] # Define LSTM cell of first hidden layer: lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0) # Stack two LSTM layers, both layers has the same shape lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2) # Get LSTM outputs, the states are internal to the LSTM cells,they are not our attention here
tensorflow.nn.rnn_cell.BasicLSTMCell
2,464
import tensorflow as tf unorm_w = tf.exp((tgt_flat1 + tgt_flat2)/temp) loss = unorm_w * loss / (tf.reduce_sum(unorm_w)) a = tf.print(tf.reduce_sum(unorm_w)) with tf.control_dependencies([a]): final_loss = tf.reduce_sum(loss) return final_loss, cstr_pct def contra_traj_lossV8(pred, tgt, horizon=12): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0) horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0) pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1]) tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = tf.cast(tgt_dif > 0, tf.bool) tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = tf.where(geq, pred_dif, -pred_dif) loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif) cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32) final_loss = tf.reduce_mean(loss)
tensorflow.split
2,465
import tensorflow as tf model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") filed_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) tf.logging.info("***** Running training *****") tf.logging.info(" Num examples = %d", len(train_examples)) tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
tensorflow.logging.info
2,466
import tensorflow as tf

if is_training and config.keep_prob < 1:
    inputs = tf.nn.dropout(inputs, config.keep_prob)
tensorflow.nn.dropout
2,467
import tensorflow as tf

unorm_w = tf.exp((tgt_flat1 + tgt_flat2)/temp)
loss = unorm_w * loss / (tf.reduce_sum(unorm_w))
tensorflow.reduce_sum
2,468
import tensorflow as tf big = tf.expand_dims(big, 1) abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2) mask = 1. - big masked = tf.exp(-abs_dif) * mask def half(tens, second): m, n, _ = tens.get_shape().as_list() return tf.slice(tens, [0, 0, second*(batch_size/2)], [m, n, batch_size/2]) f1 = tf.reduce_sum(half(masked, 0), 2) / tf.reduce_sum(half(mask, 0)) f2 = tf.reduce_sum(half(masked, 1), 2) / tf.reduce_sum(half(mask, 1)) return tf.concat([x, f1, f2], 1) def batch_norm(x, train, name, decay=0.99, epsilon=1e-5): shape = x.get_shape().as_list()
tensorflow.slice
2,469
import tensorflow as tf total_loss= total_loss, loss = loss, train_step=train_step, preds = predictions, saver= tf.train.Saver())
tensorflow.train.Saver
2,470
import tensorflow as tf state: shape [batch, state_dim] state_dim: integer n_agents: integer n_h_mixer: integer """ agent_qs_reshaped = tf.reshape(agent_qs, [-1, 1, n_agents]) # n_h_mixer * n_agents because result will be reshaped into matrix hyper_w_1 = get_variable('hyper_w_1', [state_dim, n_h_mixer*n_agents]) hyper_w_final = get_variable('hyper_w_final', [state_dim, n_h_mixer]) hyper_b_1 = tf.get_variable('hyper_b_1', [state_dim, n_h_mixer]) hyper_b_final_l1 = tf.layers.dense(inputs=state, units=n_h_mixer, activation=tf.nn.relu, use_bias=False, name='hyper_b_final_l1') hyper_b_final = tf.layers.dense(inputs=hyper_b_final_l1, units=1, activation=None, use_bias=False, name='hyper_b_final') # First layer w1 = tf.abs(tf.matmul(state, hyper_w_1)) b1 = tf.matmul(state, hyper_b_1) w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer]) # reshape into batch of matrices
tensorflow.get_variable
2,471
import tensorflow as tf name: String, name of returned Keras variable. seed: Integer, random seed. Returns ------- A tf.Variable, filled with drawn samples. """ shape = tuple(map(int, shape)) if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e8) value = tf.random_normal_initializer( mean, scale, dtype=dtype, seed=seed)(shape) return tf.Variable(value, dtype=dtype, name=name) def max(x, axis=None, keepdims=False): """Maximum value in a tensor. Parameters ---------- x: A tensor or variable. axis: An integer, the axis to find maximum values. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`,
tensorflow.Variable
2,472
import tensorflow as tf

length_2 = tf.reshape(box2[3 + 0], [1])
tensorflow.reshape
2,473
import tensorflow as tf if summarize_gradients: # Add summaries to the gradients. summaries |= set(_add_gradients_summaries(clones_gradients)) # Create gradient updates. grad_updates = optimizer.apply_gradients(clones_gradients, global_step=global_step) update_ops.append(grad_updates) update_op = tf.group(*update_ops) train_op = control_flow_ops.with_dependencies([update_op], total_loss, name='train_op') else: clones_losses = [] regularization_losses = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) for clone in clones: with tf.name_scope(clone.scope): clone_loss = _gather_clone_loss(clone, len(clones), regularization_losses) if clone_loss is not None: clones_losses.append(clone_loss) # Only use regularization_losses for the first clone regularization_losses = None if clones_losses: total_loss = tf.add_n(clones_losses, name='total_loss') # Add the summaries from the first clone. These contain the summaries
tensorflow.get_collection
2,474
import tensorflow as tf bshape = [1, 1, 1, nf] bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1] nin = x.get_shape()[channel_ax].value wshape = [rf, rf, nin, nf] with tf.variable_scope(scope): w = tf.get_variable( "w", wshape, initializer=self.ortho_init(init_scale)) b = tf.get_variable( "b", bias_var_shape, initializer=tf.constant_initializer(0.0)) if not one_dim_bias and data_format == 'NHWC': b = tf.reshape(b, bshape) return tf.nn.conv2d( x, w, strides=strides, padding=pad, data_format=data_format) + b def fc(self, x, scope, nh, *, init_scale=1.0, init_bias=0.0): with tf.variable_scope(scope): nin = x.get_shape()[1].value w = tf.get_variable(
tensorflow.reshape
2,475
import tensorflow as tf if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale") reset_ph = tf.placeholder(tf.bool, (), name="reset")
tensorflow.placeholder
2,476
import tensorflow as tf axes=[0], bn_lag=DEFAULT_BN_LAG): """Batch normalization.""" # create variables with tf.variable_scope(name): var = variable_on_cpu( "var", [dim], tf.constant_initializer(1.), trainable=False) mean = variable_on_cpu( "mean", [dim], tf.constant_initializer(0.), trainable=False) step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False) if scale: gamma = variable_on_cpu("gamma", [dim], tf.constant_initializer(1.)) beta = variable_on_cpu("beta", [dim], tf.constant_initializer(0.)) # choose the appropriate moments if train: used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm") cur_mean, cur_var = used_mean, used_var if bn_lag > 0.:
tensorflow.constant_initializer
2,477
from tensorflow.python.framework import ops """ with self._name_scope(name, values=[x]): x = ops.convert_to_tensor(x, name="x") ndims = x.get_shape().ndims if ndims is None: return array_ops.rank(x, name="ndims") return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims") def get_sample_ndims(self, x, name="get_sample_ndims"): """Returns number of dimensions corresponding to iid draws ("sample"). Args:
tensorflow.python.framework.ops.convert_to_tensor
2,478
import tensorflow as tf epsilon=hparams.vq_epsilon, ema=hparams.ema, means=means) inputs = None batch_size = hparams.batch_size targets = tf.random_uniform([batch_size, hparams.img_len, hparams.img_len, hparams.hidden_size], minval=-1., maxval=1.) target_space_id = None tf.train.create_global_step() decoder_output, losses, cache = latent_layers.transformer_autoencoder( inputs, targets, target_space_id, hparams) self.assertEqual(set(six.iterkeys(losses)), {"extra", "extra_loss", "latent_pred"}) self.evaluate(tf.global_variables_initializer()) decoder_output_, extra_loss_, latent_pred_ = self.evaluate( [decoder_output, losses["extra_loss"], losses["latent_pred"]]) self.assertEqual(decoder_output_.shape, (batch_size, hparams.img_len, hparams.img_len,
tensorflow.train.create_global_step
2,479
import tensorflow as tf

    vals = d.prob(tf.range(start=-size, limit=size + 1, dtype=tf.float32))
    gauss_kernel = tf.einsum('i,j->ij', vals, vals)
    return gauss_kernel / tf.reduce_sum(gauss_kernel)
tensorflow.reduce_sum
2,480
import tensorflow as tf
tensorflow.name_scope
2,481
import tensorflow as tf n_classifiers=num_actions_arguments, n_classes=actions_arguments_vocabulary_length, name="softmax_2d_predictions_arguments") if FLAGS.print_variables: for v in tf.trainable_variables(): print(v.name) with tf.name_scope('loss'): one_hot_labels_action = dense_to_one_hot(actions_template, action_templates_vocabulary_length) one_hot_labels_arguments = dense_to_one_hot(actions_arguments, actions_arguments_vocabulary_length) loss_action = tf.reduce_mean( - one_hot_labels_action * tf.log(tf.clip_by_value(self.predictions_action, 1e-10, 1.0)), name='loss' ) loss_arguments = tf.reduce_mean( - one_hot_labels_arguments * tf.log(tf.clip_by_value(self.predictions_arguments, 1e-10, 1.0)), name='loss' ) self.loss = loss_action + loss_arguments tf.scalar_summary('loss', self.loss) with tf.name_scope('accuracy'):
tensorflow.clip_by_value
2,482
import tensorflow as tf cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask)) location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask)) gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask)) predictions = { 'classes': tf.argmax(cls_pred, axis=-1), 'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1), 'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4]) } if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
tensorflow.nn.softmax
2,483
from tensorflow.python.platform import gfile self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1))) self.assertEqual(2, len(gfile.Glob(s2))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertEqual(2, len(gfile.Glob(s3))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3))) class KeepCheckpointEveryNHoursTest(tf.test.TestCase): def testNonSharded(self): save_dir = os.path.join(self.get_temp_dir(), "keep_checkpoint_every_n_hours") try: gfile.DeleteRecursively(save_dir) except OSError: pass # Ignore gfile.MakeDirs(save_dir) with self.test_session() as sess: v = tf.Variable([10.0], name="v") # Run the initializer NOW to avoid the 0.5s overhead of the first Run() # call, which throws the test timing off in fastbuild mode. tf.initialize_all_variables().run() # Create a saver that will keep the last 2 checkpoints plus one every 0.7 # seconds. start_time = time.time()
tensorflow.python.platform.gfile.DeleteRecursively
2,484
import tensorflow.contrib.eager as tfe log_every=1, dev_every=2, save_every=2, lr_decay_every=1, lr_decay_by=0.75, inference_premise=inference_premise, inference_hypothesis=inference_hypothesis) class SpinnTest(test_util.TensorFlowTestCase): def setUp(self): super(SpinnTest, self).setUp() self._test_device = "gpu:0" if tfe.num_gpus() else "cpu:0" self._temp_data_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self._temp_data_dir) super(SpinnTest, self).tearDown() def testBundle(self): with tf.device(self._test_device): lstm_iter = [np.array([[0, 1], [2, 3]], dtype=np.float32), np.array([[0, -1], [-2, -3]], dtype=np.float32), np.array([[0, 2], [4, 6]], dtype=np.float32), np.array([[0, -2], [-4, -6]], dtype=np.float32)]
tensorflow.contrib.eager.num_gpus
2,485
import tensorflow as tf logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size],
tensorflow.reduce_sum
2,486
import tensorflow as tf dual_rate_factor=dual_rate_factor) # Maybe create label_priors. label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections) # Calculate weighted loss and other outputs. The log(2.0) term corrects for # logloss not being an upper bound on the indicator function. weighted_loss = weights * losses_utils.weighted_surrogate_loss( labels, logits, surrogate_type, positive_weights=lambdas, negative_weights=1.0) maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) lambda_term = lambdas * label_priors * (target_recall - 1.0) * maybe_log2 loss = tf.reshape(weighted_loss + lambda_term, original_shape) other_outputs = { 'lambdas': lambdas_variable, 'label_priors': label_priors, 'true_positives_lower_bound':
tensorflow.cast
2,487
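A small sketch of tf.cast matching the dtype-alignment pattern in the seed (casting a scalar to the logits dtype before mixing tensors). Assumes TF 1.x, where tf.log is still exposed.

import tensorflow as tf

logits = tf.constant([0.2, -1.5, 3.0], dtype=tf.float64)
maybe_log2 = tf.log(2.0)                                  # float32 by default
maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) # now float64, safe to multiply
scaled = logits * maybe_log2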
import tensorflow as tf # Test case 1, 2. x = tf.placeholder(dtype=tf.int32, shape=[]) # None would fire an exception were it actually executed. self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None)) self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None), lambda: tf.shape(x))) x = tf.placeholder(dtype=tf.int32, shape=[1]) # None would fire an exception were it actually executed. self.assertFalse(normal._is_scalar_helper(x.get_shape, lambda: None)) self.assertFalse(normal._is_scalar_helper(lambda: tf.TensorShape(None), lambda: tf.shape(x))) # Test case 3. x = tf.placeholder(dtype=tf.int32) is_scalar = normal._is_scalar_helper(x.get_shape, lambda: tf.shape(x)) self.assertTrue(is_scalar.eval(feed_dict={x: 1})) self.assertFalse(is_scalar.eval(feed_dict={x: [1]})) if __name__ == '__main__': tf.test.main()
tensorflow.shape
2,488
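A minimal sketch of tf.shape on a placeholder whose rank is unknown until feed time, mirroring the scalar-vs-vector check in the seed. Assumes TF 1.x graph mode.

import tensorflow as tf

x = tf.placeholder(dtype=tf.int32)   # shape left unspecified
dyn_shape = tf.shape(x)              # resolved at run time, not graph-build time
with tf.Session() as sess:
    print(sess.run(dyn_shape, feed_dict={x: 1}))       # [] for a scalar feed
    print(sess.run(dyn_shape, feed_dict={x: [1, 2]}))  # [2] for a vector feed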
import tensorflow as tf geq = tf.cast(tgt_dif > 0, tf.bool) tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = tf.where(geq, pred_dif, -pred_dif) loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif) cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32) final_loss = tf.reduce_mean(loss) return final_loss, cstr_pct def contra_traj_lossV7(pred, tgt, horizon=12, temp=100):
tensorflow.shape
2,489
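A small sketch of the same tf.shape call used for a different purpose than above: computing a total element count for the non-zero-fraction statistic in the seed. Assumes TF 1.x with tf.math.count_nonzero available.

import tensorflow as tf

loss = tf.constant([[0.0, 0.5], [1.2, 0.0]])
num_elems = tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)   # 4 elements
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / num_elems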
import tensorflow as tf dynamic_bsize=bsize, dynamic_bstride=bstride, dynamic_boffset=boffset) # return a list of gradients of output with respect to each input if not doAdd: # scatter blocks of zeroes over a base tensor of ones to compute a stamp-out gradient mask for dy_dybase stamp_out_blocks = sbnet_module.sparse_scatter( tf.zeros_like(blocksX), binCounts, activeBlockIndices, tf.ones_like(grad), dynamic_bsize=bsize, dynamic_bstride=bstride, dynamic_boffset=boffset, add=False) dy_dybase = grad * stamp_out_blocks return [dout_dx, None, None, dy_dybase, None, None, None] else: # d(x+ybase)/dybase = 1, so just pass back grad as dout_dybase return [dout_dx, None, None, grad, None, None, None]
tensorflow.ones_like
2,490
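A minimal sketch of tf.ones_like (and its zeros_like counterpart, which the seed also uses): build a tensor with the same shape and dtype as an existing one.

import tensorflow as tf

grad = tf.constant([[0.1, -0.2], [0.3, 0.4]])
ones = tf.ones_like(grad)    # same shape/dtype as grad, filled with 1.0
zeros = tf.zeros_like(grad)  # filled with 0.0, used as the scatter source in the seed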
import tensorflow as tf n_labeled = 1000 # Placeholders for input data and the targets x_input = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Input') x_input_l = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Labeled_Input') y_input = tf.placeholder(dtype=tf.float32, shape=[batch_size, n_labels], name='Labels') x_target = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Target') real_distribution = tf.placeholder(dtype=tf.float32, shape=[batch_size, z_dim], name='Real_distribution') categorial_distribution = tf.placeholder(dtype=tf.float32, shape=[batch_size, n_labels], name='Categorical_distribution') manual_decoder_input = tf.placeholder(dtype=tf.float32, shape=[1, z_dim + n_labels], name='Decoder_input') def form_results(): """ Forms folders for each run to store the tensorboard files, saved models and the log files.
tensorflow.placeholder
2,491
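A short sketch of tf.placeholder as an input slot that must be fed at session-run time, in the style of the seed's input placeholders. Assumes TF 1.x; the sizes are illustrative.

import numpy as np
import tensorflow as tf

batch_size, input_dim = 4, 784
x_input = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Input')
mean_pixel = tf.reduce_mean(x_input)
with tf.Session() as sess:
    feed = np.zeros((batch_size, input_dim), dtype=np.float32)
    print(sess.run(mean_pixel, feed_dict={x_input: feed}))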
import tensorflow as tf [0, 1, 0, 1, 0]], dtype=tf.int32) masks2 = tf.constant([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 0], [1, 0, 1, 1, 1]], dtype=tf.int32) pairwise_iou = isu.points_mask_pairwise_iou(masks1=masks1, masks2=masks2) expected_iou = tf.constant([0, 1, 0.4, 0.2], dtype=tf.float32) self.assertAllClose(pairwise_iou.numpy(), expected_iou.numpy()) if __name__ == '__main__': tf.test.main()
tensorflow.constant
2,492
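A minimal sketch of tf.constant building a fixed-value mask tensor like the ones in the seed's IoU test.

import tensorflow as tf

masks = tf.constant([[0, 1, 1, 0],
                     [1, 1, 0, 0]], dtype=tf.int32)   # values baked into the graph
row_sums = tf.reduce_sum(masks, axis=1)               # per-mask point counts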
import tensorflow as tf def _build_net(self, s, a, scope, trainable): with tf.variable_scope(scope): init_w = tf.random_normal_initializer(0., 0.01) init_b = tf.constant_initializer(0.01) with tf.variable_scope('l1'):
tensorflow.constant_initializer
2,493
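A small sketch of tf.constant_initializer in the same role as the seed: initializing bias variables to a fixed value under a variable scope. Assumes TF 1.x variable scoping.

import tensorflow as tf

init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)   # every bias element starts at 0.01
with tf.variable_scope('l1'):
    w = tf.get_variable('w', shape=[3, 4], initializer=init_w, trainable=True)
    b = tf.get_variable('b', shape=[4], initializer=init_b, trainable=True)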
import tensorflow as tf speed_madstd = 1.4826 * np.median(np.abs(speeds - np.median(speeds))) speed_jitter = speed_madstd return 'images/sec: %.1f +/- %.1f (jitter = %.1f)' % ( speed_mean, speed_uncertainty, speed_jitter) else: return 'images/sec: %.1f' % speed_mean def load_checkpoint(saver, sess, ckpt_dir): ckpt = tf.train.get_checkpoint_state(ckpt_dir) if ckpt and ckpt.model_checkpoint_path: if os.path.isabs(ckpt.model_checkpoint_path): # Restores from checkpoint with absolute path. model_checkpoint_path = ckpt.model_checkpoint_path else: # Restores from checkpoint with relative path. model_checkpoint_path = os.path.join(ckpt_dir, ckpt.model_checkpoint_path) # Assuming model_checkpoint_path looks something like:
tensorflow.train.get_checkpoint_state
2,494
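A minimal sketch of tf.train.get_checkpoint_state, which reads the checkpoint index file in a directory and reports the latest model path, as in the seed's load_checkpoint helper. The directory is hypothetical; nothing is restored if it holds no checkpoint.

import tensorflow as tf

v = tf.Variable([1.0], name="v")
saver = tf.train.Saver()
ckpt_dir = "/tmp/my_ckpts"                        # hypothetical checkpoint directory
ckpt = tf.train.get_checkpoint_state(ckpt_dir)    # parses ckpt_dir/checkpoint, or None
if ckpt and ckpt.model_checkpoint_path:
    with tf.Session() as sess:
        saver.restore(sess, ckpt.model_checkpoint_path)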
import tensorflow as tf print("Saved image: %d" % itr) if __name__ == "__main__": tf.app.run()
tensorflow.app.run
2,495
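A minimal sketch of the tf.app.run entry point used in the seed: it parses command-line flags and then calls main with the remaining arguments. Assumes TF 1.x.

import tensorflow as tf

def main(argv):
    del argv  # unused
    tf.logging.info("training finished")

if __name__ == "__main__":
    tf.app.run()   # parses flags, then invokes main()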
import tensorflow as tf tgtimg_z = lrelu(linear(tgtimg_h4, featsize, 'hz_lin')) with tf.variable_scope("translate") as scope: trans_h0 = lrelu(linear(tf.concat([srcimg_z, tgtctx_z], 1), featsize, 'trans_h0')) trans_z = linear(trans_h0, featsize, 'trans_z') self.translated_z = trans_z
tensorflow.concat
2,496
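A short sketch of tf.concat joining two feature vectors along axis 1, the same pattern as the seed's [srcimg_z, tgtctx_z] concatenation.

import tensorflow as tf

a = tf.constant([[1.0, 2.0]])          # shape [1, 2]
b = tf.constant([[3.0, 4.0]])          # shape [1, 2]
joined = tf.concat([a, b], 1)          # shape [1, 4]; axis 1 stacks features side by side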
import tensorflow as tf metric_value = -1 best_metric_global_step = -1 global_step = -1 tf.logging.info( "Best trial info: Step: %s, Best Value Step: %s, " "Best Value: %s", global_step, best_metric_global_step, metric_value)
tensorflow.logging.info
2,497
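A minimal sketch of tf.logging.info with printf-style arguments, as in the seed; INFO messages only appear once the verbosity is raised. Assumes TF 1.x.

import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)   # INFO is suppressed by default
step, best_step, best_value = 100, 80, 0.93
tf.logging.info("Best trial info: Step: %s, Best Value Step: %s, Best Value: %s",
                step, best_step, best_value)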
import tensorflow as tf def __init__(self, is_training, config, input_, graph): self._is_training = is_training self._input = input_ self._rnn_params = None self._cell = None self.batch_size = input_.batch_size self.num_steps = input_.num_steps hidden_size = config.hidden_size vocab_size = config.vocab_size self.graph = graph with self.graph.as_default(): with tf.device('/cpu:0'): embedding = tf.get_variable( 'embedding', [vocab_size, hidden_size], dtype=tf.float32) inputs = tf.nn.embedding_lookup(embedding, input_.input_data) if is_training and config.keep_prob < 1: inputs = tf.nn.dropout(inputs, config.keep_prob) output, state = self._build_rnn_graph(inputs, config, is_training) softmax_w = tf.get_variable( 'softmax_w', [hidden_size, vocab_size], dtype=tf.float32) softmax_b = tf.get_variable('softmax_b', [vocab_size], dtype=tf.float32) logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b) logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size]) loss = tf.contrib.seq2seq.sequence_loss( logits,
tensorflow.nn.embedding_lookup
2,498
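A small sketch of tf.nn.embedding_lookup mapping integer token ids to rows of an embedding matrix, as the seed does for its input data. Assumes TF 1.x; the vocabulary and sizes are illustrative.

import tensorflow as tf

vocab_size, hidden_size = 10, 4
embedding = tf.get_variable('embedding', [vocab_size, hidden_size], dtype=tf.float32)
input_data = tf.constant([[1, 5, 2]])                      # [batch, num_steps] of ids
inputs = tf.nn.embedding_lookup(embedding, input_data)     # [batch, num_steps, hidden_size]
print(inputs.shape)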
import tensorflow as tf 1. """ if not isinstance(tt, TensorTrainBase): raise ValueError("`tt` has to be a Tensor Train object") else: shape = shapes.lazy_raw_shape(tt) # I guess variables=tt.tt_cores is not needed here since the output of # the function doesn't depend on the values of the TT-cores, only on their # shapes etc. But I'm not 100% sure. with tf.name_scope(name): if tt.is_tt_matrix(): return matrix_ones(shape, dtype=tt.dtype) else: return tensor_ones(shape[0, :], dtype=tt.dtype) def zeros_like(tt, name='t3f_zeros_like'): """Constructs t3f.zeros with the shape of `tt`.
tensorflow.name_scope
2,499
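A minimal sketch of tf.name_scope, which only prefixes operation names (it does not affect variable reuse), matching how the seed wraps its ops for a tidy graph.

import tensorflow as tf

with tf.name_scope('t3f_ones_like'):
    x = tf.ones([2, 3], name='ones')   # op is named 't3f_ones_like/ones'
print(x.op.name)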