Columns: seed (string, 25 to 2.89k chars), seed_api (string, 14 to 102 chars), index (int64, 0 to 14.8k)
from tensorflow.python.training import moving_averages

updated_ema_means = \
    moving_averages.assign_moving_average(
tensorflow.python.training.moving_averages.assign_moving_average
13,000
import tensorflow as tf

for n, i in self.dataset_iterators.items():
    self.dataset_handles[n] = self.sess.run(i.string_handle())
self.sess.run([tf.global_variables_initializer(),
               tf.local_variables_initializer()])
tensorflow.global_variables_initializer
13,001
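For quick reference, a minimal sketch of the tensorflow.global_variables_initializer pattern named in the record above (index 13,001), written against the TF1-style graph API via tensorflow.compat.v1; the variable name and shape are illustrative assumptions:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# A trivial graph with one variable that must be initialized before use.
w = tf.get_variable("w", shape=[2, 2], initializer=tf.zeros_initializer())

with tf.Session() as sess:
    # Runs the initializer op for every global variable in the graph.
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))  # 2x2 matrix of zeros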
import tensorflow as tf

if (("/group_" + str(gid) + "/" in name) or
        ("/ffn_" + str(gid) + "/" in name) or
        ("/attention_" + str(gid) + "/" in name)):
    group_matched = True
    tf.logging.info("%s belongs to %dth", name, gid)
    assignment_map[gid][tvar_name] = name
if not group_matched:
    assignment_map[0][tvar_name] = name
tensorflow.logging.info
13,002
from tensorflow.python.ops import logging_ops self._add_hidden_layer_summary(net, "hiddenlayer_%d" % layer_id) logit = layers.legacy_fully_connected( net, self._num_label_columns(), weight_collections=[self._dnn_weight_collection], bias_collections=[self._dnn_weight_collection], name="dnn_logit") self._add_hidden_layer_summary(logit, "dnn_logit") return logit def _add_hidden_layer_summary(self, value, tag): # TODO(zakaria): Move this code to tf.learn and add test. logging_ops.scalar_summary("%s:fraction_of_zero_values" % tag, nn.zero_fraction(value)) logging_ops.histogram_summary("%s:activation" % tag, value) def _linear_logits(self, features): logits, _, _ = layers.weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=self._get_linear_feature_columns(), num_outputs=self._num_label_columns(), weight_collections=[self._linear_weight_collection], name="linear") return logits def _get_feature_dict(self, features): if isinstance(features, dict): return features
tensorflow.python.ops.logging_ops.histogram_summary
13,003
import tensorflow as tf

ratios_grid = tf.reshape(ratios_grid, [-1, 1])
ratio_sqrts = tf.sqrt(ratios_grid)
heights = scales_grid / ratio_sqrts * base_size[1]
widths = scales_grid * ratio_sqrts * base_size[0]
x_centers = tf.cast(tf.range(features_width), tf.float32)
x_centers = x_centers * stride[1]
y_centers = tf.cast(tf.range(features_height), tf.float32)
y_centers = y_centers * stride[0]
# x_centers = x_centers + offset[1]
# y_centers = y_centers + offset[0]
tensorflow.range
13,004
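For quick reference, a minimal standalone sketch of the tensorflow.range call named in the record above (index 13,004); the feature-map width and stride values are illustrative assumptions, not taken from the seed:

import tensorflow as tf

features_width = 4          # assumed feature-map width
stride = 16.0               # assumed anchor stride in pixels

# tf.range produces [0, 1, 2, 3]; casting lets us scale to pixel centers.
x_centers = tf.cast(tf.range(features_width), tf.float32) * stride
print(x_centers.numpy())    # [ 0. 16. 32. 48.]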
import tensorflow as tf in_node_a = tensor_rearrange.get_placeholder("input_0") in_node_b = tensor_rearrange.get_placeholder("input_1") in_node_c = tensor_rearrange.get_placeholder("input_2") stitched = tf.dynamic_stitch([[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]], [in_node_a, in_node_b, in_node_c]) # should be 11,5,4 list_of_parts = tf.dynamic_partition(tf.transpose(stitched, perm=[1, 2, 0]), [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0], [2, 1, 0, 3], [0, 1, 2, 3]], num_partitions=4) # after permute becomes 5,4,11, return all partitions 5,11 node_a = tf.div(list_of_parts[0], list_of_parts[1]) node_b = tf.divide(list_of_parts[2], list_of_parts[3]) trace_node = tf.trace(node_a) + node_b # there is a broadcast here out_node = tf.cast(tf.count_nonzero(trace_node), dtype=tf.float32) + tf.Variable(tf.random_normal(shape=(2, 3))) placeholders = [in_node_a, in_node_b, in_node_c] predictions = [out_node] # Run and persist tfp = TensorFlowPersistor(save_dir="partition_stitch_misc") tfp.set_placeholders(placeholders) \ .set_output_tensors(predictions) \ .set_test_data(tensor_rearrange.get_test_data()) \ .build_save_frozen_graph()
tensorflow.count_nonzero
13,005
import tensorflow as tf """Calculate mix and exc horizontal activities.""" mix_kernels = getattr(self, 'mix_kernels_%s' % layer) mix_bias = getattr(self, 'mix_bias_%s' % layer) horizontal_kernels = getattr(self, 'horizontal_kernels_%s' % layer) # h_bias = getattr(self, 'h_bias_%s' % layer) g2_intermediate = self.conv_3d_op( data=h1, weights=mix_kernels, strides=[1, 1, 1, 1, 1], symmetric_weights=self.symmetric_gate_weights, dilations=self.hgru_dilations[layer_idx]) with tf.variable_scope( '%s/g2_bn' % var_scope, reuse=self.scope_reuse) as scope: g2_intermediate = tf.contrib.layers.batch_norm( inputs=g2_intermediate + mix_bias, scale=True, center=False, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) g2 = self.gate_nl(g2_intermediate) # Horizontal activities c2 = self.conv_3d_op(
tensorflow.contrib.layers.batch_norm
13,006
import tensorflow as tf with tf.variable_scope("conv") as scope: srcimg_h0, srcimg_h1, srcimg_h2, srcimg_h3, srcimg_h4, srcimg_z = encode(srcimg) scope.reuse_variables() tgtimg_h0, tgtimg_h1, tgtimg_h2, tgtimg_h3, tgtimg_h4, tgtimg_z = encode(tgtimg) tgtctx_h0, tgtctx_h1, tgtctx_h2, tgtctx_h3, tgtctx_h4, tgtctx_z = encode(tgtctx) with tf.variable_scope("translate") as scope: trans_h0 = lrelu(linear(tf.nn.dropout(tf.concat([srcimg_z, tgtctx_z], 1), keep_prob), featsize, 'trans_h0')) trans_z = linear(tf.nn.dropout(trans_h0, keep_prob), featsize, 'trans_z') self.translated_z = trans_z s_h, s_w = self.output_height, self.output_width s_h0, s_h1, s_h2, s_h3 = \
tensorflow.variable_scope
13,007
import tensorflow as tf [1, 1, 2, 2, 2, 2, 2, 2], [1, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 2, 2, 2, 2], [0, 0, 0, 0, 2, 2, 2, 2]], dtype=tf.int32) instance_labels = tf.reshape(instance_labels, [-1]) (indices, masks_t) = isu.randomly_select_one_point_per_segment(instance_labels) masks = tf.transpose(masks_t) masks = tf.reshape(masks, [3, 5, 8]) expected_masks = self.get_instance_masks() selected_instances = tf.gather(instance_labels, indices) expected_selected_instances = tf.constant([0, 1, 2], dtype=tf.int32) self.assertAllEqual(selected_instances.numpy(), expected_selected_instances.numpy()) self.assertAllClose(masks.numpy(), expected_masks.numpy()) def test_inputs_Distances_to_centers(self):
tensorflow.reshape
13,008
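A minimal sketch of tensorflow.reshape as named in the record above (index 13,008), including the -1 wildcard used to flatten a tensor; the shapes and values are illustrative assumptions:

import tensorflow as tf

labels = tf.constant([[0, 0, 1, 1],
                      [2, 2, 0, 0]], dtype=tf.int32)   # shape [2, 4]

flat = tf.reshape(labels, [-1])        # shape [8]; -1 infers the dimension
grid = tf.reshape(flat, [4, 2])        # same 8 elements, new shape [4, 2]
print(flat.numpy(), grid.shape)        # [0 0 1 1 2 2 0 0] (4, 2)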
import tensorflow as tf

processed_image = utils.process_image(image, mean_pixel)

with tf.variable_scope("inference"):
    image_net = vgg_net(weights, processed_image)
    conv_final_layer = image_net["conv5_3"]
tensorflow.variable_scope
13,009
import tensorflow as tf with tf.variable_scope('pool'): X = tf.nn.relu(X) X = tf.nn.avg_pool(X, ksize=(1, pool_ksize, pool_ksize, 1), strides=(1, pool_stride, pool_stride, 1), padding='VALID') w //= pool_stride h //= pool_stride # Conv 1x1 with tf.variable_scope('conv_0'): X = self._do_conv(X, w, h, ch, conv_ch, filter_size=1, no_reg=True, is_train=is_train) ch = conv_ch # Global conv with tf.variable_scope('conv_1'): X = self._do_conv(X, w, h, ch, global_conv_ch, filter_size=w, no_reg=True, is_train=is_train) ch = global_conv_ch
tensorflow.variable_scope
13,010
import tensorflow as tf logits_vec = ( rewards_vec - log_pi_vec + self._gamma * (1.0 - dones_vec) * q_vals_vec) if self._relabel_type == "random": logits_vec = tf.ones_like(logits_vec) # Hack to make sampling random ## End new version if self._normalize_cols: logits_vec = logits_vec - tf.math.reduce_logsumexp( logits_vec, axis=0)[None] relabel_indices = tf.random.categorical(logits=logits_vec, num_samples=1) ### Metrics global_step = tf.compat.v1.train.get_or_create_global_step() orig_indices = tf.range( self._sample_batch_size, dtype=relabel_indices.dtype) with tf.name_scope("relabelling"): # How often are the originally commanded goals most optimal? opt_indices = tf.argmax(logits_vec, axis=1) orig_is_opt = opt_indices == orig_indices orig_opt_frac = tf.reduce_mean(tf.cast(orig_is_opt, tf.float32)) tf.compat.v2.summary.scalar( name="orig_task_optimal", data=orig_opt_frac, step=global_step) # How often is the relabelled goal optimal? # The relabel_indices are [B, 1], so we need to remove the extra dim. relabel_is_opt = tf.squeeze(relabel_indices) == orig_indices relabel_opt_frac = tf.reduce_mean(tf.cast(relabel_is_opt, tf.float32)) tf.compat.v2.summary.scalar( name="relabel_task_optimal", data=relabel_opt_frac, step=global_step)
tensorflow.name_scope
13,011
import tensorflow as tf W_transform = tf.get_variable( 'W_transform', [highway_dim, highway_dim], initializer=tf.random_normal_initializer( mean=0.0, stddev=np.sqrt(1.0 / highway_dim)), dtype=DTYPE) b_transform = tf.get_variable( 'b_transform', [highway_dim], initializer=tf.constant_initializer(0.0), dtype=DTYPE) embedding = high(embedding, W_carry, b_carry, W_transform, b_transform) # finally project down if needed
tensorflow.constant_initializer
13,012
import tensorflow as tf # model definition - distilled model if FLAGS.enbl_dst: logits_dst = self.helper_dst.calc_logits(sess, images) # model definition - weight-sparsified model with tf.variable_scope(self.model_scope): # loss & extra evaluation metrics logits = self.forward_train(images) self.maskable_var_names = [var.name for var in self.maskable_vars] loss, metrics = self.calc_loss(labels, logits, self.trainable_vars)
tensorflow.variable_scope
13,013
import tensorflow as tf raise ValueError('rnn_mode {} not supported'.format(config.rnn_mode)) def _build_rnn_graph(self, inputs, config, is_training): def make_cell(): cell = self._get_lstm_cell(config, is_training) if is_training and config.keep_prob < 1: cell = tf.contrib.rnn.DropoutWrapper( cell, output_keep_prob=config.keep_prob) return cell cell = tf.contrib.rnn.MultiRNNCell( [make_cell() for _ in range(config.num_layers)], state_is_tuple=True)
tensorflow.contrib.rnn.DropoutWrapper
13,014
import tensorflow as tf

clone_loss = tf.add_n(clone_losses, name='clone_loss')
if num_clones > 1:
    clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                        name='scaled_clone_loss')
all_losses.append(clone_loss)
if regularization_losses:
    regularization_loss = tf.add_n(regularization_losses,
                                   name='regularization_loss')
    all_losses.append(regularization_loss)
if all_losses:
    sum_loss = tf.add_n(all_losses)
# Add the summaries out of the clone device block.
tensorflow.add_n
13,015
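A minimal sketch of tensorflow.add_n summing a list of same-shaped loss tensors, as named in the record above (index 13,015); the loss values are illustrative assumptions:

import tensorflow as tf

clone_losses = [tf.constant(0.5), tf.constant(1.25), tf.constant(0.25)]

# add_n sums a list of tensors that all share the same shape and dtype.
total_loss = tf.add_n(clone_losses, name="total_loss")
print(total_loss.numpy())  # 2.0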
import tensorflow as tf self.sess_train = sess with tf.control_dependencies(self.update_ops): self.train_op = optimizer.apply_gradients(grads_pruned, global_step=self.global_step) self.summary_op = tf.summary.merge_all() self.log_op = [lrn_rate, loss, pr_trainable, pr_maskable] + list(metrics.values()) self.log_op_names = ['lr', 'loss', 'pr_trn', 'pr_msk'] + list(metrics.keys()) self.init_op = tf.variables_initializer(self.vars) self.init_opt_op = tf.variables_initializer(optimizer_base.variables()) if FLAGS.enbl_multi_gpu: self.bcast_op = mgw.broadcast_global_variables(0) self.saver_train = tf.train.Saver(self.vars)
tensorflow.variables_initializer
13,016
import tensorflow as tf batch_size = tf.shape(hidden)[0] time_steps = tf.shape(hidden)[1] if encoder.attn_keep_prob is not None: state_noise_shape = [1, tf.shape(state)[1]] if encoder.pervasive_dropout else None state = tf.nn.dropout(state, keep_prob=encoder.attn_keep_prob, noise_shape=state_noise_shape) hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None hidden = tf.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob, noise_shape=hidden_noise_shape) if encoder.mult_attn: state = dense(state, encoder.attn_size, use_bias=False, name='state') hidden = dense(hidden, encoder.attn_size, use_bias=False, name='hidden') return tf.einsum('ijk,ik->ij', hidden, state) y = dense(state, encoder.attn_size, use_bias=not encoder.layer_norm, name='W_a') y = tf.expand_dims(y, axis=1) if encoder.layer_norm: y = tf.contrib.layers.layer_norm(y, scope='layer_norm_state') hidden = tf.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden') y += dense(hidden, encoder.attn_size, use_bias=False, name='U_a') if encoder.position_bias and input_length is not None and time is not None: src_pos = tf.tile(tf.expand_dims(tf.range(time_steps), axis=0), [batch_size, 1]) trg_pos = tf.tile(tf.reshape(time, [1, 1]), [batch_size, time_steps]) src_len = tf.tile(tf.expand_dims(input_length, axis=1), [1, time_steps]) # - 1 pos_feats = tf.to_float(tf.stack([src_pos, trg_pos, src_len], axis=2)) pos_feats = tf.log(1 + pos_feats)
tensorflow.expand_dims
13,017
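A minimal sketch of tensorflow.expand_dims inserting a length-1 axis, as named in the attention record above (index 13,017); the tensor shape and values are illustrative assumptions:

import tensorflow as tf

state = tf.constant([[1.0, 2.0, 3.0],
                     [4.0, 5.0, 6.0]])       # assumed shape [batch, attn_size] = [2, 3]

# Insert a time axis so the state can broadcast against [batch, time, attn_size].
y = tf.expand_dims(state, axis=1)            # shape [2, 1, 3]
print(y.shape)                               # (2, 1, 3)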
import tensorflow as tf

threads = tf.train.start_queue_runners(coord=coord)
tf.train.Saver().restore(sess, path)
tensorflow.train.Saver
13,018
import tensorflow as tf loss_weight: A float for loss weight. distance_fn: A function handle for computing embedding distances, which takes two embedding tensors of shape [..., embedding_dim] and returns a distance tensor of shape [...]. Returns: loss: A tensor for weighted positive pairwise loss. Shape = []. summaries: A dictionary for loss summaries. """ loss = tf.math.reduce_mean( distance_fn(anchor_embeddings, positive_embeddings)) weighted_loss = loss_weight * loss summaries = { 'pairwise_loss/PositivePair/Loss/Original': loss, 'pairwise_loss/PositivePair/Loss/Weighted': weighted_loss, 'pairwise_loss/PositivePair/Loss/Weight': tf.constant(loss_weight), } return weighted_loss, summaries
tensorflow.constant
13,019
from tensorflow.python.framework import ops recall = math_ops.div(true_positives, epsilon + true_positives + false_negatives, name='recall_' + name) return recall recall = compute_recall('value') with ops.control_dependencies([true_positives_compute_op, false_negatives_compute_op]): update_op = compute_recall('update_op') if metrics_collections: ops.add_to_collections(metrics_collections, recall) if updates_collections: ops.add_to_collections(updates_collections, update_op) return recall, update_op def _at_k_name(name, k=None, class_id=None): if k is not None: name = '%s_at_%d' % (name, k)
tensorflow.python.framework.ops.add_to_collections
13,020
import tensorflow as tf dnn_output = last_layer dnn_output_size = last_layer_size # Logistic regression with tf.variable_scope('logit') as scope: logit_w = tf.get_variable('W', shape=[dnn_output_size, 1], initializer=tf.truncated_normal_initializer(stddev=1.0 / dnn_output_size, dtype=dtype), dtype=dtype) logit_b = tf.get_variable('b', shape=[1], initializer=tf.constant_initializer(0.0), dtype=dtype) logits = tf.squeeze(tf.nn.bias_add(tf.matmul(dnn_output, logit_w), logit_b), squeeze_dims=[1]) prediction = tf.nn.sigmoid(logits) prediction_inspect = tf.reshape(prediction, [batch_size, rnn_nunroll]) prediction_final = tf.squeeze(tf.slice(prediction_inspect, [0, rnn_nunroll - 1], [-1, 1]), squeeze_dims=[1]) print('logit: {}'.format(logits.get_shape())) # Compute loss if mode != 'gen': neg_log_lhoods = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets) if target_weight_strategy == 'rect': avg_neg_log_lhood = tf.reduce_mean(neg_log_lhoods) else:
tensorflow.reshape
13,021
from tensorflow.python.platform import gfile log_fn('-' * 64) if is_chief: store_benchmarks({'total_images_per_sec': images_per_sec}) # Save the model checkpoint. if FLAGS.train_dir is not None and is_chief: checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt') if not gfile.Exists(FLAGS.train_dir): gfile.MakeDirs(FLAGS.train_dir) sv.saver.save(sess, checkpoint_path, global_step) if execution_barrier: # Wait for other workers to reach the end, so this worker doesn't # go away underneath them. sess.run([execution_barrier])
tensorflow.python.platform.gfile.MakeDirs
13,022
from tensorflow.python.util import compat update_str(n.name) update_str(n.op) update_strs(n.input) update_num(len(n.attr)) # NOTE: protobuf map serialization does not guarantee ordering. for k in sorted(n.attr): update_str(k) update_str(n.attr[k].SerializeToString()) _hash_func_def() # pylint: disable=protected-access self._sub_functions = temp_graph._functions for subname in sorted(self._sub_functions.keys()): hasher.update(compat.as_bytes(self._sub_functions[subname]._hash_str)) # pylint: enable=protected-access # Uses the first 8 bytes sha1 hash digest as the __hash__. self._hash_str = hasher.hexdigest()[:8] self._hash = int(self._hash_str, 16) # Finally, we decide the function name to use. If not specified, # make up something which is almost certainly unique. if not self._func_name: self._func_name = "_".join([_get_func_name(self._func), self._hash_str]) self._definition.signature.name = self._func_name if self._func.__doc__:
tensorflow.python.util.compat.as_bytes
13,023
import tensorflow as tf self.Z = tf.placeholder(tf.float32, (None, None, fourier_window_size // 2 + 1)) batch_size = tf.shape(self.X)[0] seq_lens = tf.count_nonzero(tf.reduce_sum(self.decoder_inputs, -1), 1, dtype=tf.int32) + 1 def cells(reuse=False): return tf.contrib.rnn.DropoutWrapper( tf.nn.rnn_cell.LSTMCell( size_layers, initializer=tf.orthogonal_initializer(), reuse=reuse ), state_keep_prob=dropout, output_keep_prob=dropout, ) def attention(encoder_out, seq_len, reuse=False): attention_mechanism = tf.contrib.seq2seq.LuongAttention( num_units=size_layers, memory=encoder_out, memory_sequence_length=seq_len ) return tf.contrib.seq2seq.AttentionWrapper( cell=tf.nn.rnn_cell.MultiRNNCell([cells(reuse) for _ in range(num_layers)]), attention_mechanism=attention_mechanism, attention_layer_size=size_layers, alignment_history=True, ) encoder_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)]) encoder_out, encoder_state = tf.nn.dynamic_rnn( cell=encoder_cells, inputs=forward, sequence_length=seq_lens, dtype=tf.float32 )
tensorflow.contrib.seq2seq.LuongAttention
13,024
import tensorflow as tf # size: num_priors x num_targets ious = iou_of(tf.expand_dims(gt_boxes, axis=0), tf.expand_dims(corner_form_priors, axis=1)) # size: num_priors best_target_per_prior = tf.math.reduce_max(ious, axis=1) best_target_per_prior_index = tf.math.argmax(ious, axis=1) # size: num_targets best_prior_per_target = tf.math.reduce_max(ious, axis=0) best_prior_per_target_index = tf.math.argmax(ious, axis=0) targets = tf.range(tf.shape(best_prior_per_target_index)[0], dtype='int64') best_target_per_prior_index = tf.tensor_scatter_nd_update(best_target_per_prior_index, tf.expand_dims(best_prior_per_target_index, 1), targets) # 2.0 is used to make sure every target has a prior assigned best_target_per_prior = tf.tensor_scatter_nd_update(best_target_per_prior, tf.expand_dims(best_prior_per_target_index, 1), tf.ones_like(best_prior_per_target_index, dtype=tf.float32)*2.0) # size: num_priors labels = tf.gather(gt_labels, best_target_per_prior_index) labels = tf.where(tf.less(best_target_per_prior, iou_threshold), tf.constant(0, dtype='int64'), labels) # labels[best_target_per_prior < iou_threshold] = 0 # the backgournd id boxes = tf.gather(gt_boxes, best_target_per_prior_index) return boxes, labels class MatchPrior(object): def __init__(self, center_form_priors, center_variance, size_variance, iou_threshold): self.center_form_priors = center_form_priors self.corner_form_priors = center_form_to_corner_form(center_form_priors)
tensorflow.expand_dims
13,025
import tensorflow as tf

# Flatten final block conv tensor.
block_conv_flat = self.flatten_layer(
    inputs=tf.zeros(
        shape=[1, 1, 1, block_conv_size],
        dtype=tf.float32
tensorflow.zeros
13,026
import tensorflow as tf

var = tf.Variable(0.)
loss = tf.nn.l2_loss(var)
train_op = opt.get_train_op(loss)
self.assertTrue(tf.contrib.framework.is_tensor(train_op))


if __name__ == "__main__":
    tf.test.main()
tensorflow.test.main
13,027
import tensorflow as tf if int(X.get_shape()[-1]) != (r**2) * n_out_channels: raise Exception(_err_log) # bsize, a, b, c = X.get_shape().as_list() # bsize = tf.shape(X)[0] # Handling Dimension(None) type for undefined batch dim # Xs=tf.split(X,r,3) #b*h*w*r*r # Xr=tf.concat(Xs,2) #b*h*(r*w)*r # X=tf.reshape(Xr,(bsize,r*a,r*b,n_out_channel)) # b*(r*h)*(r*w)*c X = tf.depth_to_space(X, r) else: raise RuntimeError(_err_log) return X class SubpixelConv1d(Layer):
tensorflow.depth_to_space
13,028
import tensorflow as tf outputs = { 'foo': tf.convert_to_tensor([0, 1, 2, 3], dtype=tf.int64), 'bar': tf.convert_to_tensor([0, 2, 0, 2], dtype=tf.int64), } # Annotate an arbitrary proto at the schema level (not sure what global # schema boundaries would mean, but hey I'm just a test). boundaries = tf.constant([[1.0]]) message_type = annotations_pb2.BucketBoundaries.DESCRIPTOR.full_name sizes = tf.expand_dims([tf.size(boundaries)], axis=0) message_proto = tf.raw_ops.EncodeProto( sizes=sizes, values=[tf.cast(boundaries, tf.float32)], field_names=['boundaries'], message_type=message_type)[0] type_url = os.path.join('type.googleapis.com', message_type) schema_inference.annotate(type_url, message_proto) with tf.compat.v1.Session(graph=graph) as session: schema = schema_inference.infer_feature_schema(outputs, graph, session) self.assertLen(schema.annotation.extra_metadata, 1) for annotation in schema.annotation.extra_metadata: # Extract the annotated message and validate its contents message = annotations_pb2.BucketBoundaries()
tensorflow.cast
13,029
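A minimal sketch of tensorflow.cast converting element dtypes, similar to the float32 cast named in the record above (index 13,029); the boundary values are illustrative assumptions:

import tensorflow as tf

boundaries = tf.constant([[1, 5, 9]], dtype=tf.int32)

# cast changes the element dtype without changing the shape.
as_float = tf.cast(boundaries, tf.float32)
print(as_float.dtype, as_float.numpy())   # <dtype: 'float32'> [[1. 5. 9.]]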
import tensorflow as tf def test_instance_non_maximum_suppression_1d_scores(self): mask0 = tf.constant([[1, 0], [0, 1]], dtype=tf.float32) mask1 = tf.constant([[1, 1], [0, 1]], dtype=tf.float32) mask2 = tf.constant([[1, 0], [1, 1]], dtype=tf.float32) mask3 = tf.constant([[1, 1], [1, 1]], dtype=tf.float32) mask4 = tf.constant([[0, 0], [0, 0]], dtype=tf.float32) mask5 = tf.constant([[1, 0], [1, 0]], dtype=tf.float32) masks = tf.stack([mask0, mask1, mask2, mask3, mask4, mask5]) classes = tf.constant([1, 2, 3, 1, 2, 3], dtype=tf.int32) scores = tf.constant([1.0, 0.9, 0.8, 0.95, 0.85, 0.6], dtype=tf.float32) (nms_masks1, nms_scores1, nms_classes1, _) = isu.instance_non_maximum_suppression_1d_scores( masks, scores, classes, min_score_thresh=0.65, min_iou_thresh=0.5, is_class_agnostic=True) nms_masks_expected1 = tf.stack([mask0, mask4]) nms_scores_expected1 = tf.constant([1.0, 0.85], dtype=tf.float32)
tensorflow.constant
13,030
import tensorflow as tf with self.test_session(): for dtype in [np.float32, np.float64]: mu = tf.Variable(0., name="mu", dtype=dtype) sigma = tf.Variable(1., name="sigma", dtype=dtype) qdist = distributions.QuantizedDistribution( base_dist_cls=distributions.Normal, mu=mu, sigma=sigma) x = np.arange(-100, 100, 2).astype(dtype) tf.initialize_all_variables().run() proba = qdist.log_prob(x) grads = tf.gradients(proba, [mu, sigma]) self._assert_all_finite(proba.eval()) self._assert_all_finite(grads[0].eval()) self._assert_all_finite(grads[1].eval()) def test_prob_and_grad_gives_finite_results_for_common_events(self):
tensorflow.initialize_all_variables
13,031
import tensorflow as tf geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32) loss = tf.reshape(loss, [-1]) hard_loss, _ = tf.math.top_k(loss, k=hard_num) return hard_loss return loss def sample_pair(batch): num_sam = tools.shape(batch)[0]
tensorflow.reshape
13,032
import tensorflow as tf

self.D_A_real = self.build_discriminator(self.image_real_A,reuse=True, name="discriminatorA")

self.loss_GABA = self.lambda_l2*squared_loss(self.images_fake_A,self.image_real_A) + binary_cross_entropy_loss(labels=tf.ones_like(self.D_B_fake),logits=self.D_B_fake)
self.loss_GBAB = self.lambda_l2*squared_loss(self.images_fake_B_,self.image_real_B) + binary_cross_entropy_loss(labels=tf.ones_like(self.D_A_fake),logits=self.D_A_fake)
self.generator_loss = self.loss_GABA + self.loss_GBAB

self.D_B_loss_real = binary_cross_entropy_loss(tf.ones_like(self.D_B_real),self.D_B_real)
tensorflow.ones_like
13,033
import tensorflow as tf

types = {movielens.USER_COLUMN: rconst.USER_DTYPE,
         movielens.ITEM_COLUMN: rconst.ITEM_DTYPE}
shapes = {movielens.USER_COLUMN: tf.TensorShape([batch_size]),
          movielens.ITEM_COLUMN: tf.TensorShape([batch_size])}
tensorflow.TensorShape
13,034
import tensorflow as tf

cell_bw = tf.contrib.rnn.GRUCell(embed_size // 2)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)
outputs = tf.concat(outputs, 2)
self.Z_hat = tf.layers.dense(outputs, 1 + fourier_window_size // 2)
tensorflow.concat
13,035
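A minimal sketch of tensorflow.concat joining forward and backward RNN outputs along the feature axis, as named in the record above (index 13,035); the tensors here are illustrative assumptions:

import tensorflow as tf

outputs_fw = tf.ones([2, 5, 8])    # assumed [batch, time, units] forward outputs
outputs_bw = tf.zeros([2, 5, 8])   # assumed backward outputs of the same shape

# Concatenate along the last (feature) axis: result is [2, 5, 16].
outputs = tf.concat([outputs_fw, outputs_bw], axis=2)
print(outputs.shape)               # (2, 5, 16)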
import tensorflow as tf class SetFromFlat(object): def __init__(self, var_list, dtype=tf.float32): assigns = [] shapes = list(map(var_shape, var_list)) total_size = np.sum([intprod(shape) for shape in shapes]) self.theta = theta = tf.placeholder(dtype, [total_size]) start = 0 assigns = [] for (shape, v) in zip(shapes, var_list): size = intprod(shape) assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
tensorflow.placeholder
13,036
import tensorflow as tf output: A tensor. target: A tensor with the same shape as `output`. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. # Returns A tensor. """ # Note: tf.nn.softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: # transform back to logits epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype) output = tf.clip_by_value(output, epsilon, 1 - epsilon) output = tf.log(output / (1 - output)) try: return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output) except TypeError: return tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=target) def sum(x, axis=None, keepdims=False): """Sum of the values in a tensor, alongside the specified axis. Parameters ---------- x: A tensor or variable.
tensorflow.clip_by_value
13,037
import tensorflow as tf

logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
    tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
tensorflow.multinomial
13,038
import tensorflow as tf name: The name scope of this layer. Returns: float logits Tensor. """ del num_attention_heads # unused input_shape = get_shape_list(input_tensor) hidden_size = input_shape[2] with tf.variable_scope(name): w = tf.get_variable( name="kernel", shape=[hidden_size, output_size], initializer=initializer) b = tf.get_variable( name="bias", shape=[output_size], initializer=tf.zeros_initializer) ret = tf.einsum("BFH,HO->BFO", input_tensor, w) ret += b if activation is not None: return activation(ret) else: return ret def dot_product_attention(q, k, v, bias, dropout_rate=0.0): """Dot-product attention.
tensorflow.get_variable
13,039
import tensorflow as tf def default_param_noise_filter(var): """ check whether or not a variable is perturbable or not :param var: (TensorFlow Tensor) the variable :return: (bool) can be perturb """ if var not in tf.trainable_variables(): # We never perturb non-trainable vars. return False if "fully_connected" in var.name: # We perturb fully-connected layers. return True # The remaining layers are likely conv or layer norm layers, which we do not wish to # perturb (in the former case because they only extract features, in the latter case because
tensorflow.trainable_variables
13,040
import tensorflow as tf if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores,
tensorflow.contrib.cluster_resolver.TPUClusterResolver
13,041
import tensorflow as tf attn_states, cell, output_size=4, num_heads=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testDynamicAttentionDecoder1(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): cell = tf.nn.rnn_cell.GRUCell(2) inp = tf.constant(0.5, shape=[2, 2, 2]) enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32) attn_states = enc_outputs dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 dec, mem = tf.nn.seq2seq.attention_decoder( dec_inp, enc_state, attn_states, cell, output_size=4) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res))
tensorflow.constant_initializer
13,042
import tensorflow as tf

p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
            lambda: tf.print('csrt acc ', [pct]),
            lambda: tf.no_op())
tensorflow.print
13,043
import tensorflow as tf from tqdm import tqdm from functools import partial from sklearn.utils import shuffle from sklearn.metrics import accuracy_score from opt import adam, warmup_cosine, warmup_linear, warmup_constant from datasets import rocstories from analysis import rocstories as rocstories_analysis from text_utils import TextEncoder from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path def gelu(x): return 0.5*x*(1+tf.tanh(math.sqrt(2/math.pi)*(x+0.044715*tf.pow(x, 3)))) def swish(x): return x*tf.nn.sigmoid(x) opt_fns = { 'adam':adam, } act_fns = { 'relu':tf.nn.relu, 'swish':swish, 'gelu':gelu
tensorflow.pow
13,044
import tensorflow as tf span_emb_list = [] span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb] span_emb_list.append(span_start_emb) span_end_emb = tf.gather(context_outputs, span_ends) # [k, emb] span_emb_list.append(span_end_emb) span_width = 1 + span_ends - span_starts # [k] if self.config["use_features"]: span_width_index = span_width - 1 # [k] span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb] span_width_emb = tf.nn.dropout(span_width_emb, self.dropout) span_emb_list.append(span_width_emb) if self.config["model_heads"]: span_indices = tf.expand_dims(tf.range(self.config["max_span_width"]), 0) + tf.expand_dims(span_starts, 1) # [k, max_span_width] span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices) # [k, max_span_width] span_text_emb = tf.gather(head_emb, span_indices) # [k, max_span_width, emb] with tf.variable_scope("head_scores"): self.head_scores = util.projection(context_outputs, 1) # [num_words, 1] span_head_scores = tf.gather(self.head_scores, span_indices) # [k, max_span_width, 1] span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2) # [k, max_span_width, 1]
tensorflow.get_variable
13,045
import tensorflow as tf num_classes: num_classes for training vocab_size: a `int`, vocabular size of the problem num_power_iteration: a `int`, the number of power iteration small_constant_for_finite_diff: a `float`, Small constant for finite difference method perturb_norm_length: a `float`, Norm length of adversarial perturbation to be optimized with validatio Returns: a `float` `scalar`, KL divergence. """ logits = tf.stop_gradient(logits) weights = _end_of_seq_mask(labels, vocab_size) d = _mask_by_length(tf.random_normal(shape=tf.shape(embedded)), length) for _ in range(num_power_iteration): d = _scale_l2(d, small_constant_for_finite_diff) d_logits = logits_from_embedding_fn(embedded + d) kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes) d, = tf.gradients(kl, d, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) d = tf.stop_gradient(d) perturb = _scale_l2(_mask_by_length(d, length), perturb_norm_length) vadv_logits = logits_from_embedding_fn(embedded + perturb) return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)
tensorflow.shape
13,046
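A minimal sketch of tensorflow.shape, which returns the runtime shape as a tensor (unlike the static .shape attribute) and is used to size the random noise in the record above (index 13,046); the input tensor is an illustrative assumption:

import tensorflow as tf

embedded = tf.zeros([4, 7, 32])                      # assumed [batch, length, dim] embeddings

dynamic_shape = tf.shape(embedded)                   # 1-D int32 tensor: [4, 7, 32]
noise = tf.random.normal(shape=tf.shape(embedded))   # noise with the same runtime shape
print(dynamic_shape.numpy(), noise.shape)            # [ 4  7 32] (4, 7, 32)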
import tensorflow as tf def test_inputs_Distances_to_centers(self): inputs = tf.random.uniform( [100, 8], minval=-10, maxval=10.0, dtype=tf.float32) centers = tf.random.uniform( [5, 8], minval=-10, maxval=10.0, dtype=tf.float32) distances1 = isu.inputs_distances_to_centers(inputs, centers) num_centers = tf.shape(centers)[0] inputs_reshaped = tf.tile(tf.expand_dims(inputs, axis=1), tf.stack([1, num_centers, 1])) distances2 = tf.reduce_sum(tf.square(inputs_reshaped - centers), axis=2) self.assertAllClose(distances1.numpy(), distances2.numpy(), atol=0.001) def test_pairwise_iou_matrix(self): mask0 = tf.constant([[1, 0], [0, 1]], dtype=tf.float32) mask1 = tf.constant([[1, 1], [0, 1]], dtype=tf.float32)
tensorflow.stack
13,047
import tensorflow as tf # Convert the covariance_matrix up to a scale_tril and call MVNTriL. with tf.name_scope(name) as name: with tf.name_scope("init", values=[loc, covariance_matrix]): dtype = dtype_util.common_dtype([loc, covariance_matrix], tf.float32) loc = loc if loc is None else tf.convert_to_tensor( loc, name="loc", dtype=dtype) if covariance_matrix is None: scale_tril = None else: covariance_matrix = tf.convert_to_tensor( covariance_matrix, name="covariance_matrix", dtype=dtype) if validate_args: covariance_matrix = control_flow_ops.with_dependencies([ tf.assert_near( covariance_matrix, tf.matrix_transpose(covariance_matrix), message="Matrix was not symmetric") ], covariance_matrix)
tensorflow.convert_to_tensor
13,048
import tensorflow as tf output_index = (interpreter.get_output_details()[0]["index"]) result = interpreter.get_tensor(output_index) # Reset all variables so it will not pollute other inferences. interpreter.reset_all_variables() return result def testStaticRnnMultiRnnCell(self): sess = tf.compat.v1.Session(config=CONFIG) x, prediction, output_class = self.buildModel( self.buildLstmLayer(), is_dynamic_rnn=False) self.trainModel(x, prediction, output_class, sess) saver = tf.train.Saver() x, prediction, output_class, new_sess = self.saveAndRestoreModel( self.buildLstmLayer(), sess, saver, is_dynamic_rnn=False) test_inputs, expected_output = self.getInferenceResult( x, output_class, new_sess) # Test Toco-converted model. result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False) self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2)) @test_util.enable_control_flow_v2 def testDynamicRnnMultiRnnCell(self):
tensorflow.train.Saver
13,049
import tensorflow as tf phase_train: boolean tf.Variable, true indicates training phase scope: string, variable scope affn: whether to affn-transform outputs Return: normed: batch-normalized maps Ref: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow/33950177 """ name = 'batch_norm' with tf.variable_scope(name): phase_train = tf.convert_to_tensor(phase_train, dtype=tf.bool) n_out = int(x.get_shape()[3]) beta = tf.Variable(tf.constant(0.0, shape=[n_out], dtype=x.dtype), name=name+'/beta', trainable=True, dtype=x.dtype) gamma = tf.Variable(tf.constant(1.0, shape=[n_out], dtype=x.dtype), name=name+'/gamma', trainable=True, dtype=x.dtype) batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments') ema = tf.train.ExponentialMovingAverage(decay=0.9) def mean_var_with_update(): ema_apply_op = ema.apply([batch_mean, batch_var]) with tf.control_dependencies([ema_apply_op]): return tf.identity(batch_mean), tf.identity(batch_var)
tensorflow.constant
13,050
import tensorflow as tf trainable=trainable) # Maybe create label_priors. label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections) label_priors = tf.reshape(label_priors, [1, num_labels, 1]) # Expand logits, labels, and weights to shape [batch_size, num_labels, 1]. logits = tf.expand_dims(logits, 2) labels = tf.expand_dims(labels, 2) weights = tf.expand_dims(weights, 2) # Calculate weighted loss and other outputs. The log(2.0) term corrects for # logloss not being an upper bound on the indicator function. loss = weights * losses_utils.weighted_surrogate_loss( labels,
tensorflow.expand_dims
13,051
import tensorflow as tf return loss # randrom horizon def contra_traj_lossV3(pred, tgt, horizon=12): # Step-wise contrastive loss horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) # pred1, pred2 = tf.split(horizon_pred, 2, axis=0) # tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0) even = [2 * i for i in range(25)] odd = [2 * i + 1 for i in range(25)] pred1 = tf.gather(horizon_pred, even) pred2 = tf.gather(horizon_pred, odd) tgt1 = tf.gather(horizon_tgt, even) tgt2 = tf.gather(horizon_tgt, odd) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small))) loss = tf.reduce_mean(loss) return loss
tensorflow.gather
13,052
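A minimal sketch of tensorflow.gather selecting even- and odd-indexed elements of a tensor, mirroring the pairing trick in the record above (index 13,052); the values and index lists are illustrative assumptions:

import tensorflow as tf

horizon_pred = tf.constant([10., 11., 20., 21., 30., 31.])
even = [0, 2, 4]
odd = [1, 3, 5]

# gather picks elements along axis 0 at the given indices.
pred1 = tf.gather(horizon_pred, even)   # [10. 20. 30.]
pred2 = tf.gather(horizon_pred, odd)    # [11. 21. 31.]
print(pred1.numpy(), pred2.numpy())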
import tensorflow as tf res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testBasicRNNSeq2Seq(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 cell = tf.nn.rnn_cell.OutputProjectionWrapper( tf.nn.rnn_cell.GRUCell(2), 4) dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testTiedRNNSeq2Seq(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
tensorflow.global_variables_initializer
13,053
from tensorflow.python.ops import variable_scope encoder_features = tf.reshape(encoder_features, [batch_size, passage_len, options.attention_vec_size]) return encoder_features def decode_mode(self, word_vocab, beam_size, state_t_1, context_t_1, coverage_t_1, word_t, encoder_states, encoder_features, passage_word_idx, passage_mask): options = self.options with variable_scope.variable_scope("attention_decoder"): v = variable_scope.get_variable("v", [options.attention_vec_size]) v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0) w_c = None if options.use_coverage: with variable_scope.variable_scope("coverage"): w_c = variable_scope.get_variable("w_c", [options.attention_vec_size])
tensorflow.python.ops.variable_scope.variable_scope
13,054
from tensorflow.python.framework import ops mean, tf.check_numerics(decay * (mean - cur_mean), "NaN in moving mean.")) with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]): with ops.colocate_with(var): new_var = tf.assign_sub( var, tf.check_numerics(decay * (var - cur_var), "NaN in moving variance.")) with tf.name_scope(name, "IncrementTime", [step]): with ops.colocate_with(step): new_step = tf.assign_add(step, 1.) res += 0. * new_mean * new_var * new_step return res # batch normalization taking into account the volume transformation def batch_norm_log_diff(input_,
tensorflow.python.framework.ops.colocate_with
13,055
import tensorflow as tf # Host call fns are executed FLAGS.iterations_per_loop times after one # TPU loop is finished, setting max_queue value to the same as number of # iterations will make the summary writer only flush the data to storage # once per loop. with (tf.contrib.summary.create_file_writer( params['model_dir'], max_queue=params['iterations_per_loop']).as_default()): with tf.contrib.summary.always_record_summaries(): tf.contrib.summary.scalar( 'total_loss', tf.reduce_mean(total_loss), step=global_step) tf.contrib.summary.scalar( 'total_rpn_loss', tf.reduce_mean(total_rpn_loss), step=global_step) tf.contrib.summary.scalar( 'rpn_score_loss', tf.reduce_mean(rpn_score_loss), step=global_step) tf.contrib.summary.scalar( 'rpn_box_loss', tf.reduce_mean(rpn_box_loss), step=global_step)
tensorflow.reduce_mean
13,056
import tensorflow as tf is_training: boolean tf.Varialbe, true indicates training phase scope: string, variable scope moments_dims: a list of ints, indicating dimensions for moments calculation bn_decay: float or float tensor variable, controling moving average weight Return: normed: batch-normalized maps """ with tf.variable_scope(scope) as sc: num_channels = inputs.get_shape()[-1].value beta = tf.Variable(tf.constant(0.0, shape=[num_channels]), name='beta', trainable=True) gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]), name='gamma', trainable=True) batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments') decay = bn_decay if bn_decay is not None else 0.9 ema = tf.train.ExponentialMovingAverage(decay=decay) # Operator that maintains moving averages of variables. ema_apply_op = tf.cond(is_training, lambda: ema.apply([batch_mean, batch_var]), lambda: tf.no_op()) # Update moving average and return current batch's avg and var. def mean_var_with_update(): with tf.control_dependencies([ema_apply_op]): return tf.identity(batch_mean), tf.identity(batch_var)
tensorflow.nn.moments
13,057
import tensorflow as tf

# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
tensorflow.stop_gradient
13,058
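A minimal sketch of tensorflow.stop_gradient blocking backpropagation through the target term of a TD error, as in the record above (index 13,058); the variables and the GradientTape usage are illustrative assumptions:

import tensorflow as tf

q_t_selected = tf.Variable(2.0)
q_t_selected_target = tf.Variable(5.0)

with tf.GradientTape() as tape:
    # Gradients flow into q_t_selected but not into the stopped target.
    td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
    loss = tf.square(td_error)

grads = tape.gradient(loss, [q_t_selected, q_t_selected_target])
print(grads[0].numpy(), grads[1])   # -6.0 None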
from tensorflow.contrib.eager.python.examples.revnet import config as config_ make_iterator, device_and_format, defun=False, execution_mode=None): config = config_.get_hparams_imagenet_56() with tfe.execution_mode(execution_mode): device, data_format = device_and_format for batch_size in self._train_batch_sizes():
tensorflow.contrib.eager.python.examples.revnet.config.get_hparams_imagenet_56
13,059
import tensorflow as tf self.logits1, self.logits2 = [l for l in self.logits] outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.logits1), axis=2), tf.expand_dims(tf.nn.softmax(self.logits2), axis=1)) outer = tf.matrix_band_part(outer, 0, self.max_a_len) self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1) self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1) def _compute_loss(self): def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2): logits = tf.nn.sigmoid(logits) zeros = array_ops.zeros_like(logits, dtype=logits.dtype) pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros) neg_p_sub = array_ops.where(labels > zeros, zeros, logits) cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(logits, 1e-8, 1.0)) \ - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - logits, 1e-8, 1.0)) return tf.reduce_sum(cross_ent, 1) start_label = tf.one_hot(self.start_label, tf.shape(self.logits1)[1], axis=1) end_label = tf.one_hot(self.end_label, tf.shape(self.logits2)[1], axis=1)
tensorflow.nn.sigmoid
13,060
import tensorflow as tf adj_orig.eliminate_zeros() adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj) adj = adj_train if FLAGS.features == 0: features = sp.identity(features.shape[0]) # featureless # Some preprocessing adj_norm = preprocess_graph(adj) # Define placeholders placeholders = { 'features': tf.sparse_placeholder(tf.float32), 'adj': tf.sparse_placeholder(tf.float32), 'adj_orig': tf.sparse_placeholder(tf.float32), 'dropout': tf.placeholder_with_default(0., shape=()) } num_nodes = adj.shape[0] features = sparse_to_tuple(features.tocoo()) num_features = features[2][1] features_nonzero = features[1].shape[0] # Create model model = None if model_str == 'gcn_ae': model = GCNModelAE(placeholders, num_features, features_nonzero) elif model_str == 'gcn_vae':
tensorflow.sparse_placeholder
13,061
import tensorflow as tf res = sess.run([mem]) self.assertEqual(1, len(res)) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) def testEmbeddingRNNSeq2Seq(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)] dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) # Test with state_is_tuple=False. with tf.variable_scope("no_tuple"): cell1 = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False) dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell1, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2)
tensorflow.global_variables_initializer
13,062
import tensorflow as tf name='x') self.y = tf.placeholder(tf.float32, [batch_size, max_sequence_len, out_vocab_size], name='y') # The bidirectional rnn code requires seq_lens as int64 self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens') self.example_weights = tf.placeholder(tf.float32, [batch_size], name='example_weights') embeddings = c2v.GetEmbeddings(self.x) self._inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, max_sequence_len, embeddings)] # Need to prepare a mask to zero out the padding symbols. # Make a batch_size x max_sequence_len matrix where each # row contains the length repeated max_sequence_len times. lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1) lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len]) # Make a matrix where each row contains [0, 1, ..., max_sequence_len] r = tf.range(0, max_sequence_len, 1)
tensorflow.split
13,063
import tensorflow as tf

blk_indices_crop = tf.strided_slice(blk_indices, [0, 0, 0, 0], [
    blk_shape[0], q_shape[1] * strides[1], q_shape[2] * strides[2], 3
], strides)
blk_indices_crop = blk_indices_crop // tf.stack([1, strides[1], strides[2]])
return blk_indices_crop
tensorflow.stack
13,064
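A minimal sketch of tensorflow.stack building a small constant vector from scalars, similar to the stride divisor in the record above (index 13,064); the stride values are illustrative assumptions:

import tensorflow as tf

strides = [1, 2, 2, 1]           # assumed [batch, height, width, channel] strides

# stack joins scalars (rank 0) into a rank-1 tensor: [1, 2, 2].
divisor = tf.stack([1, strides[1], strides[2]])
print(divisor.numpy())           # [1 2 2]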
import tensorflow as tf return (input_var - mean) / tf.sqrt(var+self.epsilon) class BatchNorm(object): def __init__(self,name,dims,axis=1,epsilon=1e-3,momentum=0.999,center=True,scale=True) : self.momentum = momentum self.epsilon = epsilon self.axis = axis self.center=center self.scale=scale with tf.variable_scope(name) as scope: with tf.variable_scope('bn') : self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0)) self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0)) self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False) self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False) self.scope = scope def __call__(self,input_var,is_training,**xargs) : with tf.variable_scope(self.scope) : return tf.layers.batch_normalization( input_var, axis=self.axis, momentum=self.momentum, epsilon=self.epsilon,
tensorflow.constant_initializer
13,065
import tensorflow as tf X = tf.add_n([X1, X2]) blocks.append(X) (X, comb_ch) = self._combine_cell_blocks(cell_inputs, blocks, cell_arch, w, h, block_ch, is_train) X = tf.reshape(X, (-1, w, h, comb_ch)) # Sanity shape check layers.append((X, w, h, comb_ch)) def _combine_cell_blocks(self, cell_inputs, blocks, cell_arch, w, h, block_ch, is_train=False): # Count usage of inputs input_use_counts = [0] * len(cell_inputs + blocks)
tensorflow.reshape
13,066
import tensorflow as tf train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) tf.logging.info("***** Running training *****") tf.logging.info(" Num examples = %d", len(train_examples)) tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples)
tensorflow.logging.info
13,067
import tensorflow as tf if self.demo: self.c = tf.placeholder(tf.int32, [None, self.config.max_p_len], "context") self.q = tf.placeholder(tf.int32, [None, self.config.max_q_len], "question") self.ch = tf.placeholder(tf.int32, [None, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = tf.placeholder(tf.int32, [None, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = tf.placeholder(tf.int32, [None], "answer_label1")
tensorflow.placeholder
13,068
import tensorflow as tf """ This function will be responsible for output all outputs of all layers and dump them in a pickle :return: """ print("Debugging mode will begin NOW..") layers = tf.get_collection('debug_layers') print("ALL Layers in the collection that i wanna to run {} layer".format(len(layers))) for layer in layers: print(layer) # exit(0)
tensorflow.get_collection
13,069
import tensorflow as tf # Gradients and SGD update operation for training the model. self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss # Select optimizer self.optimizer_func = tf.train.AdagradOptimizer if self.hparams.grad_strategy == 'sgd': self.optimizer_func = tf.train.GradientDescentOptimizer self.separate_gradient_update() tf.summary.scalar('Gradient Norm', self.norm, collections=['train']) tf.summary.scalar('Learning Rate', self.ranker_learning_rate, collections=['train']) tf.summary.scalar('Final Loss', tf.reduce_mean(self.loss), collections=['train']) clipped_labels = tf.clip_by_value(reshaped_train_labels, clip_value_min=0, clip_value_max=1) pad_removed_train_output = self.remove_padding_for_metric_eval(self.docid_inputs, train_output) for metric in self.exp_settings['metrics']: for topn in self.exp_settings['metrics_topn']: list_weights = tf.reduce_mean(self.propensity_weights * clipped_labels, axis=1, keep_dims=True) metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, None)
tensorflow.summary.scalar
13,070
import tensorflow as tf tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1") accuracy_2 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_2, axis=-1), tf.argmax(y_2, axis=-1)), tf.float32), name="accuracy_2") accuracy = tf.divide(accuracy_1 + accuracy_2, 2.0, name="accuracy") with tf.variable_scope("train"): global_step = tf.get_variable("global_step", shape=(), dtype=tf.int32, trainable=False) train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total, global_step=global_step) with tf.variable_scope("summary"): summary_loss_total = tf.summary.scalar("loss_total", loss_total) summary_accuracy_test = tf.summary.scalar("accuracy_test", accuracy) summary_accuracy_train = tf.summary.scalar("accuracy_train", accuracy) # standardization train_X_reshaped = train_X.reshape([train_X.shape[0], -1]) train_X_means = np.mean(train_X_reshaped, axis=0, keepdims=True) train_X_stds = np.std(train_X_reshaped, axis=0, keepdims=True) def standardization(x): x_reshaped = x.reshape([x.shape[0], -1]) result = (x_reshaped - train_X_means) / (train_X_stds + 1e-9) return result.reshape(x.shape)
tensorflow.summary.scalar
13,071
from tensorflow.python.framework import ops ops.RegisterShape("Inv")(common_shapes.unchanged_shape) ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape) ops.RegisterShape("IsInf")(common_shapes.unchanged_shape) ops.RegisterShape("IsNan")(common_shapes.unchanged_shape) ops.RegisterShape("Log")(common_shapes.unchanged_shape) ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape) ops.RegisterShape("Neg")(common_shapes.unchanged_shape) ops.RegisterShape("Real")(common_shapes.unchanged_shape) ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape) ops.RegisterShape("Sign")(common_shapes.unchanged_shape) ops.RegisterShape("Sin")(common_shapes.unchanged_shape) ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape) ops.RegisterShape("Square")(common_shapes.unchanged_shape) ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape) ops.RegisterShape("Tanh")(common_shapes.unchanged_shape) ops.RegisterShape("Cast")(common_shapes.unchanged_shape) ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.RegisterShape
13,072
import tensorflow as tf print("-------X Y----------") print(X) X = tf.reshape(X, shape=[-1, 32, 36]) print(X) print(Y) Y = tf.reshape(Y, shape=[-1, 6]) print(Y) # Weight Initialization def weight_variable(shape): # tra ve 1 gia tri random theo thuat toan truncated_ normal initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32) return tf.Variable(initial) def bias_varibale(shape): initial = tf.constant(0.1, shape=shape, name='Bias') return tf.Variable(initial) # Convolution and Pooling def conv2d(x, W): # Must have `strides[0] = strides[3] = 1 `. # For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1] `. return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d') def max_pool_2x2(x):
tensorflow.Variable
13,073
import tensorflow as tf rep_tensor_split = tf.reshape(rep_tensor_comp, [bs, block_num, block_len, input_dim]) # bs,bn,bl,d rep_mask_split = tf.reshape(rep_mask_comp, [bs, block_num, block_len]) # bs,bn,bl # non-linear rep_map = bn_dense_layer(rep_tensor_split, ivec, True, 0., 'bn_dense_map', activation, False, wd, keep_prob, is_train) # bs,bn,bl,vec rep_map_tile = tf.tile(tf.expand_dims(rep_map, 2), [1, 1, block_len, 1, 1]) # bs,bn,bl,bl,vec # rep_map_dp = dropout(rep_map, keep_prob, is_train) bn = block_num bl = block_len with tf.variable_scope('self_attention'): # @2.self-attention in block # mask generation sl_indices = tf.range(block_len, dtype=tf.int32) sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices) if direction == 'forward': direct_mask = tf.greater(sl_row, sl_col) # bl,bl else: direct_mask = tf.greater(sl_col, sl_row) # bl,bl direct_mask_tile = tf.tile( tf.expand_dims(tf.expand_dims(direct_mask, 0), 0), [bs, bn, 1, 1]) # bs,bn,bl,bl rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1]) # bs,bn,bl,bl rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl]) # bs,bn,bl,bl rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2) attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask') # bs,bn,bl,bl # attention
tensorflow.range
13,074
import tensorflow as tf "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
tensorflow.FixedLenFeature
13,075
import tensorflow as tf for token_preprocess_fn in token_preprocess_fns: dataset = token_preprocess_fn(dataset, training) if debug_print_examples: def print_examples_and_shapes(x): if np.random.uniform() < debug_print_examples_rate: tf.print( { 'inputs_shape': tf.size(x['inputs']), 'targets_shape': tf.size(x['targets']), 'inputs': x['inputs'], 'targets': x['targets'], }, output_stream=logging.info) return x dataset = dataset.map(print_examples_and_shapes)
tensorflow.size
13,076
import tensorflow as tf masks = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32) scores = tf.constant([], dtype=tf.float32) classes = tf.constant([], dtype=tf.int32) (nms_masks1, nms_scores1, nms_classes1, _) = isu.instance_non_maximum_suppression_1d_scores( masks, scores, classes, min_score_thresh=0.65, min_iou_thresh=0.5, is_class_agnostic=True) nms_masks_expected1 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32) nms_scores_expected1 = tf.constant([], dtype=tf.float32) nms_classes_expected1 = tf.constant([], dtype=tf.int32) (nms_masks2, nms_scores2, nms_classes2, _) = isu.instance_non_maximum_suppression_1d_scores( masks, scores, classes, min_score_thresh=0.65, min_iou_thresh=0.5, is_class_agnostic=False) nms_masks_expected2 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32) nms_scores_expected2 = tf.constant([], dtype=tf.float32) nms_classes_expected2 = tf.constant([], dtype=tf.int32) self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy())
tensorflow.constant
13,077
import tensorflow as tf rightmost_transposed_ndims = tf.identity(rightmost_transposed_ndims) perm = tf.range( start=rightmost_transposed_ndims - 1, limit=-1, delta=-1, name='perm') else: # perm is not None: perm = tf.convert_to_tensor(value=perm, dtype=np.int32, name='perm') rightmost_transposed_ndims = tf.size( input=perm, name='rightmost_transposed_ndims') rightmost_transposed_ndims_ = tf.get_static_value( rightmost_transposed_ndims) with tf.control_dependencies(_maybe_validate_perm(perm, validate_args)): perm = tf.identity(perm) # TODO(b/110828604): If bijector base class ever supports dynamic # `min_event_ndims`, then this class already works dynamically and the # following five lines can be removed. if rightmost_transposed_ndims_ is None: raise NotImplementedError('`rightmost_transposed_ndims` must be ' 'known prior to graph execution.') else: rightmost_transposed_ndims_ = int(rightmost_transposed_ndims_) self._perm = perm self._rightmost_transposed_ndims = rightmost_transposed_ndims
tensorflow.identity
13,078
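A minimal sketch of the tf.identity-under-control-dependencies pattern the seed relies on (the assert is an illustrative stand-in for the seed's perm validation; TF 1.x assumed):

import tensorflow as tf

perm = tf.convert_to_tensor([2, 0, 1], dtype=tf.int32, name="perm")
with tf.control_dependencies([tf.assert_non_negative(perm)]):
    perm = tf.identity(perm)   # same values, but now ordered after the check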
import tensorflow as tf tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels":
tensorflow.FixedLenFeature
13,079
import tensorflow as tf def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'): with tf.name_scope('prediction_incorrect'): x = tf.logical_not(tf.nn.in_top_k(logits, label, topk)) return tf.cast(x, tf.float32, name=name) wrong = prediction_incorrect(logits, label, 1, name='wrong-top1') add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1')) wrong = prediction_incorrect(logits, label, 5, name='wrong-top5') add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5')) return loss if __name__ == '__main__': import argparse from tensorpack.dataflow import TestDataSpeed from tensorpack.tfutils import get_default_sess_config parser = argparse.ArgumentParser()
tensorflow.reduce_mean
13,080
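A tiny sketch of tf.reduce_mean turning a 0/1 error vector into an error rate, as the seed does (values are made up):

import tensorflow as tf

wrong = tf.constant([0., 1., 1., 0.])                    # 1.0 marks an incorrect prediction
error_rate = tf.reduce_mean(wrong, name="train-error")   # scalar tensor: 0.5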
import tensorflow as tf
    if hparams.full_latent_tower:
      for i in range(hparams.num_compress_steps):
        with tf.variable_scope("latent_downstride%d" % i):
          x = common_layers.make_even_size(x)
          if i < hparams.filter_double_steps:
tensorflow.variable_scope
13,081
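A minimal TF 1.x sketch of tf.variable_scope name-spacing, mirroring the seed's per-step scopes (loop bound and variable are illustrative):

import tensorflow as tf

for i in range(2):
    with tf.variable_scope("latent_downstride%d" % i):
        w = tf.get_variable("w", shape=[3, 3], initializer=tf.zeros_initializer())
        # variables are named latent_downstride0/w and latent_downstride1/w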
import tensorflow as tf
    lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
    outputs_bw, (hidden_bw, output_bw) = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)

    outputs = tf.concat([outputs_fw, outputs_bw], axis=-1)
    hidden = tf.concat([hidden_fw, hidden_bw], axis=-1)
tensorflow.concat
13,082
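A small sketch of tf.concat joining forward and backward RNN outputs on the last axis, as in the seed (shapes are stand-ins):

import tensorflow as tf

outputs_fw = tf.zeros([8, 10, 64])
outputs_bw = tf.zeros([8, 10, 64])
outputs = tf.concat([outputs_fw, outputs_bw], axis=-1)   # shape [8, 10, 128]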
import tensorflow as tf num_collisions = 0 prev_intersection = 0 sdf_values = tf.zeros_like(samples_world)[:, 0:1] for classes, sdfs, poses in [(predicted_classes, predicted_sdfs, predicted_poses)]: for i in range(classes.shape[0]): sdf = tf.expand_dims(sdfs[i], -1) sdf = sdf * -1.0 # inside positive, outside zero samples_object = centernet_utils.transform_pointcloud( tf.reshape(samples_world, [1, 1, -1, 3]), tf.reshape(poses[2][i], [1, 1, 3]), tf.reshape(poses[0][i], [1, 1, 3, 3]), tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0 samples_object = (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5 samples = tf.squeeze(samples_object) interpolated = trilinear.interpolate(sdf, samples) occupancy_value = tf.math.sign(tf.nn.relu(interpolated + self.tol)) sdf_values += occupancy_value intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
tensorflow.reshape
13,083
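A minimal sketch of the tf.reshape calls the seed uses to add broadcast axes before transforming points (sizes are illustrative):

import tensorflow as tf

samples_world = tf.zeros([4096, 3])                  # stand-in point set
samples = tf.reshape(samples_world, [1, 1, -1, 3])   # [1, 1, 4096, 3]; -1 infers the length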
import tensorflow as tf
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    labels = tf.reshape(labels, [-1])
    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)
tensorflow.reduce_sum
13,084
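A compact TF 1.x sketch of the tf.reduce_sum cross-entropy step shown in the seed (the probabilities are made up):

import tensorflow as tf

one_hot_labels = tf.one_hot([0, 1], depth=2, dtype=tf.float32)
log_probs = tf.log([[0.9, 0.1], [0.2, 0.8]])
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)   # shape [2]
loss = tf.reduce_mean(per_example_loss)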
from tensorflow.python.framework import ops
  x = ops.convert_to_tensor(x, name="x")
  weights = ops.convert_to_tensor(weights, name="weights")
tensorflow.python.framework.ops.convert_to_tensor
13,085
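A minimal sketch of ops.convert_to_tensor from the internal framework module the seed imports (note this is the non-public path; tf.convert_to_tensor is the public equivalent):

from tensorflow.python.framework import ops

x = ops.convert_to_tensor([1.0, 2.0, 3.0], name="x")   # Python list -> float32 Tensor
weights = ops.convert_to_tensor(0.5, name="weights")   # Python scalar -> 0-D Tensor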
import tensorflow as tf # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def main(_): tf.logging.set_verbosity(tf.logging.INFO) if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern))
tensorflow.logging.set_verbosity
13,086
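A two-line sketch of the TF 1.x logging setup the seed performs in main():

import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)   # emit INFO-level messages and above
tf.logging.info("verbosity configured")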
import tensorflow as tf input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks, checkpoint_path=checkpoint) tf.logging.info('Evaluation results: %s' % eval_results) except tf.errors.NotFoundError: # skip checkpoint if it gets deleted prior to evaluation tf.logging.info('Checkpoint %s no longer exists ... skipping') elif mode == 'train_and_eval': current_step = _load_global_step_from_checkpoint_dir(model_dir) tf.logging.info('Starting training at step=%d.' % current_step) train_steps_per_eval = int( hparams.num_epochs_per_eval * train_steps_per_epoch) # Final Evaluation if training is finished.
tensorflow.logging.info
13,087
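A minimal sketch of tf.logging.info with %-formatting, as used throughout the seed (the step and results values are stand-ins):

import tensorflow as tf

current_step = 1000
tf.logging.info('Starting training at step=%d.' % current_step)
tf.logging.info('Evaluation results: %s' % {'top_1_accuracy': 0.75})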
from tensorflow.python.framework import tensor_shape # We will broadcast dim_x to dim_y. return_dims.append(dim_y) elif dim_y.value == 1: # We will broadcast dim_y to dim_x. return_dims.append(dim_x) elif dim_x.value == dim_y.value: # The dimensions are compatible, so output is the same size in that # dimension. return_dims.append(dim_x.merge_with(dim_y)) else: raise ValueError("Incompatible shapes for broadcasting: %s and %s" % (shape_x, shape_y)) return [tensor_shape.TensorShape(return_dims)] @ops.RegisterShape("AddN") def _AddNShape(op): merged_shape = tensor_shape.unknown_shape() for input_ in op.inputs: merged_shape = merged_shape.merge_with(input_.get_shape()) return [merged_shape] @ops.RegisterShape("Select")
tensorflow.python.framework.tensor_shape.TensorShape
13,088
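A short sketch of tensor_shape.TensorShape and merge_with from the internal module used in the seed (dimensions are illustrative; this is a non-public API, so treat the exact behavior as an assumption):

from tensorflow.python.framework import tensor_shape

shape_x = tensor_shape.TensorShape([2, 3])
shape_y = tensor_shape.TensorShape([2, 3])
merged = tensor_shape.unknown_shape().merge_with(shape_x)   # -> TensorShape([2, 3])
compatible = shape_x.merge_with(shape_y)                    # raises ValueError if they clash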
import tensorflow as tf Returns: A `TensorTrain`. Raises: ValueError if the provided TT-cores are not valid or inconsistent with the provided shape. """ tt_cores = list(tt_cores) if convert_to_tensors: with tf.name_scope(name): for i in range(len(tt_cores)): name = "core%d" % i tt_cores[i] = tf.convert_to_tensor(tt_cores[i], name=name) if not _are_tt_cores_valid(tt_cores, shape, tt_ranks): raise ValueError('The tt_cores provided to TensorTrain constructor are ' 'not valid, have different dtypes, or are inconsistent ' 'with the provided shape or TT-ranks.')
tensorflow.name_scope
13,089
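A minimal sketch of tf.name_scope prefixing op names, as the seed does when naming TT-cores (the core value and scope name are stand-ins):

import tensorflow as tf

with tf.name_scope("TensorTrain"):
    core0 = tf.convert_to_tensor([[1.0, 2.0]], name="core0")   # op named TensorTrain/core0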
import tensorflow as tf
    post_init_op_group = None
    local_var_init_op = tf.local_variables_initializer()
    summary_op = tf.summary.merge_all()

    is_chief = (not self.job_name or self.task_index == 0)
    summary_writer = None
tensorflow.summary.merge_all
13,090
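A small TF 1.x sketch of tf.summary.merge_all collecting previously registered summaries (the scalar is illustrative):

import tensorflow as tf

loss = tf.constant(0.25)
tf.summary.scalar("loss", loss)
summary_op = tf.summary.merge_all()   # one op that evaluates every registered summary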
import tensorflow as tf # Traditional U-Net def build_Unet_Arch(self, input_data, name="Unet_Arch"): self.base_number_of_features = 32 with tf.variable_scope(name): # Encoder definition o_c1 = self.general_conv2d(input_data, self.base_number_of_features, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_1') o_mp1 = tf.layers.max_pooling2d(o_c1, 2, 2, name = name + '_maxpooling_1') o_c2 = self.general_conv2d(o_mp1, self.base_number_of_features * 2, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_2') o_mp2 = tf.layers.max_pooling2d(o_c2, 2, 2, name = name + '_maxpooling_2') o_c3 = self.general_conv2d(o_mp2, self.base_number_of_features * 4, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_3') o_mp3 = tf.layers.max_pooling2d(o_c3, 2, 2, name = name + '_maxpooling_3') o_c4 = self.general_conv2d(o_mp3, self.base_number_of_features * 8, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_4') o_mp4 = tf.layers.max_pooling2d(o_c4, 2, 2, name = name + '_maxpooling_4') o_c5 = self.general_conv2d(o_mp4, self.base_number_of_features * 16, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_5') # Decoder definition
tensorflow.layers.max_pooling2d
13,091
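A minimal sketch of tf.layers.max_pooling2d with the pool size and stride of 2 used in the seed's encoder (the input shape is a stand-in):

import tensorflow as tf

x = tf.zeros([1, 64, 64, 32])                                    # NHWC feature map
pooled = tf.layers.max_pooling2d(x, 2, 2, name="maxpooling_1")   # -> [1, 32, 32, 32]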
import tensorflow as tf V = tf.get_variable(name="attn_V", shape=[2 * self.config.hidden_size, 1], initializer=tf.contrib.layers.xavier_initializer(), # initializer=tf.truncated_normal_initializer(), # initializer=tf.keras.initializers.lecun_normal(), dtype=tf.float32) U = tf.get_variable(name="attn_U", shape=[2 * self.config.hidden_size, 2 * self.config.hidden_size], initializer=tf.contrib.layers.xavier_initializer(), # initializer=tf.truncated_normal_initializer(), # initializer=tf.keras.initializers.lecun_normal(), dtype=tf.float32) self.position_emb = tf.reshape(self.position_emb, [-1, 2 * self.config.hidden_size]) shape = tf.shape(output)
tensorflow.contrib.layers.xavier_initializer
13,092
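A compact sketch of tf.contrib.layers.xavier_initializer with tf.get_variable, matching the seed's attention weights (the hidden size is illustrative):

import tensorflow as tf

hidden_size = 128
V = tf.get_variable(name="attn_V",
                    shape=[2 * hidden_size, 1],
                    initializer=tf.contrib.layers.xavier_initializer(),
                    dtype=tf.float32)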
import tensorflow as tf
    train_dataset_reader = dataset.BatchDatset(train_records, image_options)
    validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)

    sess = tf.Session()

    print("Setting up Saver...")
tensorflow.Session
13,093
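A minimal TF 1.x sketch of creating a tf.Session and running a tensor (the graph here is deliberately trivial):

import tensorflow as tf

x = tf.constant(3.0)
sess = tf.Session()
print(sess.run(x + 1.0))   # 4.0
sess.close()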
import tensorflow as tf
      image_classifier.train(
          input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)
      current_step += train_steps_per_eval

      tf.logging.info('Starting evaluation at step=%d.' % current_step)
      eval_results = image_classifier.evaluate(
          input_fn=imagenet_eval.input_fn,
          steps=eval_steps,
          hooks=eval_hooks)
tensorflow.logging.info
13,094
import tensorflow as tf
        # Define the Actor's new and old network models
        with tf.variable_scope(name):
            c0 = tf.cast(self.tfs, tf.float32) / 255.
            c1 = tf.nn.relu(self.conv(c0,
tensorflow.cast
13,095
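A small sketch of the tf.cast-and-scale step from the seed's image preprocessing (the frame tensor is a stand-in):

import tensorflow as tf

frames_uint8 = tf.zeros([1, 84, 84, 3], dtype=tf.uint8)
c0 = tf.cast(frames_uint8, tf.float32) / 255.   # pixels rescaled to [0, 1]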
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature if not tensor_signature.tensors_compatible(targets, self._targets_info): raise ValueError('Targets are incompatible with given information. ' 'Given targets: %s, required signatures: %s.' % (str(targets), str(self._targets_info))) else: self._targets_info = tensor_signature.create_signatures(targets) def _train_model(self, input_fn, steps,
tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_signatures
13,096
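A hedged sketch of tensor_signature.create_signatures and tensors_compatible from tf.contrib.learn; this is an internal TF 1.x module, so the exact behavior shown here is an assumption based only on the seed's own usage:

import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature

targets = tf.placeholder(tf.float32, shape=[None, 1])
targets_info = tensor_signature.create_signatures(targets)        # record dtype/shape
ok = tensor_signature.tensors_compatible(targets, targets_info)   # True for the same spec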
import tensorflow as tf
  flat_offsets = tf.reshape(
      tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
  flat_positions = tf.reshape(positions + flat_offsets, [-1])
tensorflow.range
13,097
import tensorflow as tf if use_bias: logits += self.b_soft_no_learn op_id = tf.multinomial(logits, 1) op_id = tf.to_int32(op_id) op_id = tf.reshape(op_id, [1]) arc_seq = arc_seq.write(start_id + 2 * i + 1, op_id) curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=op_id)
tensorflow.reshape
13,098
import tensorflow as tf
        if keep_unselected:
            range_head = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_head])
            scatter_attn = tf.cond(
                tf.equal(sl_head, 0),
                lambda: tf.zeros([bs, sl+1, hn], tf.float32),
                lambda: tf.scatter_nd(
tensorflow.equal
13,099
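A minimal sketch of tf.equal feeding tf.cond, mirroring the seed's empty-head check (shapes are illustrative):

import tensorflow as tf

sl_head = tf.constant(0)
branch = tf.cond(tf.equal(sl_head, 0),
                 lambda: tf.zeros([2, 3]),   # taken when sl_head == 0
                 lambda: tf.ones([2, 3]))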