Columns:
  seed      string, lengths 25 to 2.89k  - code snippet that exercises the target API
  seed_api  string, lengths 14 to 102    - fully qualified name of the TensorFlow API used
  index     int64, 0 to 14.8k            - row id

Each record below is an "index | seed_api" line followed by the seed snippet.
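As a minimal sketch, records with this schema could be loaded and inspected with the Hugging Face `datasets` library; the dataset id "user/tf-api-seeds" below is a hypothetical placeholder, not the actual repository path:

    from datasets import load_dataset

    # Hypothetical dataset id; substitute the real repository path.
    ds = load_dataset("user/tf-api-seeds", split="train")

    # Each record carries the three columns described above.
    for record in ds.select(range(3)):
        print(record["index"], record["seed_api"])
        print(record["seed"].splitlines()[0])  # first line of the snippet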
index 10,400 | seed_api: tensorflow.sqrt

import tensorflow as tf

m_init, v_init = tf.nn.moments(x, [0])
scale_init = init_scale / tf.sqrt(v_init + 1e-10)
index 10,401 | seed_api: tensorflow.random_uniform

import tensorflow as tf

      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEqual(output.shape, (batch_size,))

  def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000

    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEqual(output.shape, (eval_batch_size,))

  def testLogitsNotSqueezed(self):
    num_classes = 25
index 10,402 | seed_api: tensorflow.square

import tensorflow as tf

#     image_size,
#     num_channels
# )
variance = tf.reduce_mean(
    input_tensor=tf.square(x=centered_image),
    axis=0,
    keepdims=True,
    name="variance"
index 10,403 | seed_api: tensorflow.constant

import tensorflow as tf

      # shape is statically known.
      self.assertEqual(2, a.shape[0].value)
      assertions_triggered[0] += 1
      return a

    f0(tf.constant([1]))
    f1(tf.constant([1]))
    f2(tf.constant([1]))
    self.assertEqual(3, assertions_triggered[0])

  def test_out_of_order_execution1(self):
    with self.test_session() as session:
index 10,404 | seed_api: tensorflow.nn.bias_add

import tensorflow as tf

    outputs = tf.nn.conv3d(inputs, kernel,
                           [1, stride_d, stride_h, stride_w, 1],
                           padding=padding)
    biases = _variable_on_cpu('biases', [num_output_channels],
                              tf.constant_initializer(0.0))
    outputs = tf.nn.bias_add(outputs, biases)
    if bn:
      outputs = batch_norm_for_conv3d(outputs, is_training,
                                      bn_decay=bn_decay, scope='bn')
index 10,405 | seed_api: tensorflow.python.framework.ops.convert_to_tensor

from tensorflow.python.framework import ops

    self._lr_t = None
    self._beta1_t = None
    self._beta2_t = None

  def _prepare(self):
    self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
    self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
    self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")

  def _create_slots(self, var_list):
    # Create slots for the first and second moments.
    for v in var_list:
index 10,406 | seed_api: tensorflow.reduce_mean

import tensorflow as tf

    tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
    nonpadding = tf.to_float(tf.not_equal(v, 0))
    nonpadding_tokens = tf.reduce_sum(nonpadding)
    tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
    tf.summary.scalar("%s_nonpadding_fraction" % k,
                      tf.reduce_mean(nonpadding))

_already_logged = set()
index 10,407 | seed_api: tensorflow.summary.FileWriter

import tensorflow as tf

    sess = tf.Session()

    print("Setting up Saver...")
    saver = tf.train.Saver()

    # create two summary writers to show training loss and validation loss in the same graph
    # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir
    train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)
    validation_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/validation')

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
index 10,408 | seed_api: tensorflow.get_variable_scope

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import array_ops

import numpy as np

def safe_get(name, *args, **kwargs):
    """ Same as tf.get_variable, except flips on reuse_variables automatically """
    try:
        return tf.get_variable(name, *args, **kwargs)
    except ValueError:
        tf.get_variable_scope().reuse_variables()
        return tf.get_variable(name, *args, **kwargs)

def init_weights(shape, name=None):
    shape = tuple(shape)
    weights = np.random.normal(scale=0.01, size=shape).astype('f')
    return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32)

def init_bias(shape, name=None):
    return safe_get(name, initializer=tf.zeros(shape, dtype=tf.float32))

def init_fc_weights_xavier(shape, name=None):
    fc_initializer = tf.contrib.layers.xavier_initializer(dtype=tf.float32)
index 10,409 | seed_api: tensorflow.reshape

import tensorflow as tf

        if self.options.add_first_word_prob_for_phrase:
            # add prob of the first word to each phrase
            attn_dist = add_first_word_prob_to_atten_dists(self.in_passage_words,
                                                           self.phrase_starts, vocab_dist, attn_dist)

        # match attn_dist[batch_size, passage_length] to sparse one-hot representation
        # [batch_size, passage_length, extended_vsize]
        batch_nums = tf.range(0, limit=batch_size)  # shape (batch_size)
        batch_nums = tf.expand_dims(batch_nums, axis=1)  # shape (batch_size, 1)
        batch_nums = tf.tile(batch_nums, [1, passage_length])  # shape (batch_size, passage_length)
        step_nums = tf.range(0, limit=passage_length)  # [passage_length]
        step_nums = tf.expand_dims(step_nums, axis=0)  # shape (1, passage_length)
        step_nums = tf.tile(step_nums, [batch_size, 1])  # shape (batch_size, passage_length)
        indices = tf.stack((batch_nums, step_nums, passage_word_idx), axis=2)  # shape (batch_size, passage_length, 3)
        indices = tf.reshape(indices, [-1, 3])  # [batch_size * passage_length, 3]
        indices = tf.cast(indices, tf.int64)
        shape = [batch_size, passage_length, extended_vsize]
        shape = tf.cast(shape, tf.int64)
        attn_dist = tf.reshape(attn_dist, shape=[-1])  # [batch_size*passage_length]
        one_hot_spare_rep = tf.SparseTensor(indices=indices, values=attn_dist,
                                            dense_shape=shape)  # [batch_size, passage_length, extended_vsize]

        if passage_mask is not None:
            passage_mask = tf.expand_dims(passage_mask, axis=-1)
            one_hot_spare_rep = one_hot_spare_rep * passage_mask
index 10,410 | seed_api: tensorflow.layers.dense

import tensorflow as tf

    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all
    # Mask
index 10,411 | seed_api: tensorflow.expand_dims

import tensorflow as tf

      dw = tf.matmul(
          tf.transpose(x_means_hot, perm=[1, 2, 0]),
          tf.transpose(x_reshaped, perm=[1, 0, 2]))

      updated_ema_means = \
          moving_averages.assign_moving_average(
              self.ema_means, dw, self.hparams.decay, zero_debias=False)
      n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)
      updated_ema_count = ((updated_ema_count + self.hparams.epsilon) /
                           (n + 2**self.hparams.z_size * self.hparams.epsilon) * n)
      updated_ema_means = updated_ema_means / tf.expand_dims(
          updated_ema_count, axis=-1)

      with tf.control_dependencies([e_loss]):
        update_means = tf.assign(self.means, updated_ema_means)
        with tf.control_dependencies([update_means]):
          loss += self.hparams.beta * e_loss
    else:
      # Use a gradient based loss for learning the cluster centers
      loss += q_loss + self.hparams.beta * e_loss

    # Get the discrete latent representation
index 10,412 | seed_api: tensorflow.variable_scope

import tensorflow as tf

      targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
      return tf.nn.seq2seq.model_with_buckets(
          enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
          per_example_loss=per_example_loss)

    # Now we construct the copy model.
    inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
    out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
    weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
    with tf.variable_scope("root"):
      _, losses1 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=False)
      # Now check that we did not accidentally set reuse.
      self.assertEqual(False, tf.get_variable_scope().reuse)
      # Construct one more model with per-example loss.
      tf.get_variable_scope().reuse_variables()
      _, losses2 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=True)
      # First loss is scalar, the second one is a 1-dimensional tensor.
      self.assertEqual([], losses1[0].get_shape().as_list())
index 10,413 | seed_api: tensorflow.convert_to_tensor

import tensorflow as tf

@pytest.fixture
def mu(session_tf):
    return tf.convert_to_tensor(Datum.mu_data)
index 10,414 | seed_api: tensorflow.nn.l2_loss

import tensorflow as tf

        tf.cast(tf.nn.in_top_k(logits, labels, 1), data_type))
    top_5_op = tf.reduce_sum(
        tf.cast(tf.nn.in_top_k(logits, labels, 5), data_type))
    return (logits, top_1_op, top_5_op)

  loss = loss_function(logits, labels)
  params = self.variable_mgr.trainable_variables_on_device(device_num)
  l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in params])
  weight_decay = FLAGS.weight_decay
  if weight_decay is not None and weight_decay != 0.:
    loss += weight_decay * l2_loss

  aggmeth = tf.AggregationMethod.DEFAULT
index 10,415 | seed_api: tensorflow.reshape

import tensorflow as tf

  xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
  run_config = xlnet.create_run_config(is_training, True, FLAGS)

  xlnet_model = xlnet.XLNetModel(
      xlnet_config=xlnet_config,
      run_config=run_config,
      input_ids=inp,
      seg_ids=seg_id,
      input_mask=inp_mask)

  summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj)

  with tf.variable_scope("logits"):
    logits = tf.layers.dense(summary, 1,
                             kernel_initializer=xlnet_model.get_initializer())
    logits = tf.reshape(logits, [bsz_per_core, 4])

    one_hot_target = tf.one_hot(label, 4)
    per_example_loss = -tf.reduce_sum(
        tf.nn.log_softmax(logits) * one_hot_target, -1)
    total_loss = tf.reduce_mean(per_example_loss)

  return total_loss, per_example_loss, logits
index 10,416 | seed_api: tensorflow.Print

import tensorflow as tf

out = tf.gradients(Omega, self.W_rec)
out[0] = tf.Print(out[0], [out[0], self.W_rec, Omega], "omega grads")
out[0] = tf.verify_tensor_all_finite(out[0], "dead omega grad")
index 10,417 | seed_api: tensorflow.contrib.data.parallel_interleave

import tensorflow as tf

      d = d.apply(
          tf.contrib.data.parallel_interleave(
index 10,418 | seed_api: tensorflow.matmul

import tensorflow as tf

    def fwd_gradients_1(self, U, x):
        g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0]
        return tf.gradients(g, self.dummy_x1_tf)[0]

    def net_U0(self, x):
        lambda_1 = self.lambda_1
        lambda_2 = tf.exp(self.lambda_2)
        U = self.neural_net(x, self.weights, self.biases)
        U_x = self.fwd_gradients_0(U, x)
        U_xx = self.fwd_gradients_0(U_x, x)
        U_xxx = self.fwd_gradients_0(U_xx, x)
        F = -lambda_1*U*U_x - lambda_2*U_xxx
        U0 = U - self.dt*tf.matmul(F, self.IRK_alpha.T)
        return U0

    def net_U1(self, x):
        lambda_1 = self.lambda_1
        lambda_2 = tf.exp(self.lambda_2)
        U = self.neural_net(x, self.weights, self.biases)
        U_x = self.fwd_gradients_1(U, x)
        U_xx = self.fwd_gradients_1(U_x, x)
        U_xxx = self.fwd_gradients_1(U_xx, x)
        F = -lambda_1*U*U_x - lambda_2*U_xxx
        U1 = U + self.dt*tf.matmul(F, (self.IRK_beta - self.IRK_alpha).T)
        return U1
index 10,419 | seed_api: tensorflow.convert_to_tensor

import tensorflow as tf

def l1_l2_regularizer(var, weight_l1=1.0, weight_l2=1.0, name='l1_l2_regularizer'):
    """Define an L1+L2 loss, useful for regularization, i.e. weight decay.

    Args:
        var: tensor to regularize.
        weight_l1: an optional weight to modulate the l1 loss.
        weight_l2: an optional weight to modulate the l2 loss.
        name: Optional scope/name for op_scope.

    Returns:
        the l1+L2 loss op.
    """
    with tf.name_scope(name):
        weight_l1_t = tf.convert_to_tensor(weight_l1, dtype=var.dtype.base_dtype, name='weight_l1')
        weight_l2_t = tf.convert_to_tensor(weight_l2, dtype=var.dtype.base_dtype, name='weight_l2')
        reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(var)), name='value_l1')
        reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(var), name='value_l2')
        return tf.add(reg_l1, reg_l2, name='value')

def l1_regularizer(scale, name='l1_regularizer'):
    """Returns a function that can be used to apply L1 regularization to weights.

    L1 regularization encourages sparsity.

    Args:
        scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
        name: An optional name/scope name.
index 10,420 | seed_api: tensorflow.get_collection

import tensorflow as tf

  # Create Clones.
  clones = create_clones(config, model_fn, args, kwargs)
  first_clone = clones[0]

  # Gather update_ops from the first clone. These contain, for example,
  # the updates for the batch_norm variables created by model_fn.
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone.scope)

  train_op = None
  total_loss = None
  with tf.device(config.optimizer_device()):
    if optimizer:
      # Place the global step on the device storing the variables.
index 10,421 | seed_api: tensorflow.shape

import tensorflow as tf

      class_predictions_with_background: A float tensor of shape
        [batch_size, 1, num_classes + 1] representing the class predictions for
        the proposals.

    Raises:
      ValueError: if num_predictions_per_location is not 1.
    """
    if num_predictions_per_location != 1:
      raise ValueError('Currently RfcnBoxPredictor only supports '
                       'predicting a single box per class per location.')

    batch_size = tf.shape(proposal_boxes)[0]
    num_boxes = tf.shape(proposal_boxes)[1]

    def get_box_indices(proposals):
      proposals_shape = proposals.get_shape().as_list()
      if any(dim is None for dim in proposals_shape):
        proposals_shape = tf.shape(proposals)
      ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
      multiplier = tf.expand_dims(
          tf.range(start=0, limit=proposals_shape[0]), 1)
      return tf.reshape(ones_mat * multiplier, [-1])

    net = image_features
index 10,422 | seed_api: tensorflow.cast

import tensorflow as tf

    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2

    geq = tf.cast(tgt_dif > 0, tf.bool)
    # tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
index 10,423 | seed_api: tensorflow.OptimizerOptions

import tensorflow as tf

    self.validateMoments([10**5], -5.0, 1.0, 2.0, np.infty)

  def testSmallStddev(self):
    self.validateKolmogorovSmirnov([10**5], 0.0, 0.1, 0.05, 0.10)


class ParameterizedTruncatedNormalGpuTest(ParameterizedTruncatedNormalTest):
  _use_gpu = True


# Benchmarking code
def parameterized_vs_naive(shape, num_iters, use_gpu=False):
  np.random.seed(1618)  # Make it reproducible.

  # No CSE/CF.
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
  config = tf.ConfigProto(
      graph_options=tf.GraphOptions(optimizer_options=optimizer_options))

  with tf.Session(config=config) as sess:
    with tf.device("/cpu:0" if not use_gpu else None):
      param_op = tf.group(random_ops.parameterized_truncated_normal(shape))
      naive_op = tf.group(random_ops.truncated_normal(shape))

      # Burn-in to avoid session setup costs in the timing.
      sess.run(param_op)
      sess.run(param_op)
      param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
      sess.run(naive_op)
      sess.run(naive_op)
index 10,424 | seed_api: tensorflow.logging.info

import tensorflow as tf

          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
index 10,425 | seed_api: tensorflow.print

import tensorflow as tf

    with tf.name_scope('validate'):
      x, y = self._build_data_pipeline()
      y_hat, loss = self._build_validation_model(x, y)

      with tf.control_dependencies([update_op]):
        return tf.print('expect', loss, y, y_hat, summarize=50)


class DataOwner:
  BATCH_SIZE = 30
index 10,426 | seed_api: tensorflow.constant_initializer

import tensorflow as tf

      self.assertEqual(3, len(res))
      self.assertEqual((2, 4), res[0].shape)

      res = sess.run([mem])
      self.assertEqual((2, 2), res[0].shape)

  def testAttentionDecoder2(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        cell = tf.nn.rnn_cell.GRUCell(2)
        inp = [tf.constant(0.5, shape=[2, 2])] * 2
        enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
        attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
                                    for e in enc_outputs])
        dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
        dec, mem = tf.nn.seq2seq.attention_decoder(
index 10,427 | seed_api: tensorflow.get_variable

import tensorflow as tf

  def _DoPredictions(self, in_size, mats, class_weights=None):
    """Takes in an array of states and calculates predictions.

    Get the cross-entropy for each example in the vector self._xent.

    Args:
      in_size: size of the hidden state vectors
      mats: list of hidden state vectors
    """
    pred_mat = tf.get_variable('pred_mat',
                               [in_size, self._out_vocab_size])
    pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

    # Make a prediction on every word.
    def GetWordPred(o_):
      logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
      return tf.nn.softmax(logits)

    self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
    self.cs = self._mask / tf.reduce_sum(self._mask, 1, keep_dims=True)

    # The final prediction is the average of the predictions for each word
index 10,428 | seed_api: tensorflow.keras.layers.Dense

from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

    # final convolutional layer
    # removed GOAL_SIZE
    conv4 = Conv2D(padding="valid", filters=RNN_SIZE-loc_layer_size,
                   kernel_size=[2, 2], strides=1, data_format='channels_last',
                   kernel_initializer=w_init, activation=None)(pool3)

    # FC layers
    flat1a = Flatten(data_format='channels_last')(conv4)
    # removed GOAL_SIZE
    flat1b = Dense(units=RNN_SIZE-loc_layer_size)(flat1a)

    # FC layers for goal_pos input
    # goal_layer1 = Dense(units=GOAL_SIZE)(goal_pos)
    # goal_layer2 = Dense(units=GOAL_SIZE)(goal_layer1)

    # FC layers to find next location
    loc_layer1 = Dense(units=loc_layer_size)(prev_loc)
index 10,429 | seed_api: tensorflow.to_int32

import tensorflow as tf

        logits = self.tanh_constant * tf.tanh(logits)
      index = tf.multinomial(logits, 1)
      index = tf.to_int32(index)
      index = tf.reshape(index, [1])
index 10,430 | seed_api: tensorflow.stack

import tensorflow as tf

        eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))

        q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
        deterministic_actions = tf.argmax(q_values, axis=1)

        batch_size = tf.shape(observations_ph.get())[0]
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                           maxval=num_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                         maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions,
                                      deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions,
                                 lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0,
                                             lambda: update_eps_ph, lambda: eps))
index 10,431 | seed_api: tensorflow.constant_initializer

import tensorflow as tf

        with tf.variable_scope(name):
            self.w = tf.get_variable('w', [input_dim, output_dim],
                                     initializer=tf.random_normal_initializer(stddev=stddev))
            self.b = tf.get_variable('b', [output_dim],
                                     initializer=tf.constant_initializer(0.0))

    def __call__(self, input_var, name=None, w=None, b=None, **kwargs):
        w = w if w is not None else self.w
index 10,432 | seed_api: tensorflow.nn.conv2d

import tensorflow as tf

                                 initializer=tf.random_normal_initializer(stddev=stddev))
        self.b = tf.get_variable('b', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        self.padding = [[0, 0], [k_h//2, k_h//2], [k_w//2, k_w//2], [0, 0]]

    def __call__(self, input_var, name=None, **kwargs):
        _, h, w, c = input_var.shape.as_list()
        _t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
        _t = tf.pad(_t, self.padding, mode='SYMMETRIC')
        return tf.nn.bias_add(
            tf.nn.conv2d(_t, self.w,
                         data_format='NHWC',  # we can't use cudnn due to resize method...
                         strides=[1, 1, 1, 1],
                         padding="VALID"),
            self.b, data_format='NHWC', name=name)

    def get_variables(self):
        return {'w': self.w, 'b': self.b}


class WeightNormSymPadConv2d(object):  # Resize and Convolution (upscale by 2)
    def __init__(self, name, input_dim, output_dim,
                 k_h=3, k_w=3, stddev=0.02):
index 10,433 | seed_api: tensorflow.constant

import tensorflow as tf

    outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
        attention_states=attention_states, initial_state=encoder_state,
        feed_previous=feed_previous, decoder_inputs=targets[:, :-1],
        align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length,
        **parameters
    )

    if use_baseline:
        baseline_rewards = reinforce_baseline(outputs, rewards)  # FIXME: use logits or decoder outputs?
        baseline_weights = get_weights(samples, utils.EOS_ID, include_first_eos=False)
        baseline_loss_ = baseline_loss(rewards=baseline_rewards, weights=baseline_weights)
    else:
        baseline_rewards = rewards
        baseline_loss_ = tf.constant(0.0)

    reinforce_weights = get_weights(samples, utils.EOS_ID, include_first_eos=True)
    reinforce_loss = sequence_loss(logits=outputs, targets=samples,
                                   weights=reinforce_weights, rewards=baseline_rewards)

    trg_mask = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
    xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=trg_mask)

    if monotonicity_weight:
        monotonicity_dist = monotonicity_dist or 1.0
index 10,434 | seed_api: tensorflow.to_int32

import tensorflow as tf

        # Get the normalized coordinates of bboxes
        bottom_shape = tf.shape(bottom)
        height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
        width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
        # dividing the rois by h, w gives the rois' positions on the feature map
        x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
        y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
        x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
        y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
        # Won't be backpropagated to rois anyway, but to save time
        bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
        # 'roi_pooling_size', 7
        pre_pool_size = cfg.FLAGS.roi_pooling_size * 2
        # crop the parts of the feature map that the rois map to, then resize them to 14*14
        crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids),
                                         [pre_pool_size, pre_pool_size], name="crops")

        return slim.max_pool2d(crops, [2, 2], padding='SAME')

    def _dropout_layer(self, bottom, name, ratio=0.5):
        return tf.nn.dropout(bottom, ratio, name=name)

    def _anchor_target_layer(self, rpn_cls_score, name):
        with tf.variable_scope(name):
            # the indices here are over all anchors
            # (1, 1, A * height, width)
            # (1, height, width, A * 4)
            # (1, height, width, A * 4)
            # (1, height, width, A * 4)
index 10,435 | seed_api: tensorflow.reshape

import tensorflow as tf

        shape [-1, num_blocks, block_dim].
      means: Embedding means.

    Returns:
      The nearest neighbor in one hot form, the nearest neighbor itself, the
      commitment loss, embedding training loss.
    """
    x_means_hot = self.nearest_neighbor(x, means)
    x_means_hot_flat = tf.reshape(
        x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
    x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
    x_means = tf.transpose(x_means, [1, 0, 2])
    q_loss = tf.reduce_mean(
        tf.squared_difference(tf.stop_gradient(x), x_means))
    e_loss = tf.reduce_mean(
        tf.squared_difference(x, tf.stop_gradient(x_means)))
    return x_means_hot, x_means, q_loss, e_loss
index 10,436 | seed_api: tensorflow.maximum

import tensorflow as tf

        d_label_smooth = self.cnf['d_label_smooth']  # 0.25
        self.d_loss_real = self._sigmoid_kl_with_logits(
            self.end_points_D['D_on_data_logits'], 1. - d_label_smooth)
        class_loss_weight = 1.
        self.d_loss_class = class_loss_weight * tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.end_points_D['class_logits'], labels=tf.to_int64(targets))
        self.test_loss = 1. - \
            tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
                self.end_points_D_val['logits'], targets, 1)))
        self.error_rate = 1. - \
            tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
                self.end_points_D['class_logits'], targets, 1)))
        if gpu_idx == 0:
            update = tf.assign(num_error_rate, num_error_rate + 1.)
            with tf.control_dependencies([update]):
                tc = tf.maximum(.01, 1. / num_error_rate)
            update = tf.assign(avg_error_rate,
                               (1. - tc) * avg_error_rate + tc * self.error_rate)
            with tf.control_dependencies([update]):
                self.d_loss_class = tf.identity(self.d_loss_class)
        self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.end_points_D['D_on_G_logits'],
            labels=tf.zeros_like(self.end_points_D['D_on_G_logits']))
        self.d_loss_class = tf.reduce_mean(self.d_loss_class)
        self.d_loss_real = tf.reduce_mean(self.d_loss_real)
        self.d_loss_fake = tf.reduce_mean(self.d_loss_fake)

        if is_fm_loss:
            global_pool_head = self.end_points_D['global_pool']
            real_data_features = tf.slice(global_pool_head, [0, 0],
                                          [batch_size_train, num_classes])
            fake_data_features = tf.slice(global_pool_head, [batch_size_train, 0],
index 10,437 | seed_api: tensorflow.constant_initializer

import tensorflow as tf

      else:
        nfeats_conv = reduce(lambda x, y: x * y,
                             [int(x) for x in cnn_output.get_shape()[-3:]])
        feats_conv = tf.reshape(cnn_output, [batch_size * rnn_nunroll, nfeats_conv])
      nfeats_tot = nfeats_conv + nfeats
      feats_all = tf.concat(1, [feats_conv, feats_other])

      print('feats_cnn: {}'.format(feats_conv.get_shape()))
      print('feats_all: {}'.format(feats_all.get_shape()))

      # Project to RNN size
      rnn_output = feats_all
      rnn_output_size = nfeats_tot
      if do_rnn:
        with tf.variable_scope('rnn_proj'):
          rnn_proj_w = tf.get_variable('W', [nfeats_tot, rnn_size],
                                       initializer=tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype),
                                       dtype=dtype)
          rnn_proj_b = tf.get_variable('b', [rnn_size],
                                       initializer=tf.constant_initializer(0.0),
                                       dtype=dtype)

        rnn_inputs = tf.nn.bias_add(tf.matmul(feats_all, rnn_proj_w), rnn_proj_b)
        rnn_inputs = tf.reshape(rnn_inputs, [batch_size, rnn_nunroll, rnn_size])
        rnn_inputs = tf.split(rnn_inputs, rnn_nunroll, axis=1)
        rnn_inputs = [tf.squeeze(input_, [1]) for input_ in rnn_inputs]

        if rnn_cell_type == 'rnn':
          cell_fn = tf.nn.rnn_cell.BasicRNNCell
        elif rnn_cell_type == 'gru':
          cell_fn = tf.nn.rnn_cell.GRUCell
        elif rnn_cell_type == 'lstm':
          cell_fn = tf.nn.rnn_cell.BasicLSTMCell
        else:
index 10,438 | seed_api: tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig

import tensorflow as tf
import os

log = infolog.log


def add_embedding_stats(summary_writer, embedding_names, paths_to_meta, checkpoint_path):
    # Create tensorboard projector
    config = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()
    config.model_checkpoint_path = checkpoint_path

    for embedding_name, path_to_meta in zip(embedding_names, paths_to_meta):
        # Initialize config
        embedding = config.embeddings.add()
        # Specify the embedding variable and the metadata
index 10,439 | seed_api: tensorflow.train.init_from_checkpoint

import tensorflow as tf

    if use_tpu:

      def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()
index 10,440 | seed_api: tensorflow.python.ops.math_ops.argmax

from tensorflow.python.ops import math_ops

    logits = self._logits(features)
    return self._logits_to_predictions(logits, proba=True)

  def _logits_to_predictions(self, logits, proba=False):
    if self._n_classes < 2:
      return array_ops.reshape(logits, [-1])

    if self._n_classes == 2:
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)

  def _get_feature_ops_from_example(self, examples_batch):
    column_types = layers.create_dict_for_parse_example(
        (self._get_linear_feature_columns() or []) +
        (self._get_dnn_feature_columns() or []))
    features = parsing_ops.parse_example(examples_batch, column_types)
    return features

  def _num_label_columns(self):
    return 1 if self._n_classes <= 2 else self._n_classes

  def _get_linear_feature_columns(self):
index 10,441 | seed_api: tensorflow.placeholder

import tensorflow as tf

        act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
            function to select an action given observation.
            See the top of the file for details.
    """
    if param_noise_filter_func is None:
        param_noise_filter_func = default_param_noise_filter

    with tf.variable_scope(scope, reuse=reuse):
        observations_ph = make_obs_ph("observation")
        stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
        update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
        update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
        update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
        reset_ph = tf.placeholder(tf.bool, (), name="reset")

        eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
        param_noise_scale = tf.get_variable("param_noise_scale", (),
                                            initializer=tf.constant_initializer(0.01), trainable=False)
        param_noise_threshold = tf.get_variable("param_noise_threshold", (),
                                                initializer=tf.constant_initializer(0.05), trainable=False)

        # Unmodified Q.
        q_values = q_func(observations_ph.get(), num_actions, scope="q_func")

        # Perturbable Q used for the actual rollout.
        q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
index 10,442 | seed_api: tensorflow.get_variable

import tensorflow as tf

      dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
          enc_inp, dec_inp, cell, num_symbols=5,
          num_decoder_symbols=3, embedding_size=2)
      sess.run([tf.global_variables_initializer()])
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 3), res[0].shape)

      # Test externally provided output projection.
      w = tf.get_variable("proj_w", [2, 5])
      b = tf.get_variable("proj_b", [5])
      with tf.variable_scope("proj_seq2seq"):
        dec, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
            enc_inp, dec_inp, cell, num_symbols=5,
            embedding_size=2, output_projection=(w, b))
      sess.run([tf.global_variables_initializer()])
      res = sess.run(dec)
      self.assertEqual(3, len(res))
index 10,443 | seed_api: tensorflow.get_variable

import tensorflow as tf

    self.sample_entropy = entropy_1 + entropy_2
    self.sample_log_prob = log_prob_1 + log_prob_2

  def _create_params(self):
    initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
    with tf.variable_scope(self.name, initializer=initializer):
      with tf.variable_scope("lstm"):
        self.w_lstm = []
        for layer_id in range(self.lstm_num_layers):
          with tf.variable_scope("layer_{}".format(layer_id)):
            w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size])
            self.w_lstm.append(w)

      self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
      with tf.variable_scope("emb"):
        self.w_emb = tf.get_variable("w", [self.num_branches, self.lstm_size])
      with tf.variable_scope("softmax"):
        self.w_soft = tf.get_variable("w", [self.lstm_size, self.num_branches])
        b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2),
                          dtype=np.float32)
        self.b_soft = tf.get_variable(
            "b", [1, self.num_branches],
            initializer=tf.constant_initializer(b_init))

        b_soft_no_learn = np.array(
            [0.25, 0.25] + [-0.25] * (self.num_branches - 2), dtype=np.float32)
index 10,444 | seed_api: tensorflow.app.flags.DEFINE_boolean

import tensorflow as tf

    'model_scope', None,
    'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
    'checkpoint_exclude_scopes', None,
    'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
    'ignore_missing_vars', True,
    'Whether to ignore missing variables when restoring a checkpoint.')
tf.app.flags.DEFINE_boolean(
    'run_on_cloud', False,
    'Whether we will train on cloud.')
tf.app.flags.DEFINE_boolean(
    'seq_train', False,
    'Whether we will train a sequence model.')
tf.app.flags.DEFINE_string(  #
    'model_to_train', 'blouse, dress, outwear, skirt, trousers',
    # 'all, blouse, dress, outwear, skirt, trousers', 'skirt, dress, outwear, trousers',
index 10,445 | seed_api: tensorflow.python.ops.array_ops.expand_dims

from tensorflow.python.ops import array_ops


def _log_loss_with_two_classes(logits, target):
  # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])
  loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
                                                  math_ops.to_float(target))
  return loss_vec
index 10,446 | seed_api: tensorflow.group

import tensorflow as tf

  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    summaries.append(tf.summary.histogram(var.op.name, var))

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      cifar10.MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())

  # Group all updates into a single train op.
  train_op = tf.group(apply_gradient_op, variables_averages_op)

  # Create a saver.
  saver = tf.train.Saver(tf.global_variables())

  # Build the summary operation from the last tower summaries.
  summary_op = tf.summary.merge(summaries)

  # Build an initialization operation to run below.
  init = tf.global_variables_initializer()
index 10,447 | seed_api: tensorflow.python.framework.ops.Graph

from tensorflow.python.framework import ops

      print("%s takes %.4f sec/step" % (desc, step_time))
      self.report_benchmark(
          name=desc,
          iters=benchmark_steps,
          wall_time=total_time)

  def benchmarkCudnnLSTMTraining(self):
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      config = test_configs[config_name]
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]

      with ops.Graph().as_default(), ops.device("/device:GPU:0"):
        model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)
        params_size_t = model.params_size()
        input_data = variables.Variable(
            array_ops.ones([seq_length, batch_size, num_units]))
        input_h = variables.Variable(
            array_ops.ones([num_layers, batch_size, num_units]))
        input_c = variables.Variable(
            array_ops.ones([num_layers, batch_size, num_units]))
        params = variables.Variable(
            array_ops.ones([params_size_t]), validate_shape=False)
        output, output_h, output_c = model(
            is_training=True,
index 10,448 | seed_api: tensorflow.nn.embedding_lookup

import tensorflow as tf

            cell = DropoutWrapper(cell, input_keep_prob=encoder.rnn_input_keep_prob,
                                  output_keep_prob=encoder.rnn_output_keep_prob,
                                  state_keep_prob=encoder.rnn_state_keep_prob,
                                  variational_recurrent=encoder.pervasive_dropout,
                                  dtype=tf.float32, input_size=input_size)
        return cell

    batch_size = tf.shape(encoder_inputs_)[0]
    time_steps = tf.shape(encoder_inputs_)[1]

    if embeddings is not None:
        flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)])
        flat_inputs = tf.nn.embedding_lookup(embeddings, flat_inputs)
        encoder_inputs_ = tf.reshape(flat_inputs,
                                     tf.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value]))

    if pos_embeddings is not None:
        pos_inputs_ = tf.range(time_steps, dtype=tf.int32)
        pos_inputs_ = tf.nn.embedding_lookup(pos_embeddings, pos_inputs_)
        pos_inputs_ = tf.tile(tf.expand_dims(pos_inputs_, axis=0), [batch_size, 1, 1])
        encoder_inputs_ = tf.concat([encoder_inputs_, pos_inputs_], axis=2)

    if other_inputs is not None:
        encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2)
index 10,449 | seed_api: tensorflow.core.framework.op_def_pb2.OpDef.ArgDef

from tensorflow.core.framework import op_def_pb2

    def _to_argdef_list(args):
      names = [n for n, t in args]
      if len(names) != len(set(names)):
        raise ValueError("Expected names to all be unique: %s" % str(names))
      return [op_def_pb2.OpDef.ArgDef(type=t.as_datatype_enum, name=n)
              for n, t in args]

    self._sig.input_arg.extend(_to_argdef_list(inputs))
    self._sig.output_arg.extend(_to_argdef_list(outputs))
index 10,450 | seed_api: tensorflow.data.TFRecordDataset

import tensorflow as tf

    else:
      d = tf.data.TFRecordDataset(input_files)
index 10,451 | seed_api: tensorflow.train.RMSPropOptimizer

import tensorflow as tf

        elif self.optim_type == 'rprop':
            self.optimizer = tf.train.RMSPropOptimizer(self.lr)
index 10,452 | seed_api: tensorflow.shape

import tensorflow as tf

  def _TokenToString(self, token):
    return py_x_ops.vocab_id_to_token(token, vocab=self._pieces)

  def _StringToToken(self, tokstr):
    return tf.where(
        py_x_ops.token_in_vocab(tokstr, vocab=self._pieces),
        py_x_ops.vocab_token_to_id(tokstr, vocab=self._pieces),
        tf.broadcast_to(NO_TOKEN, tf.shape(tokstr)))

  def _MergeTokens(self, tokens):
    return self._StringToToken(
        self._TokenToString(tokens[0]) + self._TokenToString(tokens[1]))

  def _EncodeToIds(self, word):
    # Below:
index 10,453 | seed_api: tensorflow.convert_to_tensor

import tensorflow as tf

        window = [1, patch_size, patch_size, 1]
        print('Window:', window)

        n_row, n_col, n_channel = x.shape
        n_patch = n_row * n_col // (patch_size ** 2)
        patches = tf.image.extract_patches(tf.expand_dims(x, 0), sizes=window,
                                           strides=window, rates=[1, 1, 1, 1],
                                           padding='VALID')
        patches = tf.reshape(patches, [n_patch, patch_size, patch_size, n_channel])
        patches = tf.random.shuffle(patches)
        rows = tf.split(patches, n_col // patch_size, axis=0)
        rows = [tf.concat(tf.unstack(x), axis=1) for x in rows]
        x_aug = tf.concat(rows, axis=0)
        x_aug = tf.convert_to_tensor(x_aug)
        return tf.concat([x, x_aug], axis=2)

    def gaussian_blur(self, x):
        # create random gaussian blur filter
        mean = 0
        std = tf.random.uniform(shape=[], minval=5, maxval=10, dtype=tf.float32)  # std [5-10]
        size = tf.random.uniform(shape=[], minval=3, maxval=7, dtype=tf.int32)  # size [7-15]
        self.kernel = self.gaussian_kernel(size, mean, std)
        self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
import tensorflow as tf """ TODO: Track Moving mean and variance, and use this statistics. with tf.variable_scope(name): self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False) self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False) """ if out_dim is not None: with tf.variable_scope(name) : self.gamma= tf.get_variable('gamma',[1,1,1,out_dim], initializer=tf.constant_initializer(1.0)) self.beta = tf.get_variable('beta',[out_dim], initializer=tf.constant_initializer(0.0)) else: self.gamma = None self.beta = None self.axis = axis
tensorflow.variable_scope
10,454
import tensorflow as tf """ smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) shape = tf.shape(image) height = shape[0] width = shape[1] new_height, new_width = _smallest_size_at_least(height, width, smallest_side) image = tf.expand_dims(image, 0) resized_image = tf.image.resize_bilinear(image, [new_height, new_width], align_corners=False) resized_image = tf.squeeze(resized_image) resized_image.set_shape([None, None, 3]) return resized_image def preprocess_for_train(image, output_height, output_width, resize_side_min=_RESIZE_SIDE_MIN, resize_side_max=_RESIZE_SIDE_MAX):
tensorflow.squeeze
10,455
index 10,456 | seed_api: tensorflow.get_variable_scope

import tensorflow as tf

                tf.get_variable_scope(), reuse=True))
        else:
            # work around https://github.com/tensorflow/tensorflow/issues/14703
            ret.append(tf.variable_scope(tf.get_variable_scope()))
        # always clear existing ns  # TODO check existing ns
        if len(self._name) and self._name != self._vs_name:
index 10,457 | seed_api: tensorflow.global_variables_initializer

import tensorflow as tf

prediction = add_layer(l1, 10, 1, activation_function=None, nameScope="layerTest2")
sess = tf.Session()

# The with/name blocks above are optional; you can add them or leave them out,
# but the line below is required.
# It creates a 'logs' folder under the current directory and saves the graph
# information into it, so after this code runs a 'logs' folder will exist.
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
    # tensorflow version < 0.12
    writer = tf.train.SummaryWriter('logs/', sess.graph)
else:
    # tensorflow version >= 0.12
    writer = tf.summary.FileWriter("logs/", sess.graph)

if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
    init = tf.initialize_all_variables()
else:
    init = tf.global_variables_initializer()
sess.run(init)
index 10,458 | seed_api: tensorflow.merge_all_summaries

import tensorflow as tf

for x_val in x_vals:
    print(sess.run(prod, feed_dict={x_data: x_val}))

merged = tf.merge_all_summaries()

if not os.path.exists('tensorboard_logs/'):
    os.makedirs('tensorboard_logs/')
index 10,459 | seed_api: tensorflow.reshape

import tensorflow as tf

        of true images in the resized images, as resized images can be padded
        with zeros.

    Returns:
      prediction_dict: a dictionary holding prediction tensors to be
        passed to the Loss or Postprocess functions.
    """
    flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
    class_prediction = tf.contrib.layers.fully_connected(
        flattened_inputs, self._num_classes)
    box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)

    return {
        'class_predictions_with_background': tf.reshape(
            class_prediction, [-1, 1, self._num_classes]),
        'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
    }

  def postprocess(self, prediction_dict, true_image_shapes, **params):
    """Convert predicted output tensors to final detections. Unused.

    Args:
      prediction_dict: a dictionary holding prediction tensors.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes of true
        images in the resized images, as resized images can be padded
index 10,460 | seed_api: tensorflow.group

import tensorflow as tf

          'sync_queues_step_end_', [main_fetch_group]))

    variable_mgr_post_init_ops = self.variable_mgr.get_post_init_ops()
    if variable_mgr_post_init_ops:
      post_init_op_group = tf.group(*variable_mgr_post_init_ops)
    else:
      post_init_op_group = None
index 10,461 | seed_api: tensorflow.logging.set_verbosity

import tensorflow as tf

def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
index 10,462 | seed_api: tensorflow.constant

import tensorflow as tf

    self.assertEqual(self.evaluate(pareto.batch_shape_tensor()), (5,))
    self.assertEqual(pareto.batch_shape, tf.TensorShape([5]))
    self.assertAllEqual(self.evaluate(pareto.event_shape_tensor()), [])
    self.assertEqual(pareto.event_shape, tf.TensorShape([]))

  def testParetoShapeBroadcast(self):
    scale = tf.constant([[3., 2.]])
    concentration = tf.constant([[4.], [5.], [6.]])
    pareto = tfd.Pareto(concentration, scale)
    self.assertAllEqual(self.evaluate(pareto.batch_shape_tensor()), (3, 2))
    self.assertAllEqual(pareto.batch_shape, tf.TensorShape([3, 2]))
    self.assertAllEqual(self.evaluate(pareto.event_shape_tensor()), [])
index 10,463 | seed_api: tensorflow.python.ops.array_ops.pack

from tensorflow.python.ops import array_ops

    def reallocate():
      next_size = _next_array_size(new_size)
      next_shape = array_ops.pack([next_size] + fixed_shape)
      new_value = array_ops.zeros(next_shape, dtype=values.dtype)
      old_value = array.value()
index 10,464 | seed_api: tensorflow.reshape

import tensorflow as tf

            data_batch.append(d)
            timestamps_batch.append(t)
            label_batch.append(l)
        h5f.close()
    if len(timestamps_batch) > 0:
        yield np.array(data_batch), np.array(timestamps_batch), np.array(label_batch)


def cnn_bi_lstm_model(x, amp_factor, bil_lstm_win_size, num_classes):
    logits = cnn_model(x, amp_factor=amp_factor)
    logits = tf.reshape(logits, [-1, bil_lstm_win_size, 256*amp_factor])

    forward_cell = tf.nn.rnn_cell.LSTMCell(128)
    backward_cell = tf.nn.rnn_cell.LSTMCell(128)
    encoder_outputs, _ = tf.nn.bidirectional_dynamic_rnn(
        forward_cell, backward_cell,
        logits,
        dtype=tf.float32
    )
    encoder_outputs = tf.concat(encoder_outputs, axis=2)
    logits = tf.reshape(tf.layers.dense(encoder_outputs, units=num_classes),
                        [-1, bil_lstm_win_size, num_classes])
index 10,465 | seed_api: tensorflow.python.ops.variable_scope.variable_scope

from tensorflow.python.ops import variable_scope as vs

      return
    # Create the func_def object.
    temp_graph = _FuncGraph()
    with temp_graph.as_default():
      # List of placeholders for the function_def.
      inputs = []
      for (argname, argtype) in self._args:
        argholder = array_ops.placeholder(argtype, name=argname)
        inputs.append(argholder)
      # Call func and gather the output tensors.
      with vs.variable_scope("", custom_getter=temp_graph.getvar):
        outputs = self._func(*inputs)
      # If func only returned one value, make it a tuple.
      if not isinstance(outputs, (list, tuple)):
        outputs = (outputs,)
      if any([_ is None for _ in outputs]):
        raise ValueError("Function can not return None.")
      # Ensures each output is a Tensor.
      outputs = [ops.convert_to_tensor(_) for _ in outputs]
    self._extra_inputs = temp_graph.extra_inputs
    inputs.extend(temp_graph.extra_args)
index 10,466 | seed_api: tensorflow.constant

import tensorflow as tf

      res = sess.run([mem])
      self.assertEqual(2, len(res[0]))
      self.assertEqual((2, 2), res[0][0].c.shape)
      self.assertEqual((2, 2), res[0][0].h.shape)
      self.assertEqual((2, 2), res[0][1].c.shape)
      self.assertEqual((2, 2), res[0][1].h.shape)

  def testEmbeddingAttentionDecoder(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        inp = [tf.constant(0.5, shape=[2, 2])] * 2
        cell = tf.nn.rnn_cell.GRUCell(2)
        enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
        attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
                                    for e in enc_outputs])
        dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
        dec, mem = tf.nn.seq2seq.embedding_attention_decoder(
            dec_inp, enc_state, attn_states, cell, num_symbols=4,
            embedding_size=2, output_size=3)
        sess.run([tf.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 3), res[0].shape)

        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)

  def testEmbeddingAttentionSeq2Seq(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
index 10,467 | seed_api: tensorflow.summary.FileWriter

import tensorflow as tf

    # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir
    train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)
index 10,468 | seed_api: tensorflow.shape

import tensorflow as tf

    :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution.

    :return [Tensor] [N, H', W', C]. Convolution results.
    """
    blk_shape = tf.shape(blk_indices)
    blk_indices_ = tf.reshape(blk_indices, [-1, 3])
    ksize = tf.shape(w)
index 10,469 | seed_api: tensorflow.squeeze

import tensorflow as tf

            output_ = tf.contrib.layers.layer_norm(output_, activation_fn=tf.nn.tanh,
                                                   scope='output_layer_norm')
        else:
            output_ = dense(output_, deep_layer_size, activation=tf.tanh, use_bias=True,
                            name='deep_output')

        if decoder.use_dropout:
            size = tf.shape(output_)[1]
            noise_shape = [1, size] if decoder.pervasive_dropout else None
            output_ = tf.nn.dropout(output_, keep_prob=decoder.deep_layer_keep_prob,
                                    noise_shape=noise_shape)
    else:
        if decoder.pred_maxout_layer:
            maxout_size = decoder.maxout_size or cell_output_size
            output_ = dense(output_, maxout_size, use_bias=True, name='maxout')
            if decoder.old_maxout:  # for back-compatibility with old models
                output_ = tf.nn.pool(tf.expand_dims(output_, axis=2), window_shape=[2],
                                     pooling_type='MAX', padding='SAME', strides=[2])
                output_ = tf.squeeze(output_, axis=2)
            else:
                output_ = tf.maximum(*tf.split(output_, num_or_size_splits=2, axis=1))

        if decoder.pred_embed_proj:
            # intermediate projection to embedding size (before projecting to vocabulary size)
            # this is useful to reduce the number of parameters, and
            # to use the output embeddings for output projection (tie_embeddings parameter)
            output_ = dense(output_, decoder.embedding_size, use_bias=False, name='softmax0')

    if decoder.tie_embeddings and (decoder.pred_embed_proj or decoder.pred_deep_layer):
        bias = get_variable('softmax1/bias', shape=[decoder.vocab_size])
        output_ = tf.matmul(output_, tf.transpose(embedding)) + bias
    else:
        output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1')
index 10,470 | seed_api: tensorflow.variable_scope

import tensorflow as tf

  return (loss, per_example_loss, log_probs)


def get_next_sentence_output(bert_config, input_tensor, labels):
  """Get loss and log probs for the next sentence prediction."""

  # Simple binary classification. Note that 0 is "next sentence" and 1 is
  # "random sentence". This weight matrix is not used after pre-training.
  with tf.variable_scope("cls/seq_relationship"):
    output_weights = tf.get_variable(
        "output_weights",
        shape=[2, bert_config.hidden_size],
        initializer=modeling.create_initializer(bert_config.initializer_range))
    output_bias = tf.get_variable(
        "output_bias", shape=[2], initializer=tf.zeros_initializer())

    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
index 10,471 | seed_api: tensorflow.python.ops.math_ops.to_int64

from tensorflow.python.ops import math_ops

  Raises:
    ValueError: if k is invalid.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(
      None, 'average_precision', (predictions, labels, k)) as scope:
    # Calculate top k indices to produce [D1, ... DN, k] tensor.
    _, predictions_idx = nn.top_k(predictions, k)
    predictions_idx = math_ops.to_int64(predictions_idx,
                                        name='predictions_idx')

    # Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
    # prediction for each k, so we can calculate separate true positive values
    # for each k.
    predictions_idx_per_k = array_ops.expand_dims(
        predictions_idx, -1, name='predictions_idx_per_k')

    # Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
    labels_per_k = expand_and_tile(
index 10,472 | seed_api: tensorflow.global_variables_initializer

import tensorflow as tf

        # minimize the surrogate loss
        self.aloss = -tf.reduce_mean(tf.minimum(
            surr,
            tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))

        self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)

        # log
        self.train_writer = tf.summary.FileWriter("logs/", self.sess.graph)

        self.sess.run(tf.global_variables_initializer())
        self.tableAction = self.createActionTable()

    def createActionTable(self):
        tableAction = []
        for a in range(0, 3):
            for b in range(0, 3):
index 10,473 | seed_api: tensorflow.constant

import tensorflow as tf

            shape=[num_examples, seq_length],
            dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })
index 10,474 | seed_api: tensorflow.summary.image

import tensorflow as tf

                    :, -1], method=1)

                tf.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h)
                tf.summary.image('Compare/gtboxes_r_gpu:%d' % i, gtboxes_in_img_r)

                if cfgs.ADD_BOX_IN_TENSORBOARD:
index 10,475 | seed_api: tensorflow.py_func

import tensorflow as tf

            if policy_name.startswith('svmrank'):
                policy['model'] = svmrank.Model().read(
                    f"trained_models/{args.problem}/{policy['name']}/{seed}/model.txt")
            else:
                with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/model.pkl", 'rb') as f:
                    policy['model'] = pickle.load(f)

            # load feature specifications
            with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/feat_specs.pkl", 'rb') as f:
                feat_specs = pickle.load(f)

            policy['batch_datatypes'] = [tf.float32, tf.int32, tf.int32, tf.float32]
            policy['batch_fun'] = lambda x: load_batch_flat(x, feat_specs['type'],
                                                            feat_specs['augment'], feat_specs['qbnorm'])

        test_data = tf.data.Dataset.from_tensor_slices(test_files)
        test_data = test_data.batch(test_batch_size)
        test_data = test_data.map(lambda x: tf.py_func(
            policy['batch_fun'], [x], policy['batch_datatypes']))
        test_data = test_data.prefetch(2)

        test_kacc = process(policy, test_data, top_k)
        print(f"  {seed} " + " ".join([f"acc@{k}: {100*acc:4.1f}" for k, acc in zip(top_k, test_kacc)]))

        writer.writerow({
            **{
                'policy': f"{policy['type']}:{policy['name']}",
                'seed': seed,
            },
            **{
                f'acc@{k}': test_kacc[i] for i, k in enumerate(top_k)
            },
index 10,476 | seed_api: tensorflow.python.training.gradient_descent.GradientDescentOptimizer

from tensorflow.python.training import gradient_descent

      yield
    else:
      yield

  def _setupDense(self, is_distributed, dtype):
    with self._maybeWithDevice("/job:ps" if is_distributed else None):
      var0 = variables.Variable([[0.0, 1.0], [2.0, 3.0]], dtype=dtype)
      var1 = variables.Variable([4.0, 5.0], dtype=dtype)
    with self._maybeWithDevice("/job:worker" if is_distributed else None):
      grads0 = constant_op.constant([[0.1, 0.1], [0.1, 0.1]], dtype=dtype)
      grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
      sgd = gradient_descent.GradientDescentOptimizer(3.0)
      clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(
          sgd, {var0: [1]}, 2.0)
      update_op = clip_opt.apply_gradients(
          list(zip([grads0, grads1], [var0, var1])))
      variables.global_variables_initializer().run()
      return var0, var1, update_op

  def _assertDenseCorrect(self, var0, var1, update_op):
    # Fetch params to validate initial values
index 10,477 | seed_api: tensorflow.placeholder_with_default

import tensorflow as tf

            global_mode(),
            feed_dict={tf.global_mode(): tf.estimator.ModeKeys.PREDICT})
        # mode == tf.estimator.ModeKeys.PREDICT
    """
    mode = tf.get_collection_ref(_GLOBAL_MODE_KEY)
    if len(mode) < 1:
        # mode_tensor = tf.placeholder(tf.string, name="global_mode")
        mode_tensor = tf.placeholder_with_default(
            input=tf.estimator.ModeKeys.TRAIN,
            shape=(),
            name="global_mode")
        # mode_tensor = tf.constant(
        #     value=tf.estimator.ModeKeys.TRAIN,
        #     dtype=tf.string,
index 10,478 | seed_api: tensorflow.name_scope

import tensorflow as tf

        B_dis_fake = self.discriminator(AB)

        def LSGAN_losses(real, fake):
            d_real = tf.reduce_mean(tf.squared_difference(real, 1), name='d_real')
            d_fake = tf.reduce_mean(tf.square(fake), name='d_fake')
            d_loss = tf.multiply(d_real + d_fake, 0.5, name='d_loss')

            g_loss = tf.reduce_mean(tf.squared_difference(fake, 1), name='g_loss')
            add_moving_summary(g_loss, d_loss)
            return g_loss, d_loss

        with tf.name_scope('losses'):
            with tf.name_scope('LossA'):
                # reconstruction loss
                recon_loss_A = tf.reduce_mean(tf.abs(A - ABA), name='recon_loss')
                # gan loss
                G_loss_A, D_loss_A = LSGAN_losses(A_dis_real, A_dis_fake)

            with tf.name_scope('LossB'):
                recon_loss_B = tf.reduce_mean(tf.abs(B - BAB), name='recon_loss')
                G_loss_B, D_loss_B = LSGAN_losses(B_dis_real, B_dis_fake)

        LAMBDA = 10.0
        self.g_loss = tf.add((G_loss_A + G_loss_B),
index 10,479 | seed_api: tensorflow.constant_initializer

import tensorflow as tf

    def hw_flatten(x):
        return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])  # N = h * w

    s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True)  # [bs, N, N]
    beta = tf.nn.softmax(s, axis=-1)  # attention map
    o = tf.matmul(beta, hw_flatten(h))  # [bs, N, C]
    gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))

    o = tf.reshape(o, shape=inputs.shape)  # [bs, h, w, C]
    x = gamma * o + inputs

    return x


@add_arg_scope
index 10,480 | seed_api: tensorflow.shape

import tensorflow as tf

                          is_train=self.is_train)
            _, state = self.gru(d_inp, init)
            tf.get_variable_scope().reuse_variables()
            _, logits2 = pointer(d_match, state * self.dropout_mask, d, mask)
            return logits1, logits2


def dropout(args, keep_prob, is_train, mode="recurrent"):
    if keep_prob < 1.0:
        noise_shape = None
        scale = 1.0
        shape = tf.shape(args)
        if mode == "embedding":
            noise_shape = [shape[0], 1]
            scale = keep_prob
        if mode == "recurrent" and len(args.get_shape().as_list()) == 3:
            noise_shape = [shape[0], 1, shape[-1]]
        args = tf.cond(is_train, lambda: tf.nn.dropout(
            args, keep_prob, noise_shape=noise_shape) * scale, lambda: args)
    return args
index 10,481 | seed_api: tensorflow.python.ops.math_ops.reduce_sum

from tensorflow.python.ops import math_ops

    max_update = state_ops.assign_add(max_var, batch_max, name='update')
  with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
    total_var = contrib_variables.local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
    batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
    total_update = state_ops.assign_add(total_var, batch_total, name='update')

  # Divide total by max to get mean, for both vars and the update ops.
index 10,482 | seed_api: tensorflow.shape

import tensorflow as tf

                             size=0,
                             dynamic_size=True,
                             element_shape=(facts[:, 0, :].get_shape()))
    _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
    self_attention = output_op.stack()
    self_attention = tf.transpose(self_attention, perm=[1, 0, 2])
    return self_attention


def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    def cond(batch, output, i):
        return tf.less(i, tf.shape(batch)[1])

    def body(batch, output, i):
        self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
                                               ATTENTION_SIZE, mask, softmax_stag=1,
                                               stag=stag, mode='LIST')
        self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
        output = output.write(i, self_attention_tmp)
        return batch, output, i + 1

    output_ta = tf.TensorArray(dtype=tf.float32,
                               size=0,
                               dynamic_size=True,
index 10,483 | seed_api: tensorflow.shape

import tensorflow as tf

            embedding = make_convolutions(self.char_embedding)

        # for highway and projection layers
        n_highway = cnn_options.get('n_highway')
        use_highway = n_highway is not None and n_highway > 0
        use_proj = n_filters != projection_dim

        if use_highway or use_proj:
            # reshape from (batch_size, n_tokens, dim) to (-1, dim)
            batch_size_n_tokens = tf.shape(embedding)[0:2]
            embedding = tf.reshape(embedding, [-1, n_filters])

        # set up weights for projection
        if use_proj:
            assert n_filters > projection_dim
            with tf.variable_scope('CNN_proj') as scope:
                W_proj_cnn = tf.get_variable(
                    "W_proj", [n_filters, projection_dim],
                    initializer=tf.random_normal_initializer(
index 10,484 | seed_api: tensorflow.variable_scope

import tensorflow as tf

            net = tf.layers.dense(net, 200, activation=tf.nn.relu,
                                  kernel_initializer=init_w, bias_initializer=init_b,
                                  name='l2', trainable=trainable)
            with tf.variable_scope('a'):
                actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh,
                                          kernel_initializer=init_w, bias_initializer=init_b,
                                          name='a', trainable=trainable)
import tensorflow as tf heatmap_pred = tf.squeeze(heatmap_pred_list[i]) heatmap_true = tf.squeeze(heatmap_true_list[i]) loss = 0.5 * tf.losses.mean_squared_error(y_pred=heatmap_pred * true_weight[:, i], y_true=heatmap_true * true_weight[:, i]) losses.append(loss) return tf.reduce_mean(loss) class JointsMSELoss(object): def __init__(self):
tensorflow.reduce_mean
10,485
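A standalone sketch of tf.reduce_mean averaging a stack of per-joint losses, as in the (corrected) return statement above; the two loss values are illustrative:

import tensorflow as tf

joint_losses = tf.stack([tf.constant(0.2), tf.constant(0.4)])  # per-joint losses
mean_loss = tf.reduce_mean(joint_losses)                       # average over all elements

with tf.Session() as sess:
    print(sess.run(mean_loss))  # -> ~0.3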
import tensorflow as tf Returns Tensor with same shape as bboxes but making sure that none of the bboxes are outside the image. """ with tf.name_scope('BoundingBoxTransform/clip_bboxes'): bboxes = tf.cast(bboxes, dtype=tf.float32) imshape = tf.cast(imshape, dtype=tf.float32) x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1) width = imshape[1]
tensorflow.cast
10,486
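A minimal sketch of tf.cast converting integer box coordinates to float before the clipping arithmetic, matching the pattern above (the box values are made up):

import tensorflow as tf

bboxes = tf.constant([[0, 0, 10, 10]], dtype=tf.int32)
bboxes_f = tf.cast(bboxes, dtype=tf.float32)  # int32 -> float32 for the clipping math

with tf.Session() as sess:
    print(sess.run(bboxes_f))  # -> [[ 0.  0. 10. 10.]]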
import tensorflow as tf args = parser.parse_args() def model(): x = tf.placeholder(tf.float32, [None, 784], name='x') gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth') with tf.variable_scope('layer1'): w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer()) b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0)) h1 = tf.nn.relu(tf.matmul(x, w1) + b1) with tf.variable_scope('layer2'): w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer()) b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0)) h2 = tf.nn.relu(tf.matmul(h1, w2) + b2) with tf.variable_scope('layer3'): w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer()) b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0)) y = tf.matmul(h2, w3) + b3 # losses cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y)) # optimizer optimizer = tf.train.GradientDescentOptimizer(args.lr) # define one-step train ops train_op = optimizer.minimize(cross_entropy) return x, y, gt, train_op
tensorflow.matmul
10,487
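A self-contained sketch of tf.matmul as one dense layer, the building block of the MLP above; constant weights stand in for the trained variables:

import numpy as np
import tensorflow as tf

x = tf.constant(np.ones((2, 784), np.float32))
w = tf.constant(np.full((784, 1024), 0.01, np.float32))
b = tf.zeros([1024])
h = tf.nn.relu(tf.matmul(x, w) + b)  # one dense layer, as in the model above

with tf.Session() as sess:
    print(sess.run(h).shape)  # -> (2, 1024)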
import tensorflow as tf The 'crop' mode requires source images to be at least as large as the network input size, while the other modes support any sizes and apply random bbox distortions before resizing (even with --nodistortions).""") tf.flags.DEFINE_boolean('distortions', True, """Enable/disable distortions during image preprocessing. These include bbox and color distortions.""") tf.flags.DEFINE_string('local_parameter_device', 'gpu', """Device to use as parameter server: cpu or gpu. For distributed training, it can affect where caching of variables happens.""") tf.flags.DEFINE_string('device', 'gpu', """Device to use for computation: cpu or gpu""") #tf.flags.DEFINE_string('data_format', 'NCHW', tf.flags.DEFINE_string('data_format', 'NHWC', """Data layout to use: NHWC (TF native) or NCHW (cuDNN native).""") tf.flags.DEFINE_integer('num_intra_threads', 1, """Number of threads to use for intra-op parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_integer('num_inter_threads', 0, """Number of threads to use for inter-op parallelism. If set to 0, the system will pick
tensorflow.flags.DEFINE_string
10,488
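A hedged sketch of tf.flags.DEFINE_string; note that on absl-backed TF 1.x builds the flags must be parsed explicitly before access, which the FLAGS(sys.argv) call below assumes:

import sys
import tensorflow as tf

tf.flags.DEFINE_string('data_format', 'NHWC',
                       """Data layout to use: NHWC (TF native) or NCHW (cuDNN native).""")
FLAGS = tf.flags.FLAGS
FLAGS(sys.argv)  # explicit parse; required on absl-based TF versions
print(FLAGS.data_format)  # -> NHWC unless run with --data_format=NCHW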
import tensorflow as tf tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = tf.where(geq, pred_dif, -pred_dif) loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif) cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32) final_loss = tf.reduce_mean(loss) return final_loss, cstr_pct def contra_traj_lossV7(pred, tgt, horizon=12, temp=100): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
tensorflow.reduce_mean
10,489
import tensorflow as tf vf_old, vf_old_params, _, _ = self.build_cnet(batch['state'], 'oldvf') self.vf, vf_params, self.vf_state_init, self.vf_state_final = self.build_cnet(batch['state'], 'vf') self.vf_eval, _, self.vf_eval_state_init, self.vf_eval_state_final = self.build_cnet(self.state, 'vf', reuse=True, batch_size=1) self.sample_action = tf.squeeze(pi_eval.sample(1), axis=0) self.eval_action = pi_eval.mode() self.global_step = tf.train.get_or_create_global_step() self.saver = tf.train.Saver() # Loss functions and training epsilon_decay = tf.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=1) ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6) ratio = tf.clip_by_value(ratio, 0, 10) surr1 = batch['advantage'] * ratio surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay) loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2)) loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf)) loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy()) loss = loss_pg + loss_vf + loss_entropy opt = tf.train.AdamOptimizer(self.LR) self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params) self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)] self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)] self.sess.run(tf.global_variables_initializer()) # Tensorboard if summary_dir is not None:
tensorflow.clip_by_value
10,490
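A minimal sketch of tf.clip_by_value, the elementwise clamp used on the PPO importance ratio above; the ratio values are illustrative:

import tensorflow as tf

ratio = tf.constant([0.5, 1.2, 30.0])
clipped = tf.clip_by_value(ratio, 0.0, 10.0)  # clamp elementwise into [0, 10]

with tf.Session() as sess:
    print(sess.run(clipped))  # -> [ 0.5  1.2 10. ]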
import tensorflow as tf def build_trainer(self, child_model): child_model.build_valid_rl() self.valid_acc = (tf.to_float(child_model.valid_shuffle_acc) / tf.to_float(child_model.batch_size)) self.reward = self.valid_acc if self.entropy_weight is not None: self.reward += self.entropy_weight * self.sample_entropy self.sample_log_prob = tf.reduce_sum(self.sample_log_prob) self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False) baseline_update = tf.assign_sub( self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward)) with tf.control_dependencies([baseline_update]): self.reward = tf.identity(self.reward) self.loss = self.sample_log_prob * (self.reward - self.baseline) self.train_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="train_step") tf_variables = [var for var in tf.trainable_variables() if var.name.startswith(self.name)] print("-" * 80) for var in tf_variables: print(var) self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops( self.loss, tf_variables, self.train_step,
tensorflow.control_dependencies
10,491
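A standalone sketch of tf.control_dependencies, reproducing the baseline-then-reward ordering trick above with made-up constants; tf.identity matters because the dependency only binds to ops created inside the context:

import tensorflow as tf

baseline = tf.Variable(0.0)
reward = tf.constant(1.0)
baseline_update = tf.assign_sub(baseline, 0.1 * (baseline - reward))

with tf.control_dependencies([baseline_update]):
    reward = tf.identity(reward)  # identity creates a new op, so the dependency holds

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(reward)               # forces baseline_update to run first
    print(sess.run(baseline))      # -> 0.1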
import tensorflow as tf return tf.exp(-s/2) * (1 + 3.5156229*t**2 + 3.0899424*t**4 + 1.2067492*t**6 + 0.2659732*t**8 + 0.0360768*t**10 + 0.0045813*t**12) def __phi_g(s): t = s/7.5 return tf.sqrt(2/s) * (0.39894228 + 0.01328592*t**(-1) + 0.00225319*t**(-2) - 0.00157565*t**(-3) + 0.0091628*t**(-4) - 0.02057706*t**(-5) + 0.02635537*t**(-6) - 0.01647633*t**(-7) + 0.00392377*t**(-8)) a = 7.5 return __phi_f(tf.minimum(x, a)) - __phi_f(a) + __phi_g(tf.maximum(x, a)) N = tf.cast(tf.shape(X)[0], tf.float32) if y is None: y = silverman_rule_of_thumb(N) A = 1/(N*N*tf.sqrt(y)) B = 2.0/(N*tf.sqrt(y+0.5)) A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)/(4*y) B1 = euclidean_norm_squared(X, axis=1)/(2+4*y) return 1/tf.sqrt(1+y) + A*tf.reduce_sum(__phi(A1)) - B*tf.reduce_sum(__phi(B1))
tensorflow.shape
10,492
import tensorflow as tf elif encoder.final_state == 'average': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.final_state == 'average_inputs': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.bidir and encoder.final_state == 'last_both': encoder_state_ = tf.concat([last_forward, last_backward], axis=1) elif encoder.final_state == 'none': encoder_state_ = tf.zeros(shape=[batch_size, 0])
tensorflow.expand_dims
10,493
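A minimal sketch of tf.expand_dims, adding the trailing singleton axis that lets the sequence mask broadcast over the feature dimension as in the averaging above:

import tensorflow as tf

mask = tf.sequence_mask([2, 3], maxlen=4, dtype=tf.float32)  # shape [2, 4]
mask3d = tf.expand_dims(mask, axis=2)                        # shape [2, 4, 1], broadcastable

with tf.Session() as sess:
    print(sess.run(tf.shape(mask3d)))  # -> [2 4 1]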
import tensorflow as tf def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0): with tf.variable_scope(scope): nin = x.get_shape()[1].value w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale)) b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias)) return tf.matmul(x, w)+b def batch_to_seq(h, nbatch, nsteps, flat=False): if flat: h = tf.reshape(h, [nbatch, nsteps]) else: h = tf.reshape(h, [nbatch, nsteps, -1]) return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)] def seq_to_batch(h, flat = False): shape = h[0].get_shape().as_list() if not flat: assert(len(shape) > 1) nh = h[0].get_shape()[-1].value return tf.reshape(tf.concat(axis=1, values=h), [-1, nh]) else: return tf.reshape(tf.stack(values=h, axis=1), [-1]) def lstm(xs, ms, s, scope, nh, init_scale=1.0):
tensorflow.squeeze
10,494
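A small sketch of tf.squeeze dropping the singleton time axis, as batch_to_seq does for each split slice above; the shapes are illustrative:

import tensorflow as tf

h = tf.zeros([5, 1, 16])          # one [nbatch, 1, nh] slice from the split above
v = tf.squeeze(h, [1])            # drop the singleton time axis -> [5, 16]

with tf.Session() as sess:
    print(sess.run(tf.shape(v)))  # -> [ 5 16]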
import tensorflow as tf # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn,
tensorflow.contrib.tpu.TPUEstimator
10,495
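A heavily hedged sketch of tf.contrib.tpu.TPUEstimator with use_tpu=False, which falls back to a normal Estimator as the comment above notes; the model_fn, model_dir, and batch size are placeholders, not from the original source:

import tensorflow as tf

def model_fn(features, labels, mode, params):
    # Trivial model, only to illustrate the TPUEstimator contract.
    loss = tf.reduce_mean(tf.square(features['x']))
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op)

estimator = tf.contrib.tpu.TPUEstimator(
    use_tpu=False,  # fall back to plain CPU/GPU execution
    model_fn=model_fn,
    config=tf.contrib.tpu.RunConfig(model_dir='/tmp/tpu_demo'),
    train_batch_size=8)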
import tensorflow as tf else: grads = tf.gradients(loss, self.params)
tensorflow.gradients
10,496
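A self-contained sketch of tf.gradients, the manual-gradient path taken by the else branch above; the loss and variable are illustrative:

import tensorflow as tf

w = tf.Variable([2.0, 3.0])
loss = tf.reduce_sum(w * w)        # d(loss)/dw = 2w
grads = tf.gradients(loss, [w])    # list with one gradient tensor per variable

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(grads[0]))      # -> [4. 6.]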
import tensorflow as tf X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check return X #################################### # Utils #################################### def _do_cutout(self, image, im_width, im_height, cutout_size): mask = tf.ones([cutout_size, cutout_size], dtype=tf.int32) start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32) start_y = tf.random.uniform(shape=(1,), minval=0, maxval=im_height, dtype=tf.int32) mask = tf.pad(mask, [[cutout_size + start_y[0], im_height - start_y[0]], [cutout_size + start_x[0], im_width - start_x[0]]]) mask = mask[cutout_size: cutout_size + im_height, cutout_size: cutout_size + im_width] mask = tf.tile(tf.reshape(mask, (im_height, im_width, 1)), (1, 1, 3)) image = tf.where(tf.equal(mask, 0), x=image, y=tf.zeros_like(image)) return image def _add_drop_path(self, X, keep_prob): with tf.variable_scope('drop_path'):
tensorflow.random.uniform
10,497
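A minimal sketch of tf.random.uniform drawing an integer cutout origin, matching the start_x draw above (the image width of 32 is illustrative):

import tensorflow as tf

# One random cutout origin in [0, 32).
start_x = tf.random.uniform(shape=(1,), minval=0, maxval=32, dtype=tf.int32)

with tf.Session() as sess:
    print(sess.run(start_x))  # e.g. [17]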
import tensorflow as tf def get_input_function(): """A function to get test inputs. Returns an image with one box.""" image = tf.random_uniform([32, 32, 3], dtype=tf.float32) key = tf.constant('image_000000') class_label = tf.random_uniform( [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32) box_label = tf.random_uniform(
tensorflow.constant
10,498
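A small sketch of tf.constant building a scalar string tensor, as the test input function above does for the image key:

import tensorflow as tf

key = tf.constant('image_000000')  # scalar string tensor

with tf.Session() as sess:
    print(sess.run(key))           # -> b'image_000000'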
import tensorflow as tf with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None, normalizer_params=None): box_encodings = slim.conv2d( net, num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], scope='BoxEncodingPredictor') if self._use_dropout: net = slim.dropout(net, keep_prob=self._dropout_keep_prob) class_predictions_with_background = slim.conv2d( net, num_predictions_per_location * num_class_slots, [self._kernel_size, self._kernel_size], scope='ClassPredictor', biases_initializer=tf.constant_initializer( self._class_prediction_bias_init)) if self._apply_sigmoid_to_scores: class_predictions_with_background = tf.sigmoid( class_predictions_with_background) combined_feature_map_shape = shape_utils.combined_static_and_dynamic_shape( image_features) box_encodings = tf.reshape( box_encodings, tf.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] *
tensorflow.constant_initializer
10,499