Columns: seed (string, lengths 25 to 2.89k characters), seed_api (string, lengths 14 to 102 characters), index (int64, values 0 to 14.8k).
Each record below gives a seed code snippet, the fully-qualified TensorFlow API it exercises (seed_api), and its running index.
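The records share a simple three-field schema. The sketch below is illustrative only: the SeedRecord dataclass name is hypothetical, and the example instance is copied from one of the short records in this listing (index 12,103).

from dataclasses import dataclass

@dataclass
class SeedRecord:
    seed: str       # code snippet that exercises the API (25 to 2.89k characters)
    seed_api: str   # fully-qualified API name (14 to 102 characters)
    index: int      # running record index (0 to 14.8k)

# Example record taken from this listing (hypothetical container, real values).
example = SeedRecord(
    seed='from tensorflow.python.framework import ops\n@ops.RegisterShape("TopK")',
    seed_api="tensorflow.python.framework.ops.RegisterShape",
    index=12103,
)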
import tensorflow as tf

boxes.append(tf.reshape(box, (x_shape[0], -1, 1, 4)))
objects.append(tf.reshape(obj, (x_shape[0], -1, 1)))
classes.append(tf.reshape(cls, (x_shape[0], -1, num_classes)))
boxes = tf.concat(boxes, axis=1)
objects = tf.concat(objects, axis=1)
classes = tf.concat(classes, axis=1)
scores = objects * classes
boxes, scores, classes, valid = tf.image.combined_non_max_suppression(
    boxes=boxes,
    scores=scores,
    max_output_size_per_class=max_outputs,
    max_total_size=max_outputs,
    iou_threshold=iou_threshold,
    score_threshold=score_threshold,
    clip_boxes=False)
tensorflow.image.combined_non_max_suppression
12,100
import tensorflow as tf

lstm = tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation=tf.nn.relu, state_is_tuple=True)
cell_drop = tf.contrib.rnn.DropoutWrapper(lstm, variational_recurrent=True, dtype=tf.float32,
                                          input_size=num_input, input_keep_prob=input_prob,
                                          state_keep_prob=state_prob)
tensorflow.contrib.rnn.DropoutWrapper
12,101
import tensorflow as tf losses[loss_name] = mean_loss return losses def summarize_features(features, num_shards=1): with tf.name_scope("input_stats"): for (k, v) in six.iteritems(features): if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1: tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards) tf.summary.scalar("%s_length" % k, tf.shape(v)[1]) nonpadding = tf.to_float(tf.not_equal(v, 0)) nonpadding_tokens = tf.reduce_sum(nonpadding) tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens) tf.summary.scalar("%s_nonpadding_fraction" % k, tf.reduce_mean(nonpadding))
tensorflow.shape
12,102
from tensorflow.python.framework import ops

@ops.RegisterShape("TopK")
tensorflow.python.framework.ops.RegisterShape
12,103
import tensorflow as tf

sigmean = tf.Variable(5.28, name="sigmean", dtype=tf.float64)
sigwidth = tf.Variable(0.0027, name="sigwidth", dtype=tf.float64)
vdict['sigmean'] = sigmean
tensorflow.Variable
12,104
import tensorflow as tf

# add final layer bias
b = tf.Variable(tf.constant(0.1, shape=[nb_classes]), name="b")
# calc l2 losses
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
# do logit = W*X+b
logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
tensorflow.nn.l2_loss
12,105
import tensorflow as tf

stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
tensorflow.cond
12,106
import tensorflow as tf initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec(
tensorflow.logging.info
12,107
from tensorflow.python.feature_column import feature_column_lib as core_feature_column learner_config.constraints.max_tree_depth = 1 model_dir = tempfile.mkdtemp() config = run_config.RunConfig() est = estimator.CoreGradientBoostedDecisionTreeEstimator( head=head_fn, learner_config=learner_config, num_trees=1, examples_per_layer=3, model_dir=model_dir, config=config, feature_columns=[core_feature_column.numeric_column("x")]) # Train for a few steps. est.train(input_fn=_train_input_fn, steps=1000) est.evaluate(input_fn=_eval_input_fn, steps=1) est.predict(input_fn=_eval_input_fn) if __name__ == "__main__": googletest.main()
tensorflow.python.feature_column.feature_column_lib.numeric_column
12,108
import tensorflow as tf return (layer_id + 1, inputs, next_c, next_h, anchors, anchors_w_1, arc_seq, entropy, log_prob) loop_vars = [ tf.constant(2, dtype=tf.int32, name="layer_id"), inputs, prev_c, prev_h, anchors, anchors_w_1, arc_seq, tf.constant([0.0], dtype=tf.float32, name="entropy"), tf.constant([0.0], dtype=tf.float32, name="log_prob"), ] loop_outputs = tf.while_loop(_condition, _body, loop_vars, parallel_iterations=1) arc_seq = loop_outputs[-3].stack() arc_seq = tf.reshape(arc_seq, [-1]) entropy = tf.reduce_sum(loop_outputs[-2]) log_prob = tf.reduce_sum(loop_outputs[-1]) last_c = loop_outputs[-7]
tensorflow.constant
12,109
import tensorflow as tf return forward_fn(inputs, is_train=False, data_format=self.data_format) def calc_loss(self, labels, outputs, trainable_vars): """Calculate loss (and some extra evaluation metrics).""" loss = tf.losses.softmax_cross_entropy(labels, outputs) loss_filter = lambda var: 'batch_normalization' not in var.name loss += FLAGS.loss_w_dcy \ * tf.add_n([tf.nn.l2_loss(var) for var in trainable_vars if loss_filter(var)]) accuracy = tf.reduce_mean( tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(outputs, axis=1)), tf.float32)) metrics = {'accuracy': accuracy} return loss, metrics def setup_lrn_rate(self, global_step): """Setup the learning rate (and number of training iterations).""" nb_epochs = 250 idxs_epoch = [100, 150, 200] decay_rates = [1.0, 0.1, 0.01, 0.001]
tensorflow.argmax
12,110
import tensorflow as tf

def _get_initial_lstm(self, features):
    with tf.variable_scope('initial_lstm'):
        features_mean = tf.reduce_mean(features, 1)
        w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer)
        b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer)
        h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)
        w_c = tf.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer)
        b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer)
        c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c)
tensorflow.get_variable
12,111
import tensorflow as tf

param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
tensorflow.constant_initializer
12,112
import tensorflow as tf

with tf.name_scope('Train'):
    train_input = DataInput(config=config, data=train_data, name='TrainInput')
    with tf.variable_scope('Model', reuse=None, initializer=initializer):
        m = Model(is_training=True, config=config, input_=train_input, graph=train_graph)
    tf.summary.scalar('Training Loss', m.cost)
tensorflow.variable_scope
12,113
import tensorflow as tf

    labels=masked_lm_ids,
    predictions=masked_lm_predictions,
    weights=masked_lm_weights,
)
masked_lm_mean_loss = tf.metrics.mean(
    values=masked_lm_example_loss, weights=masked_lm_weights
)
tensorflow.metrics.mean
12,114
import tensorflow as tf

image_height, image_width = tf.shape(images)[1], tf.shape(images)[2]
cutout_center_height = tf.random.uniform(
    shape=[], minval=0, maxval=image_height, dtype=tf.int32)
cutout_center_width = tf.random.uniform(
    shape=[], minval=0, maxval=image_width, dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - length // 2)
upper_pad = tf.maximum(0, image_height - cutout_center_height - length // 2)
left_pad = tf.maximum(0, cutout_center_width - length // 2)
right_pad = tf.maximum(0, image_width - cutout_center_width - length // 2)
cutout_shape = [image_height - (lower_pad + upper_pad),
                image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tensorflow.maximum
12,115
import tensorflow as tf

self.b1 = tf.get_variable('b1', [2048], initializer=tf.constant_initializer(0.0))
self.b2 = tf.get_variable('b2', [3072], initializer=tf.constant_initializer(0.0))
self.b3 = tf.get_variable('b3', [512], initializer=tf.constant_initializer(0.0))
self.b4 = tf.get_variable('b4', [classnum], initializer=tf.constant_initializer(0.0))

def inference(self, images):
tensorflow.constant_initializer
12,116
import tensorflow as tf

placeholders = {
    'features': tf.sparse_placeholder(tf.float32),
    'adj': tf.sparse_placeholder(tf.float32),
    'adj_orig': tf.sparse_placeholder(tf.float32),
tensorflow.sparse_placeholder
12,117
import tensorflow as tf # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range), ) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer() ) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
tensorflow.variable_scope
12,118
import tensorflow as tf kernel_size=size, kernel_initializer=tf.constant_initializer(1.0), strides=stride, padding="SAME", use_bias=False, trainable=False, dilation_rate=(dilation, dilation)) mask_ratio = slide_window / (update_mask + 1e-8) update_mask = tf.clip_by_value(update_mask, 0.0, 1.0) mask_ratio = mask_ratio * update_mask with tf.variable_scope('parconv'): x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding="SAME", name='zero-conv_' + id, dilations=(1, dilation, dilation, 1)) x = x * mask_ratio if use_bias: bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0)) x = tf.nn.bias_add(x, bias) return x * update_mask
tensorflow.nn.conv2d
12,119
import tensorflow as tf def testInitRequiredAssignAdd(self): with self.test_session(): p = tf.Variable(tf.fill([1024, 1024], 1), tf.int32) a = tf.assign_add(p, tf.fill([1024, 1024], 0)) with self.assertRaisesOpError("use uninitialized"): a.op.run() def testInitRequiredAssignSub(self): with self.test_session(): p = tf.Variable(tf.fill([1024, 1024], 1), tf.int32) a = tf.assign_sub(p, tf.fill([1024, 1024], 0)) with self.assertRaisesOpError("use uninitialized"): a.op.run() # NOTE(mrry): See also # dense_update_ops_no_tsan_test.AssignOpTest, which contains a benign # data race and must run without TSAN. def testParallelUpdateWithLocking(self): with self.test_session() as sess: zeros_t = tf.fill([1024, 1024], 0.0) ones_t = tf.fill([1024, 1024], 1.0) p = tf.Variable(zeros_t)
tensorflow.fill
12,120
import tensorflow as tf with tf.Session(config=config) as sess: t22 = time.time() expected = sess.run(conv) t11 = time.time() for i in range(0, num_trials): sess.run(conv) t22 = time.time() td = abs(t22 - t11) / max(num_trials,1) print("time dense gpu: ", td) tf.reset_default_graph() print("time ratio: ", ts / td) return [expected, sv3, ts, td] def do_test(res, f_density, batch_size): pid = os.getpid() print(pid)
tensorflow.reset_default_graph
12,121
import tensorflow as tf

if logits==True:
    return tf.nn.softmax(logit), logit
return tf.nn.softmax(logit)
tensorflow.nn.softmax
12,122
import tensorflow as tf

# For printing layers shape
self.training_end_points = self.end_points_D
self.training_end_points.update(self.end_points_G)

tf.summary.histogram("d", self.end_points_D['D_on_data'])
tf.summary.histogram("d_", self.end_points_D['D_on_G'])
tf.summary.image("G", G)
tensorflow.summary.histogram
12,123
import tensorflow as tf

pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
tensorflow.reshape
12,124
import tensorflow as tf if use_tpu: def metric_fn(tf_logits, labels): with tf.device("cpu:0"), mtf.utils.outside_all_rewrites(): eval_metrics = {} for metric_name, metric_fn in six.iteritems(eval_metrics_fns): if metric_name.split("/")[-1] not in t2t_model.TPU_METRIC_BLACKLIST: eval_metrics[metric_name] = metric_fn( tf_logits, None, tf.identity(labels)) return eval_metrics return tpu_estimator.TPUEstimatorSpec( tf.estimator.ModeKeys.EVAL, evaluation_hooks=[restore_hook], loss=loss, eval_metrics=(metric_fn, [logits, labels]))
tensorflow.identity
12,125
import tensorflow as tf

x = tf.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)
biases = tf.get_variable("biases", [output_size], dtype=dtype,
                         initializer=tf.constant_initializer(bias_start, dtype=dtype))
x = tf.nn.bias_add(x, biases)
# Reshape res back to: (batch_size, num_node, state_dim)
return tf.reshape(x, [batch_size, self._num_nodes, output_size])
tensorflow.reshape
12,126
import tensorflow as tf ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint, different_vocabulary=False) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
tensorflow.train.init_from_checkpoint
12,127
import tensorflow as tf element_shape=(facts[:, 0, :].get_shape())) _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = tf.transpose(self_attention, perm = [1, 0, 2]) return self_attention def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'): if len(facts.get_shape().as_list()) == 2: facts = tf.expand_dims(facts, 1) def cond(batch, output, i): return tf.less(i, tf.shape(batch)[1]) def body(batch, output, i): self_attention_tmp = din_fcn_attention(batch[:, i, :], batch, ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,
tensorflow.expand_dims
12,128
import tensorflow as tf def _reverse_seq(sequence, sequence_lengths=None): """Reverse sequence along dim 0. Args: sequence: Tensor of shape [T, B, ...]. sequence_lengths: (optional) tensor of shape [B]. If `None`, only reverse along dim 0. Returns: Tensor of same shape as sequence with dim 0 reversed up to sequence_lengths. """ if sequence_lengths is None: return tf.reverse(sequence, [0]) sequence_lengths = tf.convert_to_tensor(sequence_lengths) with tf.control_dependencies( [tf.assert_equal(sequence.shape[1], sequence_lengths.shape[0])]): return tf.reverse_sequence( sequence, sequence_lengths, seq_axis=0, batch_axis=1) def scan_discounted_sum(sequence, decay, initial_value, reverse=False, sequence_lengths=None, back_prop=True, name="scan_discounted_sum"): """Evaluates a cumulative discounted sum along dimension 0. ```python
tensorflow.reverse
12,129
import tensorflow as tf

tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
tensorflow.gradients
12,130
import tensorflow as tf masks out all losses passed the sequence length. Args: logits: Logits of shape `[T, B, vocab_size]` targets: Target classes of shape `[T, B]` sequence_length: An int32 tensor of shape `[B]` corresponding to the length of each input Returns: A tensor of shape [T, B] that contains the loss per example, per time step. """ with tf.name_scope("cross_entropy_sequence_loss"): losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets) loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0])) losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0]) return losses def dice_loss(predictions, targets, weights=1., name='dice_loss'): with tf.name_scope(name): # predictions = tf.to_float(predictions) targets = tf.to_float(targets) intersection = 2 * tf.reduce_sum(predictions * targets) + weights union = weights + tf.reduce_sum(predictions) + tf.reduce_sum(targets) loss = -(intersection / (union))
tensorflow.shape
12,131
import tensorflow as tf

scale = tf.constant([2.] * 5)
concentration = tf.constant([2.] * 5)
tensorflow.constant
12,132
from tensorflow.python.framework import ops ops.RegisterShape("Sign")(common_shapes.unchanged_shape) ops.RegisterShape("Sin")(common_shapes.unchanged_shape) ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape) ops.RegisterShape("Square")(common_shapes.unchanged_shape) ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape) ops.RegisterShape("Tanh")(common_shapes.unchanged_shape) ops.RegisterShape("Cast")(common_shapes.unchanged_shape) ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape) @ops.RegisterShape("Add") @ops.RegisterShape("Complex") @ops.RegisterShape("Div") @ops.RegisterShape("Equal") @ops.RegisterShape("Greater") @ops.RegisterShape("GreaterEqual") @ops.RegisterShape("Less") @ops.RegisterShape("LessEqual") @ops.RegisterShape("LogicalAnd") @ops.RegisterShape("LogicalOr") @ops.RegisterShape("Maximum") @ops.RegisterShape("Minimum") @ops.RegisterShape("Mod") @ops.RegisterShape("Mul") @ops.RegisterShape("NotEqual") @ops.RegisterShape("Pow") @ops.RegisterShape("Sub") def _BroadcastShape(op):
tensorflow.python.framework.ops.RegisterShape
12,133
import tensorflow as tf

batch_count = batch_count + 1
test_accuracy = test_accuracy + test_accuracy_temp
evaluated_images = evaluated_images + testLabels.shape[0]
test_accuracy = test_accuracy / batch_count
print('test set: accuracy on test set: %0.3f' % test_accuracy)

if __name__ == '__main__':
    tf.app.run(main=main)
tensorflow.app.run
12,134
import tensorflow as tf mask = self.q_mask, num_filters = d, num_heads = nh, seq_len = self.q_len, scope = "Encoder_Residual_Block", reuse = True, # Share the weights between passage and question bias = False, dropout = self.dropout) with tf.variable_scope("Context_to_Query_Attention_Layer"): # C = tf.tile(tf.expand_dims(c,2),[1,1,self.q_maxlen,1]) # Q = tf.tile(tf.expand_dims(q,1),[1,self.c_maxlen,1,1]) # S = trilinear([C, Q, C*Q], input_keep_prob = 1.0 - self.dropout) S = optimized_trilinear_for_attention([c, q], self.c_maxlen, self.q_maxlen, input_keep_prob = 1.0 - self.dropout) mask_q = tf.expand_dims(self.q_mask, 1) S_ = tf.nn.softmax(mask_logits(S, mask = mask_q)) mask_c = tf.expand_dims(self.c_mask, 2) S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask = mask_c), dim = 1),(0,2,1))
tensorflow.variable_scope
12,135
import tensorflow as tf moving_averages.assign_moving_average( self.ema_count, tf.reduce_sum( tf.reshape( x_means_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]), axis=0), self.hparams.decay, zero_debias=False) dw = tf.matmul( tf.transpose(x_means_hot, perm=[1, 2, 0]), tf.transpose(x_reshaped, perm=[1, 0, 2])) updated_ema_means = \ moving_averages.assign_moving_average( self.ema_means, dw, self.hparams.decay, zero_debias=False) n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True) updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / ( n + 2**self.hparams.z_size * self.hparams.epsilon) * n) updated_ema_means = updated_ema_means / tf.expand_dims( updated_ema_count, axis=-1) with tf.control_dependencies([e_loss]):
tensorflow.transpose
12,136
import tensorflow as tf with tf.variable_scope('anchor_generator'): if offset is None: offset = [stride[0]/2, stride[1]/2] features_width = tf.cast(features_width, tf.int32) features_height = tf.cast(features_height, tf.int32) scales = tf.convert_to_tensor(scales, dtype=tf.float32) ratios = tf.convert_to_tensor(ratios, dtype=tf.float32) offset = tf.convert_to_tensor(offset, dtype=tf.float32) scales_grid, ratios_grid = tf.meshgrid(scales, ratios) scales_grid = tf.reshape(scales_grid, [-1, 1]) ratios_grid = tf.reshape(ratios_grid, [-1, 1]) ratio_sqrts = tf.sqrt(ratios_grid) heights = scales_grid / ratio_sqrts * base_size[1] widths = scales_grid * ratio_sqrts * base_size[0] x_centers = tf.cast(tf.range(features_width), tf.float32) x_centers = x_centers * stride[1] y_centers = tf.cast(tf.range(features_height), tf.float32) y_centers = y_centers * stride[0] # x_centers = x_centers + offset[1] # y_centers = y_centers + offset[0] x_centers, y_centers = tf.meshgrid(x_centers, y_centers)
tensorflow.reshape
12,137
import tensorflow as tf import tensorflow as tf import numpy as np def YoloV4Header(num_classes, anchorlist, mask, strides, max_outputs, iou_threshold, score_threshold,inputs): boxes, objects, classes = [], [], [] dtype = inputs[0].dtype for i, logits in enumerate(inputs): print(i,mask[i]) stride = strides[i] anchors = anchorlist[mask[i]] x_shape = tf.shape(logits) logits = tf.reshape(logits, (x_shape[0], x_shape[1], x_shape[2], len(anchors), num_classes + 5)) box_xy, box_wh, obj, cls = tf.split(logits, (2, 2, 1, num_classes), axis=-1) box_xy = tf.sigmoid(box_xy) obj = tf.sigmoid(obj) cls = tf.sigmoid(cls) anchors = anchors.astype(np.float32) grid_shape = x_shape[1:3] # print(grid_shape) grid_h, grid_w = grid_shape[0], grid_shape[1] # print(grid_h,tf.range(grid_h))
tensorflow.shape
12,138
import tensorflow as tf x_shape = tuple(x_shape) y_shape = [] for i, s in zip(int_shape(y), tf.unstack(tf.shape(y))): if i is not None: y_shape.append(i) else: y_shape.append(s) y_shape = tuple(y_shape) y_permute_dim = list(range(get_ndim(y))) y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim xt = tf.reshape(x, [-1, x_shape[-1]]) yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1]) return tf.reshape( tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:]) out = tf.matmul(x, y) return out def get_ndim(x): """Returns the number of axes in a tensor, as an integer. Parameters
tensorflow.transpose
12,139
import tensorflow as tf

    """
    logger.debug("base conditional")
    # compute kernel stuff
    num_func = tf.shape(f)[1]  # R
    Lm = tf.cholesky(Kmm)

    # Compute the projection matrix A
    A = tf.matrix_triangular_solve(Lm, Kmn, lower=True)

    # compute the covariance due to the conditioning
    if full_cov:
        fvar = Knn - tf.matmul(A, A, transpose_a=True)
        fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1])  # R x N x N
    else:
tensorflow.matrix_triangular_solve
12,140
import tensorflow as tf Returns: List of delta tensors corresponding to the updates for each optimized variable. """ learning_rate = self.learning_rate.value() unperturbed_loss = fn_loss(**arguments) deltas = [tf.zeros_like(tensor=variable) for variable in variables] previous_perturbations = [tf.zeros_like(tensor=variable) for variable in variables] if self.unroll_loop: # Unrolled for loop for sample in range(self.num_samples): with tf.control_dependencies(control_inputs=deltas): perturbations = [ tf.random_normal(shape=util.shape(variable)) * learning_rate
tensorflow.zeros_like
12,141
import tensorflow as tf

zeros_t = tf.fill([1024, 1024], 0.0)
ones_t = tf.fill([1024, 1024], 1.0)
p = tf.Variable(zeros_t)
assigns = [tf.assign(p, tf.mul(ones_t, float(i)),
tensorflow.Variable
12,142
import tensorflow as tf units=depth, activation=tf.nn.sigmoid, name='T', bias_initializer=tf.constant_initializer(-1.0)) return H * T + inputs * (1.0 - T) def conv1d(inputs, kernel_size, channels, activation, is_training, scope): with tf.variable_scope(scope): conv1d_output = tf.layers.conv1d( inputs, filters=channels, kernel_size=kernel_size, activation=activation, padding='same') return tf.layers.batch_normalization(conv1d_output, training=is_training)
tensorflow.variable_scope
12,143
import tensorflow as tf # tf.nn.rnn_cell # lstm_cell1 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer1, forget_bias=1.0) # lstm_cell2 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer2, forget_bias=1.0) #lstm_cells = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True) # initial_state = lstm_cells.zero_state(batch_size, tf.float32) _, states = tf.nn.dynamic_rnn(lstm_cells, input, dtype=tf.float32, initial_state=None) # z_sequence_output = states[1].h # print(z_sequence_output.get_shape()) states_concat = tf.concat([states[0].h, states[1].h], 1) #def fc(input, scope, out_dim, non_linear_fn=None, initial_value=None, use_bias=True): z_sequence_output = fc(states_concat, lstm_z_sequence_dim, scope='linear_transform')
tensorflow.nn.dynamic_rnn
12,144
from tensorflow.python.framework import tensor_shape logits_shape = op.inputs[0].get_shape() input_shape = logits_shape.with_rank(2) batch_size = input_shape[0] # labels_shape op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size)) return [tensor_shape.vector(batch_size.value), input_shape] @ops.RegisterShape("SoftmaxCrossEntropyWithLogits") def _SoftmaxCrossEntropyWithLogitsShape(op): """Shape function for SoftmaxCrossEntropyWithLogits op.""" logits_shape = op.inputs[0].get_shape() labels_shape = op.inputs[1].get_shape() input_shape = logits_shape.merge_with(labels_shape).with_rank(2) batch_size = input_shape[0] return [tensor_shape.vector(batch_size.value), input_shape] def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None): """Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: A list of ints that has length >= 4. The size of the window for each dimension of the input tensor. strides: A list of ints that has length >= 4.
tensorflow.python.framework.tensor_shape.vector
12,145
import tensorflow as tf initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN:
tensorflow.train.init_from_checkpoint
12,146
import tensorflow as tf

# time_steps = tf.shape(decoder_states)[1]
# state_size = decoder_states.get_shape()[2]
# states = tf.reshape(decoder_states, shape=tf.stack([batch_size * time_steps, state_size]))
baseline = dense(tf.stop_gradient(decoder_states), units=1, activation=None,
                 name='reward_baseline', kernel_initializer=tf.constant_initializer(0.01))
baseline = tf.squeeze(baseline, axis=2)
# baseline = tf.reshape(baseline, shape=tf.stack([batch_size, time_steps]))
return reward - baseline
tensorflow.squeeze
12,147
import tensorflow as tf """ if isinstance(scale, numbers.Integral): raise ValueError('scale cannot be an integer: %s' % scale) if isinstance(scale, numbers.Real): if scale < 0.: raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) if scale == 0.: return lambda _: None def l1(weights, name='l1_regularizer'): """Applies L1 regularization to weights.""" with tf.name_scope(name): my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') return tf.multiply(my_scale, tf.reduce_sum(tf.abs(weights)), name=name) return l1 def l2_regularizer(scale, name='l2_regularizer'): """Returns a function that can be used to apply L2 regularization to weights. Small values of L2 can help prevent overfitting the training data. Args: scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer. name: An optional name/scope name.
tensorflow.abs
12,148
from tensorflow.contrib import layers nn_activations = layers.fully_connected(nn_activations, self.params.layer_size) return nn_activations class ManyToOneLayer(hybrid_layer.HybridLayer): def _define_vars(self, params): pass def inference_graph(self, data): with ops.device(self.device_assigner): # Compute activations for the neural network. nn_activations = layers.fully_connected(data, 1) # There is always one activation per instance by definition, so squeeze # away the extra dimension. return array_ops.squeeze(nn_activations, squeeze_dims=[1]) class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer): """A stacked, fully-connected flattened feed-forward neural network layer.""" def _define_vars(self, params): pass
tensorflow.contrib.layers.fully_connected
12,149
import tensorflow as tf else: raise ValueError('Not defined for this latent dimension') def cw_sampling(X, y=None): def phi_sampling(s, D): return tf.pow(1.0 + 4.0*s/(2.0*D-3), -0.5) D = tf.cast(tf.shape(X)[1], tf.float32) N = tf.cast(tf.shape(X)[0], tf.float32) D_int = tf.cast(D, tf.int32) N_int = tf.cast(N, tf.int32) if y is None: y = silverman_rule_of_thumb(N) YDistr = tf.contrib.distributions.MultivariateNormalDiag(loc=tf.zeros(D_int, tf.float32), scale_diag=tf.ones(D_int, tf.float32)) Y = YDistr.sample(N_int) T = 1.0/(2.0*N*tf.sqrt(m.pi*y)) A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2) A = tf.reduce_sum(phi_sampling(A0/(4*y), D))
tensorflow.cast
12,150
import tensorflow as tf out = tf.matmul(self.fc1, w) + b self.fc2 = tf.nn.relu(out) # fc3 with tf.variable_scope('fc3'): w = tf.get_variable('w', [self.fc2.get_shape()[1], num_classes], initializer=initializer, regularizer=regularizer) b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(1.0)) self.fc3 = tf.matmul(self.fc2, w) + b # Calculate Mean cross-entropy loss with tf.name_scope("loss"): self.predictions = tf.argmax(self.fc3, 1, name="predictions") losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.fc3, labels=self.input_y) regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) self.loss = tf.reduce_mean(losses) + sum(regularization_losses) # Accuracy with tf.name_scope("accuracy"): correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
tensorflow.reduce_mean
12,151
import tensorflow as tf # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters mask = tf.equal(mask, tf.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag) query = prelu(query) queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag) d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts)) output = d_layer_2_all return output
tensorflow.shape
12,152
import tensorflow as tf # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
tensorflow.zeros_initializer
12,153
import tensorflow as tf self.epsilon = epsilon self.axis = axis self.center=center self.scale=scale with tf.variable_scope(name) as scope: with tf.variable_scope('bn') : self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0)) self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0)) self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False) self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False) self.scope = scope def __call__(self,input_var,is_training,**xargs) : with tf.variable_scope(self.scope) : return tf.layers.batch_normalization( input_var, axis=self.axis, momentum=self.momentum, epsilon=self.epsilon, center=self.center, scale=self.scale, training=is_training, reuse=True, name='bn') """ ---Do NOT forget to add update_ops dependencies for your loss function.--- update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,tf.get_default_graph().get_name_scope()) #And, do not make any scope inside map_fn, since scope.name will not work...(it is corrupted by map_fn.)
tensorflow.layers.batch_normalization
12,154
import tensorflow as tf else: return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2]) def mse_loss(pred, labels): try: batch_size = tf.cast(pred.shape[0], tf.float32) except Exception as e: print('Pred is a tf tensor %s' % str(e.message)) batch_size = tf.cast(tf.shape(pred)[0], tf.float32) loss_val = tf.sqrt(2 * tf.nn.l2_loss(pred - labels)) / batch_size return loss_val def pullaway_loss(embeddings, name='pullaway_loss'): """Pull Away loss calculation. Args:
tensorflow.shape
12,155
import tensorflow as tf for input_file in input_files: tf.logging.info(" %s" % input_file) validation_input_files = [] if FLAGS.validation_input_file is None and FLAGS.validation_input_dir is None: validation_input_files = input_files else: if FLAGS.validation_input_file is not None: for input_pattern in FLAGS.validation_input_file.split(","): validation_input_files.extend(tf.gfile.Glob(input_pattern)) if FLAGS.validation_input_dir is not None: for filename in tf.gfile.ListDirectory(FLAGS.validation_input_dir): validation_input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.validation_input_dir, filename))) tf.logging.info("*** Input Validation Files ***") for input_file in validation_input_files: tf.logging.info(" %s" % input_file) config = tf.ConfigProto() if FLAGS.xla: config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1 if FLAGS.use_hvd:
tensorflow.gfile.ListDirectory
12,156
import tensorflow as tf if chaining_strategy == 'map_attns': x = attns elif chaining_strategy == 'map_outputs': x = decoder_outputs else: x = states shape = [x.get_shape()[-1], attention_states[0].get_shape()[-1]] w = tf.get_variable("map_attns/matrix", shape=shape) b = tf.get_variable("map_attns/bias", shape=shape[-1:]) x = tf.einsum('ijk,kl->ijl', x, w) + b if chaining_non_linearity: x = tf.nn.tanh(x) attention_states[0] += x outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder( attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous, decoder_inputs=targets[:,:-1], align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length[:1], **parameters )
tensorflow.einsum
12,157
import tensorflow as tf

Kuu = feat.Kuu(kern, jitter=settings.numerics.jitter_level)  # M x M
Luu = tf.cholesky(Kuu)  # M x M
tensorflow.cholesky
12,158
from tensorflow.python.ops.rnn_cell_impl import _Linear

if self._candidate_linear is None:
    with vs.variable_scope("candidate"):
        self._candidate_linear = _Linear(
            [inputs, r_state],
tensorflow.python.ops.rnn_cell_impl._Linear
12,159
import tensorflow as tf grads.append(expanded_g) grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0) var = grad_and_vars[0][1] grad_and_var = (grad, var) average_grads.append(grad_and_var) return average_grads def binary_mask(shape, p=0.7): samples = tf.random_uniform(shape, minval=0.0, maxval=1.0) mask = tf.less_equal(samples, p) return tf.cast(mask, tf.float32) def weighted_arithmetic_mean(w, x): numer = tf.reduce_sum(w*x) denom = tf.reduce_sum(w) return tf.div(numer, denom)
tensorflow.reduce_sum
12,160
import tensorflow as tf

    Build the RESNET model using loaded weights
    """
    print("Building the RESNET..")
    # Convert RGB to BGR
    with tf.name_scope('Pre_Processing'):
        self.x_preprocessed = self.x_input * (1.0 / 255.0)
        # self.x_preprocessed = self.x_input
        stat = torchfile.load('stat.t7')
        self.resnet_mean = stat.transpose(1, 2, 0)
        # self.resnet_mean = tf.constant([0.2869, 0.3251, 0.2839], dtype=tf.float32)
tensorflow.name_scope
12,161
import tensorflow as tf gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5]) gtboxes_and_label_q = tf.reshape(gtboxes_and_label_batch[start:end], [cfgs.BATCH_SIZE, -1, 9]) num_objects = num_objects_batch[start:end] num_objects = tf.cast(tf.reshape(num_objects, [cfgs.BATCH_SIZE, -1, ]), tf.float32) img_h = img_h_batch[start:end] img_w = img_w_batch[start:end] inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_q, num_objects, img_h, img_w]) tower_grads = [] biases_regularizer = tf.no_regularizer weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY) with tf.variable_scope(tf.get_variable_scope()): for i in range(num_gpu): with tf.device('/gpu:%d' % i): with tf.name_scope('tower_%d' % i): with slim.arg_scope( [slim.model_variable, slim.variable], device='/device:CPU:0'): with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], weights_regularizer=weights_regularizer, biases_regularizer=biases_regularizer, biases_initializer=tf.constant_initializer(0.0)):
tensorflow.contrib.layers.l2_regularizer
12,162
from tensorflow.python.ops import array_ops def __init__(self, label_name, weight_column_name): def loss_fn(logits, target): check_shape_op = control_flow_ops.Assert( math_ops.less_equal(array_ops.rank(target), 2), ["target's shape should be either [batch_size, 1] or [batch_size]"]) with ops.control_dependencies([check_shape_op]): target = array_ops.reshape( target, shape=[array_ops.shape(target)[0], 1]) return loss_ops.hinge_loss(logits, target) super(_BinarySvmTargetColumn, self).__init__( loss_fn=loss_fn, n_classes=2, label_name=label_name, weight_column_name=weight_column_name)
tensorflow.python.ops.array_ops.shape
12,163
import tensorflow as tf else: # Do not perturb, just assign. op = tf.assign(perturbed_var, var) perturb_ops.append(op) assert len(perturb_ops) == len(all_vars) return tf.group(*perturb_ops) # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase. q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func") perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func") kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1) mean_kl = tf.reduce_mean(kl) def update_scale(): with tf.control_dependencies([perturb_for_adaption]): update_scale_expr = tf.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01), lambda: param_noise_scale.assign(param_noise_scale / 1.01), ) return update_scale_expr # Functionality to update the threshold for parameter space noise. update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
tensorflow.nn.softmax
12,164
from tensorflow.python.framework import ops

@ops.RegisterShape("ScatterAdd")
@ops.RegisterShape("ScatterSub")
@ops.RegisterShape("ScatterUpdate")
def _ScatterUpdateShape(op):
tensorflow.python.framework.ops.RegisterShape
12,165
import tensorflow as tf for grad, var in grads] # add vanishing gradient regularizer #out, test = self.dOmega_dWrec() #clipped_grads[0] = (tf.add(out[0], clipped_grads[0][0]), clipped_grads[0][1]) #clipped_grads[0] = (tf.Print(clipped_grads[0][0], [clipped_grads[0][0]], "gw_rec"), clipped_grads[0][1]) optimize = optimizer.apply_gradients(clipped_grads) # run session sess.run(tf.global_variables_initializer()) step = 1 # time training t1 = time() # Keep training until reach max iterations while step * batch_size < training_iters: batch_x, batch_y, output_mask = generator.next() sess.run(optimize, feed_dict={self.x: batch_x, self.y: batch_y, self.output_mask: output_mask}) if step % display_step == 0:
tensorflow.global_variables_initializer
12,166
from tensorflow.python.ops import math_ops return train_tensor def _clip_gradients_by_norm(grads_and_vars, clip_gradients): """Clips gradients by global norm.""" gradients, variables = zip(*grads_and_vars) clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients) return list(zip(clipped_gradients, variables)) def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name): """Find max_norm given norm and previous average.""" with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]): log_norm = math_ops.log(norm + epsilon) def moving_average(name, value, decay): moving_average_variable = vs.get_variable( name, shape=value.get_shape(), dtype=value.dtype, initializer=init_ops.zeros_initializer(), trainable=False) return moving_averages.assign_moving_average( moving_average_variable, value, decay, zero_debias=False) # quicker adaptation at the beginning if global_step is not None:
tensorflow.python.ops.math_ops.log
12,167
import tensorflow as tf

images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32)
images = tf.expand_dims(images, axis=0)
true_image_shape = tf.expand_dims(
    input_dict[fields.InputDataFields.true_image_shape], axis=0)
tensorflow.expand_dims
12,168
import tensorflow as tf

states_tiled = tf.tile(states[:, None], [1, num_tasks, 1])  # B x B x D
states_tiled = tf.reshape(states_tiled, [batch_size * num_tasks, obs_dim])  # B*B x D
actions_tiled = tf.tile(actions[:, None], [1, num_tasks, 1])  # B x B x D
actions_tiled = tf.reshape(actions_tiled, [batch_size * num_tasks, action_dim])  # B*B x D
tasks_tiled = tf.tile(tasks[None], [batch_size, 1, 1])  # B x B x D
tensorflow.tile
12,169
import tensorflow as tf return tf.maximum(learning_rate * decay, 5e-6) elif params.learning_rate_decay == "piecewise_constant": return tf.train.piecewise_constant(tf.to_int32(global_step), params.learning_rate_boundaries, params.learning_rate_values) elif params.learning_rate_decay == "none": return learning_rate else: raise ValueError("Unknown learning_rate_decay") def session_config(params): optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1, do_function_inlining=True) graph_options = tf.GraphOptions(optimizer_options=optimizer_options) config = tf.ConfigProto(allow_soft_placement=True, graph_options=graph_options) if params.device_list: device_str = ",".join([str(i) for i in params.device_list]) config.gpu_options.visible_device_list = device_str config.gpu_options.per_process_gpu_memory_fraction = params.gpu_memory_fraction config.gpu_options.allow_growth = True return config
tensorflow.OptimizerOptions
12,170
import tensorflow as tf

init_fw = tf.tile(tf.Variable(tf.zeros([1, 1, num_units])), [1, batch_size, 1])
init_bw = tf.tile(tf.Variable(tf.zeros([1, 1, num_units])), [1, batch_size, 1])
mask_fw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
                  keep_prob=keep_prob, is_train=is_train, mode=None)
mask_bw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
                  keep_prob=keep_prob, is_train=is_train, mode=None)
self.grus.append((gru_fw, gru_bw, ))
self.inits.append((init_fw, init_bw, ))
self.dropout_mask.append((mask_fw, mask_bw, ))
tensorflow.ones
12,171
import tensorflow as tf def testNonReshape(self): save_path = os.path.join(self.get_temp_dir(), "basics") with self.test_session() as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(20.0, name="v1") save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" # Use a variable name map to set the saved tensor names val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types))
tensorflow.initialize_all_variables
12,172
import tensorflow as tf if anchor_match_mining_distance_matrix is None: anchor_match_mining_distance_matrix = anchor_match_distance_matrix if use_semi_hard: if anchor_positive_mining_distances is None: raise ValueError('Positive match embeddings must be specified to compute ' 'semi-hard distances.') anchor_positive_mining_distances = tf.expand_dims( anchor_positive_mining_distances, axis=-1) indicators &= ( anchor_match_mining_distance_matrix > anchor_positive_mining_distances) def find_hard_distances(distance_matrix, indicator_matrix): distance_matrix = tf.where(
tensorflow.expand_dims
12,173
import tensorflow as tf

op2 = cell_arch[bi][3]
with tf.variable_scope('X1'):
    X1 = self._add_op(cell_inputs, blocks, idx1, op1, w, h, block_ch,
                      is_reduction=is_reduction, is_train=is_train)
    X1 = self._add_drop_path(X1, drop_path_keep_prob)
with tf.variable_scope('X2'):
    X2 = self._add_op(cell_inputs, blocks, idx2, op2, w, h, block_ch,
                      is_reduction=is_reduction, is_train=is_train)
    X2 = self._add_drop_path(X2, drop_path_keep_prob)
X = tf.add_n([X1, X2])
tensorflow.variable_scope
12,174
import tensorflow as tf nms_scores_expected1 = tf.constant([], dtype=tf.float32) nms_classes_expected1 = tf.constant([], dtype=tf.int32) (nms_masks2, nms_scores2, nms_classes2, _) = isu.instance_non_maximum_suppression_1d_scores( masks, scores, classes, min_score_thresh=0.65, min_iou_thresh=0.5, is_class_agnostic=False) nms_masks_expected2 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32) nms_scores_expected2 = tf.constant([], dtype=tf.float32) nms_classes_expected2 = tf.constant([], dtype=tf.int32) self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy()) self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy()) self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy()) self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy()) self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy()) self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy()) def test_instance_non_maximum_suppression_2d_scores(self): mask0 = tf.constant([[1, 0],
tensorflow.constant
12,175
from tensorflow.python.client import graph_util

@ops.RegisterStatistics("Conv2D", "weight_parameters")
def _calc_conv_weight_params(graph, node):
    """Calculates the on-disk size of the weights for Conv2D."""
    input_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                             node.input[0])
    input_shape.assert_is_fully_defined()
    filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
tensorflow.python.client.graph_util.tensor_shape_from_node_def_name
12,176
from tensorflow.python.training import saver as saver_lib return self._best_value def every_n_step_end(self, step, outputs): super(ValidationMonitor, self).every_n_step_end(step, outputs) # TODO(mdan): The use of step below is probably misleading. # The code should probably use the step from the checkpoint, because # that's what is being evaluated. if self._estimator is None: raise ValueError("Missing call to set_estimator.") # Check that we are not running evaluation on the same checkpoint. latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir) if latest_path is None: logging.debug("Skipping evaluation since model has not been saved yet " "at step %d.", step) return False if latest_path is not None and latest_path == self._latest_path: logging.debug("Skipping evaluation due to same checkpoint %s for step %d " "as for step %d.", latest_path, step, self._latest_path_step) return False
tensorflow.python.training.saver.latest_checkpoint
12,177
import tensorflow as tf with tf.variable_scope(encoderscope) as scope: if reuse_encoder: scope.reuse_variables() with tf.variable_scope('color_encoder'): X = encoder_conf('eI', I[:, :, :, :-1], 96, 5, 1, norm, reuse_encoder, is_train, self.args.dropout) # 128 > 124 X0 = encoder_conf('d0', X, 96, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 124 > 62 @2 X = encoder_conf('e1', X0, 128, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 62 > 60 X_EARLY = X X1 = encoder_conf('d1', X, 128, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 60 > 30 @4 X = encoder_conf('e2', X1, 256, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 30 > 28 X2 = encoder_conf('d2', X, 256, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 28 > 14 @8 X = encoder_conf('e3', X2, 512, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 14 > 12 X_MIDDLE = X # ===============================================================================DECODER with tf.variable_scope(decoderscope) as scope: if reuse_decoder: scope.reuse_variables() # print('vnet scope', is_train, reuse_unet) # print('VNET Latent:', X.get_shape().as_list()) with tf.variable_scope('decoder'): X = decoder_conf('d3', X, 512, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 12 > 14 if self.args.skip_connections: X = tf.concat((X, X2), axis=-1) X = decoder_conf('u4', X, 256, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 14 > 28 X = decoder_conf('d4', X, 256, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 28 > 30 if self.args.skip_connections: X = tf.concat((X, X1), axis=-1) X = decoder_conf('u5', X, 128, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 30 > 60 X_LATE = X X = decoder_conf('d5', X, 128, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 60 > 62 if self.args.skip_connections: X = tf.concat((X, X0), axis=-1)
tensorflow.variable_scope
12,178
import tensorflow as tf FLAGS.checkpoint_dir, hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir), tf.contrib.training.StopAfterNEvalsHook(1)], eval_ops=image_write_ops, max_number_of_evaluations=FLAGS.max_number_of_evaluations) def _get_generator_inputs(num_images_per_class, num_classes, noise_dims): # Since we want a grid of numbers for the conditional generator, manually # construct the desired class labels. num_images_generated = num_images_per_class * num_classes noise = tf.random_normal([num_images_generated, noise_dims]) labels = [lbl for lbl in range(num_classes) for _ in range(num_images_per_class)] one_hot_labels = tf.one_hot(tf.constant(labels), num_classes) return noise, one_hot_labels if __name__ == '__main__': app.run(main)
tensorflow.random_normal
12,179
import tensorflow as tf

all_trainable_weights_pi = tf.trainable_variables('model/pi')
regularization_penalty_pi = tf.contrib.layers.apply_regularization(regularizerpi, all_trainable_weights_pi)
tensorflow.contrib.layers.apply_regularization
12,180
import tensorflow as tf

with tf.variable_scope('C_train'):
    self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)

with tf.variable_scope('a_grad'):
    self.a_grads = tf.gradients(self.q, a)[0]  # tensor of gradients of each sample (None, a_dim)

def _build_net(self, s, a, scope, trainable):
    with tf.variable_scope(scope):
        init_w = tf.random_normal_initializer(0., 0.01)
        init_b = tf.constant_initializer(0.01)
        with tf.variable_scope('l1'):
            n_l1 = 700
            # combine the action and states together in this way
            w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
tensorflow.variable_scope
12,181
from tensorflow.python.platform import gfile

self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1)))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
self.assertEqual(2, len(gfile.Glob(s2)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertEqual(2, len(gfile.Glob(s3)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))

class KeepCheckpointEveryNHoursTest(tf.test.TestCase):
tensorflow.python.platform.gfile.Glob
12,182
import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
tensorflow.config.experimental.set_memory_growth
12,183
import tensorflow as tf

with tf.Session(config=config) as sess:
    sess.run(init)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    tf.train.Saver().restore(sess, path)

    # test
    test_acc_avg = 0.0
    test_true_total = np.array([])
tensorflow.train.Saver
12,184
import tensorflow as tf class SaverTest(tf.test.TestCase): def testBasics(self): save_path = os.path.join(self.get_temp_dir(), "basics") with self.test_session() as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(20.0, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types))
tensorflow.Variable
12,185
import tensorflow as tf # Convert input R+1 tensor into a feature dictionary of one R+1 tensor features = {TIMESERIES_COL: inputs} return features, labels # Create list of files that match pattern file_list = tf.gfile.Glob(filename) # Create dataset from file list dataset = tf.data.TextLineDataset(file_list).map(decode_csv) if mode == tf.estimator.ModeKeys.TRAIN: num_epochs = None # indefinitely dataset = dataset.shuffle(buffer_size = 10 * batch_size) else: num_epochs = 1 # end-of-input after this dataset = dataset.repeat(num_epochs).batch(batch_size)
tensorflow.data.TextLineDataset
12,186
import tensorflow as tf

    tf.greater_equal(
        matched_iou, self._config_dict['background_iou_low_threshold']),
    tf.less(
        matched_iou, self._config_dict['background_iou_high_threshold']))
tensorflow.less
12,187
from tensorflow.python.ops import array_ops

max_update = state_ops.assign_add(max_var, batch_max, name='update')
with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
    total_var = contrib_variables.local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
    batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
    total_update = state_ops.assign_add(total_var, batch_total, name='update')
tensorflow.python.ops.array_ops.zeros
12,188
import tensorflow as tf if pairwise_reduction == common.DISTANCE_REDUCTION_NEG_LOG_MEAN: return lambda x: -tf.math.log(tf.math.reduce_mean(x, axis=[-2, -1])) if pairwise_reduction == common.DISTANCE_REDUCTION_LOWER_HALF_NEG_LOG_MEAN: def compute_lower_half_negative_log_mean(x): return -tf.math.log( data_utils.compute_lower_percentile_means(x, axis=[-2, -1], q=50)) return compute_lower_half_negative_log_mean if pairwise_reduction == common.DISTANCE_REDUCTION_ONE_MINUS_MEAN: return lambda x: 1.0 - tf.math.reduce_mean(x, axis=[-2, -1]) return pairwise_reduction def get_componentwise_distance_reduction_fn(): """Selects component-wise distance reduction function.""" if componentwise_reduction == common.DISTANCE_REDUCTION_MEAN: return functools.partial(tf.math.reduce_mean, axis=[-1]) return componentwise_reduction def sample_distance_fn(lhs, rhs):
tensorflow.math.reduce_mean
12,189
import tensorflow as tf

tf.app.flags.DEFINE_boolean('use_processed_data', False,
                            'whether to use processed data')
tf.app.flags.DEFINE_string('processed_data', './processed_dataset/',
                           'where to save preprocessed datasets')
tensorflow.app.flags.DEFINE_string
12,190
import tensorflow as tf bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2] org_ivec = rep_tensor.get_shape().as_list()[2] ivec = hn or org_ivec with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'): # non-linear rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation, False, wd, keep_prob, is_train) # ensure the seletion is right dep_selection = tf.logical_and(rep_mask, dep_selection) head_selection = tf.logical_and(rep_mask, head_selection) rep_dep_tensor, rep_dep_mask, dep_org_idx = reduce_data_rep_max_len(rep_map, dep_selection) rep_head_tensor,rep_head_mask, head_org_idx = reduce_data_rep_max_len(rep_map, head_selection) sl_dep, sl_head = tf.shape(rep_dep_tensor)[1], tf.shape(rep_head_tensor)[1] if keep_unselected: unhead_selection = tf.logical_and(rep_mask, tf.logical_not(head_selection)) rep_unhead_tensor, rep_unhead_mask, unhead_org_idx = reduce_data_rep_max_len(rep_map, unhead_selection) sl_unhead = tf.shape(rep_unhead_tensor)[1] attn_result = tf.cond( tf.equal(sl_head, 0), lambda: tf.zeros([bs, 0, hn], tf.float32), lambda: self_attention_for_selected_head( head_selection, head_org_idx, sl_head, rep_head_mask, dep_selection, dep_org_idx, sl_dep, rep_dep_mask, rep_map, rep_dep_tensor, keep_prob, is_train, direction, ivec
tensorflow.shape
12,191
import tensorflow as tf self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), [5, 6, 3]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.image_additional_channels] .shape.as_list(), [5, 6, 2]) def test_keypoints(self): input_tensor_dict = { fields.InputDataFields.groundtruth_keypoints: tf.placeholder(tf.float32, [None, 16, 4]), fields.InputDataFields.groundtruth_keypoint_visibilities: tf.placeholder(tf.bool, [None, 16]), } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6])
tensorflow.placeholder
12,192
import tensorflow as tf # In[2]: def prenet(inputs, num_units=None, is_training=True, scope="prenet"): if num_units is None: num_units = [embed_size, embed_size // 2] with tf.variable_scope(scope): outputs = tf.layers.dense(inputs, units=num_units[0], activation=tf.nn.relu, name="dense1") outputs = tf.layers.dropout( outputs, rate=dropout_rate, training=is_training, name="dropout1" ) outputs = tf.layers.dense(outputs, units=num_units[1], activation=tf.nn.relu, name="dense2") outputs = tf.layers.dropout( outputs, rate=dropout_rate, training=is_training, name="dropout2" ) return outputs def highwaynet(inputs, num_units=None, scope="highwaynet"): if not num_units: num_units = inputs.get_shape()[-1] with tf.variable_scope(scope): H = tf.layers.dense(inputs, units=num_units, activation=tf.nn.relu, name="dense1")
tensorflow.layers.dense
12,193
import tensorflow as tf

# self.final_layer = self.conv_layer(bottom=self.deconv_5, kernal_size=1, in_channels=64, out_channels=3, stride=1, name='final_layer')
self.final_layer = self.conv_bn_relu(bottom=self.deconv_5, name='final_layer', kernel_size=1,
                                     output_channels=3,
                                     initializer=tf.contrib.layers.variance_scaling_initializer(),
                                     bn=False, training=self.is_training, relu=False)
tensorflow.contrib.layers.variance_scaling_initializer
12,194
import tensorflow as tf the dual variable is scaled by this factor. Returns: dual_value: An op that computes the absolute value of the dual variable and reverses its gradient. dual_variable: The underlying variable itself. """ # We disable partitioning while constructing dual variables because they will # be updated with assign, which is not available for partitioned variables. partitioner = tf.get_variable_scope().partitioner try: tf.get_variable_scope().set_partitioner(None) dual_variable = tf.contrib.framework.model_variable( name=name, shape=shape, dtype=dtype, initializer=initializer,
tensorflow.get_variable_scope
12,195
import tensorflow as tf

tf.reset_default_graph()
tf_config = tf.ConfigProto(
    inter_op_parallelism_threads=1,
    intra_op_parallelism_threads=1)
session = tf.Session(config=tf_config)
print("AVAILABLE GPUS: ", get_available_gpus())
return session
tensorflow.Session
12,196
import tensorflow as tf bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u) c = f*c + i*u h = o*tf.tanh(_ln(c, gc, bc)) xs[idx] = h s = tf.concat(axis=1, values=[c, h])
tensorflow.nn.sigmoid
12,197
import tensorflow as tf x_aug = tf.convert_to_tensor(x_aug) return tf.concat([x, x_aug],axis=2) def gaussian_blur(self,x): #create random gaussian blur filter mean = 0 std = tf.random.uniform(shape=[],minval=5,maxval=10,dtype=tf.float32) # std [5-10] size = tf.random.uniform(shape=[],minval=3,maxval=7,dtype=tf.int32) # size [7-15] self.kernel = self.gaussian_kernel(size,mean,std) self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1]) self.paddings = tf.convert_to_tensor([[size,size],[size,size],[0,0]]) x_aug = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID') x_aug = tf.squeeze(x_aug) return tf.concat([x, x_aug],axis=2)
tensorflow.random.uniform
12,198
from tensorflow.python.ops import array_ops undefined statistics will return NaN for this statistic. name: The name to prepend to all ops created by this distribution. Raises: TypeError: if `alpha` and `beta` are different dtypes. """ parameters = locals() parameters.pop("self") with ops.name_scope(name, values=[alpha, beta]) as ns: with ops.control_dependencies([ check_ops.assert_positive(alpha), check_ops.assert_positive(beta), ] if validate_args else []): self._alpha = array_ops.identity(alpha, name="alpha") self._beta = array_ops.identity(beta, name="beta") super(InverseGamma, self).__init__( dtype=self._alpha.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, is_continuous=True, is_reparameterized=False, parameters=parameters, graph_parents=[self._alpha, self._beta], name=ns) @staticmethod
tensorflow.python.ops.array_ops.identity
12,199