Columns: seed (string, length 25 to 2.89k), seed_api (string, length 14 to 102), index (int64, 0 to 14.8k)
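For illustration only (not part of the dataset itself): a minimal sketch of how one row of this table could be represented in Python, assuming just the three columns described above. The loading mechanism is left out because the source does not specify one, and the example values are abbreviated from the first record listed below.

from dataclasses import dataclass

@dataclass
class Row:
    seed: str      # code snippet exercising a TensorFlow API (25 to 2.89k chars)
    seed_api: str  # fully qualified name of the API the snippet exercises
    index: int     # integer row id

# Example built (and abbreviated) from the first record below.
example = Row(
    seed='from tensorflow.python.framework import ops\n'
         'ops.RegisterShape("AvgPool")(common_shapes.avg_pool_shape)',
    seed_api="tensorflow.python.framework.ops.RegisterShape",
    index=3200,
)
print(example.seed_api, example.index)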
from tensorflow.python.framework import ops

ops.RegisterShape("DepthwiseConv2dNative")(
    common_shapes.depthwise_conv2d_native_shape)
ops.RegisterShape("AvgPool")(common_shapes.avg_pool_shape)
ops.RegisterShape("MaxPool")(common_shapes.max_pool_shape)
tensorflow.python.framework.ops.RegisterShape
3,200
import tensorflow as tf

    return compute_l2_sigmoid_matching_distances
  if distance_kernel == common.DISTANCE_KERNEL_EXPECTED_LIKELIHOOD:
    def compute_gaussian_likelihoods(lhs, rhs):
      """Computes sample likelihoods."""
      num_lhs_samples = lhs.shape.as_list()[-2] - 2
      num_rhs_samples = rhs.shape.as_list()[-2] - 2
      lhs_means, lhs_stddevs, lhs_samples = tf.split(
          lhs, [1, 1, num_lhs_samples], axis=-2)
      rhs_means, rhs_stddevs, rhs_samples = tf.split(
          rhs, [1, 1, num_rhs_samples], axis=-2)
      rhs_likelihoods = distance_utils.compute_gaussian_likelihoods(
          lhs_means, lhs_stddevs, rhs_samples,
          min_stddev=distance_kernel_kwargs.get(
              distance_kernel + '_min_stddev', None),
          max_squared_mahalanobis_distance=distance_kernel_kwargs.get(
              distance_kernel + '_max_squared_mahalanobis_distance', None),
tensorflow.split
3,201
import tensorflow as tf

    dict(
        testcase_name='fixed_len_int',
        make_tensors_fn=lambda:
        {'x': tf.compat.v1.placeholder(tf.int64, (None,))},
        feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)}),
    dict(
        testcase_name='fixed_len_string',
        make_tensors_fn=lambda:
tensorflow.io.FixedLenFeature
3,202
import tensorflow as tf

with tf.name_scope('prediction_incorrect'):
    x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
tensorflow.nn.in_top_k
3,203
import tensorflow as tf

queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
tensorflow.layers.dense
3,204
import tensorflow as tf

        features={
            'label':tf.FixedLenFeature([], tf.int64),
            'img_raw' : tf.FixedLenFeature([], tf.string),
        })
    image=tf.decode_raw(features['img_raw'],tf.uint8)
    label=tf.cast(features['label'],tf.int32)
    image=tf.reshape(image,[4096,1])
    return image,label

def get_batch(image,label,batch_size,crop_size):
    #print(image.shape)
    #print(label.shape)
    images,labels=tf.train.shuffle_batch([image,label],
        batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=200)
    return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])

def get_test_batch(image,label,batch_size):
    images,labels=tf.train.batch([image,label],batch_size=batch_size)
    return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])

def get_valid_batch(image,label,batch_size):
    images,labels=tf.train.batch([image,label],batch_size=batch_size)
    return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])

class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
            self.w1=tf.get_variable('w1', [4096,1024],initializer=tf.contrib.layers.xavier_initializer_conv2d())
tensorflow.reshape
3,205
import tensorflow as tf

            :, -1], method=0)
        gtboxes_in_img_r = self.drawer.draw_boxes_with_categories(
            img_batch=img,
            boxes=gtboxes_and_label_r[:, :-1],
            labels=gtboxes_and_label_r[:, -1],
            method=1, is_csl=True)
        tf.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h)
        tf.summary.image('Compare/gtboxes_r_gpu:%d' % i, gtboxes_in_img_r)
        if cfgs.ADD_BOX_IN_TENSORBOARD:
            detections_in_img = self.drawer.draw_boxes_with_categories_and_scores(
                img_batch=img,
                boxes=outputs[0],
                scores=outputs[1],
                labels=outputs[2],
                method=1, is_csl=True)
tensorflow.summary.image
3,206
from tensorflow.python.ops import variable_scope as vs

    bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
    with vs.variable_scope("gates"):  # Reset gate and update gate.
tensorflow.python.ops.variable_scope.variable_scope
3,207
import tensorflow as tf

        masks, scores, classes, min_score_thresh=0.65, min_iou_thresh=0.5,
        is_class_agnostic=True)
    nms_masks_expected1 = tf.stack([mask0, mask4])
    nms_scores_expected1 = tf.constant([1.0, 0.85], dtype=tf.float32)
    nms_classes_expected1 = tf.constant([1, 2], dtype=tf.int32)

    (nms_masks2, nms_scores2, nms_classes2,
tensorflow.stack
3,208
import tensorflow as tf

    # execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn]
    pooling_data = mask_for_high_rank(pooling_data, pooling_mask)  # [bs,sluh,sld,hn]
    pooling_data_sum = tf.reduce_sum(pooling_data, -2)  # [bs,sluh,hn]
    pooling_den = tf.reduce_sum(tf.cast(pooling_mask, tf.int32), -1, keep_dims=True)  # [bs,sluh]
    pooling_den = tf.where(tf.equal(pooling_den, 0), tf.ones_like(pooling_den), pooling_den)
    pooling_result = pooling_data_sum / tf.cast(pooling_den, tf.float32)
    return pooling_result


def scaled_tanh(x, scale=5.):
    return scale * tf.nn.tanh(1./scale * x)
tensorflow.cast
3,209
import tensorflow as tf

    '''
    Print all trainable and non-trainable variables
    '''
    if train_only:
        t_vars = tf.trainable_variables()
        print('[*] printing trainable variables')
    else:
        try:
tensorflow.trainable_variables
3,210
import tensorflow as tf

        perturb_norm_length: a `float`, Norm length of adversarial perturbation
            to be optimized with validatio
    Returns:
        adversial loss
    """
    grad, = tf.gradients(
        loss,
        embedded,
        aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
    grad = tf.stop_gradient(grad)
    perturb = _scale_l2(grad, perturb_norm_length)
    return loss_fn(embedded + perturb)
tensorflow.gradients
3,211
from tensorflow.python.ops import partitioned_variables

  dropout = params.get("dropout")
  gradient_clip_norm = params.get("gradient_clip_norm")
  num_ps_replicas = config.num_ps_replicas if config else 0
  embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})

  features = _get_feature_dict(features)
  parent_scope = "dnn"

  input_layer_partitioner = (partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas, min_slice_size=64 << 20))
  input_layer_scope = parent_scope + "/input_from_feature_columns"
  with variable_scope.variable_scope(
      input_layer_scope,
      values=list(six.itervalues(features)),
      partitioner=input_layer_partitioner) as scope:
    net = layers.input_from_feature_columns(
tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner
3,212
import tensorflow as tf

    self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
    dtypes, shapes = zip(*input_props)
    queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
    self.enqueue_op = queue.enqueue(self.queue_input_tensors)
    self.input_tensors = queue.dequeue()

    self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
    self.global_step = tf.Variable(0, name="global_step", trainable=False)
    self.reset_global_step = tf.assign(self.global_step, 0)
    learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
                                               self.config["decay_frequency"], self.config["decay_rate"],
                                               staircase=True)
    trainable_params = tf.trainable_variables()
    gradients = tf.gradients(self.loss, trainable_params)
    gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
    optimizers = {
        "adam" : tf.train.AdamOptimizer,
tensorflow.assign
3,213
import tensorflow as tf

    # Count number of non-padding words in each sentence
    sentence_lengths = tf.count_nonzero(
        masks,
tensorflow.count_nonzero
3,214
import tensorflow as tf

    w_ = tf.reshape(w, [ksize[0] * ksize[1] * ksize[2], -1])
    q = tf.matmul(p_, w_)
tensorflow.matmul
3,215
from tensorflow.python.framework import ops

    # avoid division by zero
    epsilon = 1e-7

    def compute_precision(name):
      precision = math_ops.div(true_positives,
                               epsilon + true_positives + false_positives,
                               name='precision_' + name)
      return precision

    precision = compute_precision('value')
    with ops.control_dependencies([true_positives_compute_op,
                                   false_positives_compute_op]):
      update_op = compute_precision('update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, precision)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
tensorflow.python.framework.ops.control_dependencies
3,216
import tensorflow as tf

    h = norm(n+m, 'ln_2')
    return h

def embed(X, we):  #X [-1,,2]
    we = convert_gradient_to_tensor(we)
    e = tf.gather(we, X)
    h = tf.reduce_sum(e, 2)
    return h

def clf(x, ny, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), train=False):
    with tf.variable_scope('clf'):
tensorflow.gather
3,217
import tensorflow as tf

                    help='the directory of MNIST dataset')
parser.add_argument('--lr', type=float, default=0.01,
                    help='learning rate')
parser.add_argument('--batch_size', type=int, default=32,
                    help='batch size')
parser.add_argument('--max_train_step', type=int, default=50000,
                    help='the maximum training step')
parser.add_argument('--model_path', type=str, default='',
                    help='the path of checkpoint file')
args = parser.parse_args()


def model():
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth')
    with tf.variable_scope('layer1'):
        w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer())
        b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0))
        h1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    with tf.variable_scope('layer2'):
        w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer())
        b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0))
        h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)
    with tf.variable_scope('layer3'):
        w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer())
        b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0))
        y = tf.matmul(h2, w3) + b3
tensorflow.random_normal_initializer
3,218
import tensorflow as tf

    elif params['optimizer'] == 'adam':
        optimizer = tf.train.AdamOptimizer(learning_rate)
    elif params['optimizer'] == 'adadelta':
        optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    elif params['optimizer'] == 'adagrad':
        optimizer = tf.train.AdagradOptimizer(learning_rate)
    elif params['optimizer'] == 'rmsprop':
        optimizer = tf.train.RMSPropOptimizer(
            learning_rate, momentum=params['momentum'])
    elif params['optimizer'] == 'lars':
tensorflow.train.AdagradOptimizer
3,219
import tensorflow as tf

    pred_y = pred_y * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (height / 2.)

    if config.PRED_DEBUG:
        pred_indices_ = tf.squeeze(pred_indices)
        image_ = tf.squeeze(image) * 255.
        pred_heatmap = tf.one_hot(pred_indices_, heatmap_size*heatmap_size, on_value=1., off_value=0., axis=-1, dtype=tf.float32)
        pred_heatmap = tf.reshape(pred_heatmap, [-1, heatmap_size, heatmap_size])
tensorflow.squeeze
3,220
import tensorflow as tf

      should be used for the indicator function.
    scope: Optional scope for `name_scope`.

  Returns:
    loss: A `Tensor` of the same shape as `logits` with the component-wise
      loss.
    other_outputs: An empty dictionary, for consistency.

  Raises:
    ValueError: If `surrogate_type` is not `xent` or `hinge`.
  """
  with tf.name_scope(scope, 'roc_auc', [labels, logits, weights]):
    # Convert inputs to tensors and standardize dtypes.
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)

    # Create tensors of pairwise differences for logits and labels, and
    # pairwise products of weights. These have shape
    # [batch_size, batch_size, num_labels].
    logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)
tensorflow.name_scope
3,221
import tensorflow as tf

                               num_shards, shuffled))
    self.assertAllEqual(
        problem.test_filepaths(data_dir, num_shards, shuffled),
        problem.data_filepaths(problem_module.DatasetSplit.TEST, data_dir,
                               num_shards, shuffled))


if __name__ == "__main__":
  tf.test.main()
tensorflow.test.main
3,222
import tensorflow as tf

    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:

        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      elif not do_serve:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tensorflow.train.init_from_checkpoint
3,223
import tensorflow as tf

      dec_inp_dict2 = {}
      dec_inp_dict2["0"] = [
          tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
      dec_inp_dict2["1"] = [
          tf.constant(0, tf.int32, shape=[2]) for _ in range(4)]
      with tf.variable_scope("other"):
        outputs_dict3, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
            enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
            embedding_size=2, feed_previous=tf.constant(True))
      sess.run([tf.global_variables_initializer()])
      tf.get_variable_scope().reuse_variables()
      outputs_dict1, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
          enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict,
          embedding_size=2, feed_previous=True)
      outputs_dict2, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
          enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
          embedding_size=2, feed_previous=True)
      res1 = sess.run(outputs_dict1["0"])
      res2 = sess.run(outputs_dict2["0"])
      res3 = sess.run(outputs_dict3["0"])
tensorflow.get_variable_scope
3,224
import tensorflow as tf

        return new_h, new_h

def prelu(_x, scope=''):
    """parametric ReLU activation"""
    with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
        _alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
                                 dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
        return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
tensorflow.variable_scope
3,225
import tensorflow as tf

        vf_old, vf_old_params, _, _ = self.build_cnet(batch['state'], 'oldvf')
        self.vf, vf_params, self.vf_state_init, self.vf_state_final = self.build_cnet(batch['state'], 'vf')
        self.vf_eval, _, self.vf_eval_state_init, self.vf_eval_state_final = self.build_cnet(self.state, 'vf', reuse=True, batch_size=1)

        self.sample_action = tf.squeeze(pi_eval.sample(1), axis=0)
        self.eval_action = pi_eval.mode()
        self.global_step = tf.train.get_or_create_global_step()
        self.saver = tf.train.Saver()

        # Loss functions and training
        epsilon_decay = tf.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=1)
        ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6)
        ratio = tf.clip_by_value(ratio, 0, 10)
        surr1 = batch['advantage'] * ratio
        surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay)
        loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2))
        loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf))
        loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy())
        loss = loss_pg + loss_vf + loss_entropy

        opt = tf.train.AdamOptimizer(self.LR)
tensorflow.train.polynomial_decay
3,226
import tensorflow as tf """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100)
tensorflow.data.TFRecordDataset
3,227
import tensorflow as tf # optimizer & gradients optimizer_base = tf.train.MomentumOptimizer(lrn_rate, FLAGS.momentum) if not FLAGS.enbl_multi_gpu: optimizer = optimizer_base else: optimizer = mgw.DistributedOptimizer(optimizer_base) grads_origin = optimizer.compute_gradients(loss, self.trainable_vars) grads_pruned = self.__calc_grads_pruned(grads_origin) # TF operations & model saver self.sess_train = sess with tf.control_dependencies(self.update_ops): self.train_op = optimizer.apply_gradients(grads_pruned, global_step=self.global_step) self.summary_op = tf.summary.merge_all() self.log_op = [lrn_rate, loss, pr_trainable, pr_maskable] + list(metrics.values()) self.log_op_names = ['lr', 'loss', 'pr_trn', 'pr_msk'] + list(metrics.keys()) self.init_op = tf.variables_initializer(self.vars) self.init_opt_op = tf.variables_initializer(optimizer_base.variables()) if FLAGS.enbl_multi_gpu: self.bcast_op = mgw.broadcast_global_variables(0) self.saver_train = tf.train.Saver(self.vars) def __build_eval(self):
tensorflow.control_dependencies
3,228
import tensorflow as tf

        ]

        if direction == 'forward':
            i_direction = 0
        else:
            i_direction = 1
        variable_scope_name = 'RNN_{0}/RNN/MultiRNNCell/Cell{1}'.format(
            i_direction, i)
        with tf.variable_scope(variable_scope_name):
            layer_output, final_state = tf.nn.dynamic_rnn(
                lstm_cell,
                layer_input,
                sequence_length=sequence_lengths,
                initial_state=tf.nn.rnn_cell.LSTMStateTuple(
                    *batch_init_states),
            )
tensorflow.variable_scope
3,229
from tensorflow.python.framework import constant_op

  return features, label


def _infer_ranking_train_input_fn():
  features = {
      "f1": constant_op.constant([[3.], [2], [1.]]),
      "f2": constant_op.constant([[0.1], [3.], [1.]])
  }
  return features, None


class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
tensorflow.python.framework.constant_op.constant
3,230
import tensorflow as tf

        An operation that will update var_matrix when run in a Session
    '''
    selected_rows = tf.nn.embedding_lookup(var_matrix, indices)
    row_norms = tf.sqrt(tf.reduce_sum(tf.square(selected_rows), 1))
tensorflow.nn.embedding_lookup
3,231
import tensorflow as tf

    predict = tf.placeholder(tf.float32,shape=[hps.batch_size, 10])
    logit_nor,tsne_logit_nor = model_carlini_adv.predict(image,tsne_logits=True)
    logit_adv,tsne_logit_adv = model_carlini_adv.predict(adv_image,tsne_logits=True)
    predict_nor = tf.nn.softmax(logit_nor)
    predict_adv = tf.nn.softmax(logit_adv)
tensorflow.nn.softmax
3,232
import tensorflow as tf

        'bar': tf.convert_to_tensor([0, 2, 0, 2], dtype=tf.int64),
    }
    # Annotate an arbitrary proto at the schema level (not sure what global
    # schema boundaries would mean, but hey I'm just a test).
    boundaries = tf.constant([[1.0]])
    message_type = annotations_pb2.BucketBoundaries.DESCRIPTOR.full_name
    sizes = tf.expand_dims([tf.size(boundaries)], axis=0)
    message_proto = tf.raw_ops.EncodeProto(
        sizes=sizes, values=[tf.cast(boundaries, tf.float32)],
tensorflow.constant
3,233
import tensorflow as tf

    y0_f = tf.to_float(y0)
    y1_f = tf.to_float(y1)
    z0_f = tf.to_float(z0)
    z1_f = tf.to_float(z1)
    # Check the out-of-boundary case.
    x0_valid = tf.to_float(
        tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))
    x1_valid = tf.to_float(
        tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0))
    y0_valid = tf.to_float(
        tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0))
    y1_valid = tf.to_float(
        tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))
    z0_valid = tf.to_float(
        tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))
    z1_valid = tf.to_float(
        tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0))

    w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) * (z1_f - z) *
                                 x1_valid * y1_valid * z1_valid), 1)
    w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) * (z1_f - z) *
                                 x0_valid * y1_valid * z1_valid), 1)
    w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) * (z1_f - z) *
                                 x1_valid * y0_valid * z1_valid), 1)
    w_z0_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) *
tensorflow.greater_equal
3,234
import tensorflow as tf

    # normalize
    res = (input_ - used_mean) / tf.sqrt(used_var + epsilon)
    # de-normalize
    if scale:
        res *= gamma
    res += beta

    # update variables
    if train:
        with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]):
            with ops.colocate_with(mean):
                new_mean = tf.assign_sub(
                    mean,
                    tf.check_numerics(decay * (mean - cur_mean),
                                      "NaN in moving mean."))
        with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
            with ops.colocate_with(var):
                new_var = tf.assign_sub(
                    var,
                    tf.check_numerics(decay * (var - cur_var),
                                      "NaN in moving variance."))
        with tf.name_scope(name, "IncrementTime", [step]):
            with ops.colocate_with(step):
                new_step = tf.assign_add(step, 1.)
        res += 0. * new_mean * new_var * new_step

    return res


# batch normalization taking into account the volume transformation
tensorflow.name_scope
3,235
import tensorflow as tf

        all_trainable_weights_vf = tf_util.get_trainable_vars('model/values_fn')
        regularization_penalty_vf = tf.contrib.layers.apply_regularization(regularizervf, all_trainable_weights_vf)
        if self.n_step:
            values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf + qf1_loss_n + qf2_loss_n
        else:
            values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf

        # Policy train op
        # (has to be separate from value train op, because min_qf_pi appears in policy_loss)
        policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
        policy_train_op = policy_optimizer.minimize(policy_loss, var_list=tf_util.get_trainable_vars('model/pi'))

        # Value train op
        value_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
        values_params = tf_util.get_trainable_vars('model/values_fn')

        source_params = tf_util.get_trainable_vars("model/values_fn/vf")
        target_params = tf_util.get_trainable_vars("target/values_fn/vf")

        # Polyak averaging for target variables
        self.target_update_op = [
            tf.assign(target, (1 - self.tau) * target + self.tau * source)
            for target, source in zip(target_params, source_params)
        ]
        # Initializing target to match source variables
        target_init_op = [
            tf.assign(target, source)
tensorflow.train.AdamOptimizer
3,236
import tensorflow as tf

        optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
                                               momentum=params['momentum'])

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss, global_step)
    else:
        train_op = None

    cls_accuracy = tf.metrics.accuracy(glabels, predictions['classes'])
    metrics = {'cls_accuracy': cls_accuracy}

    # Create a tensor named train_accuracy for logging purposes.
    tf.identity(cls_accuracy[1], name='cls_accuracy')
    tf.summary.scalar('cls_accuracy', cls_accuracy[1])

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=metrics,
        scaffold = tf.train.Scaffold(init_fn=train_helper.get_init_fn_for_scaffold(FLAGS)))

def parse_comma_list(args):
    return [float(s.strip()) for s in args.split(',')]

def main(_):
    # Using the Winograd non-fused algorithms provides a small performance boost.
tensorflow.summary.scalar
3,237
import tensorflow as tf

tf.app.flags.DEFINE_float(
    'gpu_memory_fraction', 1., 'GPU memory fraction to use.')

# scaffold related configuration
tf.app.flags.DEFINE_string(
    'data_dir', '../Datasets/tfrecords',#'/media/rs/0E06CD1706CD0127/Kapok/Chi/Datasets/tfrecords',
    'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
    'dataset_name', '{}_????', 'The pattern of the dataset name to load.')
tf.app.flags.DEFINE_string(
    'model_dir', './logs_sext_cpn/',
    'The parent directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
    'log_every_n_steps', 10,
    'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
    'save_summary_steps', 100,
tensorflow.app.flags.DEFINE_string
3,238
import tensorflow as tf

        if double_q:
            target_index = tf.math.argmax(q_func(obs_tp1_float, self.num_actions, 'q_func', reuse = tf.AUTO_REUSE), axis = 1, output_type = tf.int32)
            target_v_ph = tf.gather_nd(target_q_ph, tf.stack([tf.range(tf.shape(target_q_ph)[0]), target_index], axis=1))
        else:
            target_v_ph = tf.math.reduce_max(target_q_ph, axis = 1)
tensorflow.shape
3,239
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression

    true_w = [[1.0], [-0.5], [2.0]]
    true_b = [1.0]

    model = linear_regression.LinearModel()
    dataset = linear_regression.synthetic_dataset(
        true_w, true_b, noise_level=0., batch_size=64, num_batches=40)
tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.LinearModel
3,240
import tensorflow as tf

def pool(inp, name, kind, size, stride, padding='SAME'):
    assert kind in ['max', 'avg']

    strides = [1, stride, stride, 1]
    sizes = [1, size, size, 1]

    with tf.variable_scope(name):
        if kind == 'max':
            out = tf.nn.max_pool(inp, sizes, strides=strides, padding=padding, name=kind)
        else:
            out = tf.nn.avg_pool(inp, sizes, strides=strides, padding=padding, name=kind)

    return out
tensorflow.variable_scope
3,241
import tensorflow as tf

      # The qdist will be defined with no cutoffs,
      qdist = distributions.QuantizedDistribution(
          base_dist_cls=distributions.Uniform,
          lower_cutoff=None,
          upper_cutoff=None,
          a=tf.zeros(
              batch_shape, dtype=tf.float32),
          b=10 * tf.ones(
              batch_shape, dtype=tf.float32))

      # x is random integers in {-3,...,12}.
      x = self._rng.randint(-3, 13, size=batch_shape).astype(np.float32)

      # pmf
tensorflow.ones
3,242
import tensorflow as tf

            If `min_fake`, minimizing the generator loss is to minimize the
            probability of fake data being classified as fake.

    Returns:
        (scalar Tensor, scalar Tensor): (generator_loss, discriminator_loss).
    """
    real_logits = discriminator_fn(real_data)
    if isinstance(real_logits, (list, tuple)):
        real_logits = real_logits[0]
    real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=real_logits, labels=tf.ones_like(real_logits)))

    fake_logits = discriminator_fn(fake_data)
    if isinstance(fake_logits, (list, tuple)):
        fake_logits = fake_logits[0]
    fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logits, labels=tf.zeros_like(fake_logits)))

    d_loss = real_loss + fake_loss
tensorflow.ones_like
3,243
import tensorflow as tf

        double_obs_ph = double_policy.obs_ph

    with tf.variable_scope("loss", reuse=reuse):
        # set up placeholders
tensorflow.variable_scope
3,244
import tensorflow as tf

def gaussian_pdf(mean, loc_std, sample):
    Z = 1.0 / (loc_std * tf.sqrt(2.0 * np.pi))
    a = - tf.square(sample - mean) / (2.0 * tf.square(loc_std))
    return Z * tf.exp(a)

class ACNet:
    def __init__(self, scope, GRID_SIZE, a_size, trainer,TRAINING, GLOBAL_NET_SCOPE):
        with tf.variable_scope(str(scope)+'/qvalues'):
            #The input size may require more work to fit the interface.
            self.inputs = tf.placeholder(shape=[None,GRID_SIZE,GRID_SIZE, num_channels], dtype=tf.float32)  # input state
            # self.goal_pos = tf.placeholder(shape=[None,2],dtype=tf.float32)
            self.prev_loc = tf.placeholder(shape=[None,2], dtype=tf.float32)
            # self.policy, self.next_loc, self.value, self.state_out, self.state_in, self.state_init, self.valids, self.blocking, self.mypos, self.goalpos, self.next_loc_mean = self._build_net(self.inputs, self.inputs_primal, self.prev_loc, RNN_SIZE, TRAINING,a_size)
            '''
            CHANGES
            - removed target_blocking, blocking layers, blocking_loss
            - removed imitation gradients and losss
            - removed valid_loss
            - removed train_valid
            - commented out policy loss (since, discrete)
tensorflow.placeholder
3,245
import tensorflow as tf

        batch_size: batch_size of the training or validation ops
        eps: a float, prevents divide by zero
        name: Optional scope/name for op_scope.

    Returns:
        A tensor with the kappa loss.
    """
    with tf.name_scope(name):
        labels = tf.to_float(labels)
        repeat_op = tf.to_float(
            tf.tile(tf.reshape(tf.range(0, num_ratings), [num_ratings, 1]), [1, num_ratings]))
        repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
        weights = repeat_op_sq / tf.to_float((num_ratings - 1)**2)

        pred_ = predictions**y_pow
        try:
tensorflow.to_float
3,246
import tensorflow as tf

                                   is_reduction=is_reduction, is_train=is_train)
            X2 = self._add_drop_path(X2, drop_path_keep_prob)

            X = tf.add_n([X1, X2])
            blocks.append(X)
tensorflow.add_n
3,247
import tensorflow as tf

        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
        else:
            eps = 1e-8

        v = self.get_slot(var, "v")
        v_t = v.assign(beta2_t * v + (1. - beta2_t) * tf.square(grad))
        m = self.get_slot(var, "m")
        m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad)
        v_t_hat = tf.div(v_t, 1. - beta2_t)
        m_t_hat = tf.div(m_t, 1. - beta1_t)

        g_t = tf.div(m_t_hat, tf.sqrt(v_t_hat) + eps)
        g_t_1 = self.get_slot(var, "g")
        g_t = g_t_1.assign(g_t)

        var_update = state_ops.assign_sub(var, 2. * lr_t * g_t - lr_t * g_t_1)  # Adam would be lr_t * g_t
        return control_flow_ops.group(*[var_update, m_t, v_t, g_t])

    def _apply_sparse(self, grad, var):
        raise NotImplementedError("Sparse gradient updates are not supported.")


class RegularizeGradientDescentOptimizer(optimizer.Optimizer):
tensorflow.sqrt
3,248
import tensorflow as tf """ if out_dim is not None: with tf.variable_scope(name) : self.gamma= tf.get_variable('gamma',[1,1,1,out_dim], initializer=tf.constant_initializer(1.0)) self.beta = tf.get_variable('beta',[out_dim], initializer=tf.constant_initializer(0.0)) else: self.gamma = None self.beta = None
tensorflow.constant_initializer
3,249
import tensorflow as tf "var", [dim], tf.constant_initializer(1.), trainable=False) mean = variable_on_cpu( "mean", [dim], tf.constant_initializer(0.), trainable=False) step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False) if scale: gamma = variable_on_cpu("gamma", [dim], tf.constant_initializer(1.)) beta = variable_on_cpu("beta", [dim], tf.constant_initializer(0.)) # choose the appropriate moments if train: used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm") cur_mean, cur_var = used_mean, used_var if bn_lag > 0.:
tensorflow.constant_initializer
3,250
from tensorflow.python.framework import ops "xw_plus_b_v1" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. Dimensions typically: batch, out_units. """ with ops.op_scope([x, weights, biases], name, "xw_plus_b_v1") as name: x = ops.convert_to_tensor(x, name="x") weights = ops.convert_to_tensor(weights, name="weights") biases = ops.convert_to_tensor(biases, name="biases") mm = math_ops.matmul(x, weights) return bias_add_v1(mm, biases, name=name) # pylint: disable=invalid-name def dropout(x, keep_prob, noise_shape=None, seed=None, name=None): """Computes dropout.
tensorflow.python.framework.ops.convert_to_tensor
3,251
from tensorflow.python.framework import ops

      self._BenchmarkOp(training_op, "cudnn_lstm %s %s" %
                        (config_name, self._GetConfigDesc(config)))

  def benchmarkTfRNNLSTMTraining(self):
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]

      with ops.Graph().as_default(), ops.device("/device:GPU:0"):
        inputs = seq_length * [
            array_ops.zeros([batch_size, num_units], dtypes.float32)
        ]
        initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)

        cell = rnn_cell.LSTMCell(
            num_units=num_units, initializer=initializer, state_is_tuple=True)
        multi_cell = rnn_cell.MultiRNNCell(
            [cell() for _ in range(num_layers)])
        outputs, final_state = core_rnn.static_rnn(
tensorflow.python.framework.ops.device
3,252
import tensorflow as tf
from sklearn.metrics import classification_report

slim = tf.contrib.slim

global first
first = True
classnum=12
testnum = tf.placeholder(tf.int32)
trainnum = tf.placeholder(tf.int32)
validnum = tf.placeholder(tf.int32)
learnrate = tf.placeholder(tf.float32)

def getinputs(path):
    filename_queue=tf.train.string_input_producer([path])
    reader=tf.TFRecordReader()
tensorflow.placeholder
3,253
import tensorflow as tf

        # Embedding variables
        entity_var_shape = [entity_cnt, self.embedding_size]
        rel_var_shape = [rel_cnt, self.embedding_size]
        entity_init = tf.truncated_normal(entity_var_shape, stddev=init_sd)
        rel_init = tf.truncated_normal(rel_var_shape, stddev=init_sd)
        # Ensure maxnorm constraints are initially satisfied
        entity_init = dense_maxnorm(entity_init, self.maxnorm)
        self.entity_embedding_vars = tf.Variable(entity_init)
        self.rel_embedding_vars = tf.Variable(rel_init)
        # Embedding layer for each (head, rel, tail) triple being fed in as input
        head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
        tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
        rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
        # Relationship vector acts as a translation in entity embedding space
        diff_vec = tail_embed - (head_embed + rel_embed)
        # negative dist so higher scores are better (important for pairwise loss)
        if self.dist == 'manhattan':
            raw_output = -tf.reduce_sum(tf.abs(diff_vec), 1)
        elif self.dist == 'euclidean':
            # +eps because gradients can misbehave for small values in sqrt
            raw_output = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + self.EPS)
        elif self.dist == 'sqeuclidean':
tensorflow.nn.embedding_lookup
3,254
import tensorflow as tf

      # shape is statically known.
      self.assertEqual(2, a.shape[0].value)
      assertions_triggered[0] += 1
      return a

    f0(tf.constant([1]))
    f1(tf.constant([1]))
    f2(tf.constant([1]))

    self.assertEqual(3, assertions_triggered[0])

  def test_out_of_order_execution1(self):
    with self.test_session() as session:
      batcher = dynamic_batching._Batcher(minimum_batch_size=1,
                                          maximum_batch_size=1,
tensorflow.constant
3,255
import tensorflow as tf

            conv1 = conv(inp, inSize, o1s, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
            net.append(conv1)

    with tf.variable_scope('branch2_3x3'):
        if o2s1>0:
            conv3a = conv(inp, inSize, o2s1, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
            conv3 = conv(conv3a, o2s1, o2s2, 3, 3, ks, ks, 'SAME', 'conv3x3', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
            net.append(conv3)

    with tf.variable_scope('branch3_5x5'):
        if o3s1>0:
            conv5a = conv(inp, inSize, o3s1, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
            conv5 = conv(conv5a, o3s1, o3s2, 5, 5, ks, ks, 'SAME', 'conv5x5', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
            net.append(conv5)

    with tf.variable_scope('branch4_pool'):
        if poolType=='MAX':
            pool = mpool(inp, o4s1, o4s1, o4s3, o4s3, 'SAME', 'pool')
tensorflow.variable_scope
3,256
import tensorflow as tf

                processed_l1_h2 = tf.nn.bias_add(
                    processed_l1_h2,
                    getattr(self, 'ff_bias_%s' % idx))
                processed_l1_h2 = self.ff_nl(processed_l1_h2)
            if self.batch_norm:
                with tf.variable_scope(
                        'l1_h2_bn_ff_%s' % idx,
                        reuse=self.scope_reuse) as scope:
                    processed_l1_h2 = tf.contrib.layers.batch_norm(
                        inputs=processed_l1_h2,
                        scale=True,
                        center=True,
                        fused=True,
                        renorm=False,
                        param_initializers=self.param_initializer,
                        updates_collections=None,
tensorflow.contrib.layers.batch_norm
3,257
import tensorflow as tf

      variables that exist in given checkpoint.

  Returns:
    List of all variables that need to be saved/restored.
  """
  model_vars = tf.trainable_variables()

  # Add batchnorm variables.
  bn_vars = [v for v in tf.global_variables()
             if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or
             'mu' in v.op.name or 'sigma' in v.op.name or
             'global_scale_var' in v.op.name]
  model_vars.extend(bn_vars)
  model_vars = sorted(model_vars, key=lambda x: x.op.name)
  mapping = {}
  if ckpt is not None:
    ckpt_var = tf.contrib.framework.list_variables(ckpt)
    ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
    ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]
    not_loaded = list(ckpt_var_names)
    for v in model_vars:
      if v.op.name not in ckpt_var_names:
        # For backward compatibility, try additional matching.
        v_additional_name = v.op.name.replace('egomotion_prediction/', '')
        if v_additional_name in ckpt_var_names:
          # Check if shapes match.
          ind = ckpt_var_names.index(v_additional_name)
          if ckpt_var_shapes[ind] == v.get_shape():
            mapping[v_additional_name] = v
            not_loaded.remove(v_additional_name)
            continue
tensorflow.contrib.framework.list_variables
3,258
import tensorflow as tf

        else:
            self.c_maxlen, self.q_maxlen = config.para_limit, config.ques_limit

        self.ch_len = tf.reshape(tf.reduce_sum(
            tf.cast(tf.cast(self.ch, tf.bool), tf.int32), axis=2), [-1])
        self.qh_len = tf.reshape(tf.reduce_sum(
            tf.cast(tf.cast(self.qh, tf.bool), tf.int32), axis=2), [-1])

        self.forward()
        total_params()

        if trainable:
            self.lr = tf.minimum(config.learning_rate, 0.001 / tf.log(999.) * tf.log(tf.cast(self.global_step, tf.float32) + 1))
            self.opt = tf.train.AdamOptimizer(learning_rate = self.lr, beta1 = 0.8, beta2 = 0.999, epsilon = 1e-7)
            grads = self.opt.compute_gradients(self.loss)
            gradients, variables = zip(*grads)
            capped_grads, _ = tf.clip_by_global_norm(
                gradients, config.grad_clip)
            self.train_op = self.opt.apply_gradients(
                zip(capped_grads, variables), global_step=self.global_step)

    def forward(self):
        config = self.config
        N, PL, QL, CL, d, dc, nh = config.batch_size if not self.demo else config.batch_size, self.c_maxlen, self.q_maxlen, config.char_limit, config.hidden, config.char_dim, config.num_heads
tensorflow.cast
3,259
import tensorflow as tf

    tf.logging.info("**** Trainable Variables ****")
tensorflow.logging.info
3,260
import tensorflow as tf """Adds the difference loss between the private and shared representations. Args: private_samples: a tensor of shape [num_samples, num_features]. shared_samples: a tensor of shape [num_samples, num_features]. weight: the weight of the incoherence loss. name: the name of the tf summary. """ with tf.name_scope(name): private_samples -= tf.reduce_mean(private_samples, 0) shared_samples -= tf.reduce_mean(shared_samples, 0) private_samples = tf.nn.l2_normalize(private_samples, 1) shared_samples = tf.nn.l2_normalize(shared_samples, 1) correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)
tensorflow.name_scope
3,261
import tensorflow as tf

          moving_averages.assign_moving_average(
              self.ema_count,
              tf.reduce_sum(
                  tf.reshape(
                      x_means_hot,
                      shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]),
                  axis=0),
              self.hparams.decay,
              zero_debias=False)

      dw = tf.matmul(
          tf.transpose(x_means_hot, perm=[1, 2, 0]),
          tf.transpose(x_reshaped, perm=[1, 0, 2]))

      updated_ema_means = \
          moving_averages.assign_moving_average(
              self.ema_means, dw, self.hparams.decay, zero_debias=False)
      n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)
      updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / (
          n + 2**self.hparams.z_size * self.hparams.epsilon) * n)
      updated_ema_means = updated_ema_means / tf.expand_dims(
          updated_ema_count, axis=-1)
tensorflow.transpose
3,262
import tensorflow as tf

    return weighted_average, weights


def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None,
                    context=None, **kwargs):
    batch_size = tf.shape(state)[0]
    attn_length = tf.shape(hidden_states)[1]

    if context is not None and encoder.use_context:
        state = tf.concat([state, context], axis=1)
tensorflow.shape
3,263
import tensorflow as tf

    return tf.train.Example(
        features=tf.train.Features(feature=feature_dict)).SerializeToString()

  def _deserialize(self, serialized_data, batch_size):
    """Convert serialized TFRecords into tensors.

    Args:
      serialized_data: A tensor containing serialized records.
      batch_size: The data arrives pre-batched, so batch size is needed to
        deserialize the data.
    """
    feature_map = _TRAIN_FEATURE_MAP if self._is_training else _EVAL_FEATURE_MAP
    features = tf.parse_single_example(serialized_data, feature_map)

    users = tf.reshape(tf.decode_raw(
        features[movielens.USER_COLUMN], rconst.USER_DTYPE), (batch_size,))
    items = tf.reshape(tf.decode_raw(
        features[movielens.ITEM_COLUMN], rconst.ITEM_DTYPE), (batch_size,))

    def decode_binary(data_bytes):
      # tf.decode_raw does not support bool as a decode type. As a result it is
      # necessary to decode to int8 (7 of the bits will be ignored) and then
      # cast to bool.
      return tf.reshape(tf.cast(tf.decode_raw(data_bytes, tf.int8), tf.bool),
                        (batch_size,))

    if self._is_training:
      mask_start_index = tf.decode_raw(
tensorflow.decode_raw
3,264
import tensorflow as tf

    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False

        """U-Net Generator"""
        def lrelu(x, alpha,name='lrelu'):
            with tf.variable_scope(name):
                return tf.nn.relu(x) - alpha * tf.nn.relu(-x)

        def instance_norm(x,name='instance_norm'):
            with tf.variable_scope(name):
                if reuse:
                    tf.get_variable_scope().reuse_variables()
                else:
                    assert tf.get_variable_scope().reuse is False
tensorflow.nn.relu
3,265
import tensorflow as tf

        idx = tf.range(0, self.batch_size, 1)
        idx = tf.random_shuffle(idx)[0:num_to_add]
        self.fake_to_add = tf.gather(self.generator_out, idx)
        self.mixed_pc = tf.concat([self.real_pc_rotated, self.fake_to_add], 0)
        self.mixed_label = tf.concat([self.rot_label_pl, tf.constant(self.num_angles, shape = (num_to_add,))], axis = 0)
        mixed_idx = tf.range(0, self.mixed_label.get_shape().as_list()[0], 1)
tensorflow.concat
3,266
import tensorflow as tf

            projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train,
                                        name='linear_projection_2_bn')
            activation = tf.nn.relu(projection)
            activation = dropout(activation, self.dropout_keep_prob)

            projection = linear(
                input=activation,
                input_size=dialogue_state_size,
                output_size=action_templates_vocabulary_length,
                name='linear_projection_3_predictions_action'
            )
            self.predictions_action = tf.nn.softmax(projection, name="softmax_output_prediction_action")

        # argument prediction
        # first encode decoded action template and teh true action template
        choice = tf.floor(tf.random_uniform([1], self.use_inputs_prob, 1 + self.use_inputs_prob, tf.float32))
        prediction_action_argmax = tf.stop_gradient(tf.argmax(self.predictions_action, 1))
        predicted_action_templates_embedding = embedding(
            input=prediction_action_argmax,
            length=action_templates_vocabulary_length,
tensorflow.nn.softmax
3,267
import tensorflow as tf

      for source, target in clean_pairs:
        if source and target:
          lang1_resfile.write(source)
          lang1_resfile.write("\n")
          lang2_resfile.write(target)
          lang2_resfile.write("\n")
    else:
      lang1_filename, lang2_filename = dataset[1]
      lang1_filepath = os.path.join(tmp_dir, lang1_filename)
      lang2_filepath = os.path.join(tmp_dir, lang2_filename)
      is_sgm = (
          lang1_filename.endswith("sgm") and lang2_filename.endswith("sgm"))

      if not (tf.gfile.Exists(lang1_filepath) and
              tf.gfile.Exists(lang2_filepath)):
        # For .tar.gz and .tgz files, we read compressed.
        mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
        with tarfile.open(compressed_filepath, mode) as corpus_tar:
          corpus_tar.extractall(tmp_dir)
      if lang1_filepath.endswith(".gz"):
        new_filepath = lang1_filepath.strip(".gz")
        generator_utils.gunzip_file(lang1_filepath, new_filepath)
        lang1_filepath = new_filepath
      if lang2_filepath.endswith(".gz"):
        new_filepath = lang2_filepath.strip(".gz")
        generator_utils.gunzip_file(lang2_filepath, new_filepath)
        lang2_filepath = new_filepath

      for example in text_problems.text2text_txt_iterator(
tensorflow.gfile.Exists
3,268
import tensorflow as tf

        )
        training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
            decoder=training_decoder,
            impute_finished=True,
            maximum_iterations=tf.reduce_max(seq_lens),
        )
        self.Y_hat = training_decoder_output.rnn_output

        out_decoder2 = tf.reshape(self.Y_hat, [tf.shape(self.Y_hat)[0], -1, n_mels])
        dec = conv1d_banks(out_decoder2, K=decoder_num_banks, is_training=self.training)
        dec = tf.layers.max_pooling1d(dec, pool_size=2, strides=1, padding="same")
        dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-1", padding="SAME")
        dec = tf.nn.relu(tf.layers.batch_normalization(dec, training=self.training))
        dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-2", padding="SAME")
        dec = tf.layers.batch_normalization(dec, training=self.training)
        dec = tf.layers.dense(dec, embed_size // 2)
        for i in range(4):
            dec = highwaynet(
                dec, num_units=embed_size // 2, scope="decoder-highwaynet-{}".format(i)
            )
        with tf.variable_scope("decoder-gru", reuse=False):
            cell = tf.contrib.rnn.GRUCell(embed_size // 2)
            cell_bw = tf.contrib.rnn.GRUCell(embed_size // 2)
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)
            outputs = tf.concat(outputs, 2)
        self.Z_hat = tf.layers.dense(outputs, 1 + fourier_window_size // 2)
tensorflow.layers.conv1d
3,269
import tensorflow as tf

                        normalizer_fn=slim.batch_norm):
      mobilenet_v1.mobilenet_v1_base(inputs)
      total_params, _ = slim.model_analyzer.analyze_vars(
          slim.get_model_variables())
      self.assertAlmostEqual(3217920, total_params)

  def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000

    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)

    endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')]

    _, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(
        inputs, num_classes, scope='depth_multiplied_net',
        depth_multiplier=0.5)

    for key in endpoint_keys:
      original_depth = end_points[key].get_shape().as_list()[3]
tensorflow.random_uniform
3,270
import tensorflow as tf

        self.saver = tf.train.Saver(tf.global_variables())

    def separate_gradient_update(self):
        denoise_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "denoising_model")
        ranking_model_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "ranking_model")
        self.weighs_propen=denoise_params
tensorflow.get_collection
3,271
import tensorflow as tf

    q_sqrt_r = tf.matrix_band_part(q_sqrt, -1, 0)  # D x M x M

    eKuf = tf.transpose(expectation(pXnew, (kern, feat)))  # M x N (psi1)
    if Luu is None:
        Kuu = feat.Kuu(kern, jitter=settings.numerics.jitter_level)  # M x M
        Luu = tf.cholesky(Kuu)  # M x M

    if not white:
        q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True)
        Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1])  # remove line once issue 216 is fixed
        q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True)

    Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True)  # M x N
    fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True)

    eKff = expectation(pXnew, kern)  # N (psi0)
    eKuffu = expectation(pXnew, (kern, feat), (kern, feat))  # N x M x M (psi2)
    Luu_tiled = tf.tile(Luu[None, :, :], [num_data, 1, 1])  # remove this line, once issue 216 is fixed
    Li_eKuffu = tf.matrix_triangular_solve(Luu_tiled, eKuffu, lower=True)
tensorflow.tile
3,272
import tensorflow as tf

  def test_images_and_additional_channels_errors(self):
    input_tensor_dict = {
        fields.InputDataFields.image:
            tf.placeholder(tf.float32, [None, None, 3]),
        fields.InputDataFields.image_additional_channels:
            tf.placeholder(tf.float32, [None, None, 2]),
        fields.InputDataFields.original_image:
            tf.placeholder(tf.float32, [None, None, 3]),
    }
    with self.assertRaises(ValueError):
tensorflow.placeholder
3,273
from tensorflow.contrib import layers as contrib_layers

  Returns:
    A version of `input_tensor` with dropout applied.
  """
  if dropout_prob is None or dropout_prob == 0.0:
    return input_tensor

  output = tf.nn.dropout(input_tensor, rate=dropout_prob)
  return output


def layer_norm(input_tensor, name=None):
  """Run layer normalization on the last dimension of the tensor."""
  return contrib_layers.layer_norm(
      inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)


def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
  """Runs layer normalization followed by dropout."""
  output_tensor = layer_norm(input_tensor, name)
  output_tensor = dropout(output_tensor, dropout_prob)
  return output_tensor


def create_initializer(initializer_range=0.02):
tensorflow.contrib.layers.layer_norm
3,274
import tensorflow as tf

if __name__ == '__main__':
  tf.test.main()
tensorflow.test.main
3,275
import tensorflow as tf

                noise of shape
                [cur_batch_size, image_size, image_size, 3].
            real_images: tensor, real images from input of shape
                [cur_batch_size, image_size, image_size, 3].
            alpha_var: variable, alpha for weighted sum of fade-in of layers.
            params: dict, user passed parameters.

        Returns:
            Discriminator's gradient penalty loss of shape [].
        """
        func_name = "get_gradient_penalty_loss"

        with tf.name_scope(name="{}/gradient_penalty".format(self.name)):
            # Get a random uniform number rank 4 tensor.
            random_uniform_num = tf.random.uniform(
                shape=[cur_batch_size, 1, 1, 1],
                minval=0., maxval=1.,
                dtype=tf.float32,
                name="random_uniform_num"
            )
            print_obj(
                "\n" + func_name, "random_uniform_num", random_uniform_num
            )

            # Find the element-wise difference between images.
            image_difference = fake_images - real_images
            print_obj(func_name, "image_difference", image_difference)
tensorflow.random.uniform
3,276
import tensorflow as tf

        self.processed_next_obs_ph = self.target_policy.processed_obs
        self.action_target = self.target_policy.action_ph
        self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')
        self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
        self.is_demo_ph = tf.placeholder(tf.float32, shape=(None, 1), name='is_demonstrations')
        self.weight_ph = tf.placeholder(tf.float32, shape=(None, 1), name='importance_weight')
        self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape,
                                         name='actions')
        self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")

        if self.n_step:
            self.next_observations_ph_n = self.target_policy.obs_ph
            self.processed_next_obs_ph_n = self.target_policy.processed_obs
            self.rewards_ph_n = tf.placeholder(tf.float32, shape=(None, 1), name='n_step_rewards')
            self.terminals_ph_n = tf.placeholder(tf.float32, shape=(None, 1), name='n_step_terminals')
tensorflow.placeholder
3,277
import tensorflow as tf

        in0 = tf.strings.to_number(in0, tf.int32)
        in1 = tf.strings.to_number(in1, tf.int32)

        add = tf.add(in0, in1, "ADD")
        sub = tf.subtract(in0, in1, "SUB")

        # Cast or convert result to the output dtype.
        if tf_output0_dtype == tf.string:
            cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
        else:
            cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
        if tf_output1_dtype == tf.string:
            cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
        else:
            cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")

        out0 = tf.identity(cast0, "TENSOR_OUTPUT0")
        out1 = tf.identity(cast1, "TENSOR_OUTPUT1")

    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "savedmodel_nobatch" if max_batch == 0 else "savedmodel", input_dtype,
        output0_dtype, output1_dtype)
    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir
tensorflow.cast
3,278
import tensorflow as tf

x_data = tf.placeholder(tf.float32)
m = tf.constant(3.)

# Multiplication
prod = tf.mul(x_data, m)
for x_val in x_vals:
    print(sess.run(prod, feed_dict={x_data: x_val}))
tensorflow.mul
3,279
import tensorflow as tf

        vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
                                                value_dtype=tf.int64,
                                                default_value=-1)

        vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
        vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))

        x_t = tf.gather(x, l)
        x_t_len = tf.strings.length(x_t)
        x_t = tf.string_split([x_t], delimiter='').values

        z_t = tf.gather(y, m)
        z_t_len = tf.strings.length(z_t)
        z_t = tf.string_split([z_t], delimiter='').values

        for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'):
            u = tf.string_join(x_t[i:i + self._p], '')
            vx_keys, r = tf.cond(
                tf.greater(vx.lookup(u), -1),
                true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)),
                false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0),
                                  tf.constant(1, dtype=tf.int64, name='constant'))
tensorflow.gather
3,280
import tensorflow as tf

def gradsafe_sqrt(x, clip_low=1e-18, name=None):
    with tf.name_scope(name, "gradsafe_sqrt"):
        return tf.sqrt(tf.clip_by_value(x, clip_low, x))
tensorflow.name_scope
3,281
import tensorflow as tf

    weights = tf.to_float(mask) / lengths
    weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
    return weighted_average, weights


def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs):
    weights = tf.one_hot(encoder_input_length - 1, tf.shape(hidden_states)[1])
    weights = tf.to_float(weights)

    weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
    return weighted_average, weights
tensorflow.shape
3,282
import tensorflow as tf

    Returns:
        loss: Loss tensor of type float.
    """
    with tf.name_scope('segment_loss'):
        # logits = tf.reshape(logits, (-1, num_classes))
        epsilon = tf.constant(value=1e-7)
tensorflow.name_scope
3,283
import tensorflow as tf

  # rotates around z, while we rotate around y so need to swap
  center_1 = tf.reshape(box1[0:3][[0, 2, 1]], [1, 3])
  center_2 = tf.reshape(box2[0:3][[0, 2, 1]], [1, 3])

  rotation_z_1 = tf.reshape(box1[-1], [1])
  rotation_z_2 = tf.reshape(box2[-1], [1])

  length_1 = tf.reshape(box1[3 + 0], [1])
tensorflow.reshape
3,284
import tensorflow as tf

    uniques, index = tf.unique(t_flatten)
    return uniques, tf.reshape(index, shape=tf.shape(t))
tensorflow.shape
3,285
import tensorflow as tf

        # Extract 8 most features as mentioned in paper
        self.k_pooled = tf.nn.top_k(tf.transpose(self.layers[-1], [0,2,1]), k=8, name='k_pool', sorted=False)[0]
        print("8-maxpooling:", self.k_pooled.get_shape())
        self.flatten = tf.reshape(self.k_pooled, (-1, 512*8))

        # fc1
        with tf.variable_scope('fc1'):
            w = tf.get_variable('w', [self.flatten.get_shape()[1], 2048], initializer=he_normal,
                                regularizer=regularizer)
            b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
            out = tf.matmul(self.flatten, w) + b
            self.fc1 = tf.nn.relu(out)

        # fc2
        with tf.variable_scope('fc2'):
            w = tf.get_variable('w', [self.fc1.get_shape()[1], 2048], initializer=he_normal,
                                regularizer=regularizer)
            b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
            out = tf.matmul(self.fc1, w) + b
tensorflow.constant_initializer
3,286
import tensorflow.contrib.eager as tfe

  def tearDown(self):
    shutil.rmtree(self._tmp_logdir)
    super(LinearRegressionTest, self).tearDown()

  def testSyntheticDataset(self):
    true_w = tf.random_uniform([3, 1])
    true_b = [1.0]
    batch_size = 10
    num_batches = 2
    noise_level = 0.
    dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,
                                                  batch_size, num_batches)

    it = tfe.Iterator(dataset)
    for _ in range(2):
      (xs, ys) = it.next()
      self.assertEqual((batch_size, 3), xs.shape)
      self.assertEqual((batch_size, 1), ys.shape)
      self.assertEqual(tf.float32, xs.dtype)
      self.assertEqual(tf.float32, ys.dtype)
    with self.assertRaises(StopIteration):
      it.next()

  def testLinearRegression(self):
    true_w = [[1.0], [-0.5], [2.0]]
    true_b = [1.0]
tensorflow.contrib.eager.Iterator
3,287
import tensorflow as tf

                task.sample(adv=True)  #sample adversarially
                logger.info('Task Sampled: %s', task.goal_velocity)
                generated_adversarial_task.append(task.goal_velocity)
                logger.info('Tasks dump!')
            assert (task_generator == 'fixed')
            test_summary['task'].append(task.goal_velocity)

        if FLAGS.task.reset_policy:
            # NOTE: reset policy and valuefunc
            logger.info("Resetting Policy")
            pol_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
            tf.get_default_session().run(tf.variables_initializer(policy.parameters()))
            pol_params_after = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
            print ("pol_params:", np.linalg.norm(pol_params), "pol_params_after_reset:", np.linalg.norm(pol_params_after))

            logger.info("Resetting Valuefunc")
            tf.get_default_session().run(tf.variables_initializer(vfn.parameters()))

            tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters()))
            tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters()))

            for p in warmup_policy.parameters():
                p.invalidate()
tensorflow.get_default_session
3,288
import tensorflow as tf

# Placeholders for input data and the targets
x_input = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Input')
x_input_l = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Labeled_Input')
y_input = tf.placeholder(dtype=tf.float32, shape=[batch_size, n_labels], name='Labels')
x_target = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Target')
real_distribution = tf.placeholder(dtype=tf.float32, shape=[batch_size, z_dim], name='Real_distribution')
categorial_distribution = tf.placeholder(dtype=tf.float32, shape=[batch_size, n_labels],
                                         name='Categorical_distribution')
manual_decoder_input = tf.placeholder(dtype=tf.float32, shape=[1, z_dim + n_labels], name='Decoder_input')
tensorflow.placeholder
3,289
from tensorflow.python.ops import array_ops

  if proba:
    raise ValueError(
        "logits to probabilities is not supported for _BinarySvmTargetColumn")

  logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
  return math_ops.argmax(logits, 1)


# TODO(zakaria): use contrib losses.
def _mean_squared_loss(logits, target):
  # To prevent broadcasting inside "-".
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])

  logits.get_shape().assert_is_compatible_with(target.get_shape())
  return math_ops.square(logits - math_ops.to_float(target))


def _log_loss_with_two_classes(logits, target):
  # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])
  loss_vec = nn.sigmoid_cross_entropy_with_logits(
      labels=math_ops.to_float(target), logits=logits)
tensorflow.python.ops.array_ops.expand_dims
3,290
import tensorflow as tf

                self.nnweights.append(weights)
                biases = tf.get_variable('biases', [hidden_layers_node[i]],
                                         initializer=tf.constant_initializer(0.0))

                layer_out = tf.nn.dropout(tf.matmul(prev_x, weights) + biases, dropout_keep_prob)
tensorflow.constant_initializer
3,291
import tensorflow as tf

        self.obs = tf.placeholder(tf.float32, [None, self.obs_space])
        # ! self.obs = tf.placeholder(tf.float32, [None] + list(self.obs_space))
        # ! self.obs_space = env.observation_space.shape
        self.r = tf.placeholder(tf.float32, (None,1))
        self.ac = tf.placeholder(tf.float32, (None, self.act_space))
        self.adv = tf.placeholder(tf.float32, [None])  # unused

        # specific to FeUdal
        self.prev_g = tf.placeholder(tf.float32, (None, None, self.g_dim))
        self.ri = tf.placeholder(tf.float32, (None,))
        self.s_diff = tf.placeholder(tf.float32, (None, self.g_dim))

    def build_perception(self):
        self._obs = tf.expand_dims(self.obs, -1)  # !
        self._obs = tf.expand_dims(self._obs, -1)  # !
        conv1 = tf.layers.conv2d(inputs=self._obs,
                                 filters=16,
                                 kernel_size=[2, 1],  # ! kernel_size = [8,8]
tensorflow.placeholder
3,292
import tensorflow as tf

    if output_length == 1:
        pool = tf.nn.avg_pool(input_data, [1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
        pool = tf.reduce_mean(pool, axis=[1, 2])
        pool = tf.squeeze(pool, axis=[1, 2])
        return pool
    else:
        if num_channels_in != output_length:
            conv_weight = tf.Variable(tf.truncated_normal([1, 1, num_channels_in, output_length], stddev=0.1, dtype=tf.float32))
            conv = tf.nn.conv2d(input_data, conv_weight, strides=[1, 1, 1, 1], padding='SAME')
            pool = tf.nn.avg_pool(conv, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
        else:
            pool = tf.nn.avg_pool(input_data, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
        pool = tf.squeeze(pool, axis=[1, 2])
        return pool


def avg_pool(input, scope, filter_dims, stride_dims, padding='SAME'):
    assert (len(filter_dims) == 2)  # filter height and width
    assert (len(stride_dims) == 2)  # stride height and width

    filter_h, filter_w = filter_dims
    stride_h, stride_w = stride_dims

    with tf.variable_scope(scope):
tensorflow.squeeze
3,293
import tensorflow as tf

        # build sprite image
        ut.images_to_sprite(self.test_set, path=os.path.join(FLAGS.logdir, 'sprite.png'))
        ut.generate_tsv(len(self.test_set), tsv_path)

    def _add_loss_summary(self, name, var, collection='train'):
        if var is not None:
            tf.summary.scalar(name, var, [collection])
            tf.summary.scalar('log_' + name, tf.log(var), [collection])

    def _restore_model(self, session):
        latest_checkpoint = self.get_latest_checkpoint()
        print(latest_checkpoint)
        if latest_checkpoint is not None:
            latest_checkpoint = latest_checkpoint.replace(EMB_SUFFIX, '')
            ut.print_info("latest checkpoint: %s" % latest_checkpoint)
tensorflow.log
3,294
from tensorflow.python.framework import ops

                                   transpose_a=transpose_a,
                                   transpose_b=transpose_b,
                                   a_is_sparse=a_is_sparse,
                                   b_is_sparse=b_is_sparse,
                                   name=name)
  else:
    return gen_math_ops._mat_mul(a, b,
                                 transpose_a=transpose_a,
                                 transpose_b=transpose_b,
                                 name=name)


sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul

ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)


def _as_indexed_slices(x):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.

  Returns:
    An IndexedSlices object.

  Raises:
tensorflow.python.framework.ops.RegisterShape
3,295
import tensorflow as tf

# or evaluation mode, depending on FLAGS.eval:
# Under the evaluation mode, this script will read a saved model,
# and compute the accuracy of the model against a validation dataset.
# Additional ops for accuracy and top_k predictors are only used under this
# mode.
# Under the benchmarking mode, user can specify whether nor not to use
# the forward-only option, which will only compute the loss function.
# forward-only cannot be enabled with eval at the same time.
tf.flags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking')
tf.flags.DEFINE_boolean('forward_only', False,
                        """whether use forward-only or training for benchmarking""")
tf.flags.DEFINE_integer('batch_size', 0, 'batch size per compute device')
tf.flags.DEFINE_integer('num_batches', 100,
                        'number of batches to run, excluding warmup')
tf.flags.DEFINE_integer('num_warmup_batches', None,
                        'number of batches to run before timing')
tf.flags.DEFINE_integer('autotune_threshold', None,
                        'The autotune threshold for the models')
tf.flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on')
tf.flags.DEFINE_integer('display_every', 10,
                        """Number of local steps after which progress is printed out""")
tf.flags.DEFINE_string('data_dir', None,
                       """Path to dataset in TFRecord format (aka Example protobufs).
                       If not specified, synthetic data will be used.""")
tf.flags.DEFINE_string('data_name', None,
                       """Name of dataset: imagenet or flowers.
                       If not specified, it is automatically guessed
tensorflow.flags.DEFINE_integer
3,296
import tensorflow as tf

  Args:
    chunk: A Tensor of text data.

  Returns:
    A namedtuple of input and target data.
  """
  input_text = tf.map_fn(lambda x: x[:-1], chunk)
  target_text = tf.map_fn(lambda x: x[1:], chunk)
  return (input_text, target_text)


def build_to_ids_fn(vocab, max_seq_len):
  """Constructs function mapping examples to sequences of token indices."""
tensorflow.map_fn
3,297
import tensorflow as tf

    image = tf.image.random_crop(image, [32, 32, 3])
    image = tf.image.random_flip_left_right(image)
tensorflow.image.random_flip_left_right
3,298
import tensorflow as tf

        h = h + b
    return h

def conv2d(x, shape, name, bias=False, stride=2, padding='SAME'):
    with tf.variable_scope(name):
        W = weight_variable(shape)
        h = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        if bias:
tensorflow.variable_scope
3,299