seed: string (lengths 25–2.89k)
seed_api: string (lengths 14–102)
index: int64 (0–14.8k)
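Each record below pairs a code excerpt (seed) with the fully-qualified TensorFlow symbol it exercises (seed_api) and a running index. As a rough illustration of how such records could be consumed downstream, here is a minimal sketch that assumes the rows have been exported as JSON Lines with exactly these three fields; the file name and the export format are assumptions, not something this dump specifies.

import json

# Hypothetical export path -- the dump itself does not name a file or a format.
RECORDS_PATH = "seed_api_records.jsonl"

def iter_records(path=RECORDS_PATH):
    """Yield (index, seed_api, seed) tuples from a JSON Lines export."""
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            # One record per line: {"seed": ..., "seed_api": ..., "index": ...}
            row = json.loads(line)
            yield row["index"], row["seed_api"], row["seed"]

if __name__ == "__main__":
    for idx, api, seed in iter_records():
        print(f"{idx}: {api} ({len(seed)} chars of seed code)")
        break  # just show the first record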
import tensorflow as tf
        self.saver = tf.train.Saver()

        # Loss functions and training
        loss_pg = - tf.reduce_mean(pi.log_prob(batch['actions']) * batch['advantage']) - 0.01 * tf.reduce_mean(pi.entropy())
        loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf))
        self.a_grads = tf.gradients(loss_pg, self.pi_params)
        self.c_grads = tf.gradients(loss_vf, self.vf_params)
        self.a_grads, _ = tf.clip_by_global_norm(self.a_grads, 20.0)
        self.c_grads, _ = tf.clip_by_global_norm(self.c_grads, 20.0)
        opt = tf.train.AdamOptimizer(self.LR)
        self.update_a_op = opt.apply_gradients(zip(self.a_grads, self.pi_params))
        self.update_c_op = opt.apply_gradients(zip(self.c_grads, self.vf_params))
        self.sess.run(tf.global_variables_initializer())

        # Tensorboard
        if summary_dir is not None:
            self.writer = tf.summary.FileWriter(summary_dir)
tensorflow.train.AdamOptimizer
11,900
import tensorflow as tf def main(self): with tf.Graph().as_default() as graph, tf.device('/cpu:0'): num_gpu = len(cfgs.GPU_GROUP.strip().split(',')) global_step = slim.get_or_create_global_step() lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu) tf.summary.scalar('lr', lr) optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM) r3det_gwd = build_whole_network.DetectionNetworkR3DetGWD(cfgs=self.cfgs, is_training=True) with tf.name_scope('get_batch'): if cfgs.IMAGE_PYRAMID: shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN) shortside_len = tf.random_shuffle(shortside_len_list)[0] else: shortside_len = cfgs.IMG_SHORT_SIDE_LEN img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \ self.reader.next_batch(dataset_name=cfgs.DATASET_NAME, batch_size=cfgs.BATCH_SIZE * num_gpu, shortside_len=shortside_len, is_training=True)
tensorflow.name_scope
11,901
import tensorflow as tf p = defaults source_vocab_size = self._encoders["inputs"].vocab_size p.input_modality = { "inputs": (registry.Modalities.SYMBOL, source_vocab_size) } p.target_modality = (registry.Modalities.CLASS_LABEL, self.num_classes) def example_reading_spec(self): data_fields = { "inputs": tf.VarLenFeature(tf.int64), "targets": tf.FixedLenFeature([1], tf.int64), } data_items_to_decoders = None return (data_fields, data_items_to_decoders) def txt_line_iterator(txt_path): """Iterate through lines of file."""
tensorflow.VarLenFeature
11,902
import tensorflow as tf # instead. output_layer = model.get_sequence_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias",[], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.reduce_sum(tf.multiply(output_layer,output_weights),-1) logits = tf.add(logits, output_bias) probabilities=tf.sigmoid(logits) # labels=tf.constant(labels,dtype=tf.int32) per_example_loss=tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits,reduction=Reduction.NONE)
tensorflow.variable_scope
11,903
import tensorflow as tf
      Maximum pairwise distance allowed.

  Returns
  -------
  tf.Tensor:
    Of shape (n_test, n_support)
  """
  test = tf.expand_dims(test, 1)
  support = tf.expand_dims(support, 0)
  g = -tf.maximum(tf.reduce_sum(tf.square(test - support), 2), max_dist_sq)
  return g


def add_bias(tensor, init=None, name=None):
tensorflow.expand_dims
11,904
import tensorflow as tf
    assert padding in ['SAME', 'VALID']
    x = tf.keras.layers.Conv3D(filters, kernel_size,
                               activation=activation,
                               kernel_initializer=initialization,
                               use_bias=use_bias,
                               kernel_regularizer=reg_l2)(x)
tensorflow.keras.layers.Conv3D
11,905
import tensorflow as tf x = tf.image.random_brightness(x, max_delta=0.8*s) x = tf.image.random_contrast(x, lower=lower, upper=upper) x = tf.image.random_saturation(x, lower=lower, upper=upper) x = tf.image.random_hue(x, max_delta=0.2*s) x = tf.clip_by_value(x, 0, 1) return x def color_drop(image): image = tf.image.rgb_to_grayscale(image) image = tf.tile(image, [1, 1, 1, 3]) return image # pylint: disable=not-callable @gin.configurable(blacklist=["kwargs"]) class CLGAN(modular_gan.ModularGAN): """Self-Supervised GAN with Contrastive Loss"""
tensorflow.image.rgb_to_grayscale
11,906
import tensorflow as tf # There's no notion of partially known shapes in eager mode, so exit # early. if tf.executing_eagerly(): return # Test case 3. x = tf.placeholder_with_default(input=1, shape=None) is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x)) self.assertTrue(self.evaluate(is_scalar)) x = tf.placeholder_with_default(input=[1], shape=None) is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x)) self.assertFalse(self.evaluate(is_scalar)) def _GetFakeDistribution(self): class FakeDistribution(tfd.Distribution): """Fake Distribution for testing _set_sample_static_shape.""" def __init__(self, batch_shape=None, event_shape=None): self._static_batch_shape = tf.TensorShape(batch_shape) self._static_event_shape = tf.TensorShape(event_shape) super(FakeDistribution, self).__init__(
tensorflow.shape
11,907
import tensorflow as tf
    training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder=training_decoder,
        impute_finished=True,
        maximum_iterations=tf.reduce_max(seq_lens),
    )
tensorflow.reduce_max
11,908
from tensorflow.python.framework import ops total_var = contrib_variables.local_variable( array_ops.zeros([], dtype=dtypes.float64), name=total_scope) batch_total = math_ops.reduce_sum(average_precision, name='batch_total') total_update = state_ops.assign_add(total_var, batch_total, name='update') # Divide total by max to get mean, for both vars and the update ops. mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean') update = _safe_scalar_div(total_update, max_update, name=scope) if metrics_collections: ops.add_to_collections(metrics_collections, mean_average_precision) if updates_collections: ops.add_to_collections(updates_collections, update) return mean_average_precision, update def _select_class_id(ids, selected_id): """Filter all but `selected_id` out of `ids`. Args: ids: `int64` `Tensor` or `SparseTensor` of IDs. selected_id: Int id to select.
tensorflow.python.framework.ops.add_to_collections
11,909
import tensorflow as tf eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") if not tf.gfile.Exists(eval_file) or not FLAGS.data_converted: file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) tf.logging.info("***** Running evaluation *****") tf.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False
tensorflow.logging.info
11,910
import tensorflow as tf
    dist /= tf.reshape(self.hparams.scales,
                       [1, 1, self.hparams.moe_num_experts])
    nearest_idx = tf.argmax(-dist, axis=-1)
    nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size)
tensorflow.argmax
11,911
import tensorflow as tf is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_second_moment_op = utils.smart_cond( is_training, build_update_ops, build_no_ops, ) # Every new connection creates a new op which adds its contribution # to the running average when ran. tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_second_moment_op) def _build(self, input_batch, is_training=True, test_local_stats=True): """Connects the BatchNorm module into the graph. Args: input_batch: A Tensor of arbitrary dimension. By default, the final dimension is not reduced over when computing the minibatch statistics. is_training: A boolean to indicate if the module should be connected in
tensorflow.add_to_collection
11,912
import tensorflow as tf with tf.variable_scope("convnet"): # original architecture out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu) out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64): with tf.variable_scope(scope, reuse=reuse): out = img_in gauss_initializer = initializers.xavier_initializer(uniform=False) # stddev = 1/n with tf.variable_scope("convnet"): out = layers.convolution2d( out, num_outputs=num_filters, kernel_size=8, stride=4, activation_fn=tf.nn.relu, weights_initializer=gauss_initializer, trainable=False) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out
tensorflow.variable_scope
11,913
import tensorflow as tf
    :return: sample from the MVN of shape N x D
    """
    eps = tf.random_normal(tf.shape(mean), dtype=settings.float_type)  # N x P
    if cov_structure == "diag":
        sample = mean + tf.sqrt(cov) * eps  # N x P
    elif cov_structure == "full":
        cov = cov + (tf.eye(tf.shape(mean)[1], dtype=settings.float_type) *
                     settings.numerics.jitter_level)[None, ...]  # N x P x P
        chol = tf.cholesky(cov)  # N x P x P
        return mean + (tf.matmul(chol, eps[..., None])[..., 0])  # N x P
    else:
        raise NotImplementedError  # pragma: no cover
tensorflow.shape
11,914
import tensorflow as tf def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, optimizer) output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, loss=total_loss,
tensorflow.logging.info
11,915
from tensorflow.python.framework import ops with ops.control_dependencies([maybe_reallocate_op]): append_values_op = array[size:new_size].assign(batch_values) with ops.control_dependencies([append_values_op]): update_op = size.assign(new_size) if metrics_collections: ops.add_to_collections(metrics_collections, value) if updates_collections: ops.add_to_collections(updates_collections, update_op) return value, update_op
tensorflow.python.framework.ops.add_to_collections
11,916
import tensorflow as tf inputs=embed_inputs, dtype=tf.float32, sequence_length=self.seq_len, ) ## (batch_size, seq_len, num_hidden) # rnn_outputs = tf.transpose(rnn_outputs, perm=[1,0,2]) ## (seq_len, batch_size, num_hidden) NOT NEEDED ANY MORE last_outputs = self.last_relevant(rnn_outputs, self.seq_len) ## (batch_size, num_hidden) with tf.variable_scope('output', reuse=forward_only): with tf.variable_scope('softmax'): W = tf.get_variable('W', [self.num_hidden, self.num_classes], # initializer=tf.random_uniform_initializer(-0.003, 0.003)) initializer=tf.contrib.layers.xavier_initializer()) # initializer=tf.truncated_normal_initializer(stddev=0.1)) b = tf.get_variable('b', [self.num_classes], initializer=tf.constant_initializer(0.1)) logits = tf.matmul(last_outputs, W) + b
tensorflow.variable_scope
11,917
import tensorflow as tf "src_lang": tf.TensorShape([None, ]), "trg_lang": tf.TensorShape([None, ])}
tensorflow.TensorShape
11,918
import tensorflow as tf
        dc_g_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_g_fake),
                                                    logits=d_g_fake))
        dc_g_loss = dc_g_loss_fake + dc_g_loss_real

        # Categorical Discriminator Loss
        dc_c_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_real),
                                                    logits=d_c_real))
        dc_c_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_c_fake),
                                                    logits=d_c_fake))
        dc_c_loss = dc_c_loss_fake + dc_c_loss_real

        # Generator loss
tensorflow.ones_like
11,919
import tensorflow as tf precise_logits = tf.cast(logits, tf.float32) if ( logits.dtype == tf.float16) else logits onehot_labels = tf.cast(onehot_labels, precise_logits.dtype) predictions = tf.nn.sigmoid(logits) predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1.-predictions) # add small value to avoid 0 epsilon = 1e-8 alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32)) alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1-alpha_t) losses = tf.reduce_sum(-alpha_t * tf.pow(1. - predictions_pt, gamma) * tf.log(predictions_pt+epsilon), name=name, axis=1) return losses def Evaluate(sess): test_acc = 0.0 test_loss = 0.0 for it in range(test_iteration):
tensorflow.log
11,920
import tensorflow as tf

def resnet_block(x, block_name='ResBlock', channel_nr=64, scale=1, pad='SAME'):
    tmp = conv3d(x, kernel_size=3, filters=channel_nr, padding=pad,
                 activation=None, use_bias=False, initialization=None)
    tmp = tf.keras.layers.LeakyReLU(alpha=0.2)(tmp)

    tmp = conv3d(tmp, kernel_size=3, filters=channel_nr, padding=pad,
                 activation=None, use_bias=False, initialization=None)
tensorflow.keras.layers.LeakyReLU
11,921
import tensorflow as tf
        h_conv1 = self.conv2d('h_conv1', s, W_conv1, 4, b_conv1)  # build the first conv layer; its output is conv1
        h_pool1 = self.max_pool_2x2('h_pool1', h_conv1)
        h_conv2 = self.conv2d('h_conv2', h_pool1, W_conv2, 2, b_conv2)
        h_conv3 = self.conv2d('h_conv3', h_conv2, W_conv3, 1, b_conv3)
        h_conv3_flat = tf.reshape(h_conv3, [-1, 1600], 'h_conv3_flat')
        h_fc1 = tf.nn.relu(tf.add(tf.matmul(h_conv3_flat, W_fc1), b_fc1, 'h_fc1'))
        readout = tf.add(tf.matmul(h_fc1, W_fc2), b_fc2, 'h_fc2')
        return s, readout, h_fc1

    def creat_optimizer(self, readout):
        action = tf.placeholder(tf.float32, [None, self.ACTIONS])
tensorflow.matmul
11,922
import tensorflow as tf
    step_size = tf.cast(step_size, dtype=tf.float32)
    max_lr = tf.cast(max_lr, dtype=tf.float32)

    if mode == 'tri':
        periodic_comp = tf.mod((global_step + step_size / 4) / step_size, 1)
        first_factor = tf.abs(periodic_comp - 0.5)
        second_factor = 2 * (max_lr - learning_rate)
        second_comp = learning_rate
tensorflow.mod
11,923
import tensorflow as tf transform_input_data, model_preprocess_fn=model_preprocess_fn, image_resizer_fn=image_resizer_fn, num_classes=num_classes, data_augmentation_fn=None) decoder = tf_example_decoder.TfExampleDecoder( load_instance_masks=False, num_additional_channels=predict_input_config.num_additional_channels) input_dict = transform_fn(decoder.decode(example)) images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32) images = tf.expand_dims(images, axis=0) true_image_shape = tf.expand_dims( input_dict[fields.InputDataFields.true_image_shape], axis=0) return tf.estimator.export.ServingInputReceiver( features={ fields.InputDataFields.image: images, fields.InputDataFields.true_image_shape: true_image_shape},
tensorflow.cast
11,924
import tensorflow as tf input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5
tensorflow.nn.log_softmax
11,925
import tensorflow as tf require_init = tf.reduce_any(tf.is_nan(self.g)) init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b]) with tf.control_dependencies(init_ops): w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0) return tf.matmul(input_var,w)+self.b def get_variables(self): #TODO: self.v should be l2-normalized or not? / currently not. return {'v':self.v,'b':self.b,'g':self.g} class SymPadConv2d(object): #Resize and Convolution(upsacle by 2) def __init__(self,name,input_dim,output_dim, k_h=3,k_w=3,stddev=0.02) : assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size' with tf.variable_scope(name) : self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim], initializer=tf.random_normal_initializer(stddev=stddev)) self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0)) self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ] def __call__(self,input_var,name=None,**kwargs): _,h,w,c = input_var.shape.as_list() _t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2]) _t = tf.pad(_t,self.padding, mode='SYMMETRIC') return tf.nn.bias_add( tf.nn.conv2d(_t, self.w, data_format='NHWC', #we can't use cudnn due to resize method... strides=[1,1,1,1], padding="VALID"),
tensorflow.variable_scope
11,926
import tensorflow as tf dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous) net_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_name) optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5) update_op = optimizer.minimize( tf.nn.seq2seq.sequence_loss(dec_op, targets, weights),
tensorflow.train.AdamOptimizer
11,927
import tensorflow as tf
    outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
                      tf.expand_dims(tf.nn.softmax(logits2), axis=1))
    outer = tf.matrix_band_part(outer, 0, config.ans_limit)
    self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
tensorflow.matrix_band_part
11,928
import tensorflow as tf @tfe.run_all_tests_in_graph_and_eager_modes class ParetoTest(test_case.TestCase): def _scipy_pareto(self, concentration, scale): # In scipy pareto is defined with scale = 1, so we need to scale. return stats.pareto(concentration, scale=scale) def testParetoShape(self): scale = tf.constant([2.] * 5) concentration = tf.constant([2.] * 5) pareto = tfd.Pareto(concentration, scale) self.assertEqual(self.evaluate(pareto.batch_shape_tensor()), (5,)) self.assertEqual(pareto.batch_shape, tf.TensorShape([5])) self.assertAllEqual(self.evaluate(pareto.event_shape_tensor()), []) self.assertEqual(pareto.event_shape, tf.TensorShape([])) def testParetoShapeBroadcast(self): scale = tf.constant([[3., 2.]]) concentration = tf.constant([[4.], [5.], [6.]]) pareto = tfd.Pareto(concentration, scale) self.assertAllEqual(self.evaluate(pareto.batch_shape_tensor()), (3, 2)) self.assertAllEqual(pareto.batch_shape, tf.TensorShape([3, 2])) self.assertAllEqual(self.evaluate(pareto.event_shape_tensor()), []) self.assertEqual(pareto.event_shape, tf.TensorShape([]))
tensorflow.TensorShape
11,929
import tensorflow as tf Computes the offset array used to upsample indices with TensorFlow. :param shape: [list] Window shape. """ center = [(ss - 1) // 2 for ss in shape] axes = [tf.range(-cc, ss - cc, dtype=tf.int32) for cc, ss in zip(center, shape)] # Broadcast and match dimension. if len(shape) > 1: for jj in range(len(shape)): for ii in range(len(shape) + 1): if ii != jj: axes[jj] = tf.expand_dims(axes[jj], ii) for jj in range(len(shape)): shape_ = [ss for ss in shape] + [1] shape_[jj] = 1 axes[jj] = tf.tile(axes[jj], shape_) offset = tf.concat(axes, len(shape)) return offset def _get_offset_array(shape): """
tensorflow.expand_dims
11,930
import tensorflow as tf # with `layer_input`. with tf.variable_scope("output"): attention_output = dense_layer_3d_proj( attention_output, hidden_size, attention_head_size, create_initializer(initializer_range), None, name="dense") attention_output = dropout(attention_output, hidden_dropout_prob) attention_output = layer_norm(attention_output + layer_input) with tf.variable_scope("ffn_1"): with tf.variable_scope("intermediate"): intermediate_output = dense_layer_2d( attention_output, intermediate_size, create_initializer(initializer_range), intermediate_act_fn, num_attention_heads=num_attention_heads, name="dense") with tf.variable_scope("output"): ffn_output = dense_layer_2d(
tensorflow.variable_scope
11,931
from tensorflow.contrib.tpu.python.tpu.datasets import StreamingFilesDataset if epochs_between_evals > 1: raise ValueError("epochs_between_evals > 1 not supported for file " "based dataset.") epoch_data_dir = self._result_queue.get(timeout=300) if not self._is_training: self._result_queue.put(epoch_data_dir) # Eval data is reused. file_pattern = os.path.join( epoch_data_dir, rconst.SHARD_TEMPLATE.format("*")) dataset = StreamingFilesDataset( files=file_pattern, worker_job="worker", num_parallel_reads=rconst.NUM_FILE_SHARDS, num_epochs=1, sloppy=not self._deterministic) map_fn = functools.partial(self._deserialize, batch_size=batch_size) dataset = dataset.map(map_fn, num_parallel_calls=16) else: types = {movielens.USER_COLUMN: rconst.USER_DTYPE,
tensorflow.contrib.tpu.python.tpu.datasets.StreamingFilesDataset
11,932
import tensorflow as tf
    for ns in [d, h, w]:
        x = tf.split(x, ns, 1)
        x = tf.concat([tf.squeeze(v, 1) for v in x], 3)
    return tf.reshape(x, (batch_size, d*r, h*r, w*r, 1))


def subpixel_conv3d(x, r, out_channels):
    x = tf.split(x, out_channels, 4)
    x = tf.concat([phase_shift_3d(v, r) for v in x], 4)
    return x


def pixel_shuffler_3d(x, r, k, out_channels, name):
    in_channels = x.get_shape().as_list()[4]
tensorflow.split
11,933
import tensorflow as tf b6 = utils.bias_variable([4096], name="b6") conv6 = utils.conv2d_basic(pool5, W6, b6) relu6 = tf.nn.relu(conv6, name="relu6") if FLAGS.debug: utils.add_activation_summary(relu6) relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob) W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7") b7 = utils.bias_variable([4096], name="b7") conv7 = utils.conv2d_basic(relu_dropout6, W7, b7) relu7 = tf.nn.relu(conv7, name="relu7") if FLAGS.debug: utils.add_activation_summary(relu7) relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob) W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8") b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8") conv8 = utils.conv2d_basic(relu_dropout7, W8, b8) # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")
tensorflow.nn.relu
11,934
import tensorflow as tf
    if encoder.position_bias and input_length is not None and time is not None:
        src_pos = tf.tile(tf.expand_dims(tf.range(time_steps), axis=0), [batch_size, 1])
        trg_pos = tf.tile(tf.reshape(time, [1, 1]), [batch_size, time_steps])
        src_len = tf.tile(tf.expand_dims(input_length, axis=1), [1, time_steps])  # - 1
        pos_feats = tf.to_float(tf.stack([src_pos, trg_pos, src_len], axis=2))
        pos_feats = tf.log(1 + pos_feats)

        y += dense(pos_feats, encoder.attn_size, use_bias=False, name='P_a')
tensorflow.stack
11,935
import tensorflow as tf wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0)) bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0)) bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u) c = f*c + i*u h = o*tf.tanh(_ln(c, gc, bc)) xs[idx] = h s = tf.concat(axis=1, values=[c, h]) return xs, s def conv_to_fc(x):
tensorflow.matmul
11,936
from tensorflow.python.framework import constant_op features = { 'language': sparse_tensor.SparseTensor( values=('en', 'fr', 'zh'), indices=((0, 0), (0, 1), (2, 0)), dense_shape=(3, 2)) } labels = constant_op.constant(((1,), (0,), (0,))) return features, labels # The given hash_bucket_size results in variables larger than the # default min_slice_size attribute, so the variables are partitioned. sparse_feature = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=2e7)
tensorflow.python.framework.constant_op.constant
11,937
import tensorflow as tf
  def _to_chars(line):
    """string scalar -> Dataset of characters (string scalars)."""
    chars, = tf.py_func(_split_string, [line + "\n"], [tf.string])
    chars.set_shape([None])
    return tf.data.Dataset.from_tensor_slices(chars)

  return (tf.data.TextLineDataset([filename])
          .flat_map(_to_chars)
          .repeat()
          .batch(tf.to_int64(sequence_size))
          .shuffle(1000)
          .batch(tf.to_int64(batch_size)))
tensorflow.to_int64
11,938
import tensorflow as tf
        layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
        lstm_c = tf.nn.rnn_cell.LSTMCell(num_units=256)
tensorflow.nn.rnn_cell.LSTMCell
11,939
import tensorflow as tf def contra_step_lossV5(pred, tgt, resample=1): # p = tf.print('begin loss v5', [resample, pred.shape,tgt.shape]) # with tf.control_dependencies([p]): pred_flat = tf.reshape(pred, [-1]) tgt_flat = tf.reshape(tgt, [-1]) batch = tf.stack([pred_flat, tgt_flat], 1) num_sam = tools.shape(batch)[0] index = tf.range(num_sam) divider = tf.constant(resample, dtype=tf.float32) def sample_compute(cur_loss, i): batch1 = tf.gather(batch, tf.random.shuffle(index)) batch2 = tf.gather(batch, tf.random.shuffle(index)) pred1 = tf.slice(batch1, [0, 0], [num_sam, 1]) pred2 = tf.slice(batch2, [0, 0], [num_sam, 1]) tgt1 = tf.slice(batch1, [0, 1], [num_sam, 1]) tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1]) loss = cur_loss + compute_contra_loss(pred1, pred2, tgt1, tgt2) print(loss) return (loss, i + 1) # def sample_compute(i): # batch1 = tf.gather(batch, tf.random.shuffle(index)) # batch2 = tf.gather(batch, tf.random.shuffle(index))
tensorflow.random.shuffle
11,940
import tensorflow as tf if mode == tf.estimator.ModeKeys.TRAIN: train_op, update_learning_rate = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, adjust_lr, use_hvd, use_compression, use_fp16, clip, cos_decay, use_lamb, previous_train_steps, post_train_steps) logging_hook = tf.train.LoggingTensorHook({"loss": total_loss, "learning_rate": update_learning_rate}, every_n_iter=FLAGS.hooking_frequence) output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, training_hooks=[logging_hook]) elif mode == tf.estimator.ModeKeys.EVAL:
tensorflow.estimator.EstimatorSpec
11,941
import tensorflow as tf value_dtype=tf.int64, default_value=-1) vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1)) vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1)) x_t = tf.gather(x, l) x_t_len = tf.strings.length(x_t) x_t = tf.string_split([x_t], delimiter='').values z_t = tf.gather(y, m) z_t_len = tf.strings.length(z_t) z_t = tf.string_split([z_t], delimiter='').values for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'): u = tf.string_join(x_t[i:i + self._p], '') vx_keys, r = tf.cond( tf.greater(vx.lookup(u), -1), true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)), false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0), tf.constant(1, dtype=tf.int64, name='constant')) ) vx.insert(u, r)
tensorflow.string_split
11,942
import tensorflow as tf
      name: the name of the tf summary.
  """
  with tf.name_scope(name):
    private_samples -= tf.reduce_mean(private_samples, 0)
    shared_samples -= tf.reduce_mean(shared_samples, 0)
tensorflow.reduce_mean
11,943
import tensorflow as tf
      ignored_matches: a bool tensor of shape [batch, N], representing whether
        each box is an ignored match. An ignored match is one that is neither
        positive nor negative.
    """
    matched_gt_boxes, matched_gt_classes, matched_gt_indices, matched_iou, _ = (
        box_ops.box_matching(boxes, gt_boxes, gt_classes))

    positive_matches = tf.greater(
        matched_iou, self._config_dict['foreground_iou_threshold'])
    negative_matches = tf.logical_and(
        tf.greater_equal(
            matched_iou, self._config_dict['background_iou_low_threshold']),
        tf.less(
            matched_iou, self._config_dict['background_iou_high_threshold']))
tensorflow.greater
11,944
import tensorflow as tf class AssignOpTest(tf.test.TestCase): def _initAssignFetch(self, x, y, use_gpu=False): """Initialize a param to init and update it with y.""" super(AssignOpTest, self).setUp() with self.test_session(use_gpu=use_gpu): p = tf.Variable(x) assign = tf.assign(p, y) p.initializer.run() new_value = assign.eval() return p.eval(), new_value def _initAssignAddFetch(self, x, y, use_gpu=False): """Initialize a param to init, and compute param += y.""" with self.test_session(use_gpu=use_gpu): p = tf.Variable(x)
tensorflow.assign
11,945
import tensorflow as tf [0, 0]], dtype=tf.float32) mask5 = tf.constant([[1, 0], [1, 0]], dtype=tf.float32) masks1 = tf.stack([mask0, mask1, mask2]) masks2 = tf.stack([mask3, mask4, mask5]) ious = isu.get_pairwise_iou_matrix(masks1, masks2) expected_ious = tf.constant([[0.5, 0.0, 1.0/3.0], [0.75, 0.0, 0.25], [0.75, 0.0, 2.0/3.0]], dtype=tf.float32) self.assertAllClose(ious.numpy(), expected_ious.numpy())
tensorflow.constant
11,946
import tensorflow as tf hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) if task_name != "sts-b": probabilities = tf.nn.softmax(logits, axis=-1) predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) else:
tensorflow.nn.dropout
11,947
import tensorflow as tf one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is
tensorflow.reduce_sum
11,948
import tensorflow as tf # Traditional U-Net def build_Unet_Arch(self, input_data, name="Unet_Arch"): self.base_number_of_features = 32 with tf.variable_scope(name): # Encoder definition o_c1 = self.general_conv2d(input_data, self.base_number_of_features, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_1') o_mp1 = tf.layers.max_pooling2d(o_c1, 2, 2, name = name + '_maxpooling_1') o_c2 = self.general_conv2d(o_mp1, self.base_number_of_features * 2, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_2') o_mp2 = tf.layers.max_pooling2d(o_c2, 2, 2, name = name + '_maxpooling_2') o_c3 = self.general_conv2d(o_mp2, self.base_number_of_features * 4, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_3') o_mp3 = tf.layers.max_pooling2d(o_c3, 2, 2, name = name + '_maxpooling_3') o_c4 = self.general_conv2d(o_mp3, self.base_number_of_features * 8, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_4') o_mp4 = tf.layers.max_pooling2d(o_c4, 2, 2, name = name + '_maxpooling_4') o_c5 = self.general_conv2d(o_mp4, self.base_number_of_features * 16, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_5') # Decoder definition o_d1 = self.general_deconv2d(o_c5, self.base_number_of_features * 8, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_1') o_me1 = tf.concat([o_d1, o_c4], 3) # Skip connection o_d2 = self.general_deconv2d(o_me1, self.base_number_of_features * 4, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_2') o_me2 = tf.concat([o_d2, o_c3], 3) # Skip connection o_d3 = self.general_deconv2d(o_me2, self.base_number_of_features * 2, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_3') o_me3 = tf.concat([o_d3, o_c2], 3) # Skip connection
tensorflow.layers.max_pooling2d
11,949
import tensorflow as tf start = time() self.sess.run([self.pi_new_params, self.vf_new_params, self.data_iter.initializer], feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv}) while True: try: summary, step, _ = self.sess.run([self.summarise, self.global_step, self.train_op]) except tf.errors.OutOfRangeError: break print('\rTrained in %.3fs. Global step %i' % (time() - start, step+1)) return summary class PPO_HC(PPO): def build_anet(self, state_in, name, reuse=False): reg = tf.contrib.layers.l2_regularizer(1e-3) with tf.variable_scope(name, reuse=reuse): layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg) # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5)) sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params class PPO_LSTM(Base):
tensorflow.contrib.layers.l2_regularizer
11,950
import tensorflow as tf if os.path.exists(test_dir): shutil.rmtree(test_dir) gfile.MakeDirs(test_dir) return test_dir def testAddCollectionDef(self): test_dir = self._TestDir("good_collection") filename = os.path.join(test_dir, "metafile") with self.test_session(): # Creates a graph. v0 = tf.Variable(10.0, name="v0") var = tf.Variable(tf.constant(0, dtype=tf.int64)) count_up_to = var.count_up_to(3) input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue") qr = tf.train.QueueRunner(input_queue, [count_up_to]) tf.initialize_all_variables() # Creates a saver. save = tf.train.Saver({"v0": v0}) # Adds a set of collections. tf.add_to_collection("int_collection", 3) tf.add_to_collection("float_collection", 3.5) tf.add_to_collection("string_collection", "hello") tf.add_to_collection("variable_collection", v0) # Add QueueRunners. tf.train.add_queue_runner(qr) # Adds user_defined proto in three formats: string, bytes and Any.
tensorflow.FIFOQueue
11,951
import tensorflow as tf initializer=tf.random_normal_initializer()) alpha_logstd = tf.get_variable('alpha_logstd_layer'+str(h), shape=[1, 1, n_basis, n_out], initializer=tf.random_normal_initializer()) alpha_std = tf.exp(alpha_logstd) # Compute epsilon from {n_samples} standard Gaussian # epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out]) epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out]) hyp_params = tf.get_variable('hyp_params_layer'+str(h), shape=[2], initializer=tf.random_normal_initializer()) l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1]) epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2 # Compute A_{h+1} A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1]) # Compute z_{h}A_{h+1} Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5) Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5) # Compute u_{h+1} and v_{h+1} U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2) Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.) KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2. # Output layer else: F = tf.squeeze(tf.layers.dense(Z, n_out), [2]) return F, KL
tensorflow.shape
11,952
import tensorflow as tf
    # Axis 0 - Batch.
    # Axis 1 - Input Frames, 4 frames.
    # Axis 2, 3 - Height & Width.
    # Axis 4 - Channels RGB, 3 colours.
    x = tf.transpose(observations, [0, 2, 3, 1, 4])
    x_shape = common_layers.shape_list(x)
    x = tf.reshape(x, x_shape[:-2] + [-1])
    dropout = getattr(self.hparams, "dropout_ppo", 0.0)
tensorflow.transpose
11,953
import tensorflow as tf with tf.variable_scope('mixed'): #add fake pc as a rotation class num_to_add = int(max(self.batch_size/self.num_angles, 1)) idx = tf.range(0, self.batch_size, 1) idx = tf.random_shuffle(idx)[0:num_to_add] self.fake_to_add = tf.gather(self.generator_out, idx) self.mixed_pc = tf.concat([self.real_pc_rotated, self.fake_to_add], 0) self.mixed_label = tf.concat([self.rot_label_pl, tf.constant(self.num_angles, shape = (num_to_add,))], axis = 0) mixed_idx = tf.range(0, self.mixed_label.get_shape().as_list()[0], 1) mixed_idx = tf.random_shuffle(mixed_idx)[0:self.batch_size] self.mixed_pc = tf.gather(self.mixed_pc, mixed_idx) self.mixed_label = tf.gather(self.mixed_label, mixed_idx) self.mixed_pred, mixed_end_points = self.get_pred(self.mixed_pc)
tensorflow.constant
11,954
import tensorflow as tf # Downloads data at url into data_dir/basename(url). The dataset has a header # row (color_name, r, g, b) followed by comma-separated lines. path = maybe_download(os.path.basename(url), data_dir, url) # This chain of commands loads our data by: # 1. skipping the header; (.skip(1)) # 2. parsing the subsequent lines; (.map(parse)) # 3. shuffling the data; (.shuffle(...)) # 3. grouping the data into padded batches (.padded_batch(...)). dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle( buffer_size=10000).padded_batch( batch_size, padded_shapes=([None], [None, None], [])) return dataset # pylint: disable=not-callable class RNNColorbot(tf.keras.Model): """Multi-layer (LSTM) RNN that regresses on real-valued vector labels.
tensorflow.data.TextLineDataset
11,955
import tensorflow as tf c_init = np.zeros((1, lstm_cell.state_size.c), np.float32) h_init = np.zeros((1, lstm_cell.state_size.h), np.float32) state_init = [c_init, h_init] c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c]) h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h]) state_in = (c_in, h_in) rnn_in = tf.expand_dims(self.h3, [0]) step_size = tf.shape(inputs)[:1] state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in) lstm_outputs, lstm_state = tf.nn.dynamic_rnn( lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size, time_major=False)
tensorflow.expand_dims
11,956
import tensorflow as tf
    def print_examples_and_shapes(x):
      if np.random.uniform() < debug_print_examples_rate:
        tf.print(
            {
                'inputs_shape': tf.size(x['inputs']),
                'targets_shape': tf.size(x['targets']),
                'inputs': x['inputs'],
                'targets': x['targets'],
            },
            output_stream=logging.info)
tensorflow.size
11,957
import tensorflow as tf
    else:
      return tf.no_op()
  else:
    return tf.assert_equal(shape_a[0], shape_b[0])
tensorflow.assert_equal
11,958
import tensorflow as tf
        # Remove episode indices.
        num_episodes = tf.count_nonzero(
            input_tensor=tf.gather(params=self.terminal_memory, indices=indices),
            axis=0,
            dtype=util.tf_dtype('int')
        )
        num_episodes = tf.minimum(x=num_episodes, y=self.episode_count)
        assignment = tf.assign(
            ref=self.episode_indices[:self.episode_count - num_episodes],
            value=self.episode_indices[num_episodes: self.episode_count]
        )

        # Decrement episode count.
tensorflow.minimum
11,959
import tensorflow as tf 'learning_rate_decay_factor', 0.96, 'Learning rate decay factor.') tf.app.flags.DEFINE_float( 'decay_steps', 1000, 'Number of epochs after which learning rate decays.') # for learning rate piecewise_constant decay tf.app.flags.DEFINE_string( 'decay_boundaries', '60000, 800000', 'Learning rate decay boundaries by global_step (comma-separated list).') tf.app.flags.DEFINE_string( 'lr_decay_factors', '1, 0.6, 0.1',
tensorflow.app.flags.DEFINE_string
11,960
from tensorflow.python.ops import math_ops def _prob(self, x): return math_ops.exp(self._log_prob(x)) def _log_cdf(self, x): return math_ops.log(self._cdf(x)) def _cdf(self, x): x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if self.validate_args else [], x) # Note that igammac returns the upper regularized incomplete gamma # function Q(a, x), which is what we want for the CDF. return math_ops.igammac(self.alpha, self.beta / x) @distribution_util.AppendDocstring( """This is defined to be ``` entropy = alpha - log(beta) + log(Gamma(alpha)) + (1-alpha)digamma(alpha) ``` where digamma(alpha) is the digamma function.""")
tensorflow.python.ops.math_ops.igammac
11,961
import tensorflow as tf """ 检查所给路径是否已存在,如果存在删除原有日志。并创建日志写入对象 参数 ---- logPath :string,日志存储路径 返回 ---- summaryWriter :FileWriter,日志写入器 """ if tf.gfile.Exists(logPath): tf.gfile.DeleteRecursively(logPath) summaryWriter = tf.summary.FileWriter(logPath, graph=tf.get_default_graph()) return summaryWriter
tensorflow.gfile.Exists
11,962
import tensorflow as tf break output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if FLAGS.do_serve: def serving_input_fn(): with tf.variable_scope("foo"): feature_spec = { "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), } serialized_tf_example = tf.placeholder(dtype=tf.string, shape=[None], name='input_example_tensor') receiver_tensors = {'examples': serialized_tf_example} features = tf.parse_example(serialized_tf_example, feature_spec) return tf.estimator.export.ServingInputReceiver(features, receiver_tensors) estimator._export_to_tpu = False # this is important
tensorflow.FixedLenFeature
11,963
import tensorflow as tf # FC layers to find next location loc_layer1 = Dense(units=loc_layer_size)(prev_loc) loc_layer2 = Dense(units=loc_layer_size)(loc_layer1) # Concatenationation of above layers, followed by FC layer concat = tf.concat([flat1b, loc_layer2],1) # goal_layer2 h1 = Dense(units=RNN_SIZE)(concat) h2 = Dense(units=RNN_SIZE)(h1) self.h3 = tf.nn.relu(h2+concat) #Recurrent network for temporal dependencies lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE,state_is_tuple=True) c_init = np.zeros((1, lstm_cell.state_size.c), np.float32) h_init = np.zeros((1, lstm_cell.state_size.h), np.float32) state_init = [c_init, h_init] c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c]) h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h]) state_in = (c_in, h_in) rnn_in = tf.expand_dims(self.h3, [0]) step_size = tf.shape(inputs)[:1] state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in) lstm_outputs, lstm_state = tf.nn.dynamic_rnn( lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
tensorflow.nn.rnn_cell.BasicLSTMCell
11,964
from tensorflow.python.framework import ops Args: x: A `Tensor` or `SparseTensor`. dtype: The destination type. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` with same shape as `x`. Raises: TypeError: If `x` cannot be cast to the `dtype`. """ with ops.op_scope([x], name, "Cast") as name: if isinstance(x, ops.SparseTensor): values_cast = cast(x.values, dtype, name=name) return ops.SparseTensor(x.indices, values_cast, x.shape) else: # TODO(touts): Handle what Josh said. # # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that # allows some conversions that cast() can't do, e.g. casting numbers to # strings. x = ops.convert_to_tensor(x, name="x") if x.dtype.base_dtype == dtype: return x return gen_math_ops.cast(x, dtype, name=name) def to_float(x, name="ToFloat"):
tensorflow.python.framework.ops.SparseTensor
11,965
import tensorflow as tf

def conv_to_fc(x):
    nh = np.prod([v.value for v in x.get_shape()[1:]])
    x = tf.reshape(x, [-1, nh])
    return x

def discount_with_dones(rewards, dones, gamma):
    discounted = []
    r = 0
    for reward, done in zip(rewards[::-1], dones[::-1]):
        r = reward + gamma*r*(1.-done)  # fixed off by one bug
        discounted.append(r)
    return discounted[::-1]

def find_trainable_variables(key):
    return tf.trainable_variables(key)

def make_path(f):
    return os.makedirs(f, exist_ok=True)

def constant(p):
    return 1

def linear(p):
    return 1-p

def middle_drop(p):
    eps = 0.75
    if 1-p < eps:
tensorflow.trainable_variables
11,966
from tensorflow.python.ops import common_shapes

@ops.RegisterShape("MaxPoolWithArgmax")
def _MaxPoolWithArgMaxShape(op):
  """Shape function for MaxPoolWithArgmax op."""
  return common_shapes.max_pool_shape(op) * 2

@ops.RegisterShape("AvgPoolGrad")
def _AvgPoolGradShape(op):
tensorflow.python.ops.common_shapes.max_pool_shape
11,967
import tensorflow as tf (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer(
tensorflow.logging.info
11,968
from tensorflow.python.ops import init_ops padding='SAME', data_format=None, rate=1, activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None, weights_initializer=initializers.xavier_initializer(), weights_regularizer=None, biases_initializer=init_ops.zeros_initializer(), biases_regularizer=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, use_spectral_norm=False, is_training=False,
tensorflow.python.ops.init_ops.zeros_initializer
11,969
import tensorflow as tf
    one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
    output = tf.matmul(one_hot_input_ids, embedding_table)
tensorflow.matmul
11,970
import tensorflow as tf
  degree_l = tf.convert_to_tensor(value=degree_l)
  order_m = tf.convert_to_tensor(value=order_m)
  x = tf.convert_to_tensor(value=x)
tensorflow.convert_to_tensor
11,971
import tensorflow as tf :description: Placeholders """ def _setup_placeholders(self): if self.demo: self.c = tf.placeholder(tf.int32, [None, self.config.max_p_len], "context") self.q = tf.placeholder(tf.int32, [None, self.config.max_q_len], "question") self.ch = tf.placeholder(tf.int32, [None, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = tf.placeholder(tf.int32, [None, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = tf.placeholder(tf.int32, [None], "answer_label1") self.end_label = tf.placeholder(tf.int32, [None], "answer_label2") else: self.c = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len], "context") self.q = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len], "question") self.ch = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len,
tensorflow.placeholder
11,972
from tensorflow.python.framework import tensor_util def _MaxPoolWithArgMaxShape(op): """Shape function for MaxPoolWithArgmax op.""" return common_shapes.max_pool_shape(op) * 2 @ops.RegisterShape("AvgPoolGrad") def _AvgPoolGradShape(op): """Shape function for the AvgPoolGrad op.""" orig_input_shape = tensor_util.constant_value(op.inputs[0]) if orig_input_shape is not None: return [tensor_shape.TensorShape(orig_input_shape.tolist())] else: # NOTE(mrry): We could in principle work out the shape from the # gradients and the attrs, but if we do not know orig_input_shape # statically, then we are unlikely to know the shape of the # gradients either.
tensorflow.python.framework.tensor_util.constant_value
11,973
from tensorflow.python.framework import tensor_util x = ops.convert_to_tensor(x, name="x") def slice_shape(start_sum, size, name): """Closure to slice out shape.""" start_sum = start_sum if start_sum else ( array_ops.zeros((), dtype=dtypes.int32, name="zero"),) if (x.get_shape().ndims is not None and self._is_all_constant_helper(size, *start_sum)): start = sum(tensor_util.constant_value(s) for s in start_sum) stop = start + tensor_util.constant_value(size) slice_ = x.get_shape()[start:stop].as_list() if all(s is not None for s in slice_): return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name) # Fall-through intended. return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,)) sample_ndims = self.get_sample_ndims(x, name=name) return (slice_shape((), sample_ndims,
tensorflow.python.framework.tensor_util.constant_value
11,974
import tensorflow as tf
          tf.random.uniform([]) < self._merge_prob)

    def _MergeOneToken(tokens, i):
      return tf.expand_dims(
          self._MergeTokens((tokens[i], tokens[i + 1])), axis=-1)

    def _MergeCandidates(tokens, candidates):
      """Merge in the reverse binary tree."""
      best_id = tf.argmin(candidates, output_type=tf.int32)
      # Perform the merge at position best_id.
      tokens = tf.concat(
          [tokens[:best_id], [candidates[best_id]], tokens[best_id + 2:]],
          axis=0)
      # Recompute the merge candidates.
      # Only the neighbors of best_id need to be recomputed.
      empty = tf.zeros([0], dtype=candidates.dtype)
tensorflow.argmin
11,975
import tensorflow as tf for mtype, (classes, sdfs, poses) in enumerate([ (labeled_classes, labeled_sdfs, labeled_poses), (predicted_classes, predicted_sdfs, predicted_poses)]): for i in range(classes.shape[0]): if class_id == classes[i]: sdf = tf.expand_dims(sdfs[i], -1) sdf = sdf * -1.0 # inside positive, outside zero samples_object = centernet_utils.transform_pointcloud( tf.reshape(samples_world, [1, 1, -1, 3]), tf.reshape(poses[2][i], [1, 1, 3]), tf.reshape(poses[0][i], [1, 1, 3, 3]), tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0 samples_object = \ (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5 samples = tf.squeeze(samples_object) interpolated = trilinear.interpolate(sdf, samples) sdf_values += tf.math.sign(tf.nn.relu(interpolated + self.tol)) status2 = False if status2:
tensorflow.reshape
11,976
import tensorflow as tf Returns: A list of tf.Summary values if hook_args.hparams contains the reference file and the translated file. """ decode_hparams = hook_args.decode_hparams if (decode_hparams.decode_reference is None or decode_hparams.decode_to_file is None): return None values = [] bleu = 100 * bleu_hook.bleu_wrapper( decode_hparams.decode_reference, decode_hparams.decode_to_file) values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu)) tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu)) return values def _preprocess_sgm(line, is_sgm): """Preprocessing to strip tags in SGM files.""" if not is_sgm: return line # In SGM files, remove <srcset ...>, <p>, <doc ...> lines. if line.startswith("<srcset") or line.startswith("</srcset"): return "" if line.startswith("<doc") or line.startswith("</doc"): return "" if line.startswith("<p>") or line.startswith("</p>"):
tensorflow.logging.info
11,977
import tensorflow as tf context_emb = tf.concat(context_emb_list, 2) # [num_sentences, max_sentence_length, emb] head_emb = tf.concat(head_emb_list, 2) # [num_sentences, max_sentence_length, emb] context_emb = tf.nn.dropout(context_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] head_emb = tf.nn.dropout(head_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length]
tensorflow.nn.dropout
11,978
import tensorflow as tf
                                     )
            mu = 2 * tf.layers.dense(inputs=l1,
                                     units=action_dim,  # number of hidden units
                                     activation=tf.nn.tanh,
                                     name='mu',
                                     trainable=trainable
                                     )
            sigma = tf.layers.dense(inputs=l1,
                                    units=action_dim,  # output units
                                    activation=tf.nn.softplus,  # get action probabilities
                                    name='sigma',
                                    trainable=trainable
                                    )
            norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params

    def choose_action(self, s):  # decide what to do next
        # s = s[np.newaxis, :]
        s = s.reshape(-1, 84, 84, 3)
        a = self.sess.run(self.sample_op, {self.tfs: s})[0]
        return np.clip(a, ACTION_BOUND[0], ACTION_BOUND[1])

    def get_v(self, s):
        if s.ndim < 4:
tensorflow.distributions.Normal
11,979
import tensorflow as tf else: value_for_priority = qf1_loss_col # Compute the entropy temperature loss # it is used when the entropy coefficient is learned ent_coef_loss, entropy_optimizer = None, None if not isinstance(self.ent_coef, float): ent_coef_loss = -tf.reduce_mean( self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy)*self.weight_ph) entropy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph) # Compute the policy loss # Alternative: policy_kl_loss = tf.reduce_mean(logp_pi - min_qf_pi) policy_kl_loss = tf.reduce_mean((self.ent_coef * logp_pi - min_qf_pi)*self.weight_ph) actor_for_priority = tf.reduce_mean(self.ent_coef * logp_pi - min_qf_pi,1) # NOTE: in the original implementation, they have an additional # regularization loss for the Gaussian parameters # this is not used for now # policy_loss = (policy_kl_loss + policy_regularization_loss) min_q = tf.minimum(dtm_qf1,dtm_qf2) Q_filter = tf.cast((qf1 > min_q)|(qf2 > min_q),tf.float32) #Q_filter_1 = tf.cast(qf1 > min_q,tf.float32) #Q_filter_2 = tf.cast(qf2 > min_q,tf.float32) im_loss1 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter*self.is_demo_ph #im_loss2 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter_2*self.is_demo_ph #actor_loss_di1 = tf.reduce_mean(im_loss1)
tensorflow.reduce_mean
11,980
import tensorflow as tf """Numerically more stable variance computation.""" if mean is None: mean = tf.reduce_mean(input_, axes) res = tf.square(input_ - mean)
tensorflow.reduce_mean
11,981
import tensorflow as tf self.y2 = tf.placeholder(tf.int32, [None, config.test_para_limit],"answer_index2") else: self.c, self.q, self.ch, self.qh, self.y1, self.y2, self.qa_id = batch.get_next() # self.word_unk = tf.get_variable("word_unk", shape = [config.glove_dim], initializer=initializer()) self.word_mat = tf.get_variable("word_mat", initializer=tf.constant( word_mat, dtype=tf.float32), trainable=False) self.char_mat = tf.get_variable( "char_mat", initializer=tf.constant(char_mat, dtype=tf.float32)) self.c_mask = tf.cast(self.c, tf.bool) self.q_mask = tf.cast(self.q, tf.bool) self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1) self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1) if opt:
tensorflow.constant
11,982
import tensorflow as tf with tf.variable_scope('scop'): self.w1=tf.get_variable('w1', [4096,1024],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w2=tf.get_variable('w2', [1024,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.b1 = tf.get_variable('b1', [1024],initializer=tf.constant_initializer(0.0))
tensorflow.contrib.layers.xavier_initializer_conv2d
11,983
import tensorflow as tf with tf.variable_scope("deconv") as scope: s_h, s_w = self.output_height, self.output_width s_h2, s_h4, s_h8, s_h16 = \ int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16) s_w2, s_w4, s_w8, s_w16 = \ int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16) output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin')) output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8]) output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3), [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1')) output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3), [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2')) output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3), [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3')) output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3), [self.batch_size, s_h, s_w, self.c_dim], name='d_h4') scope.reuse_variables() truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin')) truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8]) truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3), [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1')) truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3), [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2')) truthoutput_h3 = lrelu(deconv2d(tf.concat([truthoutput_h2, tgtctx_h1], 3), [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3')) truthoutput_h4 = deconv2d(tf.concat([truthoutput_h3, tgtctx_h0], 3), [self.batch_size, s_h, s_w, self.c_dim], name='d_h4')
tensorflow.concat
11,984
import tensorflow as tf features = { "inputs": observations, "epoch": tf.constant(epoch + 1), "input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32), "input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32), "targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]), "target_action": tf.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32), "target_reward": tf.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
tensorflow.zeros
11,985
import tensorflow as tf flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width])
tensorflow.reshape
11,986
import tensorflow as tf assert(len(x.get_shape()) == 2) assert(len(idx.get_shape()) == 1) idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx y = tf.gather(tf.reshape(x, [-1]), # flatten input idx_flattened) # use flattened indices return y def check_shape(ts,shapes): i = 0 for (t,shape) in zip(ts,shapes): assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape) i += 1 def avg_norm(t): return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1))) def gradient_add(g1, g2, param): print([g1, g2, param.name]) assert (not (g1 is None and g2 is None)), param.name if g1 is None: return g2 elif g2 is None: return g1 else: return g1 + g2 def q_explained_variance(qpred, q): _, vary = tf.nn.moments(q, axes=[0, 1])
tensorflow.square
11,987
import tensorflow as tf encoder_state = tf.concat(encoder_states, 1) return encoder_outputs, encoder_state, new_encoder_input_length def compute_energy(hidden, state, encoder, time=None, input_length=None, prev_weights=None, **kwargs): batch_size = tf.shape(hidden)[0] time_steps = tf.shape(hidden)[1] if encoder.attn_keep_prob is not None: state_noise_shape = [1, tf.shape(state)[1]] if encoder.pervasive_dropout else None state = tf.nn.dropout(state, keep_prob=encoder.attn_keep_prob, noise_shape=state_noise_shape) hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None hidden = tf.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob, noise_shape=hidden_noise_shape)
tensorflow.shape
11,988
import tensorflow as tf return update_scale_expr # Functionality to update the threshold for parameter space noise. update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold)) # Put everything together. deterministic_actions = tf.argmax(q_values_perturbed, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) updates = [ update_eps_expr, tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
tensorflow.stack
11,989
import tensorflow as tf

if tf.test.is_built_with_cuda():

    with tf.device('/cpu:0'):
        a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
        b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])

    with tf.device('/gpu:1'):
        c = tf.matmul(a,b)
        c = tf.reshape(c, [-1])

    with tf.device('/gpu:2'):
        d = tf.matmul(b,a)
        flat_d = tf.reshape(d, [-1])

    combined = tf.multiply(c, flat_d)
    print(sess.run(combined))
tensorflow.matmul
11,990
import tensorflow as tf
      loss_box = tf.reduce_mean(tf.reduce_sum(
        out_loss_box,
        axis=dim
      ))
      return loss_box

  def _add_losses(self, sigma_rpn=3.0):
    with tf.variable_scope('loss_' + self._tag):
      # RPN, class loss
      rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])
      rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])
      # indices of foreground and background anchors
      rpn_select = tf.where(tf.not_equal(rpn_label, -1))
      rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
      rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
      rpn_cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))
tensorflow.reshape
11,991
from tensorflow.contrib.layers.python.layers import utils return mean, variance def build_moving_stats(): return ( tf.identity(self._moving_mean), tf.identity(self._moving_variance), ) mean, variance = utils.smart_cond( use_batch_stats, build_batch_stats, build_moving_stats, ) return mean, variance
tensorflow.contrib.layers.python.layers.utils.smart_cond
11,992
import tensorflow as tf tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0)))) outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)
tensorflow.multiply
11,993
import tensorflow as tf all_top_5_ops.append(results[2]) if self.variable_mgr.retain_tower_updates(device_num): # Retain the Batch Normalization updates operations only from the # first tower. Ideally, we should grab the updates from all towers but # these stats accumulate extremely fast so we can ignore the other # stats from the other towers without significant detriment. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope) staging_delta_ops = list(self.variable_mgr.staging_delta_ops) if not update_ops: update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope) enqueue_ops.append(tf.group(*gpu_copy_stage_ops)) if self.variable_mgr.supports_staged_vars():
tensorflow.get_collection
11,994
import tensorflow as tf self.a_grads = tf.gradients(loss_pg, self.pi_params) self.c_grads = tf.gradients(loss_vf, self.vf_params) self.a_grads, _ = tf.clip_by_global_norm(self.a_grads, 20.0) self.c_grads, _ = tf.clip_by_global_norm(self.c_grads, 20.0) opt = tf.train.AdamOptimizer(self.LR) self.update_a_op = opt.apply_gradients(zip(self.a_grads, self.pi_params)) self.update_c_op = opt.apply_gradients(zip(self.c_grads, self.vf_params)) self.sess.run(tf.global_variables_initializer()) # Tensorboard if summary_dir is not None: self.writer = tf.summary.FileWriter(summary_dir) tf.summary.scalar('Loss/Policy', loss_pg) tf.summary.scalar('Loss/Value', loss_vf)
tensorflow.global_variables_initializer
11,995
import tensorflow as tf shortcut = tf.identity(x) if pool_first: if in_channel == filters: if strides == 1: shortcut = tf.identity(x) else: shortcut= tf.pad(x, tf.constant([[0,0],[1,1],[1,1],[0,0]]), "CONSTANT") shortcut = tf.nn.max_pool(shortcut, [1, strides, strides, 1], [1, strides, strides, 1], 'VALID') else: shortcut = self._conv('shortcut_conv', x, padding='VALID', num_filters=filters, kernel_size=(1, 1), stride=(strides, strides), bias=self.bias) else:
tensorflow.constant
11,996
import tensorflow as tf log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
tensorflow.one_hot
11,997
import tensorflow as tf # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts)) if return_alphas: return output, scores return output class VecAttGRUCell(RNNCell):
tensorflow.expand_dims
11,998
import tensorflow as tf gate_sig = tf.nn.sigmoid(gate) combination_res = gate_sig * new_context_act + (1 - gate_sig) * rep_map # bs,bn,bl,vec with tf.variable_scope('restore_original_length'): combination_res_reshape = tf.reshape(combination_res, [bs, bn * bl, ivec]) # bs,bn*bl,vec output = combination_res_reshape[:, :sl, :] return output
tensorflow.reshape
11,999