Columns:
seed: string, lengths 25 to 2.89k
seed_api: string, lengths 14 to 102
index: int64, values 0 to 14.8k
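Each row below pairs a code snippet (seed) with the fully qualified TensorFlow API it exercises (seed_api) and an integer row id (index). As a minimal sketch of the record layout only, assuming the field names simply mirror the columns above and reusing the first row's values, one record could be represented in Python as:

# Hypothetical illustration of one record's structure; the "seed" string is
# truncated here, and the field names are taken from the column list above.
record = {
    "seed": "import tensorflow as tf\n...",  # raw code snippet, 25 to ~2.89k characters
    "seed_api": "tensorflow.reshape",        # fully qualified API name, 14 to 102 characters
    "index": 1000,                           # int64 row id, ranging from 0 to ~14.8k
}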
import tensorflow as tf

# a_fc2_ = tf.layers.dense(a_fc1_, 128, tf.nn.relu, kernel_initializer=w_initializer,
#                          bias_initializer=b_initializer, name='agent_fc2_t')
# a_fc3_ = tf.layers.dense(a_fc2_, 64, tf.nn.relu, kernel_initializer=w_initializer,
#                          bias_initializer=b_initializer, name='agent_fc3_t')
self.q_next = tf.layers.dense(a_fc1_, self.num_a, kernel_initializer=w_initializer,
                              bias_initializer=b_initializer, name='q_t')
# [batch*n_agents, 1]
self.q_selected = tf.reduce_sum(tf.multiply(self.q_eval, self.a), axis=1)

# ------------------ build mixing_net ------------------
with tf.variable_scope('mixing_net'):
    # [batch, n_agents]
    self.q_concat = tf.reshape(self.q_selected, [-1, self.n_agents])
    self.q_concat_ = tf.reshape(self.q_m_, [-1, self.n_agents])

    with tf.variable_scope('eval_hyper'):
        self.Q_tot = Qmix_mixer(self.q_concat, self.S, self.num_global_s, self.n_agents, 32)

    with tf.variable_scope('target_hyper'):
        self.Q_tot_ = Qmix_mixer(self.q_concat_, self.S_, self.num_global_s, self.n_agents, 32)

    # with tf.variable_scope('layer_mix_eval'):
    #     lin1 = tf.matmul(tf.reshape(self.q_concat, shape=[-1, 1, self.n_agents]), self.w1) + tf.reshape(self.b1, shape=[-1, 1, 32])
    #     a1 = tf.nn.elu(lin1, name='a1')
    #     self.Q_tot = tf.reshape(tf.matmul(a1, self.w2), shape=[-1, 1]) + self.b2
tensorflow.reshape
1,000
from tensorflow.python.ops import clip_ops

  return train_tensor


def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm."""
  gradients, variables = zip(*grads_and_vars)
  clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
  return list(zip(clipped_gradients, variables))


def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
tensorflow.python.ops.clip_ops.clip_by_global_norm
1,001
import tensorflow as tf

        Args:
            data_set: dataset instance to use to access data for training/validation
            weights_from: str, if not None, initializes model from existing weights
            start_epoch: int, epoch number to start training from, e.g. for retraining
                set the epoch number you want to resume training from
            summary_every: int, epoch interval to write summary; higher value means
                lower frequency of summary writing
        """
        with tf.Graph().as_default(), tf.device('/gpu:0'):
            self._setup_model_loss(num_classes=num_classes)
            if self.is_summary:
                self._setup_summaries(self.capped_d_grads, self.capped_g_grads)
            self._setup_misc()
            self._print_info(data_set)
            self._train_semi_supervised(data_set, start_epoch, weights_from,
                                        summary_every, model_name, weights_dir)
tensorflow.device
1,002
import tensorflow as tf tf.cast(num_active_triplets, dtype=tf.float32) / num_total_triplets) active_mining_triplet_ratio = ( tf.cast(num_active_mining_triplets, dtype=tf.float32) / num_total_triplets) active_loss = ( loss / tf.math.maximum(1e-12, tf.stop_gradient(active_triplet_ratio))) active_mining_loss = ( mining_loss / tf.math.maximum(1e-12, tf.stop_gradient(active_mining_triplet_ratio))) tag = 'SemiHardNegative' if use_semi_hard else 'HardNegative' summaries = { # Summaries related to triplet loss computation. 'triplet_loss/Anchor/%s/Distance/Mean' % tag: tf.math.reduce_mean(negative_distances), 'triplet_loss/%s/Loss/All' % tag: loss,
tensorflow.stop_gradient
1,003
import tensorflow as tf tf.summary.scalar("model/value_loss", vf_loss / bs) tf.summary.scalar("model/entropy", entropy / bs)
tensorflow.summary.scalar
1,004
import tensorflow as tf

correct_prediction_action = tf.equal(
    tf.argmax(one_hot_labels_action, 1),
    tf.argmax(self.predictions_action, 1)
)
self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float'))
tf.scalar_summary('accuracy_action', self.accuracy_action)

correct_prediction_arguments = tf.equal(tf.argmax(one_hot_labels_arguments, 2),
                                        tf.argmax(self.predictions_arguments, 2))
self.accuracy_arguments = tf.reduce_mean(tf.cast(correct_prediction_arguments, 'float'))
tf.scalar_summary('accuracy_arguments', self.accuracy_arguments)
tensorflow.scalar_summary
1,005
import tensorflow as tf return final_loss def contra_traj_lossV6(pred, tgt, horizon=12): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1]) tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = tf.cast(tgt_dif > 0, tf.bool) tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = tf.where(geq, pred_dif, -pred_dif) loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif) cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32) final_loss = tf.reduce_mean(loss) return final_loss, cstr_pct def contra_traj_lossV7(pred, tgt, horizon=12, temp=100): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
tensorflow.cast
1,006
import tensorflow as tf strides=self.ff_conv_strides[idx], padding=self.padding) processed_l2_h2 = tf.nn.bias_add( processed_l2_h2, getattr(self, 'ff_bias_%s' % idx)) processed_l2_h2 = self.ff_nl(processed_l2_h2) if self.batch_norm: with tf.variable_scope( 'l3_h2_bn_ff_%s' % idx, reuse=self.scope_reuse) as scope: processed_l2_h2 = tf.contrib.layers.batch_norm( inputs=processed_l2_h2, scale=True, center=True,
tensorflow.variable_scope
1,007
import tensorflow as tf

# train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
valid_pre = tf.reshape(valid_inf, [validnum, classnum])
valid_correct_prediction = tf.equal(tf.argmax(valid_inf, 1), tf.argmax(valid_labels, 1))
valid_accuracy = tf.reduce_mean(tf.cast(valid_correct_prediction, tf.float32))
tensorflow.argmax
1,008
import tensorflow as tf

        mode=tf.estimator.ModeKeys.TRAIN,
        num_mixtures=num_mixtures,
    )
    decoder_output = tf.random_normal([batch, rows, cols, hparams.hidden_size])
    targets = tf.random_uniform([batch, height, width, channels],
                                minval=-1., maxval=1.)
tensorflow.random_normal
1,009
import tensorflow as tf depth_multiplier=2.0) for key in endpoint_keys: original_depth = end_points[key].get_shape().as_list()[3] new_depth = end_points_with_multiplier[key].get_shape().as_list()[3] self.assertEqual(2.0 * original_depth, new_depth) def testRaiseValueErrorWithInvalidDepthMultiplier(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) with self.assertRaises(ValueError): _ = mobilenet_v1.mobilenet_v1( inputs, num_classes, depth_multiplier=-0.1) with self.assertRaises(ValueError): _ = mobilenet_v1.mobilenet_v1( inputs, num_classes, depth_multiplier=0.0) def testHalfSizeImages(self): batch_size = 5 height, width = 112, 112 num_classes = 1000
tensorflow.random_uniform
1,010
from tensorflow.contrib.layers.python.layers import utils variable=self._moving_variance, value=variance, decay=self._decay_rate, name="update_moving_variance").op return update_mean_op, update_variance_op def build_no_ops(): return (tf.no_op(), tf.no_op()) # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_variance_op = utils.smart_cond( is_training, build_update_ops, build_no_ops, ) # Every new connection creates a new op which adds its contribution # to the running average when ran. tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_variance_op)
tensorflow.contrib.layers.python.layers.utils.constant_value
1,011
import tensorflow as tf


class PairwiseGMF(ModelBase):

    def __init__(self, config):
        """
        :param config:
        """
        # super(PairwiseGMF, self).__init__(config)
        self.config = config
        self._activation_fn = tf.nn.relu
        self._embedding_initializers = {
            'embeddings': tf.truncated_normal_initializer(stddev=0.01),
        }
        self._embedding_regularizers = {}
        self._initializers = {
            "w": tf.contrib.layers.xavier_initializer(),
        }
        self._regularizers = {
            'w': tf.contrib.layers.l2_regularizer(config.l2)
        }
tensorflow.truncated_normal_initializer
1,012
import tensorflow as tf

        return z2, fldj

    def _inverse(self, x1, z2, **kwargs):
        params = self.parameterizer(x1)
        mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]
        x2, ildj = gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True))
        return x2, ildj

def log_gaussianize(x, mus, log_sigmas, inverse=tf.constant(False)):
    """
tensorflow.constant
1,013
import tensorflow as tf

          end_log_probs, k=FLAGS.end_n_top)
      end_top_log_probs = tf.reshape(
tensorflow.reshape
1,014
import tensorflow as tf

    else:
        all_top_1_ops = tf.reduce_sum(all_top_1_ops)
tensorflow.reduce_sum
1,015
import tensorflow as tf axis=[1, 2, 3] ) + 1e-8 ) print_obj(func_name, "mixed_norms", mixed_norms) # Get squared difference from target of 1.0. squared_difference = tf.square( x=mixed_norms - 1.0, name="squared_difference" ) print_obj(func_name, "squared_difference", squared_difference)
tensorflow.square
1,016
import tensorflow as tf

        # create new Session for the DeepSurv Class
        self.sess = tf.Session(graph=G)
tensorflow.Session
1,017
import tensorflow as tf where: p = the spectrum weight """ def __init__(self, p=2): self._dim = None self._p = p def _compute(self, x, y): self._dim = x._rank() kernel = np.zeros((tf.size(x), tf.size(y))) for l in tf.range(start=0, limit=tf.size(x), delta=1, dtype=None, name='l_range'): for m in tf.range(start=0, limit=tf.size(y), delta=1, dtype=None, name='m_range'): vx = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=-1) vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=-1) vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1)) vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
tensorflow.size
1,018
import tensorflow as tf

                                         [-1, masked_lm_log_probs.shape[-1]])
        masked_lm_predictions = tf.argmax(
            masked_lm_log_probs, axis=-1, output_type=tf.int32)
        masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
        masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
        masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
tensorflow.reshape
1,019
import tensorflow as tf rep_tensor_comp = tf.concat([rep_tensor, tf.zeros([bs, comp_len, input_dim], tf.float32)], 1) rep_mask_comp = tf.concat([rep_mask, tf.cast(tf.zeros([bs, comp_len], tf.int32), tf.bool)], 1) rep_tensor_split = tf.reshape(rep_tensor_comp, [bs, block_num, block_len, input_dim]) # bs,bn,bl,d rep_mask_split = tf.reshape(rep_mask_comp, [bs, block_num, block_len]) # bs,bn,bl # non-linear
tensorflow.reshape
1,020
import tensorflow as tf zip(capped_grads, variables), global_step=self.global_step) def forward(self): config = self.config N, PL, QL, CL, d, dc, nh = config.batch_size if not self.demo else config.batch_size, self.c_maxlen, self.q_maxlen, config.char_limit, config.hidden, config.char_dim, config.num_heads with tf.variable_scope("Input_Embedding_Layer"): ch_emb = tf.reshape(tf.nn.embedding_lookup( self.char_mat, self.ch), [N * PL, CL, dc]) qh_emb = tf.reshape(tf.nn.embedding_lookup( self.char_mat, self.qh), [N * QL, CL, dc]) ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout) qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout) # Bidaf style conv-highway encoder ch_emb = conv(ch_emb, d, bias = True, activation = tf.nn.relu, kernel_size = 5, name = "char_conv", reuse = None) qh_emb = conv(qh_emb, d, bias = True, activation = tf.nn.relu, kernel_size = 5, name = "char_conv", reuse = True) ch_emb = tf.reduce_max(ch_emb, axis = 1) qh_emb = tf.reduce_max(qh_emb, axis = 1)
tensorflow.nn.dropout
1,021
import tensorflow as tf predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network . labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels. eps: a constant to set upper or lower limit for labels, smoothening factor name: Optional scope/name for op_scope. Returns: A tensor with the log loss. """ with tf.name_scope(name): predictions = tf.to_float(predictions) labels = tf.to_float(labels) predictions = tf.clip_by_value(predictions, eps, 1 - eps) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) loss = -tf.reduce_mean(labels * tf.log(predictions)) return loss def log_loss_tf(predictions, labels, eps=1e-7, weights=1.0, name='log_loss'): """Define a log loss.
tensorflow.to_float
1,022
import tensorflow as tf N = tf.cast(tf.shape(X)[0], tf.float32) D_int = tf.cast(D, tf.int32) N_int = tf.cast(N, tf.int32) if y is None: y = silverman_rule_of_thumb(N) YDistr = tf.contrib.distributions.MultivariateNormalDiag(loc=tf.zeros(D_int, tf.float32), scale_diag=tf.ones(D_int, tf.float32)) Y = YDistr.sample(N_int) T = 1.0/(2.0*N*tf.sqrt(m.pi*y)) A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2) A = tf.reduce_sum(phi_sampling(A0/(4*y), D)) B0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(Y, 0), tf.expand_dims(Y, 1)), axis=2) B = tf.reduce_sum(phi_sampling(B0/(4*y), D)) C0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(Y, 1)), axis=2) C = tf.reduce_sum(phi_sampling(C0/(4*y), D)) return T*(A + B - 2*C)
tensorflow.expand_dims
1,023
import tensorflow as tf reader = tf.TFRecordReader() _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example( serialized_example, features={ 'wav_raw': tf.FixedLenFeature([], tf.string), 'noisy_raw': tf.FixedLenFeature([], tf.string), }) wave = tf.decode_raw(features['wav_raw'], tf.int32) wave.set_shape(canvas_size) wave = (2./65535.) * tf.cast((wave - 32767), tf.float32) + 1. noisy = tf.decode_raw(features['noisy_raw'], tf.int32) noisy.set_shape(canvas_size) noisy = (2./65535.) * tf.cast((noisy - 32767), tf.float32) + 1. if preemph > 0: wave = tf.cast(pre_emph(wave, preemph), tf.float32) noisy = tf.cast(pre_emph(noisy, preemph), tf.float32) return wave, noisy
tensorflow.cast
1,024
import tensorflow as tf elif type=='mix_scramble': self.augment = self.mix_scramble elif type=='blur': self.augment = self.gaussian_blur self.pointwise_filter = tf.eye(3, batch_shape=[1, 1]) elif type=='high_low_pass': self.augment = self.high_low_pass self.kernel = self.gaussian_kernel(size,mean,std) self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1]) self.pointwise_filter = tf.eye(3, batch_shape=[1, 1]) self.paddings = [[size,size],[size,size],[0,0]] elif type=='no_op': self.augment = self.no_op def gaussian_kernel(self,size,mean,std): """Makes 2D gaussian Kernel for convolution.""" d = tfp.distributions.Normal(mean, std) vals = d.prob(tf.range(start = -size, limit = size + 1, dtype = tf.float32))
tensorflow.eye
1,025
import tensorflow as tf dual_rate_factor=dual_rate_factor) # Create biases with shape [1, num_labels, num_anchors]. biases = tf.contrib.framework.model_variable( name='biases', shape=[1, num_labels, num_anchors], dtype=logits.dtype, initializer=tf.zeros_initializer(), collections=variables_collections, trainable=trainable) # Maybe create label_priors. label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections) label_priors = tf.reshape(label_priors, [1, num_labels, 1]) # Expand logits, labels, and weights to shape [batch_size, num_labels, 1]. logits = tf.expand_dims(logits, 2) labels = tf.expand_dims(labels, 2) weights = tf.expand_dims(weights, 2) # Calculate weighted loss and other outputs. The log(2.0) term corrects for # logloss not being an upper bound on the indicator function. loss = weights * losses_utils.weighted_surrogate_loss( labels, logits + biases, surrogate_type=surrogate_type, positive_weights=1.0 + lambdas * (1.0 - precision_values), negative_weights=lambdas * precision_values) maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
tensorflow.expand_dims
1,026
import tensorflow as tf


def _meshgrid(depth, height, width, z_near, z_far):
    with tf.variable_scope('_meshgrid'):
        x_t = tf.reshape(
tensorflow.variable_scope
1,027
import tensorflow as tf os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) graph = tf.Graph() with graph.as_default(): tf.set_random_seed(self.seed) self.user_id = tf.placeholder(shape=[None, ], dtype=tf.int32, name='user_id') self.item_id = tf.placeholder(shape=[None, ], dtype=tf.int32, name='item_id') self.labels = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='labels') self.interaction = gmf(uid=self.user_id, iid=self.item_id, num_users=self.train_set.num_users, num_items=self.train_set.num_items, emb_size=self.num_factors, reg_user=self.regs[0], reg_item=self.regs[1], seed=self.seed)
tensorflow.placeholder
1,028
import tensorflow as tf save.save(sess, filepath, global_step=1) inc.eval() save.save(sess, filepath, global_step=2) with self.test_session() as sess: # Build a new graph with different initialization. v0 = tf.Variable(-1.0) # Create a new saver. save = tf.train.Saver({"v0": v0}) tf.initialize_all_variables().run() # Get the most recent checkpoint name from the training history file. name = tf.train.latest_checkpoint(traindir) self.assertIsNotNone(name) # Restore "v0" from that checkpoint. save.restore(sess, name) self.assertEqual(v0.eval(), 2.0) class CheckpointStateTest(tf.test.TestCase): def _TestDir(self, test_name): test_dir = os.path.join(self.get_temp_dir(), test_name) if os.path.exists(test_dir):
tensorflow.train.latest_checkpoint
1,029
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc

    x = tf.random_normal([hparams.n_samples, hparams.x_dim], dtype=tf.float32)
    dynamics = l2hmc.Dynamics(
        x_dim=hparams.x_dim,
tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.Dynamics
1,030
import tensorflow as tf Returns: a Tensor of timing signals [batch, seq_len, channels] """ num_timescales = channels // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = ( tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims( tf.expand_dims(inv_timescales, 0), 0)) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]]) return signal def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02,
tensorflow.to_float
1,031
from tensorflow.contrib.framework.python.ops import variables as contrib_variables _add_hidden_layer_summary(logits, scope.name) def _train_op_fn(loss): """Returns the op to optimize the loss.""" return optimizers.optimize_loss( loss=loss, global_step=contrib_variables.get_global_step(), learning_rate=_LEARNING_RATE, optimizer=_get_optimizer(optimizer), gradient_multipliers=( dnn_linear_combined._extract_embedding_lr_multipliers( # pylint: disable=protected-access embedding_lr_multipliers, parent_scope, input_layer_scope)),
tensorflow.contrib.framework.python.ops.variables.get_global_step
1,032
import tensorflow as tf

        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
tensorflow.cond
1,033
import tensorflow as tf tf.io.FixedLenFeature((), tf.string), 'image/height': tf.io.FixedLenFeature((), tf.int64), 'image/width': tf.io.FixedLenFeature((), tf.int64), 'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32), 'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32), 'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32), 'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32), 'image/object/class/label': tf.io.VarLenFeature(tf.int64), 'image/object/area':
tensorflow.io.VarLenFeature
1,034
import tensorflow as tf return roc_sc, auprc_score,accuracy,precision,recall,f ,apk_sc def construct_placeholders(edge_types): placeholders = { 'batch': tf.placeholder(tf.int32, name='batch'), 'batch_neg': tf.placeholder(tf.int32, name='batch_neg'), 'batch_node':tf.placeholder(tf.int32,name = 'batch_node'), 'adj_min_batch': tf.placeholder(tf.float32,name='adj_min_batch'), 'sim_min_batch': tf.placeholder(tf.float32,name='sim_min_batch'), 'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'), 'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'), 'batch_col_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_col_edge_type'), 'degrees': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()),
tensorflow.placeholder
1,035
import tensorflow as tf both summaries and checkpoints.""") tf.flags.DEFINE_integer('save_summaries_steps', 0, """How often to save summaries for trained models. Pass 0 to disable summaries.""") tf.flags.DEFINE_integer('save_model_secs', 0, """How often to save trained models. Pass 0 to disable checkpoints""") tf.flags.DEFINE_string('train_dir', None, """Path to session checkpoints.""") tf.flags.DEFINE_string('eval_dir', '/tmp/tf_cnn_benchmarks/eval', """Directory where to write eval event logs.""") tf.flags.DEFINE_string('pretrain_dir', None, """Path to pretrained session checkpoints.""") tf.flags.DEFINE_string('result_storage', None, """Specifies storage option for benchmark results. None means results won't be stored. 'cbuild_benchmark_datastore' means results will be stored in cbuild datastore (note: this option requires special pemissions and meant to be used from cbuilds).""") FLAGS = tf.flags.FLAGS log_fn = print # tf.logging.info class GlobalStepWatcher(threading.Thread): """A helper class for globe_step.
tensorflow.flags.DEFINE_string
1,036
import tensorflow as tf b = tf.reshape(b, bshape) return tf.nn.conv2d( x, w, strides=strides, padding=pad, data_format=data_format) + b def fc(self, x, scope, nh, *, init_scale=1.0, init_bias=0.0): with tf.variable_scope(scope): nin = x.get_shape()[1].value w = tf.get_variable( "w", [nin, nh], initializer=self.ortho_init(init_scale)) b = tf.get_variable( "b", [nh], initializer=tf.constant_initializer(init_bias)) return tf.matmul(x, w) + b def ortho_init(self, scale=1.0): def _ortho_init(shape, dtype, partition_info=None): # lasagne ortho init for tf shape = tuple(shape) if len(shape) == 2: flat_shape = shape elif len(shape) == 4: # assumes NHWC flat_shape = (np.prod(shape[:-1]), shape[-1]) else: raise NotImplementedError a = np.random.normal(0.0, 1.0, flat_shape)
tensorflow.constant_initializer
1,037
import tensorflow as tf def _add_avg_pool_3x3_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train): filter_size = 3 stride = 2 if is_reduction else 1 with tf.variable_scope('avg_pool_3x3_op'): X = tf.nn.avg_pool(X, ksize=(1, filter_size, filter_size, 1), strides=[1, stride, stride, 1], padding='SAME') X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check
tensorflow.variable_scope
1,038
from tensorflow.contrib.metrics.python.ops import set_ops class_id: Class for which we want binary metrics. weights: `Tensor` whose shape is broadcastable to the the first [D1, ... DN] dimensions of `predictions_idx` and `labels`. name: Name of operation. Returns: A [D1, ... DN] `Tensor` of true positive counts. """ with ops.name_scope(name, 'true_positives', (predictions_idx, labels)): labels, predictions_idx = _maybe_select_class_id( labels, predictions_idx, class_id) tp = set_ops.set_size(set_ops.set_intersection(predictions_idx, labels)) tp = math_ops.to_double(tp) if weights is not None: weights = math_ops.to_double(weights) tp = math_ops.mul(tp, weights) return tp def _streaming_sparse_true_positive_at_k(predictions_idx, labels, k=None,
tensorflow.contrib.metrics.python.ops.set_ops.set_intersection
1,039
import tensorflow as tf

        kernel = tf.get_variable("weights", [kH, kW, nIn, nOut],
                                 initializer=tf.truncated_normal_initializer(stddev=1e-1),
                                 regularizer=l2_regularizer,
                                 dtype=inpOp.dtype)
        cnv = tf.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType)

        if use_batch_norm:
tensorflow.nn.conv2d
1,040
import tensorflow as tf HaaInv = tf.matrix_inverse(Haa) # The two terms 'term1' and 'term2' which come from normalizers of the # 1. Original policy distribution # 2. The distribution after completing the square sigma = tf.matrix_inverse(prec) term1 = -0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * sigma)) if self.beta == 0: term2 = 0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * param_eta * HaaInv)) else: term2 = 0.5 * (param_eta + param_omega) * tf.log(tf.matrix_determinant(2 * np.pi * (param_eta + param_omega) * HaaInv)) dual = param_eta * self.epsilon - param_omega * beta + \ term1 + term2 + tf.reduce_mean( 0.5 * (tf.reduce_sum(tf.matmul(ha, HaaInv) * ha, axis=1) - hss))
tensorflow.matrix_determinant
1,041
import tensorflow as tf

    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        with tf.name_scope("Train"):
tensorflow.random_uniform_initializer
1,042
from tensorflow.python.framework import ops predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) radial_diffs = math_ops.mul(predictions, labels) radial_diffs = math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,], keep_dims=True) mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None, name or 'mean_cosine_distance') mean_distance = math_ops.sub(1.0, mean_distance) update_op = math_ops.sub(1.0, update_op) if metrics_collections: ops.add_to_collections(metrics_collections, mean_distance) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_distance, update_op @deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask') def streaming_percentage_less(values, threshold, ignore_mask=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the percentage of values less than the given threshold.
tensorflow.python.framework.ops.add_to_collections
1,043
import tensorflow as tf

    else:
        inputs = tf.split(inputdata, split, channel_axis)
tensorflow.split
1,044
import tensorflow as tf

            - one_hot_labels_action * tf.log(tf.clip_by_value(self.predictions_action, 1e-10, 1.0)),
            name='loss'
        )
        loss_arguments = tf.reduce_mean(
            - one_hot_labels_arguments * tf.log(tf.clip_by_value(self.predictions_arguments, 1e-10, 1.0)),
            name='loss'
        )
        self.loss = loss_action + loss_arguments

        tf.scalar_summary('loss', self.loss)

        with tf.name_scope('accuracy'):
            correct_prediction_action = tf.equal(
                tf.argmax(one_hot_labels_action, 1),
                tf.argmax(self.predictions_action, 1)
            )
            self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float'))
            tf.scalar_summary('accuracy_action', self.accuracy_action)
tensorflow.scalar_summary
1,045
import tensorflow as tf self.initial_learning_rate = initial_learning_rate self.maximal_learning_rate = maximal_learning_rate self.step_size = step_size self.scale_fn = scale_fn self.scale_mode = scale_mode self.name = name def __call__(self, step): with tf.name_scope(self.name or "CyclicalLearningRate"): initial_learning_rate = tf.convert_to_tensor( self.initial_learning_rate, name="initial_learning_rate" ) dtype = initial_learning_rate.dtype maximal_learning_rate = tf.cast(self.maximal_learning_rate, dtype) step_size = tf.cast(self.step_size, dtype) cycle = tf.floor(1 + step / (2 * step_size)) x = tf.abs(step / step_size - 2 * cycle + 1)
tensorflow.convert_to_tensor
1,046
import tensorflow as tf from google.protobuf import text_format import tensorflow as tf import preprocessing import datasets NUM_TEST_IMAGES = 50000 def load_graph(model_file): graph = tf.Graph() graph_def = tf.compat.v1.GraphDef() import os file_ext = os.path.splitext(model_file)[1] with open(model_file, "rb") as f: if file_ext == '.pbtxt': text_format.Merge(f.read(), graph_def) else: graph_def.ParseFromString(f.read())
tensorflow.compat.v1.GraphDef
1,047
import tensorflow as tf nms_classes_expected1 = tf.constant([1, 2, 2], dtype=tf.int32) (nms_masks2, nms_scores2, nms_classes2) = isu.instance_non_maximum_suppression_2d_scores( masks, scores, 3, min_score_thresh=0.65, min_iou_thresh=0.5, is_class_agnostic=False) nms_masks_expected2 = tf.stack([mask2, mask0, mask5, mask4]) nms_scores_expected2 = tf.constant([0.95, 1.0, 0.8, 0.7], dtype=tf.float32) nms_classes_expected2 = tf.constant([0, 1, 2, 2], dtype=tf.int32) self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy()) self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy()) self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy()) self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy()) self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy()) self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy())
tensorflow.stack
1,048
import tensorflow as tf std = tf.sqrt(var+self.epsilon) return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)] require_init = tf.reduce_any(tf.is_nan(self.g)) init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b]) with tf.control_dependencies(init_ops): w = tf.reshape(self.g,[1,1,1,tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v,axis=[0,1,2]) return tf.nn.bias_add( tf.nn.conv2d(input_var, w,data_format='NHWC', strides=self.strides, padding=self.padding), self.b,data_format='NHWC',name=name) def get_variables(self): #TODO: self.v should be l2-normalized or not? / currently not. return {'v':self.v,'b':self.b,'g':self.g} class DepthConv2d(object) :
tensorflow.nn.conv2d
1,049
import tensorflow as tf

        tf.summary.scalar('policy_gradient_loss', loss_policy)
        tf.summary.scalar('value_function_loss', loss_q)
tensorflow.summary.scalar
1,050
import tensorflow as tf for w, filter_size in enumerate(encoder.convolutions, 1): filter_ = get_variable('filter_{}'.format(w), [w, encoder.embedding_size, filter_size]) if w > 1: right = (w - 1) // 2 left = (w - 1) - right pad_right = tf.tile(pad, [1, right, 1]) pad_left = tf.tile(pad, [1, left, 1]) inputs_ = tf.concat([pad_left, encoder_inputs_, pad_right], axis=1) else: inputs_ = encoder_inputs_ inputs_ = tf.nn.convolution(inputs_, filter=filter_, padding='VALID') inputs.append(inputs_) encoder_inputs_ = tf.concat(inputs, axis=2) # if encoder.convolution_activation.lower() == 'relu': encoder_inputs_ = tf.nn.relu(encoder_inputs_) if encoder.maxout_stride: if encoder.binary: raise NotImplementedError stride = encoder.maxout_stride k = tf.to_int32(tf.ceil(time_steps / stride) * stride) - time_steps # TODO: simpler pad = tf.zeros([batch_size, k, tf.shape(encoder_inputs_)[2]]) encoder_inputs_ = tf.concat([encoder_inputs_, pad], axis=1) encoder_inputs_ = tf.nn.pool(encoder_inputs_, window_shape=[stride], pooling_type='MAX', padding='VALID', strides=[stride]) encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / stride))
tensorflow.concat
1,051
import tensorflow as tf tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0)) x1_valid = tf.to_float( tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0)) y0_valid = tf.to_float( tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0)) y1_valid = tf.to_float( tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0)) z0_valid = tf.to_float( tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0)) z1_valid = tf.to_float( tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0)) w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) * (z1_f - z) * x1_valid * y1_valid * z1_valid), 1) w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) * (z1_f - z) * x0_valid * y1_valid * z1_valid), 1) w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) * (z1_f - z) * x1_valid * y0_valid * z1_valid),
tensorflow.less_equal
1,052
import tensorflow as tf inputs = tf.transpose(inputs, [0, 3, 1, 2]) inputs = conv2d_fixed_padding( inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size, strides=self.conv_stride, data_format=self.data_format) inputs = tf.identity(inputs, 'initial_conv') # We do not include batch normalization or activation functions in V2 # for the initial conv1 because the first ResNet unit will perform these # for both the shortcut and non-shortcut paths as part of the first
tensorflow.identity
1,053
import tensorflow as tf detections_in_img = self.drawer.draw_boxes_with_categories_and_scores( img_batch=img, boxes=outputs[0], scores=outputs[1], labels=outputs[2], method=1) tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img) loss_dict = outputs[-1] total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu) if i == num_gpu - 1: regularization_losses = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) # weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses()) total_losses = total_losses + tf.add_n(regularization_losses) tf.get_variable_scope().reuse_variables() grads = optimizer.compute_gradients(total_losses) if cfgs.GRADIENT_CLIPPING_BY_NORM is not None: grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM) tower_grads.append(grads) self.log_printer(r3det_gwd, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph)
tensorflow.get_collection
1,054
import tensorflow as tf output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("model_file") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("output_dir") tf.app.run()
tensorflow.app.run
1,055
import tensorflow as tf

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    if is_training:
      d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
      if use_hvd:
        d = d.shard(hvd.size(), hvd.rank())  # TODO only for Horovod, shard to mimic single_GPU = False
tensorflow.constant
1,056
import tensorflow as tf saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") with self.test_session(graph=tf.Graph()) as sess: # Creates an inference graph. # Hidden 1 images = tf.constant(1.2, tf.float32, shape=[100, 28]) with tf.name_scope("hidden1"): weights = tf.Variable( tf.truncated_normal([28, 128], stddev=1.0 / math.sqrt(float(28))), name="weights") biases = tf.Variable(tf.zeros([128]), name="biases") hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases) # Hidden 2 with tf.name_scope("hidden2"): weights = tf.Variable( tf.truncated_normal([128, 32], stddev=1.0 / math.sqrt(float(128))), name="weights") biases = tf.Variable(tf.zeros([32]), name="biases") hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases) # Linear with tf.name_scope("softmax_linear"):
tensorflow.matmul
1,057
import tensorflow as tf

  return total_loss, per_example_loss, logits


def get_qa_outputs(FLAGS, features, is_training):
  """Loss for downstream span-extraction QA tasks such as SQuAD."""
  inp = tf.transpose(features["input_ids"], [1, 0])
  seg_id = tf.transpose(features["segment_ids"], [1, 0])
  inp_mask = tf.transpose(features["input_mask"], [1, 0])
  cls_index = tf.reshape(features["cls_index"], [-1])

  seq_len = tf.shape(inp)[0]
tensorflow.transpose
1,058
import tensorflow as tf # Inverse of a softplus function, so that the value of the standard deviation # will be equal to what the user specifies, but we can still enforce positivity # by wrapping the standard deviation in the softplus function. # standard_dev = tf.log(tf.exp(standard_dev) - 1.0) * tf.ones(shape) # it's important to initialize variances with care, otherwise the model takes too long to converge sigma_min = 1-1/10 sigma_max = 1+1/10 rho_max_init = tf.log(tf.exp(sigma_max) - 1.0) rho_min_init = tf.log(tf.exp(sigma_min) - 1.0) std_init = tf.random_uniform_initializer(rho_min_init, rho_max_init) # Initialize the mean mean = tf.get_variable(name + "_mean", shape, dtype=dtype) # Initialize the standard deviation pre_sigma = tf.get_variable(name + "_standard_deviation", shape, initializer=std_init, dtype=dtype) standard_deviation = tf.nn.softplus(pre_sigma) + 1e-5 # The famous reparametrization formula for the factorized Gaussian noise = tf.random_normal([num_samples] + shape, 0.0, 1.0, dtype) weights = mean + standard_deviation * noise return weights, mean, standard_deviation, pre_sigma, noise
tensorflow.get_variable
1,059
import tensorflow as tf activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the
tensorflow.nn.bias_add
1,060
import tensorflow as tf filter_regex = get_vocab_newline_characters_regex(x.dtype, file_format) if vocab_ordering_type == _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION: labels = tf.reshape(labels, [-1]) reduced_batch = tf_utils.reduce_batch_weighted_cooccurrences( x, labels, weights, filter_regex=filter_regex) return [ reduced_batch.unique_x, reduced_batch.summed_weights_per_x, reduced_batch.summed_positive_per_x_and_y, reduced_batch.counts_per_x ] elif vocab_ordering_type == _VocabOrderingType.MUTUAL_INFORMATION: labels = tf.reshape(labels, [-1]) reduced_batch = tf_utils.reduce_batch_weighted_cooccurrences( x, labels, weights, filter_regex=filter_regex) return [ reduced_batch.unique_x, reduced_batch.summed_positive_per_x_and_y, reduced_batch.counts_per_x ] elif vocab_ordering_type == _VocabOrderingType.WEIGHTED_FREQUENCY: reduced_batch = tf_utils.reduce_batch_weighted_counts( x, weights, filter_regex=filter_regex)
tensorflow.reshape
1,061
import tensorflow as tf

    Returns:
    cost -- cost function
    """
    L = len(activation)    # number of layers
    m = Y.shape[1]         # number of training examples
    last = activation[L-1]
    labels = tf.transpose(Y)
    if last == 'sigmoid' or last == 'softmax':
        # use cross entropy loss function
        logits = tf.transpose(betan*zn[1])
        cost = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(logits=logits, multi_class_labels=labels))
    elif last == 'esp' or last == 'relu':
        # use minimum squared error (L2 loss)
        out = tf.transpose(zn[0])
        cost = tf.reduce_mean(tf.squared_difference(out, labels))/2
    return cost

#------------Hessian-------------------
def flatten(tensor):
    '''
tensorflow.transpose
1,062
import tensorflow as tf

  image_width = static_shape.get_width(image_shape)
  if image_height is None or image_width is None:
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                       tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
        ['image size must be >= {} in both height and width.'.format(min_dim)])
    with tf.control_dependencies([shape_assert]):
      return tf.identity(image_tensor)
tensorflow.shape
1,063
import tensorflow as tf @registry.register_model class DenseBitwiseCategoricalPolicy(PolicyBase): """Dense network with bitwise input and categorical output.""" def body(self, features): observations = features["inputs"] flat_x = tf.layers.flatten(observations) with tf.variable_scope("dense_bitwise"): flat_x = discretization.int_to_bit_embed(flat_x, 8, 32) x = tf.layers.dense(flat_x, 256, activation=tf.nn.relu) x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu) logits = tf.layers.dense(x, self.hparams.problem.num_actions) value = tf.layers.dense(x, 1)[..., 0] return {"target_policy": logits, "target_value": value} @registry.register_model class RandomPolicy(PolicyBase): """Random policy with categorical output.""" def body(self, features): observations = features["inputs"]
tensorflow.layers.dense
1,064
from tensorflow.python.ops import math_ops mean_distance: A tensor representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) radial_diffs = math_ops.mul(predictions, labels) radial_diffs = math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,], keep_dims=True) mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None, name or 'mean_cosine_distance') mean_distance = math_ops.sub(1.0, mean_distance) update_op = math_ops.sub(1.0, update_op) if metrics_collections: ops.add_to_collections(metrics_collections, mean_distance)
tensorflow.python.ops.math_ops.mul
1,065
from tensorflow.python.framework import ops

      clipped_grads.append(None)
    elif isinstance(grad, ops.IndexedSlices):
      clipped_grads.append(
          ops.IndexedSlices(grad.values * factor, grad.indices,
                            grad.dense_shape))
    else:
tensorflow.python.framework.ops.IndexedSlices
1,066
import tensorflow as tf

    num_sam = tools.shape(batch)[0]
    index = tf.range(num_sam)
    tgt1 = tf.slice(batch, [0, 1], [num_sam, 1])
    pred1 = tf.slice(batch, [0, 0], [num_sam, 1])

    def uniform():
tensorflow.slice
1,067
import tensorflow as tf

            scope.reuse_variables()
            truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
            truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
            truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3),
                [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
            truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3),
tensorflow.reshape
1,068
import tensorflow as tf

                        out[1].numpy())

  def testReducer(self):
    with tf.device(self._test_device):
      batch_size = 3
      size = 10
tensorflow.device
1,069
import tensorflow as tf with tf.variable_scope("Context_to_Query_Attention_Layer"): C = tf.tile(tf.expand_dims(self.c_embed_encoding, 2), [1, 1, self.max_q_len, 1]) Q = tf.tile(tf.expand_dims(self.q_embed_encoding, 1), [1, self.max_p_len, 1, 1]) S = trilinear([C, Q, C * Q], input_keep_prob=1.0 - self.dropout) mask_q = tf.expand_dims(self.q_mask, 1) S_ = tf.nn.softmax(mask_logits(S, mask=mask_q)) mask_c = tf.expand_dims(self.c_mask, 2) S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask=mask_c), dim=1), (0, 2, 1)) self.c2q = tf.matmul(S_, self.q_embed_encoding) self.q2c = tf.matmul(tf.matmul(S_, S_T), self.c_embed_encoding) self.attention_outputs = [self.c_embed_encoding, self.c2q, self.c_embed_encoding * self.c2q, self.c_embed_encoding * self.q2c]
tensorflow.expand_dims
1,070
import tensorflow as tf # Add regularization loss reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) reg_loss = reg_decay * tf.add_n(reg_losses) self._mark_for_monitoring('reg_loss', reg_loss) # Add loss from auxiliary logits aux_loss = tf.constant(0, dtype=tf.float32) for aux_logits in aux_logits_list: log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=aux_logits, labels=classes) aux_loss += aux_loss_mul * tf.reduce_mean(log_probs) total_loss = loss + reg_loss + aux_loss return total_loss def _add_global_avg_pool(self, X, in_w, in_h, in_ch): X = tf.nn.relu(X) X = tf.reduce_mean(X, (1, 2)) X = tf.reshape(X, (-1, in_ch)) # Sanity shape check
tensorflow.reduce_mean
1,071
import tensorflow as tf ksize_list, strides, 'VALID', is_training, data_format=data_format, no_activation=no_activation) if use_var: y = sbnet_module.sparse_scatter_var( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_boffset=tf.constant([0, 0], dtype=tf.int32), add=True, transpose=transpose) else: y = sbnet_module.sparse_scatter( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_boffset=tf.constant([0, 0], dtype=tf.int32), add=True,
tensorflow.constant
1,072
import tensorflow as tf return [train]+ops def mgpu_predict(*xs): gpu_ops = [] xs = (tf.split(x, n_gpu, 0) for x in xs) for i, xs in enumerate(zip(*xs)): with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=True): clf_logits, clf_losses, lm_losses = model(*xs, train=False, reuse=True) gpu_ops.append([clf_logits, clf_losses, lm_losses]) ops = [tf.concat(op, 0) for op in zip(*gpu_ops)] return ops def transform_roc(X1, X2, X3): n_batch = len(X1) xmb = np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32) mmb = np.zeros((n_batch, 2, n_ctx), dtype=np.float32) start = encoder['_start_'] delimiter = encoder['_delimiter_']
tensorflow.concat
1,073
import tensorflow as tf

# All the with blocks in add_layer below are there for TensorBoard
def add_layer(inputs, in_size, out_size, activation_function=None, nameScope="layer"):
    # add one more layer and return the output of this layer
    with tf.name_scope(nameScope):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
        if activation_function is None:
            outputs = Wx_plus_b
tensorflow.random_normal
1,074
import tensorflow as tf

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    mask = tf.equal(mask, tf.ones_like(mask))
    hidden_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    input_size = query.get_shape().as_list()[-1]
tensorflow.ones_like
1,075
import tensorflow as tf 'Number of mini-batches to train on. (default: %(default)d)') tf.app.flags.DEFINE_integer('log-frequency', 10, 'Number of steps between logging results to the console and saving summaries (default: %(default)d)') tf.app.flags.DEFINE_integer('save-model', 1000, 'Number of steps between model saves (default: %(default)d)') # Optimisation hyperparameters tf.app.flags.DEFINE_integer('batch-size', 256, 'Number of examples per mini-batch (default: %(default)d)') tf.app.flags.DEFINE_float('learning-rate', 1e-4, 'Learning rate (default: %(default)d)') tf.app.flags.DEFINE_integer('img-width', 32, 'Image width (default: %(default)d)') tf.app.flags.DEFINE_integer('img-height', 32, 'Image height (default: %(default)d)') tf.app.flags.DEFINE_integer('img-channels', 3, 'Image channels (default: %(default)d)') tf.app.flags.DEFINE_integer('num-classes', 10, 'Number of classes (default: %(default)d)') tf.app.flags.DEFINE_string('log-dir', '{cwd}/logs/'.format(cwd=os.getcwd()), 'Directory where to write event logs and checkpoint. (default: %(default)s)') run_log_dir = os.path.join(FLAGS.log_dir, 'exp_BN_bs_{bs}_lr_{lr}_aug_flip_brightness'.format(bs=FLAGS.batch_size, lr=FLAGS.learning_rate)) def weight_variable(shape):
tensorflow.app.flags.DEFINE_integer
1,076
import tensorflow as tf stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1]) # 3 x (bx2) coor = tf.reshape(tf.matmul(xys, stn), [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2]) coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords') # b h w 2 sampled = ImageSample('warp', [image, coor], borderMode='constant') return sampled with argscope([Conv2D, FullyConnected], nl=tf.nn.relu): with tf.variable_scope('STN1'): sampled1 = get_stn(image) with tf.variable_scope('STN2'): sampled2 = get_stn(image) # For visualization in tensorboard with tf.name_scope('visualization'): padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1) transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
tensorflow.variable_scope
1,077
import tensorflow as tf import numpy as np FLAGS = tf.app.flags.FLAGS # Basic model parameters. tf.app.flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.') tf.app.flags.DEFINE_string('pm', '66661', 'pooling scheme across scales. Each number specifies the number of scales remaining at each layer. The first number has to be the same as used in --num_scales.') tf.app.flags.DEFINE_integer('conv_kernel', 5, 'Size of convolutional kernel') tf.app.flags.DEFINE_integer('pool_kernel', 3, 'Size of spatial pooling kernel')
tensorflow.app.flags.DEFINE_float
1,078
import tensorflow as tf

        init_scale = .01
        m_init, v_init = tf.nn.moments(x, [0, 1, 2])
        scale_init = init_scale / tf.sqrt(v_init + 1e-10)
        with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):
            x = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x - tf.reshape(m_init, [1, 1, 1, num_filters]))
    else:
        V = maybe_avg(V)
tensorflow.reshape
1,079
from tensorflow.python.framework import ops


@ops.RegisterShape("MaxPoolWithArgmax")
tensorflow.python.framework.ops.RegisterShape
1,080
from tensorflow.contrib.learn.python.learn.estimators import test_data dnn_hidden_units=(3, 3), dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1)) input_fn = test_data.iris_input_logistic_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) self._assertSingleClassMetrics(metrics) def benchmarkMultiClass(self): iris = base.load_iris() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, linear_feature_columns=(bucketized_feature,), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3)) input_fn = test_data.iris_input_multiclass_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) self._assertCommonMetrics(metrics)
tensorflow.contrib.learn.python.learn.estimators.test_data.get_quantile_based_buckets
1,081
from tensorflow.python.ops import array_ops ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ with variable_scope.variable_scope(name, 'mean', [values, weights]): total = _create_local('total_tensor', shape=values.get_shape()) count = _create_local('count_tensor', shape=values.get_shape()) num_values = array_ops.ones_like(values) if weights is not None: weights = math_ops.to_float(weights) values = math_ops.mul(values, weights) num_values = math_ops.mul(num_values, weights) total_compute_op = state_ops.assign_add(total, values) count_compute_op = state_ops.assign_add(count, num_values)
tensorflow.python.ops.array_ops.ones_like
1,082
import tensorflow as tf active_mining_triplet_ratio = ( tf.cast(num_active_mining_triplets, dtype=tf.float32) / num_total_triplets) active_loss = ( loss / tf.math.maximum(1e-12, tf.stop_gradient(active_triplet_ratio))) active_mining_loss = ( mining_loss / tf.math.maximum(1e-12, tf.stop_gradient(active_mining_triplet_ratio))) tag = 'SemiHardNegative' if use_semi_hard else 'HardNegative' summaries = { # Summaries related to triplet loss computation. 'triplet_loss/Anchor/%s/Distance/Mean' % tag: tf.math.reduce_mean(negative_distances), 'triplet_loss/%s/Loss/All' % tag: loss, 'triplet_loss/%s/Loss/Active' % tag: active_loss, 'triplet_loss/%s/ActiveTripletNum' % tag: num_active_triplets, 'triplet_loss/%s/ActiveTripletRatio' % tag: active_triplet_ratio, # Summaries related to triplet mining. 'triplet_mining/Anchor/%s/Distance/Mean' % tag: tf.math.reduce_mean(negative_mining_distances), 'triplet_mining/%s/Loss/All' % tag:
tensorflow.math.reduce_mean
1,083
import tensorflow as tf self.rgb_channels = rgb_channels self.on_gpu = on_gpu def post_init(self): import tensorflow as tf from i3d_cores.i3d import InceptionI3d import os os.environ['CUDA_VISIBLE_DEVICES'] = str(get_first_available_gpu()) with tf.Graph().as_default(): self.rgb_images_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, self.num_frame_per_clib, self.frame_size_x, self.frame_size_y, self.rgb_channels)) is_training = False with tf.variable_scope('RGB'): self.feature, _ = InceptionI3d( num_classes=self.num_classes, spatial_squeeze=True,
tensorflow.placeholder
1,084
import tensorflow as tf

        out = tf.matmul(l3, self.w4) + self.b4
        return out

    def test_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2) + self.b2
        l2 = tf.nn.relu(l2)
        l3 = tf.matmul(l2, self.w3) + self.b3
        l3 = tf.nn.relu(l3)
        out = tf.matmul(l3, self.w4) + self.b4
        return out

    def valid_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
tensorflow.nn.relu
1,085
import tensorflow as tf "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it.
tensorflow.constant
1,086
import tensorflow as tf global_step = slim.get_or_create_global_step() lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu*cfgs.BATCH_SIZE) tf.summary.scalar('lr', lr) optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM) fcos = build_whole_network_batch_quad.DetectionNetworkFCOS(cfgs=self.cfgs, is_training=True) with tf.name_scope('get_batch'): if cfgs.IMAGE_PYRAMID: shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN) shortside_len = tf.random_shuffle(shortside_len_list)[0] else: shortside_len = cfgs.IMG_SHORT_SIDE_LEN img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \ self.reader.next_batch(dataset_name=cfgs.DATASET_NAME, batch_size=cfgs.BATCH_SIZE * num_gpu, shortside_len=shortside_len, is_training=True)
tensorflow.random_shuffle
1,087
import tensorflow as tf var = var[:FLAGS.visualiza_max] var = tf.concat(tf.unstack(var), axis=0) var = tf.expand_dims(var, dim=0) color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max) var = tf.expand_dims(var[..., 3], dim=3) bw_s = tf.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max) return tf.summary.merge([color_s, bw_s]) # TRAINING PROGRESS EVENTS def _on_training_start(self, sess): # Writers and savers self.summary_writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph) self.saver = tf.train.Saver() self._build_embedding_saver(sess) self._restore_model(sess) # Loss summaries self._build_summaries() self.epoch_stats = get_stats_template() self.stats = Bunch( epoch_accuracy=[], epoch_reconstructions=[], permutation=None ) # if FLAGS.dev:
tensorflow.summary.FileWriter
1,088
import tensorflow as tf tf.summary.scalar('Learning rate', m.lr) latest_ckpt = tf.train.latest_checkpoint(FLAGS.save_path) with train_graph.as_default(): sv = tf.train.Supervisor(logdir=FLAGS.save_path) config_proto = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True) with sv.managed_session(config=config_proto) as train_sess: #with tf.Session(config=config_proto) as train_sess: train_sess.run(tf.global_variables_initializer()) for i in range(config.max_max_epoch): lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.) m.assign_lr(train_sess, config.learning_rate * lr_decay) train_perplexity = run_epoch(train_sess, m, #eval_op=m.train_op, verbose=True) print('Epoch {} Train Perplexity: {:.3f}'.format(i + 1, train_perplexity)) if i % 5 == 0: sv.saver.save(train_sess, FLAGS.save_path,
tensorflow.global_variables_initializer
1,089
import tensorflow as tf # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.) #cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred) # Create a tensor named cross_entropy for logging purposes. tf.identity(cross_entropy, name='cross_entropy_loss') tf.summary.scalar('cross_entropy_loss', cross_entropy) loc_loss = tf.cond(n_positives > 0., lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.), lambda: tf.zeros_like(location_pred)) #loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets)) loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1)) loc_loss = tf.identity(loc_loss, name='location_loss') tf.summary.scalar('location_loss', loc_loss) tf.losses.add_loss(loc_loss) # Add weight decay to the loss. We exclude the batch norm variables because # doing so leads to a small improvement in accuracy.
tensorflow.zeros_like
1,090
from tensorflow.python.ops import array_ops def _move_tensors(tensors, device): """Moves a list of tensors to a device by concatenating/splitting them.""" # Reset the device setting to avoid weird interactions with device merging # logic. with ops.device(None): if all(tensor.shape == tensor_shape.scalar() for tensor in tensors): with ops.device(tensors[0].device): values = array_ops.stack(tensors) with ops.device(device): return array_ops.unstack(values) else: with ops.device(tensors[0].device): sizes = array_ops.stack( [array_ops.shape(tensor)[0] for tensor in tensors]) values = array_ops.concat(tensors, axis=0) with ops.device(device): sizes = array_ops.unstack(sizes) return list(array_ops.split(values, sizes, axis=0))
tensorflow.python.ops.array_ops.unstack
1,091
import tensorflow as tf

    label = tf.cast(features['label'], tf.int32)
    image = tf.reshape(image, [4096, 1])
tensorflow.reshape
1,092
import tensorflow as tf

    loss = work.softmax_loss(inf, batch_label)
    opti = work.optimer(loss, learnrate)

    test_image_batch, test_label_batch = get_test_batch(test_image, test_label, testnum)
    test_inf = work.test_inference(test_image_batch)
    test_labels = tf.one_hot(test_label_batch, classnum)
    test_pre = tf.reshape(test_inf, [testnum, classnum])
    correct_prediction = tf.equal(tf.argmax(test_inf, 1), tf.argmax(test_labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    test_pre = tf.argmax(test_pre, 1)
tensorflow.one_hot
1,093
import tensorflow as tf feature = InputFeatures( input_ids=input_ids_new, input_mask=input_mask, segment_ids=segment_ids, masked_lm_positions=masked_lm_positions, masked_lm_ids=masked_lm_labels) features.append(feature) i += mask_count return features def parse_result(result, all_tokens, output_file=None): with tf.gfile.GFile(output_file, "w") as writer: tf.logging.info("***** Predict results *****") i = 0 sentences = [] for word_loss in result: # start of a sentence if all_tokens[i] == "[CLS]": sentence = {} tokens = [] sentence_loss = 0.0 word_count_per_sent = 0 i += 1 # add token tokens.append({"token": tokenization.printable_text(all_tokens[i]),
tensorflow.logging.info
1,094
import tensorflow as tf

            print('Epoch {} Train Perplexity: {:.3f}'.format(i + 1, train_perplexity))
            if i % 5 == 0:
                sv.saver.save(train_sess, FLAGS.save_path,
                              global_step=sv.global_step)


if __name__ == '__main__':
    tf.app.run()
tensorflow.app.run
1,095
import tensorflow as tf estimator=entropy_bottleneck) status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir)) x = tf.convert_to_tensor(x_color, "float32") x_coori = tf.convert_to_tensor(x_coori, "float32") def loop_analysis(element): x = tf.expand_dims(element[0], 0) x_coori = tf.expand_dims(element[1], 0) y = analysis_transform(x_coori,x) return tf.squeeze(y,axis=0) element = [x,x_coori] ys = tf.map_fn(loop_analysis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False)
tensorflow.expand_dims
1,096
import tensorflow as tf

            # Policy Gradient loss, with truncated importance sampling & bias correction
            value = strip(value, self.n_envs, self.n_steps, True)
            # check_shape([qret, value, rho_i, f_i], [[self.n_envs * self.n_steps]] * 4)
            # check_shape([rho, distribution_f, q_value], [[self.n_envs * self.n_steps, self.n_act]] * 2)

            # Truncated importance sampling
            adv = qret - value
            log_f = tf.log(f_i + eps)  # [n_envs * n_steps]
            gain_f = log_f * tf.stop_gradient(adv * tf.minimum(self.correction_term, rho_i))
            loss_f = -tf.reduce_mean(gain_f)

            # Bias correction for the truncation
            adv_bc = (q_value - tf.reshape(value, [self.n_envs * self.n_steps, 1]))  # [n_envs * n_steps, n_act]
tensorflow.log
1,097
from tensorflow.python.framework import constant_op

      self._assert_sparse_tensor_equals(expected_out, sess.run(op))

  def test_integer_sparse_input(self):
    """Tests mixed type sparse and dense inputs."""
    op = sparse_feature_cross_op.sparse_feature_cross([
        self._sparse_tensor([[11], [333, 5555]]),
        constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
                              ['batch2-FC2-F1', 'batch2-FC2-F2']],
                             dtypes.string),
    ])
    expected_out = self._sparse_tensor(
        [['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
tensorflow.python.framework.constant_op.constant
1,098
import tensorflow as tf """ label_smoothing = 0. def inputs(self): return [tf.TensorSpec([None, self.image_shape, self.image_shape, 3], self.image_dtype, 'input'), tf.TensorSpec([None], tf.int32, 'label')] def build_graph(self, image, label): image = self.image_preprocess(image) assert self.data_format in ['NCHW', 'NHWC']
tensorflow.TensorSpec
1,099