Columns:
  seed: string, lengths 25 to 2.89k
  seed_api: string, lengths 14 to 102
  index: int64, 0 to 14.8k
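Each row below is a (seed, seed_api, index) triple: a code snippet, the fully-qualified TensorFlow API it exercises, and the row id. A minimal sketch of iterating such a table, assuming the rows are stored as JSON Lines with those three keys (the filename "seeds.jsonl" is hypothetical):

import json

# Iterate (seed, seed_api, index) rows; "seeds.jsonl" is a hypothetical path.
with open("seeds.jsonl") as f:
    for line in f:
        row = json.loads(line)
        print(row["index"], row["seed_api"])  # e.g. 11500 tensorflow.layers.dense
        print(row["seed"])                    # the raw code snippet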
import tensorflow as tf
with tf.variable_scope('l1'):
    n_l1 = 700
    # combine the action and states together in this way
    w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
    w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
    b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
    net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
with tf.variable_scope('l2'):
    net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w,
                          bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('q'):
    q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b,
                        trainable=trainable)  # Q(s,a)
return q

def learn(self, s, a, r, s_, ISW):
    _, abs_td = self.sess.run([self.train_op, self.abs_td],
                              feed_dict={S: s, self.a: a, R: r, S_: s_, self.ISWeights: ISW})
    if self.t_replace_counter % self.t_replace_iter == 0:
tensorflow.layers.dense
11,500
import tensorflow as tf
with tf.name_scope('accuracy') as scope:
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
tensorflow.arg_max
11,501
import tensorflow as tf
bias_init = tf.ones(bias_shape, dtype=self.dtype)
setattr(
    self,
    'mix_bias_%s' % layer,
    tf.get_variable(
        name='%s_mix_bias' % self.layer_name,
        dtype=self.dtype,
        trainable=True,
tensorflow.get_variable
11,502
import tensorflow as tf
    lstm = tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True)
    cell_drop = tf.contrib.rnn.DropoutWrapper(lstm, variational_recurrent=True, dtype=tf.float32,
                                              input_size=num_input, input_keep_prob=input_prob, state_keep_prob=state_prob)
elif cell_type == 'GRU':
    if activation == 'linear':
        gru = tf.nn.rnn_cell.GRUCell(state_size, activation=tf.identity)
        cell_drop = tf.contrib.rnn.DropoutWrapper(gru, variational_recurrent=True, dtype=tf.float32,
                                                  input_size=num_input, input_keep_prob=input_prob, state_keep_prob=state_prob)
    elif activation == 'relu':
        gru = tf.nn.rnn_cell.GRUCell(state_size, activation=tf.nn.relu)
        cell_drop = tf.contrib.rnn.DropoutWrapper(gru, variational_recurrent=True, dtype=tf.float32,
                                                  input_size=num_input, input_keep_prob=input_prob, state_keep_prob=state_prob)
    else:
        gru = tf.nn.rnn_cell.GRUCell(state_size)
        cell_drop = tf.contrib.rnn.DropoutWrapper(gru, variational_recurrent=True, dtype=tf.float32,
                                                  input_size=num_input, input_keep_prob=input_prob, state_keep_prob=state_prob)
else:
    if activation == 'linear':
        cell_basic = tf.contrib.rnn.BasicRNNCell(state_size, activation=tf.identity)
        cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True, dtype=tf.float32,
                                                  input_size=num_input, input_keep_prob=input_prob, state_keep_prob=state_prob)
tensorflow.nn.rnn_cell.GRUCell
11,503
from tensorflow.python.ops import array_ops

@property
def beta(self):
    """Scale parameter."""
    return self._beta

def _batch_shape(self):
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.alpha), array_ops.shape(self.beta))

def _get_batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.alpha.get_shape(), self.beta.get_shape())

def _event_shape(self):
tensorflow.python.ops.array_ops.shape
11,504
import tensorflow as tf
dense_opt = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)

sok_saver = sok.Saver()
restore_op = list()
for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):
    control_inputs = [restore_op[-1]] if restore_op else None
    with tf.control_dependencies(control_inputs):
        if args.restore_params:
            filepath = r"./embedding_variables"
            op = sok_saver.restore_from_file(embedding_layer.embedding_variable, filepath)
        else:
            op = sok_saver.load_embedding_values(embedding_layer.embedding_variable, init_tensors[i])
tensorflow.control_dependencies
11,505
import tensorflow as tf
pred = tf.reshape(tensor=y_pred, shape=(batch_size, -1, num_of_joints))
heatmap_pred_list = tf.split(value=pred, num_or_size_splits=num_of_joints, axis=-1)
gt = tf.reshape(tensor=target, shape=(batch_size, -1, num_of_joints))
heatmap_gt_list = tf.split(value=gt, num_or_size_splits=num_of_joints, axis=-1)
loss = 0.0
for i in range(num_of_joints):
    heatmap_pred = tf.squeeze(heatmap_pred_list[i])
    heatmap_gt = tf.squeeze(heatmap_gt_list[i])
    loss += 0.5 * self.mse(y_true=heatmap_pred * target_weight[:, i],
                           y_pred=heatmap_gt * target_weight[:, i])
return loss / num_of_joints
tensorflow.squeeze
11,506
import tensorflow as tf
  """
  with tf.name_scope(name):
    source_samples -= tf.reduce_mean(source_samples, 0)
    target_samples -= tf.reduce_mean(target_samples, 0)
    source_samples = tf.nn.l2_normalize(source_samples, 1)
    target_samples = tf.nn.l2_normalize(target_samples, 1)
    source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
    target_cov = tf.matmul(tf.transpose(target_samples), target_samples)
    corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight

    assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
    with tf.control_dependencies([assert_op]):
      tag = 'Correlation Loss'
      barrier = tf.no_op(tag)
  return corr_loss

def maximum_mean_discrepancy(x, y,
tensorflow.is_finite
11,507
import tensorflow as tf
width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), pred_y * height / tf.cast(heatmap_size, tf.float32)

if clip_at_zero:
    pred_x, pred_y = pred_x * tf.cast(pred_max > 0, tf.float32), pred_y * tf.cast(pred_max > 0, tf.float32)
    pred_x = pred_x * tf.cast(pred_max > 0, tf.float32) + tf.cast(pred_max <= 0, tf.float32) * (width / 2.)
    pred_y = pred_y * tf.cast(pred_max > 0, tf.float32) + tf.cast(pred_max <= 0, tf.float32) * (height / 2.)

if config.PRED_DEBUG:
    pred_indices_ = tf.squeeze(pred_indices)
    image_ = tf.squeeze(image) * 255.
    pred_heatmap = tf.one_hot(pred_indices_, heatmap_size * heatmap_size,
                              on_value=1., off_value=0., axis=-1, dtype=tf.float32)
tensorflow.cast
11,508
import tensorflow as tf
generator_inputs = features
real_data = labels
gan_model = tf.contrib.gan.gan_model(generator_fn, discriminator_fn, real_data, generator_inputs)
predictions = gan_model.generated_data
tensorflow.contrib.gan.gan_model
11,509
from tensorflow.python.ops import state_ops
    'delta_mean_prediction')
update_mean_prediction = state_ops.assign_add(mean_prediction, delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction

# batch_mean_label is E[y_B] in the update equation
batch_mean_label = _safe_div(
    math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
                             update_count, 'delta_mean_label')
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label

unweighted_batch_coresiduals = (
    (predictions - batch_mean_prediction) * (labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
    batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
    batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals *
tensorflow.python.ops.state_ops.assign_add
11,510
import tensorflow as tf
self.replay_buffer = ReplayBuffer(self.buffer_size)
self.beta_schedule = None
if self.n_step:
    self.replay_N_buffer = NStepTransitionBuffer(self.buffer_size, self.n_step_length, self.gamma)

with tf.variable_scope("input", reuse=False):
    # Create policy and target TF objects
    self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space, **self.policy_kwargs)
    self.target_policy = self.policy(self.sess, self.observation_space, self.action_space, **self.policy_kwargs)
tensorflow.variable_scope
11,511
import tensorflow as tf

def build_discriminator_logits_layer(self, params):
    """Builds flatten and logits layer internals using call.

    Args:
        params: dict, user passed parameters.

    Returns:
        Final logits tensor of discriminator.
    """
    with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
        block_conv_size = params["discriminator_base_conv_blocks"][-1][-1][3]

        # Flatten final block conv tensor.
        block_conv_flat = self.flatten_layer(
            inputs=tf.zeros(
                shape=[1, 1, 1, block_conv_size],
                dtype=tf.float32
            )
        )
tensorflow.variable_scope
11,512
from tensorflow.python.ops import math_ops

# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
tensorflow.python.ops.math_ops.square
11,513
import tensorflow as tf
tf.enable_resource_variables()
for sample_size in [10, 25, 50, 100, 200]:
    hparams.n_samples = sample_size
    tf.reset_default_graph()
    with tf.Graph().as_default():
        energy_fn, _, _ = l2hmc.get_scg_energy_fn()
tensorflow.reset_default_graph
11,514
import tensorflow as tf
height = shape[1]
width = shape[2]
channels = shape[3]
res = tf.reshape(input_, [batch_size, height, 1, width, 1, channels])
res = tf.concat(
    axis=2, values=[res, tf.zeros([batch_size, height, stride - 1, width, 1, channels])])
res = tf.concat(axis=4, values=[
    res,
    tf.zeros([batch_size, height, stride, width, stride - 1, channels])
])
res = tf.reshape(res, [batch_size, stride * height, stride * width, channels])
tensorflow.zeros
11,515
import tensorflow as tf
end_label = tf.one_hot(self.end_label, tf.shape(self.logits2)[1], axis=1)
if self.config.loss_type == 'cross_entropy':
    start_loss = tf.nn.softmax_cross_entropy_with_logits(
        logits=self.logits1, labels=start_label)
    end_loss = tf.nn.softmax_cross_entropy_with_logits(
        logits=self.logits2, labels=end_label)
    self.loss = tf.reduce_mean(start_loss + end_loss)
else:
    start_loss = focal_loss(tf.nn.softmax(self.logits1, -1), start_label)
    end_loss = focal_loss(tf.nn.softmax(self.logits2, -1), end_label)
    self.loss = tf.reduce_mean(start_loss + end_loss)
self.logger.info("loss type %s" % self.config.loss_type)

self.all_params = tf.trainable_variables()
if self.config.l2_norm is not None:
    self.logger.info("applying l2 loss")
    variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables)
    self.loss += l2_loss
tensorflow.reduce_mean
11,516
import tensorflow as tf

def testBasicRNNSeq2Seq(self):
    with self.test_session() as sess:
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            inp = [tf.constant(0.5, shape=[2, 2])] * 2
            dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
            cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tensorflow.constant
11,517
import tensorflow as tf
with tf.variable_scope('a'):
    actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
                              bias_initializer=init_b, name='a', trainable=trainable)
    scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a')  # Scale output to -action_bound to action_bound
return scaled_a

def learn(self, s):  # batch update
tensorflow.multiply
11,518
import tensorflow as tf
    surrogate_type=surrogate_type,
    positive_weights=1.0 + lambdas * (1.0 - precision_values),
    negative_weights=lambdas * precision_values)
maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
lambda_term = lambdas * (1.0 - precision_values) * label_priors * maybe_log2
per_anchor_loss = loss - lambda_term
per_label_loss = delta * tf.reduce_sum(per_anchor_loss, 2)
# Normalize the AUC such that a perfect score function will have AUC 1.0.
# Because precision_range is discretized into num_anchors + 1 intervals
# but only num_anchors terms are included in the Riemann sum, the
# effective length of the integration interval is `delta` less than the
# length of precision_range.
scaled_loss = tf.div(
    per_label_loss,
    precision_range[1] - precision_range[0] - delta,
    name='AUC_Normalize')
scaled_loss = tf.reshape(scaled_loss, original_shape)

other_outputs = {
    'lambdas': lambdas_variable,
    'biases': biases,
    'label_priors': label_priors,
    'true_positives_lower_bound': true_positives_lower_bound(labels, logits, weights, surrogate_type),
tensorflow.div
11,519
import tensorflow as tf
# data for self-attention
rep_map_dp = dropout(rep_map, keep_prob, is_train)
rep_dep_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, dep_selection)
rep_head_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, head_selection)

# mask generation
dep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_head, 1])
head_idxs = tf.tile(tf.expand_dims(head_org_idx, 2), [1, 1, sl_dep])

if direction is None:
    direct_mask = tf.not_equal(head_idxs, dep_idxs)  # [bs, slh, sld]
else:
    if direction == 'forward':
        direct_mask = tf.greater(head_idxs, dep_idxs)  # [bs, slh, sld]
    else:
        direct_mask = tf.less(head_idxs, dep_idxs)  # [bs, slh, sld]
# [bs, slh, slh]
rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2))
attn_mask = tf.logical_and(direct_mask, rep_mask_tile)  # [bs, slh, sld]
tensorflow.not_equal
11,520
import tensorflow as tf
if not (tf.gfile.Exists(lang1_filepath) and tf.gfile.Exists(lang2_filepath)):
tensorflow.gfile.Exists
11,521
import tensorflow as tf

# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
    with self.test_session() as sess:
        ones_t = tf.fill([1024, 1024], 1.0)
        p = tf.Variable(tf.zeros([1024, 1024]))
        adds = [tf.assign_add(p, ones_t, use_locking=False)
                for _ in range(20)]
        tf.initialize_all_variables().run()

        def run_add(add_op):
            sess.run(add_op)

        threads = [self.checkedThread(target=run_add, args=(add_op,))
                   for add_op in adds]
        for t in threads:
tensorflow.initialize_all_variables
11,522
import tensorflow as tf
        ValueError: If the expected shape doesn't match the actual shape.
    """
    if name is None:
        name = tensor.name

    expected_rank_dict = {}
    if isinstance(expected_rank, six.integer_types):
        expected_rank_dict[expected_rank] = True
    else:
        for x in expected_rank:
            expected_rank_dict[x] = True

    actual_rank = tensor.shape.ndims
    if actual_rank not in expected_rank_dict:
        scope_name = tf.get_variable_scope().name
        raise ValueError(
            "For the tensor `%s` in scope `%s`, the actual rank "
            "`%d` (shape = %s) is not equal to the expected rank `%s`" %
            (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))

def gather_indexes(sequence_tensor, positions):
    """Gathers the vectors at the specific positions over a minibatch."""
    sequence_shape = get_shape_list(sequence_tensor, expected_rank=3)
    batch_size = sequence_shape[0]
    seq_length = sequence_shape[1]
    width = sequence_shape[2]

    flat_offsets = tf.reshape(
tensorflow.get_variable_scope
11,523
import tensorflow as tf
self.state = tf.placeholder(tf.float32, [None, 400], name="state")
self.target = tf.placeholder(tf.float32, [None, 1], name="target")
self.a_his = tf.placeholder(tf.float32, [None, num_action], name="action_hist")
tensorflow.placeholder
11,524
import tensorflow as tf
srcimg_h2 = lrelu(conv2d(srcimg_h1, self.df_dim*4, name='h2_conv'))
srcimg_h3 = lrelu(conv2d(srcimg_h2, self.df_dim*8, name='h3_conv'))
print(srcimg_h3.get_shape())
srcimg_h4 = lrelu(linear(tf.reshape(srcimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
srcimg_z = lrelu(linear(srcimg_h4, featsize, 'hz_lin'))

scope.reuse_variables()
tgtimg_h0 = lrelu(conv2d(tgtimg, self.df_dim, name='h0_conv'))
tgtimg_h1 = lrelu(conv2d(tgtimg_h0, self.df_dim*2, name='h1_conv'))
tgtimg_h2 = lrelu(conv2d(tgtimg_h1, self.df_dim*4, name='h2_conv'))
tgtimg_h3 = lrelu(conv2d(tgtimg_h2, self.df_dim*8, name='h3_conv'))
tgtimg_h4 = lrelu(linear(tf.reshape(tgtimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
tgtimg_z = lrelu(linear(tgtimg_h4, featsize, 'hz_lin'))

with tf.variable_scope("translate") as scope:
    trans_h0 = lrelu(linear(tf.concat([srcimg_z, tgtctx_z], 1), featsize, 'trans_h0'))
    trans_z = linear(trans_h0, featsize, 'trans_z')
    self.translated_z = trans_z

with tf.variable_scope("deconv") as scope:
    s_h, s_w = self.output_height, self.output_width
    s_h2, s_h4, s_h8, s_h16 = \
        int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16)
    s_w2, s_w4, s_w8, s_w16 = \
        int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16)
    output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
    output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
    output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3),
tensorflow.variable_scope
11,525
import tensorflow as tf
if other_inputs is not None and chaining_stop_gradient:
    other_inputs = tf.stop_gradient(other_inputs)
tensorflow.stop_gradient
11,526
import tensorflow as tf
    return mean, variance

def build_moving_stats():
    return (
        tf.identity(self._moving_mean),
        tf.identity(self._moving_variance),
    )

mean, variance = utils.smart_cond(
    use_batch_stats,
tensorflow.identity
11,527
import tensorflow as tf
    return tf.argmax(logits - tf.log(-tf.log(noise)), 1)

def cat_entropy(logits):
    a0 = logits - tf.reduce_max(logits, 1, keepdims=True)
    ea0 = tf.exp(a0)
    z0 = tf.reduce_sum(ea0, 1, keepdims=True)
    p0 = ea0 / z0
    return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)

def cat_entropy_softmax(p0):
    return -tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis=1)

def ortho_init(scale=1.0):
    def _ortho_init(shape, dtype, partition_info=None):
tensorflow.log
11,528
import tensorflow as tf
if W_init_args is None:
    W_init_args = {}
if b_init_args is None:
    b_init_args = {}

if self.inputs.get_shape().ndims != 2:
    raise Exception("The input dimension must be rank 2, please reshape or flatten it")
n_in = int(self.inputs.get_shape()[-1])

with tf.variable_scope(name):
    W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init,
                        dtype=LayersConfig.tf_dtype, **W_init_args)
    if b_init is not None:
        try:
            b = tf.get_variable(name='b', shape=(n_units), initializer=b_init,
                                dtype=LayersConfig.tf_dtype, **b_init_args)
        except Exception:  # If initializer is a constant, do not specify shape.
            b = tf.get_variable(name='b', initializer=b_init,
                                dtype=LayersConfig.tf_dtype, **b_init_args)
        self.outputs = act(tf.matmul(self.inputs, W) + b)
    else:
        self.outputs = act(tf.matmul(self.inputs, W))

self.all_layers.append(self.outputs)
if b_init is not None:
    self.all_params.extend([W, b])
else:
    self.all_params.append(W)
tensorflow.get_variable
11,529
import tensorflow as tf

def __init__(self):
    with tf.variable_scope('scop'):
        self.w1 = tf.get_variable('w1', [4096, 2048], initializer=tf.contrib.layers.xavier_initializer_conv2d())
        self.w2 = tf.get_variable('w2', [2048, 3072], initializer=tf.contrib.layers.xavier_initializer_conv2d())
        self.w3 = tf.get_variable('w3', [3072, 512], initializer=tf.contrib.layers.xavier_initializer_conv2d())
        self.w4 = tf.get_variable('w4', [512, classnum], initializer=tf.contrib.layers.xavier_initializer_conv2d())

        self.b1 = tf.get_variable('b1', [2048], initializer=tf.constant_initializer(0.0))
        self.b2 = tf.get_variable('b2', [3072], initializer=tf.constant_initializer(0.0))
        self.b3 = tf.get_variable('b3', [512], initializer=tf.constant_initializer(0.0))
        self.b4 = tf.get_variable('b4', [classnum], initializer=tf.constant_initializer(0.0))

def inference(self, images):
    images = tf.cast(images, tf.float32) / 255.0
    l1 = tf.matmul(images, self.w1) + self.b1
    l1 = tf.nn.relu(l1)
    l2 = tf.matmul(l1, self.w2) + self.b2
    l2 = tf.nn.relu(l2)
    l3 = tf.matmul(l2, self.w3) + self.b3
    l3 = tf.nn.relu(l3)
    out = tf.matmul(l3, self.w4) + self.b4
    return out

def test_inference(self, images):
    images = tf.cast(images, tf.float32) / 255.0
    l1 = tf.matmul(images, self.w1) + self.b1
    l1 = tf.nn.relu(l1)
tensorflow.matmul
11,530
import tensorflow as tf
batch_image, batch_label = get_batch(image, label, trainnum, 0)
work = trainwork()
inf = work.inference(batch_image)
loss = work.softmax_loss(inf, batch_label)
opti = work.optimer(loss, learnrate)

test_image_batch, test_label_batch = get_test_batch(test_image, test_label, testnum)
test_inf = work.test_inference(test_image_batch)
test_labels = tf.one_hot(test_label_batch, classnum)
test_pre = tf.reshape(test_inf, [testnum, classnum])
correct_prediction = tf.equal(tf.argmax(test_inf, 1), tf.argmax(test_labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_pre = tf.argmax(test_pre, 1)
test_true = tf.argmax(test_labels, 1)

valid_image_batch, valid_label_batch = get_valid_batch(valid_image, valid_label, validnum)
valid_inf = work.valid_inference(valid_image_batch)
valid_labels = tf.one_hot(valid_label_batch, classnum)
# train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
valid_pre = tf.reshape(valid_inf, [validnum, classnum])
tensorflow.argmax
11,531
import tensorflow as tf

def _tukey_parameters(
    x: common_types.TensorType,
    reduce_instance_dims: bool = True,
    output_dtype: Optional[tf.DType] = None
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
    """Efficient computation of L-moments."""
    if output_dtype is None:
        output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype)
        if output_dtype is None:
            raise TypeError('Tensor type %r is not supported' % x.dtype)

    with tf.compat.v1.name_scope('tukey_parameters'):
        x = tf.cast(x, output_dtype)

        (count_l1, l1, count_l2, l2, count_l3, l3, count_l4, l4) = (
            tf_utils.reduce_batch_count_l_moments(x, reduce_instance_dims))

        combine_inputs = _LMomentsAccumulator(
            count_l1=count_l1,
            count_l2=count_l2,
            count_l3=count_l3,
tensorflow.compat.v1.name_scope
11,532
import tensorflow as tf
obs_dim = states.shape[1]
action_dim = actions.shape[1]
action_spec = self._actor.output_tensor_spec

states_tiled = tf.tile(states[:, None], [1, num_tasks, 1])  # B x B x D
states_tiled = tf.reshape(states_tiled, [batch_size * num_tasks, obs_dim])  # B*B x D
actions_tiled = tf.tile(actions[:, None], [1, num_tasks, 1])  # B x B x D
actions_tiled = tf.reshape(actions_tiled, [batch_size * num_tasks, action_dim])  # B*B x D
tasks_tiled = tf.tile(tasks[None], [batch_size, 1, 1])  # B x B x D
tasks_tiled = tf.reshape(tasks_tiled, [batch_size * num_tasks, task_dim])  # B*B x D

next_states_tiled = tf.tile(next_states[:, None], [1, num_tasks, 1])
next_states_tiled = tf.reshape(next_states_tiled, [batch_size * num_tasks, obs_dim])  # B*B x D
next_relabelled_obs = self._task_distribution.combine(next_states_tiled, tasks_tiled)

sampled_actions_tiled = self._actor(
    next_relabelled_obs, step_type=(), network_state=())[0].sample()
critic_input = (next_relabelled_obs, sampled_actions_tiled)
q_vals, _ = self._critic(critic_input, training=False)
q_vals_vec = tf.reshape(q_vals, (batch_size, num_tasks))

rewards, dones = self._task_distribution.evaluate(states_tiled, actions_tiled,
tensorflow.tile
11,533
import tensorflow as tf
from metric import tf_metrics
from optimizer import distributed_optimizer as optimizer
from model_io import model_io
from distillation import knowledge_distillation as distill

def correlation(x, y):
    x = x - tf.reduce_mean(x, axis=-1, keepdims=True)
    y = y - tf.reduce_mean(y, axis=-1, keepdims=True)
    x = tf.nn.l2_normalize(x, -1)
    y = tf.nn.l2_normalize(y, -1)
    return -tf.reduce_sum(x*y, axis=-1)  # higher the better

def kd(x, y):
    x_prob = tf.nn.softmax(x)
    print(x_prob.get_shape(), y.get_shape(), tf.reduce_sum(x_prob * y, axis=-1).get_shape())
    return -tf.reduce_sum(x_prob * y, axis=-1)  # higher the better

def mse(x, y):
    x = x - tf.reduce_mean(x, axis=-1, keepdims=True)
tensorflow.nn.l2_normalize
11,534
import tensorflow as tf
config = tf.ConfigProto(allow_soft_placement=True,
                        log_device_placement=False,
                        intra_op_parallelism_threads=FLAGS.num_cpu_threads,
                        inter_op_parallelism_threads=FLAGS.num_cpu_threads,
                        gpu_options=gpu_options)

# Set up a RunConfig to only save checkpoints once per training cycle.
run_config = tf.estimator.RunConfig().replace(
    save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace(
    save_checkpoints_steps=None).replace(
tensorflow.estimator.RunConfig
11,535
import tensorflow as tf
    final_loss = tf.reduce_mean(loss)
    return final_loss, cstr_pct

def contra_traj_lossV9(pred, tgt, horizon=12, margin=1):
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
    pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2
    geq = tf.cast(tgt_dif > 0, tf.bool)
    # tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
    pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
    loss = tf.maximum(0., margin - pred_posi_dif)
tensorflow.reshape
11,536
import tensorflow as tf
actions_tiled = tf.tile(actions[:, None], [1, num_tasks, 1])  # B x B x D
actions_tiled = tf.reshape(actions_tiled, [batch_size * num_tasks, action_dim])  # B*B x D
tasks_tiled = tf.tile(tasks[None], [batch_size, 1, 1])  # B x B x D
tasks_tiled = tf.reshape(tasks_tiled, [batch_size * num_tasks, task_dim])  # B*B x D

next_states_tiled = tf.tile(next_states[:, None], [1, num_tasks, 1])
next_states_tiled = tf.reshape(next_states_tiled, [batch_size * num_tasks, obs_dim])  # B*B x D
next_relabelled_obs = self._task_distribution.combine(next_states_tiled, tasks_tiled)

sampled_actions_tiled = self._actor(
    next_relabelled_obs, step_type=(), network_state=())[0].sample()
critic_input = (next_relabelled_obs, sampled_actions_tiled)
tensorflow.reshape
11,537
import tensorflow as tf
t_shape = tf.shape(t)
t_d0 = t_shape[0]
pad_d0 = tf.expand_dims(length - t_d0, 0)
pad_shape = tf.cond(
    tf.greater(t_rank, 1),
    lambda: tf.concat([pad_d0, t_shape[1:]], 0),
    lambda: tf.expand_dims(length - t_d0, 0))
padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)

if not _is_tensor(length):
    padded_t = _set_dim_0(padded_t, length)
return padded_t
tensorflow.zeros
11,538
from tensorflow.python.framework import ops
    name)
precision = compute_precision('value')

with ops.control_dependencies([true_positives_update_op,
                               false_positives_update_op]):
    update_op = compute_precision('update_op')

if metrics_collections:
    ops.add_to_collections(metrics_collections, precision)

if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

return precision, update_op

@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_recall(predictions, labels, ignore_mask=None, weights=None,
                     metrics_collections=None, updates_collections=None,
                     name=None):
    """Computes the recall of the predictions with respect to the labels.
tensorflow.python.framework.ops.add_to_collections
11,539
import tensorflow as tf
v = tf.Variable(10.0, name="v")
save = tf.train.Saver({"v": v}, max_to_keep=2)
tensorflow.train.Saver
11,540
import tensorflow as tf
self.prev_g = tf.placeholder(tf.float32, (None, None, self.g_dim))
self.ri = tf.placeholder(tf.float32, (None,))
self.s_diff = tf.placeholder(tf.float32, (None, self.g_dim))

def build_perception(self):
    self._obs = tf.expand_dims(self.obs, -1)  # !
    self._obs = tf.expand_dims(self._obs, -1)  # !
    conv1 = tf.layers.conv2d(inputs=self._obs,
                             filters=16,
                             kernel_size=[2, 1],  # ! kernel_size = [8,8]
                             activation=tf.nn.elu,
                             strides=1)  # ! strides = 4
    conv2 = tf.layers.conv2d(inputs=conv1,
                             filters=32,
                             kernel_size=[2, 1],  # ! kernel_size = [4,4]
                             activation=tf.nn.elu,
                             strides=1)  # ! strides = 2

    flattened_filters = policy_utils.flatten(conv2)
    self.z = tf.layers.dense(inputs=flattened_filters,
                             units=256,
                             activation=tf.nn.elu)

def build_manager(self):
tensorflow.layers.conv2d
11,541
import tensorflow as tf
# sample mixture indicator from softmax
sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
    tf.shape(logit_probs), minval=1e-5, maxval=1. - 1e-5))), 3),
    depth=nr_mix, dtype=tf.float32)
sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
# select logistic parameters
means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
log_scales = tf.maximum(tf.reduce_sum(
    l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
coeffs = tf.reduce_sum(tf.nn.tanh(
    l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
tensorflow.reduce_sum
11,542
from tensorflow.python.platform import tf_logging as logging
    # runtime errors when we are calling export on the same global step.
    # Exports depend on saved checkpoints for constructing the graph and
    # getting the global step from the graph instance saved in the checkpoint.
    # If the checkpoint is stale with respect to current step, the global step
    # is taken to be the last saved checkpoint's global step and exporter
    # doesn't export the same checkpoint again with the following error.
    logging.info("Skipping exporting because the existing checkpoint has "
                 "already been exported. "
                 "Consider exporting less frequently.")

def end(self, session=None):
    super(ExportMonitor, self).end(session=session)
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
        logging.info("Skipping export at the end since model has not been saved "
                     "yet.")
        return
    try:
        self._last_export_dir = self._estimator.export(
            self.export_dir,
            exports_to_keep=self.exports_to_keep,
            signature_fn=self.signature_fn,
            input_fn=self._input_fn,
            default_batch_size=self._default_batch_size,
            input_feature_key=self._input_feature_key,
            use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
        logging.info("Skipping exporting for the same step.")
tensorflow.python.platform.tf_logging.info
11,543
from tensorflow.python.framework import ops

ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.RegisterShape
11,544
import tensorflow as tf
    beam_width=beam_width, blank_index=blank_index,
    top_paths=1, blank_label=0)
decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0])
decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32)
decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0])
decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32)

# Adjust event vals according to representation
decoded = tf.where(tf.not_equal(decoded, 0), decoded + shift, decoded)
tensorflow.sparse.SparseTensor
11,545
import tensorflow as tf

def build_anet(self, state_in, name, reuse=False):
    reg = tf.contrib.layers.l2_regularizer(1e-3)
    with tf.variable_scope(name, reuse=reuse):
        layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
        layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
        mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
        sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
tensorflow.layers.dense
11,546
import tensorflow as tf
    next_sentence_example_loss,
    next_sentence_log_probs,
) = get_next_sentence_output(
    bert_config, model.get_pooled_output(), next_sentence_labels
)

total_loss = masked_lm_loss + next_sentence_loss

tvars = tf.trainable_variables()

initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
    (
        assignment_map,
        initialized_variable_names,
tensorflow.trainable_variables
11,547
import tensorflow as tf
    full_video, time_axis=1)
latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
latent = tf.layers.flatten(latent)
latent = tf.expand_dims(latent, axis=1)
tensorflow.layers.flatten
11,548
from tensorflow.python.framework import tensor_util

@ops.RegisterShape("Range")
def _RangeShape(op):
    start_value = tensor_util.ConstantValue(op.inputs[0])
    limit_value = tensor_util.ConstantValue(op.inputs[1])
    delta_value = tensor_util.ConstantValue(op.inputs[2])
    if start_value is None or limit_value is None or delta_value is None:
        return [tensor_shape.vector(None)]
    else:
tensorflow.python.framework.tensor_util.ConstantValue
11,549
import tensorflow as tf
    rightmost_transposed_ndims)
msg = '`rightmost_transposed_ndims` must be non-negative.'
if rightmost_transposed_ndims_ is not None:
    if rightmost_transposed_ndims_ < 0:
        raise ValueError(msg[:-1] + ', saw: {}.'.format(
            rightmost_transposed_ndims_))
elif validate_args:
    assertions += [
        tf.compat.v1.assert_non_negative(
            rightmost_transposed_ndims, message=msg)
    ]

return assertions

def _maybe_validate_perm(perm, validate_args, name=None):
tensorflow.compat.v1.assert_non_negative
11,550
import tensorflow as tf
seq_lengths = tf.fill([batch_size], seq_length)

# Perform beam search
indices, values, shape, indices_u, values_u, shape_u, log_probs = ctc_ext_beam_search_decoder(
    inputs=inputs, sequence_length=seq_lengths,
    beam_width=beam_width, blank_index=blank_index,
    top_paths=1, blank_label=0)

decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0])
decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32)
decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0])
decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32)

# Adjust event vals according to representation
decoded = tf.where(tf.not_equal(decoded, 0), decoded + shift, decoded)
decoded_u = tf.where(tf.not_equal(decoded_u, 0), decoded_u + shift, decoded_u)

# Set default vals
decoded = tf.where(tf.equal(decoded, 0), def_val, decoded)
decoded_u = tf.where(tf.equal(decoded_u, 0), def_val, decoded_u)

# We know the shape of decoded_u, and first dim for decoded
decoded_u.set_shape([batch_size, seq_length])
decoded = tf.reshape(decoded, [batch_size, -1])

return decoded_u, decoded
tensorflow.equal
11,551
import tensorflow as tf

def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""

    name_to_features = {
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "label_ids": tf.FixedLenFeature([], tf.int64),
        "is_real_example": tf.FixedLenFeature([], tf.int64),
    }

    def _decode_record(record, name_to_features):
        """Decodes a record to a TensorFlow example."""
        example = tf.parse_single_example(record, name_to_features)

        # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
        # So cast all int64 to int32.
tensorflow.FixedLenFeature
11,552
import tensorflow as tf

if __name__ == '__main__':
    tf.app.run()
tensorflow.app.run
11,553
import tensorflow as tf
    name="dense_0")
end_logits = tf.contrib.layers.layer_norm(end_logits, begin_norm_axis=-1)

end_logits = tf.layers.dense(
    end_logits, 1,
    kernel_initializer=initializer,
    name="dense_1")
end_logits = tf.reshape(end_logits, [seq_len, -1, FLAGS.start_n_top])
end_logits = tf.transpose(end_logits, [1, 2, 0])
end_logits_masked = end_logits * (
    1 - p_mask[:, None]) - 1e30 * p_mask[:, None]
end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)

end_top_log_probs, end_top_index = tf.nn.top_k(
    end_log_probs, k=FLAGS.end_n_top)
end_top_log_probs = tf.reshape(
    end_top_log_probs,
    [-1, FLAGS.start_n_top * FLAGS.end_n_top])
tensorflow.transpose
11,554
import tensorflow as tf
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
    cluster=tpu_cluster_resolver,
    master=FLAGS.master,
    model_dir=FLAGS.output_dir,
    save_checkpoints_steps=FLAGS.save_checkpoints_steps,
    tpu_config=tf.contrib.tpu.TPUConfig(
        iterations_per_loop=FLAGS.iterations_per_loop,
        num_shards=FLAGS.num_tpu_cores,
        per_host_input_for_training=is_per_host))

train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
tensorflow.contrib.tpu.TPUConfig
11,555
import tensorflow as tf
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
tensorflow.logging.info
11,556
import tensorflow as tf
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
tensorflow.metrics.accuracy
11,557
import tensorflow as tf
print("Embedded Lookup:", cnn_inputs.get_shape())
print("-" * 20)

self.layers = []

# Temp(First) Conv Layer
with tf.variable_scope("temp_conv") as scope:
    filter_shape = [3, embedding_size, 4, 64]
    W = tf.get_variable(name='W_1', shape=filter_shape,
                        initializer=he_normal,
                        regularizer=regularizer)
    paddings = [[0, 0], [1, 1], [0, 0], [0, 0]]
    cnn_inputs = tf.pad(cnn_inputs, paddings, "CONSTANT")
    # print("cnn_inputs shape:", cnn_inputs.shape)
    inputs = tf.nn.conv2d(cnn_inputs, W, strides=[1, 1, 1, 1], padding="VALID", name="first_conv")
    inputs = tf.layers.batch_normalization(inputs, axis=-1, training=self.is_training)
tensorflow.get_variable
11,558
import tensorflow as tf
        d = d.apply(
            tf.contrib.data.map_and_batch(
                lambda record: _decode_record(record, name_to_features),
                batch_size=batch_size,
                num_parallel_batches=num_cpu_threads,
                drop_remainder=True))
        return d

    return input_fn

def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
        t = example[name]
        if t.dtype == tf.int64:
            t = tf.to_int32(t)
        example[name] = t

    return example
tensorflow.parse_single_example
11,559
import tensorflow as tf
self.pairwise_improvement_bool = pairwise_improvement_bool

metrics.append(tf.summary.scalar('training/avg_dist', mean_dist))
metrics.append(tf.summary.scalar('training/pred_dist', mean_pred_error))
tensorflow.summary.scalar
11,560
import tensorflow as tf
with tf.variable_scope('A'):
    A_dis_real = self.discriminator(A)
    A_dis_fake = self.discriminator(BA)

with tf.variable_scope('B'):
    B_dis_real = self.discriminator(B)
    B_dis_fake = self.discriminator(AB)

def LSGAN_losses(real, fake):
    d_real = tf.reduce_mean(tf.squared_difference(real, 1), name='d_real')
    d_fake = tf.reduce_mean(tf.square(fake), name='d_fake')
    d_loss = tf.multiply(d_real + d_fake, 0.5, name='d_loss')

    g_loss = tf.reduce_mean(tf.squared_difference(fake, 1), name='g_loss')
    add_moving_summary(g_loss, d_loss)
    return g_loss, d_loss

with tf.name_scope('losses'):
    with tf.name_scope('LossA'):
        # reconstruction loss
        recon_loss_A = tf.reduce_mean(tf.abs(A - ABA), name='recon_loss')
        # gan loss
tensorflow.multiply
11,561
import tensorflow as tf

@layer
def identity_layer(tensor, **opts):
    out = tf.identity(tensor)
    return out

@layer
def embedding_layer(tensor, vocab_size=None, embedding_dim=None, embedding_matrix=None, **opts):
    if embedding_matrix is None:
        initializer = tf.contrib.layers.xavier_initializer(uniform=True)
        embedding_matrix = tf.get_variable("embedding_matrix",
                                           initializer=initializer(shape=(vocab_size, embedding_dim)))

    out = tf.nn.embedding_lookup(embedding_matrix, tensor)
    return out

@layer
def recurrent_layer(tensor, cell=None, hidden_dims=128, sequence_length=None, decoder_fn=None,
                    activation=tf.nn.tanh, initializer=tf.orthogonal_initializer(), initial_state=None,
                    keep_prob=1.0, return_final_state=False, return_next_cell_input=True, **opts):
    if cell is None:
        cell = tf.contrib.rnn.BasicRNNCell(hidden_dims, activation=activation)
        # cell = tf.contrib.rnn.LSTMCell(hidden_dims, activation=activation)
tensorflow.nn.embedding_lookup
11,562
import tensorflow as tf

def test_instance_non_maximum_suppression_1d_scores_empty_inputs(self):
    masks = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)
tensorflow.constant
11,563
import tensorflow as tf
        for name, tensor in predictions.items()
    })

return tf.estimator.EstimatorSpec(
    mode,
    predictions=predictions,
tensorflow.estimator.EstimatorSpec
11,564
import tensorflow as tf
vocab_scores.append(output_t)  # The vocabulary distributions.

state_t_1 = state_t
context_t_1 = context_t
coverage_t_1 = coverage_t

if mode_gen == 'greedy':
    wordidx_t = tf.argmax(output_t, 1)  # [batch_size]
    wordidx_t = tf.reshape(wordidx_t, [-1])  # [batch_size]
elif mode_gen == 'sample':
    log_score_t = tf.log(output_t)  # [batch_size, vsize]
    wordidx_t = tf.multinomial(log_score_t, 1)  # [batch_size, 1]
    wordidx_t = tf.reshape(wordidx_t, [-1])  # [batch_size]
elif mode_gen in ('ce_train', 'loss',):
    wordidx_t = answer_batch_unstack[i]
else:
tensorflow.reshape
11,565
import tensorflow as tf
    A = tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1))
    return (1.0/(N*N)) * tf.reduce_sum(N0(A, 2*y)) + N0(0.0, 2.0 + 2*y) - (2/N) * tf.reduce_sum(N0(X, 1.0 + 2*y))

def cw_2d(X, y=None):
    def __phi(x):
        def __phi_f(s):
            t = s/7.5
            return tf.exp(-s/2) * (1 + 3.5156229*t**2 + 3.0899424*t**4 + 1.2067492*t**6
                                   + 0.2659732*t**8 + 0.0360768*t**10 + 0.0045813*t**12)

        def __phi_g(s):
            t = s/7.5
            return tf.sqrt(2/s) * (0.39894228 + 0.01328592*t**(-1) + 0.00225319*t**(-2)
                                   - 0.00157565*t**(-3) + 0.0091628*t**(-4) - 0.02057706*t**(-5)
                                   + 0.02635537*t**(-6) - 0.01647633*t**(-7) + 0.00392377*t**(-8))

        a = 7.5
        return __phi_f(tf.minimum(x, a)) - __phi_f(a) + __phi_g(tf.maximum(x, a))

    N = tf.cast(tf.shape(X)[0], tf.float32)
    if y is None:
        y = silverman_rule_of_thumb(N)

    A = 1/(N*N*tf.sqrt(y))
    B = 2.0/(N*tf.sqrt(y+0.5))
tensorflow.sqrt
11,566
import tensorflow as tf
self.target = tf.placeholder(tf.float32, [None, 1], name="target")

# layers
self.value_estimate = tf.layers.dense(self.state, 1, kernel_initializer=w_init, name='v')  # estimated value for state

# loss and optimizer
tensorflow.layers.dense
11,567
import tensorflow as tf
for layer in range(self.num_layers):
    gru_fw, gru_bw = self.grus[layer]
    init_fw, init_bw = self.inits[layer]
    mask_fw, mask_bw = self.dropout_mask[layer]
    with tf.variable_scope("fw_{}".format(layer)):
        out_fw, _ = tf.nn.dynamic_rnn(
            gru_fw, outputs[-1] * mask_fw, seq_len, initial_state=init_fw, dtype=tf.float32)
    with tf.variable_scope("bw_{}".format(layer)):
        inputs_bw = tf.reverse_sequence(
            outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
        out_bw, _ = tf.nn.dynamic_rnn(
            gru_bw, inputs_bw, seq_len, initial_state=init_bw, dtype=tf.float32)
        out_bw = tf.reverse_sequence(
            out_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
    outputs.append(tf.concat([out_fw, out_bw], axis=2))
if concat_layers:
tensorflow.reverse_sequence
11,568
import tensorflow as tf
# We run the summaries in the same thread as the training operations by
# passing in None for summary_op to avoid a summary_thread being started.
# Running summaries and training operations in parallel could run out of
# GPU memory.
sv = tf.train.Supervisor(
    is_chief=is_chief,
    logdir=FLAGS.train_dir,
    saver=tf.train.Saver(tf.global_variables()),
    global_step=global_step,
    summary_op=None,
    save_model_secs=FLAGS.save_model_secs,
    summary_writer=summary_writer)

step_train_times = []
tensorflow.global_variables
11,569
import tensorflow as tf
    # Mean-squared-error was used in Vanilla AE
    logging.info('    use: mse, L2_w, KLD')
    self.cost = mse + L2_w + KLD

    # ----------------------------------------------------
    # Add DropNeuro penalty (P_o) can remove neurons of AE
    # logging.info('    use: mse, L2_w, KLD, P_o')
    # self.cost = mse + L2_w + KLD + P_o

    # ----------------------------------------------------
    # Add DropNeuro penalty (P_i) can remove neurons of previous layer
    # If previous layer is InputLayer, it means remove useless features
    # logging.info('    use: mse, L2_w, KLD, P_i')
    # self.cost = mse + L2_w + KLD + P_i
else:
    raise Exception("Don't support the given reconstruct activation function")

self.train_op = tf.train.AdamOptimizer(
    learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,
    use_locking=False).minimize(self.cost, var_list=self.train_params)
# self.train_op = tf.train.GradientDescentOptimizer(1.0).minimize(self.cost, var_list=self.train_params)

def pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128,
             print_freq=10, save=True, save_name='w1pre_'):
    # ====================================================
    # You need to modify the cost function in __init__() so as to
    # get your own pre-train method.
    # ====================================================
    logging.info("  [*] %s start pretrain" % self.name)
    logging.info("  batch_size: %d" % batch_size)
    if denoise_name:
tensorflow.train.AdamOptimizer
11,570
import tensorflow as tf
    seq_length=FLAGS.max_seq_length,
    is_training=False,
    drop_remainder=eval_drop_remainder)

result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
    tf.logging.info("***** Eval results *****")
    for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
tensorflow.logging.info
11,571
import tensorflow as tf
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
tensorflow.constant_initializer
11,572
import tensorflow as tf

def batch_norm(inp, name, phase, decay=0.9):
    channels = inp.get_shape().as_list()[3]
    with tf.variable_scope(name):
        moving_mean = get_variable("mean", shape=[channels], dtype=tf.float32,
                                   initializer=tf.constant_initializer(0.0), trainable=False)
        moving_variance = get_variable("var", shape=[channels], dtype=tf.float32,
                                       initializer=tf.constant_initializer(1.0), trainable=False)
tensorflow.variable_scope
11,573
import tensorflow as tf
            episode[i] = []
        total_step += 1
        if Done:
            break
    # Print out which step we're on, useful for debugging. (average recent 10 episode scores)
    if i_episode >= 10:
        print("Episode {}/{} ({})".format(
            i_episode + 1, num_episodes,
            np.max(np.mean(stats["episode_rewards"][:, i_episode-10:i_episode-1], axis=1))))
    return stats

tf.reset_default_graph()

global_step = tf.Variable(0, name="global_step", trainable=False)
policy_estimator = np.zeros(n_particles, dtype=object)
value_estimator = np.zeros(n_particles, dtype=object)

# call proper policy and value estimators for each envs
for i in range(n_particles):
    if ENV_NAME == "Pendulum-v0":
        policy_estimator[i] = PolicyEstimator_Pendulum(entropy_beta=ENTROPY_BETA, learning_rate=POLICY_LR, par_idx=i)
        value_estimator[i] = ValueEstimator_Pendulum(learning_rate=VALUE_LR, par_idx=i)
    if ENV_NAME == "MountainCarContinuous-v0":
        policy_estimator[i] = PolicyEstimator_MountainCarContinuous(entropy_beta=ENTROPY_BETA, learning_rate=POLICY_LR, par_idx=i)
        value_estimator[i] = ValueEstimator_MountainCarContinuous(learning_rate=VALUE_LR, par_idx=i)
tensorflow.Variable
11,574
import tensorflow as tf
epsilon_drift_penalty = tf.multiply(
    x=params["epsilon_drift"],
    y=tf.reduce_mean(input_tensor=tf.square(x=real_logits)),
    name="epsilon_drift_penalty"
tensorflow.square
11,575
import tensorflow as tf
for output in sorted(outputs_to_scales_to_logits):
    scales_to_logits = outputs_to_scales_to_logits[output]
    logits = tf.image.resize_bilinear(
        scales_to_logits[_MERGED_LOGITS_SCOPE],
        tf.shape(images)[1:3],
        align_corners=True)
    outputs_to_predictions[output].append(
        tf.expand_dims(tf.nn.softmax(logits), 4))

    if add_flipped_images:
        scales_to_logits_reversed = (
            outputs_to_scales_to_logits_reversed[output])
        logits_reversed = tf.image.resize_bilinear(
            tf.reverse_v2(scales_to_logits_reversed[_MERGED_LOGITS_SCOPE], [2]),
tensorflow.nn.softmax
11,576
import tensorflow as tf
matched_label = tf.gather(groundtruth_labels, matched_gt_indices)
matched_is_foreground = tf.cast(matched_label[:, 0] <= 0, tf.float32)
tensorflow.cast
11,577
from tensorflow.python.platform import gfile
        # variance in the test.
        # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
        # call to Save(), will delete s2, because max_to_keep is 2, and because
        # we already kept the old s1. s2 is very close in time to s1 so it gets
        # deleted.
        s4 = save.save(sess, os.path.join(save_dir, "s4"))
        self.assertEqual([s3, s4], save.last_checkpoints)

        # Check that s1 is still here, but s2 is gone.
        self.assertTrue(gfile.Exists(s1))
        self.assertFalse(gfile.Exists(s2))
        self.assertTrue(gfile.Exists(s3))
        self.assertTrue(gfile.Exists(s4))

class SaveRestoreWithVariableNameMap(tf.test.TestCase):

    def testNonReshape(self):
        save_path = os.path.join(self.get_temp_dir(), "basics")

        with self.test_session() as sess:
            # Build a graph with 2 parameter nodes, and Save and
            # Restore nodes for them.
            v0 = tf.Variable(10.0, name="v0")
            v1 = tf.Variable(20.0, name="v1")
tensorflow.python.platform.gfile.Exists
11,578
import tensorflow as tf
    return tf.convert_to_tensor(priors)

@tf.function
def assign_priors(gt_boxes, gt_labels, corner_form_priors,
                  iou_threshold=0.45):
    """Assign ground truth boxes and targets to priors.

    Args:
        gt_boxes (num_targets, 4): ground truth boxes.
        gt_labels (num_targets): labels of targets.
        priors (num_priors, 4): corner form priors
    Returns:
        boxes (num_priors, 4): real values for priors.
        labels (num_priors): labels for priors.
    """
    # size: num_priors x num_targets
    ious = iou_of(tf.expand_dims(gt_boxes, axis=0), tf.expand_dims(corner_form_priors, axis=1))
    # size: num_priors
    best_target_per_prior = tf.math.reduce_max(ious, axis=1)
    best_target_per_prior_index = tf.math.argmax(ious, axis=1)
    # size: num_targets
    best_prior_per_target = tf.math.reduce_max(ious, axis=0)
    best_prior_per_target_index = tf.math.argmax(ious, axis=0)

    targets = tf.range(tf.shape(best_prior_per_target_index)[0], dtype='int64')

    best_target_per_prior_index = tf.tensor_scatter_nd_update(
        best_target_per_prior_index, tf.expand_dims(best_prior_per_target_index, 1), targets)
    # 2.0 is used to make sure every target has a prior assigned
    best_target_per_prior = tf.tensor_scatter_nd_update(
        best_target_per_prior, tf.expand_dims(best_prior_per_target_index, 1),
        tf.ones_like(best_prior_per_target_index, dtype=tf.float32) * 2.0)
    # size: num_priors
tensorflow.expand_dims
11,579
import tensorflow as tf

def contra_step_lossV4(pred, tgt):
    # 50*50
    # Step-wise contrastive loss
    even = [2 * i for i in range(25)]
    odd = [2 * i + 1 for i in range(25)]
    pred1 = tf.gather(pred, even)
    pred2 = tf.gather(pred, odd)
    tgt1 = tf.gather(tgt, even)
    tgt2 = tf.gather(tgt, odd)
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)
    loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
    # loss = tf.maximum(0.0, tf.math.abs(tgt_larg - pred_larg) - tf.math.abs(tgt_small - pred_small))
    loss = tf.reduce_mean(loss)
    return loss
tensorflow.cast
11,580
from tensorflow.python.ops import math_ops

retrieved_per_k = math_ops.cumsum(
    array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
precision_per_k = math_ops.div(
    math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),
    name='precision_per_k')
relevant_precision_per_k = math_ops.mul(
tensorflow.python.ops.math_ops.to_double
11,581
import tensorflow as tf

def __call__(self, step):
    with tf.name_scope(self.name or "CyclicalLearningRate"):
        initial_learning_rate = tf.convert_to_tensor(
            self.initial_learning_rate, name="initial_learning_rate"
        )
        dtype = initial_learning_rate.dtype
        maximal_learning_rate = tf.cast(self.maximal_learning_rate, dtype)
        step_size = tf.cast(self.step_size, dtype)
        cycle = tf.floor(1 + step / (2 * step_size))
        x = tf.abs(step / step_size - 2 * cycle + 1)

        mode_step = cycle if self.scale_mode == "cycle" else step

        return initial_learning_rate + (
            maximal_learning_rate - initial_learning_rate
        ) * tf.maximum(tf.cast(0, dtype), (1 - x)) * self.scale_fn(mode_step)

def get_config(self):
tensorflow.abs
11,582
import tensorflow as tf
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
tensorflow.shape
11,583
import tensorflow as tf
    fields.InputDataFields.groundtruth_keypoints,
    fields.InputDataFields.groundtruth_instance_masks,
    fields.InputDataFields.groundtruth_area,
    fields.InputDataFields.groundtruth_is_crowd,
    fields.InputDataFields.groundtruth_difficult
]
for key in optional_label_keys:
    if key in input_dict:
        labels_dict[key] = input_dict[key]

if fields.InputDataFields.groundtruth_difficult in labels_dict:
    labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast(
        labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32)
return labels_dict

def _replace_empty_string_with_random_number(string_tensor):
    """Returns string unchanged if non-empty, and random string tensor otherwise.

    The random string is an integer 0 and 2**63 - 1, casted as string.
tensorflow.cast
11,584
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
             summary_writer=None):
    super(StepCounter, self).__init__(every_n_steps=every_n_steps)
    self._summary_tag = "global_step/sec"
    self._last_reported_step = None
    self._last_reported_time = None
    self._summary_writer = summary_writer
    if summary_writer is None and output_dir:
        self._summary_writer = SummaryWriterCache.get(output_dir)

def set_estimator(self, estimator):
    super(StepCounter, self).set_estimator(estimator)
    if self._summary_writer is None:
        self._summary_writer = SummaryWriterCache.get(estimator.model_dir)

def every_n_step_end(self, current_step, outputs):
    current_time = time.time()
    if self._last_reported_time is not None and self._summary_writer:
        added_steps = current_step - self._last_reported_step
        elapsed_time = current_time - self._last_reported_time
        steps_per_sec = added_steps / elapsed_time
        summary = Summary(value=[Summary.Value(tag=self._summary_tag,
                                               simple_value=steps_per_sec)])
        self._summary_writer.add_summary(summary, current_step)
    self._last_reported_step = current_step
tensorflow.contrib.learn.python.learn.summary_writer_cache.SummaryWriterCache.get
11,585
import tensorflow as tf
    with tf.tpu.bfloat16_scope():
        return two_stream_loss(FLAGS, features, labels, mems, is_training)
else:
    return two_stream_loss(FLAGS, features, labels, mems, is_training)

def get_classification_loss(FLAGS, features, n_class, is_training):
    """Loss for downstream classification tasks."""

    bsz_per_core = tf.shape(features["input_ids"])[0]

    inp = tf.transpose(features["input_ids"], [1, 0])
    seg_id = tf.transpose(features["segment_ids"], [1, 0])
    inp_mask = tf.transpose(features["input_mask"], [1, 0])
    label = tf.reshape(features["label_ids"], [bsz_per_core])

    xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
    run_config = xlnet.create_run_config(is_training, True, FLAGS)
tensorflow.shape
11,586
import tensorflow as tf
h = conv(x, scope='h_conv', filter_dims=[1, 1, channels//2], stride_dims=[1, 1], non_linear_fn=act_func)
h = tf.layers.max_pooling2d(h, pool_size=2, strides=2, padding='SAME')

print('attention h dims: ' + str(h.get_shape().as_list()))

# N = h * w
g = tf.reshape(g, shape=[-1, g.shape[1]*g.shape[2], g.get_shape().as_list()[-1]])

print('attention g flat dims: ' + str(g.get_shape().as_list()))

f = tf.reshape(f, shape=[-1, f.shape[1]*f.shape[2], f.shape[-1]])

print('attention f flat dims: ' + str(f.get_shape().as_list()))

s = tf.matmul(g, f, transpose_b=True)  # [bs, N, N]

beta = tf.nn.softmax(s)  # attention map

print('attention beta dims: ' + str(s.get_shape().as_list()))
tensorflow.reshape
11,587
import tensorflow as tf
predicted_action_templates_embedding = tf.stop_gradient(predicted_action_templates_embedding)
action_templates_embedding = choice * true_action_template_embedding + (1.0 - choice) * predicted_action_templates_embedding

dialogue_state_action_template = tf.concat(
    1,
    [
        dialogue_state,
tensorflow.concat
11,588
import tensorflow as tf
# 6. Sometimes we want the code to be robust enough to decide how many GPUs are appropriate
#    to run on. TensorFlow has a built-in function to detect this. It is useful when we want
#    the code to exploit GPU compute when enough GPU memory is available and to assign
#    specific operations to the GPU.
if tf.test.is_built_with_cuda():
    pass

# 7. We want to assign specific operations to the GPU. The following example performs some
#    simple calculations and assigns them to the main CPU and two secondary GPUs.
with tf.device('/cpu:0'):
    a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
    b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])

    with tf.device('/gpu:0'):
        c = tf.matmul(a, b)
        c = tf.reshape(c, [-1])

    with tf.device('/gpu:1'):
        d = tf.matmul(b, a)
        flat_d = tf.reshape(d, [-1])

    combined = tf.multiply(c, flat_d)
print(sess.run(combined))
tensorflow.matmul
11,589
import tensorflow as tf
x = tf.layers.dense(x, size, activation=tf.nn.relu)
value = tf.layers.dense(x, 1)[..., 0]
mean = tf.check_numerics(mean, "mean")
logstd = tf.check_numerics(logstd, "logstd")
value = tf.check_numerics(value, "value")
policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
tensorflow.check_numerics
11,590
import tensorflow as tf
    return pred_strings

def id2word(self, word_ids, name=None):
    mapping_strings = self.load_word_data()
    reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(
        mapping_strings, name=name
    )
    word_strings = reverse_vocab_tags.lookup(tf.to_int64(word_ids))
    return word_strings

def loss_layer(self, preds, ground_true, nwords, crf_params):
    with tf.name_scope("CRF_log_likelihood"):
        log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
            preds, ground_true, nwords, crf_params
        )
    loss = tf.reduce_mean(-log_likelihood)
    # regularizer = tf.contrib.layers.l2_regularizer(0.001)
    # reg = regularizer(embedding_variable)
    # loss += reg
    return loss

def crf_decode_layer(self, logits, crf_params, nwords):
tensorflow.name_scope
11,591
import tensorflow as tf
    end_logits = tf.squeeze(
        conv(self._attention(tf.concat([self.enc[1], self.enc[3]], axis=-1), name="attn2"),
             1, bias=False, name="end_pointer"), -1)
else:
    start_logits = tf.squeeze(
        conv(tf.concat([self.enc[1], self.enc[2]], axis=-1), 1, bias=False, name="start_pointer"), -1)
    end_logits = tf.squeeze(
        conv(tf.concat([self.enc[1], self.enc[3]], axis=-1), 1, bias=False, name="end_pointer"), -1)

self.logits = [mask_logits(start_logits, mask=tf.reshape(self.c_mask, [N, -1])),
tensorflow.concat
11,592
import tensorflow as tf
for degree in range(0, max_band + 1):
    for order in range(-degree, degree + 1):
        degree_l.append(degree)
        order_m.append(order)
return (tf.convert_to_tensor(value=degree_l),
        tf.convert_to_tensor(value=order_m))
tensorflow.convert_to_tensor
11,593
import tensorflow as tf
}

vars_assign_op = tf.group([
    tf.assign(tf_var, ph)
    for (tf_var, ph) in zip(vars, var_phs.values())
tensorflow.assign
11,594
from tensorflow.python.framework import ops
else:
    init = ops.convert_to_tensor(init, name="init")
tensorflow.python.framework.ops.convert_to_tensor
11,595
import tensorflow as tf
        is_real_example=True)
    return feature

def file_based_convert_examples_to_features(
        examples, label_list, max_seq_length, tokenizer, output_file):
    """Convert a set of `InputExample`s to a TFRecord file."""

    writer = tf.python_io.TFRecordWriter(output_file)

    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

        tf_example = from_record_to_tf_example(ex_index, example, label_list,
                                               max_seq_length, tokenizer)
        writer.write(tf_example.SerializeToString())
tensorflow.python_io.TFRecordWriter
11,596
import tensorflow as tf

def benchmark_graph(self):
    """Benchmark Graph performance."""
    hparams = get_default_hparams()
    tf.enable_resource_variables()
    for sample_size in [10, 25, 50, 100, 200]:
        hparams.n_samples = sample_size
        tf.reset_default_graph()
        with tf.Graph().as_default():
tensorflow.enable_resource_variables
11,597
import tensorflow as tf
self.assertEqual(x_.shape, v_.shape)
self.assertEqual(x_out.shape, samples.shape)
self.assertEqual(x_.shape, x_out.shape)
self.assertEqual(x_accept_prob.shape, (hparams.n_samples,))

# Graph mode testing
with tf.Graph().as_default():
    energy_fn, _, _ = l2hmc.get_scg_energy_fn()
    dynamics = l2hmc.Dynamics(
        x_dim=hparams.x_dim,
        minus_loglikelihood_fn=energy_fn,
        n_steps=hparams.n_steps,
        eps=hparams.eps)
tensorflow.Graph
11,598
import tensorflow as tf
X = decoder_conf('u4', X, 256, F, 2, norm, reuse_decoder, is_train, self.args.dropout)  # 14 > 28
X = decoder_conf('d4', X, 256, F, 1, norm, reuse_decoder, is_train, self.args.dropout)  # 28 > 30
if self.args.skip_connections:
    X = tf.concat((X, X1), axis=-1)
X = decoder_conf('u5', X, 128, F, 2, norm, reuse_decoder, is_train, self.args.dropout)  # 30 > 60
X_LATE = X
X = decoder_conf('d5', X, 128, F, 1, norm, reuse_decoder, is_train, self.args.dropout)  # 60 > 62
if self.args.skip_connections:
    X = tf.concat((X, X0), axis=-1)
X = decoder_conf('u6', X, 64, F, 2, norm, reuse_decoder, is_train, self.args.dropout)  # 62 > 124
X = decoder_conf('d6', X, 64, 5, 1, norm, reuse_decoder, is_train, self.args.dropout)  # 124 > 128
X = decoder_conf('out', X, self.args.num_classes, 1, 1, '', reuse_decoder, is_train,
                 slope=1.0, stddev=0.02, use_bias=False)
tensorflow.concat
11,599