Columns: seed (string, lengths 25 to 2.89k), seed_api (string, lengths 14 to 102), index (int64, 0 to 14.8k). Each record below gives a seed code snippet, then the fully qualified TensorFlow API it exercises (seed_api), then its index.
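As a minimal sketch of how such a table can be consumed, assuming the records are exported to a hypothetical JSON-lines file named seeds.jsonl with these three fields (no export path or file name is given by this dump):

import json

# Iterate over (seed, seed_api, index) records, assuming a hypothetical
# JSON-lines export named "seeds.jsonl" with one record per line.
with open("seeds.jsonl") as f:
    for line in f:
        row = json.loads(line)
        # row["seed"] holds the code snippet; row["seed_api"] names the
        # fully qualified TensorFlow API the snippet exercises.
        print(row["index"], row["seed_api"], len(row["seed"]))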
import tensorflow as tf

output_weights = tf.get_variable(
    "output_weights", [num_labels, hidden_size],
    initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
    "output_bias", [num_labels], initializer=tf.zeros_initializer())

with tf.variable_scope("loss"):
    if is_training:
        # I.e., 0.1 dropout
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
tensorflow.zeros_initializer
4,900
import tensorflow as tf

def __init__(self, feature_extractor, gpu, batch_size):
    self.feature_extractor = feature_extractor
    self.gpu = gpu
    self.batch_size = batch_size
    global sess
    sess = init_gpu(gpu)
    global graph
    graph = tf.get_default_graph()
    model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
    if not os.path.exists(os.path.join(model_dir, "mtcnn.p.gz")):
        os.makedirs(model_dir, exist_ok=True)
        get_file("mtcnn.p.gz", "http://sandlab.cs.uchicago.edu/fawkes/files/mtcnn.p.gz",
                 cache_dir=model_dir, cache_subdir='')
tensorflow.get_default_graph
4,901
import tensorflow as tf

num_dims = shape.size
if tt_rank.size == 1:
    tt_rank = tt_rank * np.ones(num_dims - 1)
    tt_rank = np.insert(tt_rank, 0, 1)
    tt_rank = np.append(tt_rank, 1)
tt_rank = tt_rank.astype(int)
tt_cores = [None] * num_dims
with tf.name_scope(name):
    for i in range(num_dims):
        curr_core_shape = (tt_rank[i], shape[i], tt_rank[i + 1])
        tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev,
                                       dtype=dtype)
    return TensorTrain(tt_cores, shape, tt_rank)

def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
                                   mean=0., stddev=1., dtype=tf.float32,
                                   name='t3f_tensor_batch_with_random_cores'):
    """Generate a batch of TT-tensors of given shape with N(mean, stddev^2) cores.
tensorflow.random_normal
4,902
import tensorflow as tf

def contra_step_lossV5(pred, tgt, resample=1):
    # p = tf.print('begin loss v5', [resample, pred.shape, tgt.shape])
    # with tf.control_dependencies([p]):
    pred_flat = tf.reshape(pred, [-1])
    tgt_flat = tf.reshape(tgt, [-1])
    batch = tf.stack([pred_flat, tgt_flat], 1)
    num_sam = tools.shape(batch)[0]
    index = tf.range(num_sam)
    divider = tf.constant(resample, dtype=tf.float32)

    def sample_compute(cur_loss, i):
        batch1 = tf.gather(batch, tf.random.shuffle(index))
tensorflow.stack
4,903
import tensorflow as tf

        self._build_graph()
        # save info
        self.saver = tf.train.Saver()
        # initialize the model
        self.sess.run(tf.global_variables_initializer())

    def _build_graph(self):
        """ Builds the computation graph with Tensorflow """
tensorflow.global_variables_initializer
4,904
import tensorflow as tf

self.rank_loss = self.loss_func(train_output, reshaped_train_labels, self.propensity_weights)
pw_list = tf.unstack(self.propensity_weights, axis=1)  # Compute propensity weights
self.click_metrics = self.click_loglikelihood(reshaped_train_labels,
                                              self.propensity, train_output)
tf.summary.scalar('click_metrics', self.click_metrics, collections=['train'])
for i in range(len(pw_list)):
    tf.summary.scalar('Inverse Propensity weights %d' % i,
                      tf.reduce_mean(pw_list[i]), collections=['train'])
tf.summary.scalar('Rank Loss', tf.reduce_mean(self.rank_loss), collections=['train'])

# Compute examination loss
self.relevance_weights = self.get_normalized_weights(self.logits_to_prob(train_output))
self.exam_loss = self.loss_func(self.propensity, reshaped_train_labels, self.relevance_weights)
rw_list = tf.unstack(self.relevance_weights, axis=1)  # Compute propensity weights
for i in range(len(rw_list)):
tensorflow.reduce_mean
4,905
import tensorflow as tf

x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck"))
d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
tensorflow.less
4,906
import tensorflow as tf

last_forward = tf.gather_nd(encoder_outputs_[:, :, :cell_output_size], indices)
last_forward.set_shape([None, cell_output_size])

if encoder.final_state == 'concat_last':  # concats last states of all backward layers (full LSTM states)
    encoder_state_ = tf.concat(encoder_states_, axis=1)
elif encoder.final_state == 'average':
    mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32)
    mask = tf.expand_dims(mask, axis=2)
    encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1)
elif encoder.final_state == 'average_inputs':
    mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32)
    mask = tf.expand_dims(mask, axis=2)
    encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1)
elif encoder.bidir and encoder.final_state == 'last_both':
    encoder_state_ = tf.concat([last_forward, last_backward], axis=1)
elif encoder.final_state == 'none':
    encoder_state_ = tf.zeros(shape=[batch_size, 0])
elif encoder.bidir and not encoder.final_state == 'last_forward':
    # last backward hidden state
    encoder_state_ = last_backward
else:
    # last forward hidden state
    encoder_state_ = last_forward

if encoder.bidir and encoder.bidir_projection:
    encoder_outputs_ = dense(encoder_outputs_, cell_output_size, use_bias=False, name='bidir_projection')
tensorflow.reduce_sum
4,907
import tensorflow as tf

var_value = tf.contrib.framework.load_variable(checkpoint_dir, from_name)
assign_ops.append(tf.assign(var, var_value))
tensorflow.assign
4,908
import tensorflow as tf

# Monitor losses and entropy in tensorboard
tf.summary.scalar('policy_loss', policy_loss)
tf.summary.scalar('qf1_loss', qf1_loss)
tf.summary.scalar('qf2_loss', qf2_loss)
tf.summary.scalar('value_loss', value_loss)
tf.summary.scalar("Imitation_loss", self.actor_loss_di)
tf.summary.scalar('entropy', self.entropy)
tf.summary.scalar('importance weight', tf.reduce_mean(self.weight_ph))
if ent_coef_loss is not None:
    tf.summary.scalar('ent_coef_loss', ent_coef_loss)
tensorflow.summary.scalar
4,909
import tensorflow as tf

# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
    t = example[name]
    if t.dtype == tf.int64:
        t = tf.to_int32(t)
    example[name] = t

return example

def input_fn(params):
tensorflow.to_int32
4,910
import tensorflow as tf

elif actL == 'esp' or actL == 'relu':  # r2 score
    norm = tf.reduce_mean(tf.squared_difference(Y, tf.reduce_mean(Y)))
    accuracy = 1 - tf.divide(tf.reduce_mean(tf.squared_difference(an, Y)), norm)
elif actL == 'softmax':  # accuracy score for multiclass classification
    Yp = tf.sigmoid(betan * hn)
    correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
    accuracy = tf.reduce_mean(tf.cast(correct, "float"))

# ----------------- Initialize the graph and start the session -----------------
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Run the initialization
    sess.run(init)
    jj = 0
    for epoch in range(num_iterations):
        _, epoch_cost, epoch_grad, epoch_acc_train = sess.run(
            [min, cost, grads_var, accuracy], feed_dict={X: X_tr, Y: Y_tr})
        # Print the cost every interval epoch (here uses the inhomogenous interval but you can change it)
        if jj < e_len and epoch % epoch_sample[jj] == 0:
tensorflow.global_variables_initializer
4,911
import tensorflow as tf

            # print(self.name, 'EP:', GLOBALE_EP)
            # GLOBALE_EP += 1  # add one episode
            # break  # end this episode
            # global_step = SESS.run(self.global_AC.global_step)

if __name__ == '__main__':
    SESS = tf.Session()
    with tf.device('/cpu:0'):
        OPT = tf.train.AdamOptimizer(1e-4)  # later, mainly this optimizer's apply_gradients op is used
        # OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')  # define the critic training procedure
        GLOBAL_AC = ACnet(scope=GLOBAL_NET_SCOPE)  # create the central brain GLOBAL_AC; only the structure (A and C parameters) is created
        workers = []
        for i in range(N_workers):  # N_workers equals the number of CPUs
tensorflow.Session
4,912
import tensorflow as tf

        precision_range=(0.0, 1.0),
        num_anchors=20,
        weights=1.0,
        dual_rate_factor=0.1,
        label_priors=None,
        surrogate_type='xent',
        lambdas_initializer=tf.constant_initializer(1.0),
        reuse=None,
        variables_collections=None,
        trainable=True,
        scope=None):
    """Computes precision-recall AUC loss.
tensorflow.constant_initializer
4,913
import tensorflow as tf

b = tf.Variable(tf.zeros(shape[1]))
self.x = x
self.w = w
self.b = b
y = tf.nn.softmax(tf.matmul(x, w) + b)
y_ = tf.placeholder(tf.float32, [None, shape[1]])
self.y_ = y_
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
self.cross_entropy = cross_entropy
self.cross_entropy_grads = tf.gradients(cross_entropy, [w, b])
self.sess = tf.Session()
# In order to get and set the weights, we pass in the loss function to
# Ray's TensorFlowVariables to automatically create methods to modify
# the weights.
self.variables = ray.experimental.tf_utils.TensorFlowVariables(
    cross_entropy, self.sess)

def loss(self, xs, ys):
    """Computes the loss of the network."""
tensorflow.gradients
4,914
from tensorflow.python.training import moving_averages

def build_update_ops():
    """Builds the exponential moving average update ops."""
    update_mean_op = moving_averages.assign_moving_average(
        variable=self._moving_mean,
        value=mean,
        decay=self._decay_rate,
        name="update_moving_mean").op
    update_second_moment_op = moving_averages.assign_moving_average(
        variable=self._moving_second_moment,
        value=second_moment,
        decay=self._decay_rate,
        name="update_moving_second_moment").op
    return update_mean_op, update_second_moment_op

def build_no_ops():
    return (tf.no_op(), tf.no_op())
tensorflow.python.training.moving_averages.assign_moving_average
4,915
import tensorflow as tf

else:
    entropy = - targets * tf.log(targets) - (1. - targets) * tf.log(1. - targets)
    return sigmoid_ce_with_logits(logits, tf.ones_like(logits) * targets) - entropy

def gradient_difference_loss(x, y):
    x_h_diff = x[:, 1:] - x[:, :-1]
    x_w_diff = x[:, :, 1:] - x[:, :, :-1]
    y_h_diff = y[:, 1:] - y[:, :-1]
    y_w_diff = y[:, :, 1:] - y[:, :, :-1]
    h_diff = tf.abs(tf.abs(x_h_diff) - tf.abs(y_h_diff))
    w_diff = tf.abs(tf.abs(x_w_diff) - tf.abs(y_w_diff))
    return h_diff + tf.transpose(w_diff)

def leaky_relu(x, leak=0.2, name='leaky_relu'):
    with tf.variable_scope(name):
        f1 = 0.5 * (1 + leak)
        f2 = 0.5 * (1 - leak)
        return f1 * x + f2 * abs(x)
tensorflow.abs
4,916
import tensorflow as tf
tensorflow.reshape
4,917
import tensorflow as tf

z1 = z0 + 1
x0_clip = tf.clip_by_value(x0, zero, max_x)
x1_clip = tf.clip_by_value(x1, zero, max_x)
y0_clip = tf.clip_by_value(y0, zero, max_y)
y1_clip = tf.clip_by_value(y1, zero, max_y)
z0_clip = tf.clip_by_value(z0, zero, max_z)
z1_clip = tf.clip_by_value(z1, zero, max_z)
dim3 = width
dim2 = width * height
tensorflow.clip_by_value
4,918
from tensorflow.python.ops import sparse_ops

    expand_dims = [dim]
    expanded_shape = array_ops.concat(
        0, (array_ops.slice(tensor.shape, [0], expand_dims), [1],
            array_ops.slice(tensor.shape, expand_dims, [-1])),
        name='expanded_shape')
    expanded = sparse_ops.sparse_reshape(
        tensor, shape=expanded_shape, name='expand')
    if multiple == 1:
        return expanded
    return sparse_ops.sparse_concat(
        dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)

# Dense.
expanded = array_ops.expand_dims(
    tensor, dim if (dim >= 0) else (dim - 1), name='expand')
if multiple == 1:
    return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
tensorflow.python.ops.sparse_ops.sparse_concat
4,919
import tensorflow as tf

    What this loss function expresses: the number of output feature maps equals
    the number of keypoints, i.e. for each pixel the output gives the confidence
    that it belongs to each keypoint.
    """
    batch_size = y_pred.shape[0]
    num_of_joints = y_pred.shape[-1]  # number of keypoints
    y_pred = tf.reshape(y_pred, shape=(batch_size, -1, num_of_joints))  # flatten width and height
    heatmap_pred_list = tf.split(value=y_pred,
                                 num_or_size_splits=num_of_joints,
                                 axis=-1)  # split out each keypoint's feature map, [batch_size, -1, 1]
    y_true = tf.reshape(y_true, shape=(batch_size, -1, num_of_joints))
    heatmap_true_list = tf.split(value=y_true,  # apply the same ops to y_true as to y_pred
                                 num_or_size_splits=num_of_joints,
                                 axis=-1)
    losses = []  # compute each keypoint's loss, then accumulate and average
    for i in range(num_of_joints):
        heatmap_pred = tf.squeeze(heatmap_pred_list[i])
        heatmap_true = tf.squeeze(heatmap_true_list[i])
        loss = 0.5 * tf.losses.mean_squared_error(y_pred=heatmap_pred * true_weight[:, i],
                                                  y_true=heatmap_true * true_weight[:, i])
tensorflow.split
4,920
import tensorflow as tf

self.error_rate = 1. - \
    tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
        self.end_points_D['class_logits'], targets, 1)))

if gpu_idx == 0:
    update = tf.assign(num_error_rate, num_error_rate + 1.)
    with tf.control_dependencies([update]):
        tc = tf.maximum(.01, 1. / num_error_rate)
    update = tf.assign(avg_error_rate,
                       (1. - tc) * avg_error_rate + tc * self.error_rate)
    with tf.control_dependencies([update]):
        self.d_loss_class = tf.identity(self.d_loss_class)

self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
    logits=self.end_points_D['D_on_G_logits'],
    labels=tf.zeros_like(self.end_points_D['D_on_G_logits']))
self.d_loss_class = tf.reduce_mean(self.d_loss_class)
tensorflow.assign
4,921
import tensorflow as tf

if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

shape = tensor.shape.as_list()

non_static_indexes = []
for (index, dim) in enumerate(shape):
    if dim is None:
        non_static_indexes.append(index)

if not non_static_indexes:
    return shape

dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
    shape[index] = dyn_shape[index]
return shape

def reshape_to_matrix(input_tensor):
    """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
    ndims = input_tensor.shape.ndims
    if ndims < 2:
        raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                         (input_tensor.shape))
    if ndims == 2:
        return input_tensor
tensorflow.shape
4,922
import tensorflow as tf

# self.fc0 = self.fc_layer(self.pool5, 2048, 1024, "fc0")
# self.relu1 = tf.nn.relu(self.fc0)
# if train_mode is not None:
#     self.relu1 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu1, self.dropout), lambda: self.relu1)
# elif self.trainable:
#     self.relu1 = tf.nn.dropout(self.relu1, self.dropout)
self.y_soft = tf.nn.softmax(self.final_layer)
self.logits = tf.reshape(self.final_layer, (-1, 3))
print(self.logits)
self.predicted = tf.argmax(self.final_layer, axis=3)
print(self.predicted.get_shape().as_list())
# cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels, logits=logits, name=None)
# self.loss = tf.reduce_mean(cross_entropy, name='xcross_entropy')
# if (last_layer_type == "sigmoid"):
#     self.prob = tf.nn.sigmoid(self.fc1, name="prob")
# elif (last_layer_type == "softmax"):
#     self.prob = tf.nn.softmax(self.fc1, name="prob")
tensorflow.argmax
4,923
import tensorflow as tf

# !
self.obs_space = env.observation_space.shape
self.r = tf.placeholder(tf.float32, (None, 1))
self.ac = tf.placeholder(tf.float32, (None, self.act_space))
self.adv = tf.placeholder(tf.float32, [None])  # unused

# specific to FeUdal
self.prev_g = tf.placeholder(tf.float32, (None, None, self.g_dim))
self.ri = tf.placeholder(tf.float32, (None,))
self.s_diff = tf.placeholder(tf.float32, (None, self.g_dim))

def build_perception(self):
    self._obs = tf.expand_dims(self.obs, -1)  # !
    self._obs = tf.expand_dims(self._obs, -1)  # !
    conv1 = tf.layers.conv2d(inputs=self._obs,
                             filters=16,
                             kernel_size=[2, 1],  # ! kernel_size = [8,8]
                             activation=tf.nn.elu,
                             strides=1)  # ! strides = 4
    conv2 = tf.layers.conv2d(inputs=conv1,
                             filters=32,
                             kernel_size=[2, 1],  # ! kernel_size = [4,4]
                             activation=tf.nn.elu,
                             strides=1)  # ! strides = 2

    flattened_filters = policy_utils.flatten(conv2)
    self.z = tf.layers.dense(inputs=flattened_filters,
tensorflow.layers.conv2d
4,924
import tensorflow as tf

                          filter_depths=[128, 128, 512], kernel_size=3)
x = self.__identity_block(stage=2, block=5, inputs=x,
                          filter_depths=[128, 128, 512], kernel_size=3)

x = self.__conv_block(stage=3, block=0, inputs=x,
                      filter_depths=[256, 256, 1024],
                      kernel_size=3, stride=2)
x = self.__identity_block(stage=3, block=1, inputs=x,
                          filter_depths=[256, 256, 1024], kernel_size=3)
x = self.__identity_block(stage=3, block=2, inputs=x,
                          filter_depths=[256, 256, 1024], kernel_size=3)

x = tf.keras.layers.AveragePooling2D(pool_size=7, strides=1,
                                     padding="valid", name="pool")(x)
x = tf.reshape(x, shape=(-1, 1024))

self.logits = self.__fully_connected(name="fc_nsfw", inputs=x, num_outputs=2)
self.predictions = tf.nn.softmax(self.logits, name="predictions")

"""Get weights for layer with given name """
def __get_weights(self, layer_name, field_name):
    if not layer_name in self.weights:
        raise ValueError("No weights for layer named '{}' found"
tensorflow.keras.layers.AveragePooling2D
4,925
import tensorflow as tf

correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)

cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
cost = tf.where(cost > 0, cost, 0, name='value')

assert_op = tf.Assert(tf.is_finite(cost), [cost])
tensorflow.where
4,926
import tensorflow as tf

eval_config.num_steps = 1

with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
        train_input = PTBInput(config=config, data=train_data, name="TrainInput")
        with tf.variable_scope("Model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config, input_=train_input)
        tf.summary.scalar("Training Loss", m.cost)
        tf.summary.scalar("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
        valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
        tf.summary.scalar("Validation Loss", mvalid.cost)

    with tf.name_scope("Test"):
        test_input = PTBInput(
            config=eval_config, data=test_data, name="TestInput")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            mtest = PTBModel(is_training=False, config=eval_config, input_=test_input)

    models = {"Train": m, "Valid": mvalid, "Test": mtest}
    for name, model in models.iteritems():
        model.export_ops(name)
    metagraph = tf.train.export_meta_graph()
tensorflow.variable_scope
4,927
import tensorflow as tf

# raw mask for positive > 0.5, and for negative < 0.3
# each positive example has one label
positive_mask = glabels > 0  # tf.logical_and(glabels > 0, gscores > params['match_threshold'])
fpositive_mask = tf.cast(positive_mask, tf.float32)
n_positives = tf.reduce_sum(fpositive_mask)
# negative examples are those whose max_overlap is still lower than neg_threshold;
# note that some positives may also have lower jaccard
# entries whose gscores is 0 were either ignored during anchor encoding or are
# anchors with 0 overlap with all ground truth
# negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(tf.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.)
tensorflow.reduce_sum
4,928
import tensorflow as tf

        margin=margin,
        use_semi_hard=use_semi_hard,
        anchor_positive_mining_distances=anchor_positive_mining_distances,
        anchor_match_mining_distance_matrix=(
            anchor_match_mining_distance_matrix)))

negative_distances = tf.boolean_mask(
    negative_distances,
    mask=negative_distances < negative_distances.dtype.max)
negative_mining_distances = tf.boolean_mask(
    negative_mining_distances,
    mask=negative_distances < negative_distances.dtype.max)

active_triplet_ratio = (
    tf.cast(num_active_triplets, dtype=tf.float32) / num_total_triplets)
active_mining_triplet_ratio = (
    tf.cast(num_active_mining_triplets, dtype=tf.float32) /
    num_total_triplets)

active_loss = (
    loss / tf.math.maximum(1e-12, tf.stop_gradient(active_triplet_ratio)))
active_mining_loss = (
    mining_loss /
    tf.math.maximum(1e-12, tf.stop_gradient(active_mining_triplet_ratio)))

tag = 'SemiHardNegative' if use_semi_hard else 'HardNegative'
summaries = {
tensorflow.cast
4,929
import tensorflow as tf

    self.char_mat, self.qh), [N * QL, CL, dc])
ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
tensorflow.nn.dropout
4,930
import tensorflow as tf

features = tf.parse_single_example(serialized_example,
                                   features={
                                       'label': tf.FixedLenFeature([], tf.int64),
                                       'img_raw': tf.FixedLenFeature([], tf.string),
                                   })
image = tf.decode_raw(features['img_raw'], tf.uint8)
label = tf.cast(features['label'], tf.int32)
image = tf.reshape(image, [4096, 1])
return image, label

def get_batch(image, label, batch_size, crop_size):
    # print(image.shape)
tensorflow.cast
4,931
import tensorflow as tf

flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
tensorflow.app.run
4,932
import tensorflow as tf

num_dims = shape[0].size
if tt_rank.size == 1:
    tt_rank = tt_rank * np.ones(num_dims - 1)
    tt_rank = np.concatenate([[1], tt_rank, [1]])

shape = shape.astype(int)
tt_rank = tt_rank.astype(int)

tt_cores = [None] * num_dims
with tf.name_scope(name):
    for i in range(num_dims):
        curr_core_shape = (batch_size, tt_rank[i], shape[0][i],
                           shape[1][i], tt_rank[i + 1])
        tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev,
                                       dtype=dtype)
tensorflow.name_scope
4,933
from tensorflow.contrib.learn.python.learn.estimators import test_data

            iris.data[:, i], dtype=dtypes.float32), (-1, 1))
    })
    # The following shows how to provide the SparseTensor data for
    # a SparseColumn.
    features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
        values=('en', 'fr', 'zh'),
        indices=((0, 0), (0, 1), (60, 0)),
        dense_shape=(len(iris.target), 2))
    labels = array_ops.reshape(
        constant_op.constant(iris.target, dtype=dtypes.int32), (-1, 1))
    return features, labels

iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
    feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
    feature_column.bucketized_column(
        cont_features[i],
        test_data.get_quantile_based_buckets(iris.data[:, i], 10))
    for i in range(4)
]
linear_features.append(
    feature_column.sparse_column_with_hash_bucket(
        'dummy_sparse_column', hash_bucket_size=100))
tensorflow.contrib.learn.python.learn.estimators.test_data.prepare_iris_data_for_logistic_regression
4,934
import tensorflow as tf

def dense(inputs, hidden, use_bias=True, scope="dense"):
    with tf.variable_scope(scope):
        shape = tf.shape(inputs)
        dim = inputs.get_shape().as_list()[-1]
        out_shape = [shape[idx] for idx in range(
            len(inputs.get_shape().as_list()) - 1)] + [hidden]
        flat_inputs = tf.reshape(inputs, [-1, dim])
        W = tf.get_variable("W", [dim, hidden])
        res = tf.matmul(flat_inputs, W)
        if use_bias:
            b = tf.get_variable(
                "b", [hidden], initializer=tf.constant_initializer(0.))
            res = tf.nn.bias_add(res, b)
        res = tf.reshape(res, out_shape)
        return res
tensorflow.constant_initializer
4,935
import tensorflow as tf

if grad_norm_clipping is not None:
    for i, (grad, var) in enumerate(gradients):
        if grad is not None:
            gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)

with tf.variable_scope("input_info", reuse=False):
tensorflow.clip_by_norm
4,936
import tensorflow as tf

    # for i in range(nor_img_all.shape[0]):
    #     imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img/nor_img_' + str(i) + '.png', nor_img_all[i, :, :, 0])
    #     imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img/adv_img_' + str(i) + '.png', adv_img_all[i, :, :, 0])
    #     imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/noise_img/noise_img_' + str(i) + '.png', noise_img_all[i, :, :, 0])
    return None

def apply_attack_loop(hps):
    # Construct graph
    images, labels = input_name.build_input(
        FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode)  # FLAGS.mode='attack', batch_size=200
    Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
    Res.build_graph()
    saver = tf.train.Saver()

    # Open session and restore checkpoint
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    tf.train.start_queue_runners(sess)
    ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)  # Choose dir according to rt
    tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
    num_sample = hps.batch_size * FLAGS.eval_batch_count

    # Initialize results to save
    entropy_test_adv_all = np.array([])
    confidence_test_adv_all = np.array([])
    entropy_test_nor_all = np.array([])
    confidence_test_nor_all = np.array([])
    logits_adv_all = np.reshape(np.array([]), (0, 64))
    logits_nor_all = np.reshape(np.array([]), (0, 64))
tensorflow.ConfigProto
4,937
import tensorflow as tf

def test_op_shape(self):
    with self.test_session():
        batcher = dynamic_batching._Batcher(minimum_batch_size=1,
                                            maximum_batch_size=1,
                                            timeout_ms=None)
        _, computation_id = batcher.get_inputs([tf.int32])
        self.assertEqual([], computation_id.shape)

class DynamicBatchingBenchmarks(tf.test.Benchmark):

    def benchmark_batching_small(self):
        with tf.Session() as session:
            @dynamic_batching.batch_fn
            def f(a, b):
                return a + b

            outputs = []
            for _ in xrange(1000):
                outputs.append(f(tf.ones([1, 10]), tf.ones([1, 10])))
            op_to_benchmark = tf.group(*outputs)

            tf.train.start_queue_runners()
            self.run_op_benchmark(
                name='batching_many_small',
                sess=session,
tensorflow.Session
4,938
from tensorflow.python.ops import logging_ops as logging

def after_create_session(self, session, _):
    assert self._init_op.graph == ops.get_default_graph()
    assert self._is_initialized_op.graph == self._init_op.graph
    while True:
        try:
            if session.run(self._is_initialized_op):
                break
            elif self._is_chief:
                session.run(self._init_op)
            else:
                time.sleep(1)
        except RuntimeError as e:
            logging.info(e)

class GMM(estimator.Estimator):
    """An estimator for GMM clustering."""
    SCORES = 'scores'
    ASSIGNMENTS = 'assignments'
    ALL_SCORES = 'all_scores'

    def __init__(self,
                 num_clusters,
                 model_dir=None,
tensorflow.python.ops.logging_ops.info
4,939
import tensorflow as tf

def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM',
                  softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)
        print("querry_size mismatch")
        query = tf.concat(values=[
            query,
tensorflow.concat
4,940
import tensorflow as tf

# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)

one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)

return (loss, per_example_loss, logits, probabilities)

def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
tensorflow.reduce_mean
4,941
import tensorflow as tf

# Get gradient from returned list of length 1.
mixed_gradients = tf.gradients(
    ys=mixed_loss, xs=[mixed_images], name="gradients")[0]
print_obj(func_name, "mixed_gradients", mixed_gradients)

# Get gradient's L2 norm.
mixed_norms = tf.sqrt(
    x=tf.reduce_sum(
        input_tensor=tf.square(
            x=mixed_gradients,
            name="squared_grads"),
        axis=[1, 2, 3]) + 1e-8)
print_obj(func_name, "mixed_norms", mixed_norms)

# Get squared difference from target of 1.0.
squared_difference = tf.square(
tensorflow.square
4,942
import tensorflow as tf

signed_logits_difference = labels_difference * logits_difference
raw_loss = losses_utils.weighted_surrogate_loss(
    labels=tf.ones_like(signed_logits_difference),
    logits=signed_logits_difference,
    surrogate_type=surrogate_type)
weighted_loss = weights_product * raw_loss

# Zero out entries of the loss where labels_difference is zero (so loss is
# only computed on pairs with different labels).
loss = tf.reduce_mean(tf.abs(labels_difference) * weighted_loss, 0) * 0.5

loss = tf.reshape(loss, original_shape)
return loss, {}

def recall_at_precision_loss(labels,
                             logits,
                             target_precision,
                             weights=1.0,
                             dual_rate_factor=0.1,
                             label_priors=None,
tensorflow.reshape
4,943
from tensorflow.python.framework import op_def_registry as _op_def_registry

    result = _op_def_lib.apply_op(
        "XlaRecv", dtype=dtype, shape=shape, tensor_name=tensor_name,
        name=name if name else "XlaRecv")
    return result

def _InitOpDefLibrary():
    op_list = _op_def_pb2.OpList()
    _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
    _op_def_registry.register_op_list(op_list)
    op_def_lib = _op_def_library.OpDefLibrary()
    op_def_lib.add_op_list(op_list)
    return op_def_lib

_InitOpDefLibrary.op_list_ascii = """op {
  name: "_Recv"
  output_arg {
    name: "tensor"
    type_attr: "tensor_type"
  }
tensorflow.python.framework.op_def_registry.register_op_list
4,944
import tensorflow as tf

gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))

o = tf.reshape(o, shape=[-1, height, width, num_channels // 2])  # [bs, h, w, C]
o = conv(o, scope='attn_conv', filter_dims=[1, 1, channels],
         stride_dims=[1, 1], non_linear_fn=act_func)
x = gamma * o + x
tensorflow.reshape
4,945
from tensorflow.python.framework import ops

        (self._batch_ndims_static, self._event_ndims_static, ndims))
    return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
tensorflow.python.framework.ops.convert_to_tensor
4,946
import tensorflow as tf
tensorflow.variable_scope
4,947
import tensorflow as tf

    return attn_result

def mean_pooling_for_unselected_head(
        unhead_org_idx, sl_unhead, rep_unhead_mask,
        dep_org_idx, sl_dep, rep_dep_mask,
        rep_dep_tensor, direction):
    with tf.name_scope('pooling_for_un_head'):
        undep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_unhead, 1])  # [bs, sluh, sld]
        unhead_idxs = tf.tile(tf.expand_dims(unhead_org_idx, 2), [1, 1, sl_dep])  # [bs, sluh, sld]
        if direction is None:
            direct_mask_un = tf.not_equal(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
        else:
            if direction == 'forward':
                direct_mask_un = tf.greater(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
tensorflow.name_scope
4,948
import tensorflow as tf

    bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
    nin = x.get_shape()[channel_ax].value
    wshape = [rf, rf, nin, nf]
    with tf.variable_scope(scope):
        w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
        b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
        if not one_dim_bias and data_format == 'NHWC':
            b = tf.reshape(b, bshape)
        return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b

def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
    with tf.variable_scope(scope):
        nin = x.get_shape()[1].value
        w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
        print("w is " + str(w))
        b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
        return tf.matmul(x, w) + b

def batch_to_seq(h, nbatch, nsteps, flat=False):
    if flat:
        h = tf.reshape(h, [nbatch, nsteps])
    else:
        h = tf.reshape(h, [nbatch, nsteps, -1])
    return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]

def seq_to_batch(h, flat=False):
    shape = h[0].get_shape().as_list()
    if not flat:
        assert (len(shape) > 1)
        nh = h[0].get_shape()[-1].value
tensorflow.constant_initializer
4,949
import tensorflow as tf

        print(var.name)

    self.test_image_A = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.input_dim], name='test_A')
    self.test_image_B = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.output_c_dim], name='test_B')
    self.saver = tf.train.Saver()

def train_network(self):
    self.learning_rate = tf.placeholder(tf.float32)
    self.d_optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.discriminator_loss, var_list=self.d_variables)
    self.g_optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.generator_loss, var_list=self.g_variables)
    self.init_op = tf.global_variables_initializer()
    self.sess = tf.Session()
    self.sess.run(self.init_op)
    # self.dataset_dir = '/home/santanu/Downloads/DiscoGAN/edges2handbags/train/'
    self.writer = tf.summary.FileWriter("./logs", self.sess.graph)
    count = 1
    start_time = time.time()
    for epoch in range(self.epoch):
        data_A = os.listdir(self.dataset_dir + 'trainA/')
        data_B = os.listdir(self.dataset_dir + 'trainB/')
        data_A = [(self.dataset_dir + 'trainA/' + str(file_name)) for file_name in data_A]
tensorflow.global_variables_initializer
4,950
import tensorflow as tf

    outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw, ))
out_bw = tf.reverse_sequence(
    out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
outputs.append(tf.concat([out_fw, out_bw], axis=2))
if concat_layers:
    res = tf.concat(outputs[1:], axis=2)
else:
    res = outputs[-1]
res = tf.transpose(res, [1, 0, 2])
return res
tensorflow.concat
4,951
import tensorflow as tf

        d_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])
        kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)
        perturbs = tf.gradients(
            kl,
            perturbs,
            aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
        perturbs = [tf.stop_gradient(d) for d in perturbs]

    perturbs = [_scale_l2(_mask_by_length(d, length), perturb_norm_length)
                for d in perturbs]
    vadv_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])
    return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)

def _mask_by_length(t, length):
    maxlen = t.get_shape().as_list()[1]
    mask = tf.sequence_mask(length, maxlen=maxlen)
    mask = tf.expand_dims(tf.cast(mask, tf.float32), -1)
    return t * mask

def _scale_l2(x, norm_length):
    alpha = tf.reduce_max(tf.abs(x), (1, 2), keep_dims=True) + 1e-12
    l2_norm = alpha * tf.sqrt(
        tf.reduce_sum(tf.pow(x / alpha, 2), (1, 2), keep_dims=True) + 1e-6)
    x_unit = x / l2_norm
    return norm_length * x_unit

def _end_of_seq_mask(tokens, vocab_size):
    """Generate a mask for the EOS token (1.0 on EOS, 0.0 otherwise).
tensorflow.cast
4,952
import tensorflow as tf

            config.left_right_group_map[category][0],
            config.left_right_group_map[category][1],
            config.left_right_group_map[category][2]],
        tf.int64, stateful=True)

    with tf.control_dependencies([save_image_op]):
        pred_x, pred_y = pred_x * 1., pred_y * 1.
    return pred_x, pred_y

def gaussian_blur(inputs, inputs_filters, sigma, data_format, name=None):
    with tf.name_scope(name, "gaussian_blur", [inputs]):
        data_format_ = 'NHWC' if data_format == 'channels_last' else 'NCHW'
        if data_format_ == 'NHWC':
            inputs = tf.transpose(inputs, [0, 2, 3, 1])
        ksize = int(6 * sigma + 1.)
        x = tf.expand_dims(tf.range(ksize, delta=1, dtype=tf.float32), axis=1)
        y = tf.transpose(x, [1, 0])
        kernel_matrix = tf.exp(- ((x - ksize / 2.) ** 2 + (y - ksize / 2.) ** 2) / (2 * sigma ** 2))
        # print(kernel_matrix)
        kernel_filter = tf.reshape(kernel_matrix, [ksize, ksize, 1, 1])
        kernel_filter = tf.tile(kernel_filter, [1, 1, inputs_filters, 1])
        # kernel_filter = tf.transpose(kernel_filter, [1, 0, 2, 3])
        outputs = tf.nn.depthwise_conv2d(inputs, kernel_filter,
                                         strides=[1, 1, 1, 1], padding='SAME',
                                         data_format=data_format_, name='blur')
        if data_format_ == 'NHWC':
            outputs = tf.transpose(outputs, [0, 3, 1, 2])
        return outputs

cpn_backbone = cpn.cascaded_pyramid_net
if 'seresnext50' in FLAGS.backbone:
    cpn_backbone = cpn.xt_cascaded_pyramid_net
tensorflow.transpose
4,953
import tensorflow as tf

def difference_loss(private_samples, shared_samples, weight=1.0, name='difference_loss'):
    """Adds the difference loss between the private and shared representations.

    Args:
      private_samples: a tensor of shape [num_samples, num_features].
      shared_samples: a tensor of shape [num_samples, num_features].
      weight: the weight of the incoherence loss.
      name: the name of the tf summary.
    """
    with tf.name_scope(name):
        private_samples -= tf.reduce_mean(private_samples, 0)
        shared_samples -= tf.reduce_mean(shared_samples, 0)

        private_samples = tf.nn.l2_normalize(private_samples, 1)
        shared_samples = tf.nn.l2_normalize(shared_samples, 1)

        correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)

        cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
        cost = tf.where(cost > 0, cost, 0, name='value')

        assert_op = tf.Assert(tf.is_finite(cost), [cost])
        with tf.control_dependencies([assert_op]):
tensorflow.reduce_mean
4,954
import tensorflow as tf

outputs = {
    'foo': tf.convert_to_tensor([0, 1, 2, 3], dtype=tf.int64),
    'bar': tf.convert_to_tensor([0, 2, 0, 2], dtype=tf.int64),
}
# Annotate an arbitrary proto at the schema level (not sure what global
# schema boundaries would mean, but hey I'm just a test).
boundaries = tf.constant([[1.0]])
message_type = annotations_pb2.BucketBoundaries.DESCRIPTOR.full_name
sizes = tf.expand_dims([tf.size(boundaries)], axis=0)
message_proto = tf.raw_ops.EncodeProto(
    sizes=sizes, values=[tf.cast(boundaries, tf.float32)],
    field_names=['boundaries'], message_type=message_type)[0]
type_url = os.path.join('type.googleapis.com', message_type)
schema_inference.annotate(type_url, message_proto)

with tf.compat.v1.Session(graph=graph) as session:
    schema = schema_inference.infer_feature_schema(outputs, graph, session)
tensorflow.size
4,955
import tensorflow as tf

if continuous:
    gain_bc = tf.stop_gradient(
        adv_bc * tf.nn.relu(1.0 - (self.correction_term / (rho_i_ + eps))) * f_i_)
else:
    log_f_bc = tf.log(f_i_ + eps)  # / (f_old + eps)
    gain_bc = tf.reduce_sum(log_f_bc * tf.stop_gradient(
        adv_bc * tf.nn.relu(1.0 - (self.correction_term / (rho + eps))) * f_i_),
        axis=1)
# IMP: This is sum, as expectation wrt f
loss_bc = -tf.reduce_mean(gain_bc)

loss_policy = loss_f + loss_bc

# Value/Q function loss, and explained variance
tensorflow.nn.relu
4,956
import tensorflow as tf

def avg_pool(self, bottom, name):
    return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name=name)

def max_pool(self, bottom, name):
    return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name=name)

def conv_layer(self, bottom, name):
    with tf.variable_scope(name):
        filt = self.get_conv_filter(name)
        conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
        conv_biases = self.get_bias(name)
        bias = tf.nn.bias_add(conv, conv_biases)
tensorflow.variable_scope
4,957
import tensorflow as tf

    return tf.less(l, max_length + 1)

def eval_right_length(example, target):
    l = tf.maximum(tf.shape(example['inputs'])[0], tf.shape(target)[0])
    return tf.less(l, max_eval_length + 1)

if max_length > 0 and training:
    dataset = dataset.filter(train_right_length)
tensorflow.less
4,958
from tensorflow.python.util import compat

    temp_graph, inputs, outputs, out_names=self._out_names)

# Extra kwargs are treated as attrs on the function def.
kwargs_attr = _parse_kwargs_as_attrs(**self._extra_kwargs)
for k in kwargs_attr:
    self._definition.attr[k].CopyFrom(kwargs_attr[k])

# Hash the definition and its dependencies.
hasher = hashlib.sha1()

def _hash_func_def():
    """Hash the function definition agnostic to node/map ordering."""

    def update_num(n):
        hasher.update(compat.as_bytes("%x" % n))

    def update_str(s):
        update_num(len(s))
        hasher.update(compat.as_bytes(s))

    def update_strs(slist):
        update_num(len(slist))
        for s in slist:
            update_str(s)

    for adef in self._definition.signature.input_arg:
        update_str(adef.SerializeToString())
tensorflow.python.util.compat.as_bytes
4,959
from tensorflow.python.framework import ops """ with ops.op_scope([x], name, "Tanh") as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops._tanh(x, name=name) ops.RegisterShape("Abs")(common_shapes.unchanged_shape) ops.RegisterShape("Ceil")(common_shapes.unchanged_shape) ops.RegisterShape("Conj")(common_shapes.unchanged_shape) ops.RegisterShape("Cos")(common_shapes.unchanged_shape) ops.RegisterShape("Exp")(common_shapes.unchanged_shape) ops.RegisterShape("Floor")(common_shapes.unchanged_shape) ops.RegisterShape("Imag")(common_shapes.unchanged_shape) ops.RegisterShape("Inv")(common_shapes.unchanged_shape) ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape) ops.RegisterShape("IsInf")(common_shapes.unchanged_shape) ops.RegisterShape("IsNan")(common_shapes.unchanged_shape) ops.RegisterShape("Log")(common_shapes.unchanged_shape) ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape) ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.RegisterShape
4,960
import tensorflow as tf

vaX, vaM = transform_roc(vaX1, vaX2, vaX3)
if submit:
    teX, teM = transform_roc(teX1, teX2, teX3)

n_train = len(trY)
n_valid = len(vaY)
n_batch_train = n_batch * n_gpu
n_updates_total = (n_train // n_batch_train) * n_iter

X_train = tf.placeholder(tf.int32, [n_batch_train, 2, n_ctx, 2])
M_train = tf.placeholder(tf.float32, [n_batch_train, 2, n_ctx])
X = tf.placeholder(tf.int32, [None, 2, n_ctx, 2])
M = tf.placeholder(tf.float32, [None, 2, n_ctx])

Y_train = tf.placeholder(tf.int32, [n_batch_train])
Y = tf.placeholder(tf.int32, [None])

train, logits, clf_losses, lm_losses = mgpu_train(X_train, M_train, Y_train)
clf_loss = tf.reduce_mean(clf_losses)

params = find_trainable_variables('model')
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer())

shapes = json.load(open('model/params_shapes.json'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load('model/params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
init_params[0] = init_params[0][:n_ctx]
tensorflow.placeholder
4,961
import tensorflow as tf

embedding_table = tf.get_variable(
    name=word_embedding_name,
    shape=[vocab_size, embedding_size],
    initializer=create_initializer(initializer_range))

if use_one_hot_embeddings:
    flat_input_ids = tf.reshape(input_ids, [-1])
    one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
    output = tf.matmul(one_hot_input_ids, embedding_table)
else:
    output = tf.nn.embedding_lookup(embedding_table, input_ids)

input_shape = get_shape_list(input_ids)

output = tf.reshape(output,
                    input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)

def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
tensorflow.reshape
4,962
from tensorflow.python.framework import ops """ return cast(x, types.bfloat16, name=name) ops.Tensor._override_operator("__neg__", neg) ops.Tensor._override_operator("__abs__", abs) # __invert__ corresponds to the ~ operator. Here we follow the numpy convention # ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
tensorflow.python.framework.ops.Tensor._override_operator
4,963
from tensorflow.python.framework import ops

ops.Tensor._override_operator("__neg__", neg)
ops.Tensor._override_operator("__abs__", abs)

# __invert__ corresponds to the ~ operator. Here we follow the numpy convention:
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
tensorflow.python.framework.ops.Tensor._override_operator
4,964
import tensorflow as tf print ("creating protobuf...") g_1 = tf.get_default_graph() with tf.Session(graph = g_1) as sess: saver = tf.train.import_meta_graph('save/model.ckpt.meta', clear_devices=True) saver.restore(sess, ckpt_name) graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, dst_nodes) tf.train.write_graph(tf.graph_util.extract_sub_graph(graph_def, dst_nodes), path, fname, as_text=False)
tensorflow.graph_util.convert_variables_to_constants
4,965
import tensorflow as tf

    batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(
    sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)

with tf.Session() as sess:
    with tf.device("/cpu:0"):
        indices = tf.Variable(indices)
        values = tf.Variable(values)
        shape = tf.Variable(shape)
        st = tf.SparseTensor(indices, values, shape)

        st_handles = add_many_sparse_to_tensors_map(st)
        st_roundtrip = take_many_sparse_from_tensors_map(
            sparse_map_op=st_handles.op, sparse_handles=st_handles)
        st_roundtrip_op = st_roundtrip.values.op

        st_serialized = tf.serialize_many_sparse(st)
        st_deserialized = tf.deserialize_many_sparse(
            st_serialized, dtype=values.dtype)
        st_deserialized_op = st_deserialized.values.op
tensorflow.SparseTensor
4,966
import tensorflow as tf

# Output is result of linear activation of last layer of RNN
weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
tensorflow.random_normal
4,967
import tensorflow as tf

                                                           hidden_sizes,
                                                           model_prob=1.0 - dropout_rate)
rnd_pred_act_dropout_mask_phs = rnd_pred_act_dropout_mask_generator.generate_dropout_mask_placeholders()
rnd_pred_act, rnd_pred_act_reg = mlp_variational(x_ph, rnd_pred_act_dropout_mask_phs,
                                                 list(hidden_sizes) + [act_dim],
                                                 activation, output_activation, dropout_rate)
rnd_pred_act = act_limit * rnd_pred_act

with tf.variable_scope('rnd_targ_cri'):
    rnd_targ_cri = tf.squeeze(mlp(tf.concat([x_ph, a_ph], axis=-1),
                                  list(hidden_sizes) + [1],
                                  activation, None), axis=1)

with tf.variable_scope('rnd_pred_cri'):
    rnd_pred_cri = tf.squeeze(mlp(tf.concat([x_ph, a_ph], axis=-1),
                                  list(hidden_sizes) + [1],
                                  activation, None), axis=1)
    rnd_pred_cri_in_ph = tf.concat([x_ph, a_ph], axis=-1)
    rnd_pred_cri_in_dim = rnd_pred_cri_in_ph.shape.as_list()[1]
    rnd_pred_cri_dropout_mask_generator = DropoutMaskGenerator(
        rnd_pred_cri_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
    rnd_pred_cri_dropout_mask_phs = rnd_pred_cri_dropout_mask_generator.generate_dropout_mask_placeholders()
tensorflow.variable_scope
4,968
import tensorflow as tf """Tensorflow Example proto decoder for object detection. A decoder to decode string tensors containing serialized tensorflow.Example protos for object detection. """ import tensorflow as tf class TfExampleDecoder(object): """Tensorflow Example proto decoder.""" def __init__(self, include_mask=False): self._include_mask = include_mask self._keys_to_features = { 'image/encoded': tf.io.FixedLenFeature((), tf.string), 'image/source_id': tf.io.FixedLenFeature((), tf.string), 'image/height': tf.io.FixedLenFeature((), tf.int64), 'image/width': tf.io.FixedLenFeature((), tf.int64), 'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32), 'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32), 'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32), 'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
tensorflow.io.FixedLenFeature
4,969
import tensorflow as tf

logits_clip = getattr(config, "logits_clip", 0.)
if logits_clip > 0:
    min_logit = tf.reduce_min(logits)
    return tf.minimum(logits - min_logit, logits_clip)
else:
    return logits

@registry.register_model
class FeedForwardCategoricalPolicy(PolicyBase):
    """Feed-forward categorical."""

    def body(self, features):
        observations = features["inputs_raw"]
        observations = tf.cast(observations, tf.float32)
        flat_observations = tf.layers.flatten(observations)
        with tf.variable_scope("policy"):
            x = flat_observations
            for size in self.hparams.policy_layers:
                x = tf.layers.dense(x, size, activation=tf.nn.relu)
            logits = tf.layers.dense(x, self.hparams.problem.num_actions)
            logits = tf.expand_dims(logits, axis=1)
        with tf.variable_scope("value"):
            x = flat_observations
            for size in self.hparams.value_layers:
                x = tf.layers.dense(x, size, activation=tf.nn.relu)
            value = tf.layers.dense(x, 1)
        logits = clip_logits(logits, self.hparams)
tensorflow.cast
4,970
import tensorflow as tf
from tensorflow.contrib import slim
from npu_bridge.estimator import npu_ops
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig

tf.app.flags.DEFINE_integer('input_size', 512, '')
tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '')
tf.app.flags.DEFINE_integer('num_readers', 16, '')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
tf.app.flags.DEFINE_integer('max_steps', 100000, '')
tf.app.flags.DEFINE_integer('loss_scale', 1024, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
tensorflow.app.flags.DEFINE_integer
4,971
from tensorflow.python.platform import gfile

        IOError, lambda e: "Cannot parse file"):
    tf.train.import_meta_graph(filename)

# Deletes the file
gfile.Remove(filename)

with self.assertRaisesWithPredicateMatch(
        IOError, lambda e: "does not exist"):
tensorflow.python.platform.gfile.Remove
4,972
import tensorflow as tf

if not FLAGS.do_train and not FLAGS.do_eval:
    raise ValueError("At least one of `do_train` or `do_eval` must be True.")

bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

tf.gfile.MakeDirs(FLAGS.output_dir)

input_files = []
for input_pattern in FLAGS.input_file.split(","):
    input_files.extend(tf.gfile.Glob(input_pattern))
tensorflow.gfile.MakeDirs
4,973
import tensorflow as tf

# casting to float on GPU ensures lower data transfer times.
if lander:
    obs_t_float = self.obs_t_ph
    obs_tp1_float = self.obs_tp1_ph
else:
    obs_t_float = tf.cast(self.obs_t_ph, tf.float32) / 255.0
    obs_tp1_float = tf.cast(self.obs_tp1_ph, tf.float32) / 255.0

# Here, you should fill in your own code to compute the Bellman error. This requires
# evaluating the current and next Q-values and constructing the corresponding error.
tensorflow.cast
4,974
import tensorflow as tf

def main(frame):
    print("Searching faces...")
    with tf.Graph().as_default():
        faces = evaluate(
            weight_file_path="weights.pckl",
            frame=frame,
            prob_thresh=0.7,
            nms_thresh=0.1,  # non max suppression threshold,
tensorflow.Graph
4,975
import tensorflow as tf

def get_fc_weight(self, name):
    return tf.constant(self.data_dict[name][0], name="weights")
tensorflow.constant
4,976
import tensorflow as tf

    input_.targets,
    tf.ones([self.batch_size, self.num_steps], dtype=data_type()),
    average_across_timesteps=False,
    average_across_batch=True)

# Update the cost
self._cost = tf.reduce_sum(loss)
self._final_state = state

if not is_training:
    return

self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
                                  config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(
    zip(grads, tvars),
    global_step=tf.train.get_or_create_global_step())

self._new_lr = tf.placeholder(
    tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)

def _build_rnn_graph(self, inputs, config, is_training):
tensorflow.trainable_variables
4,977
import tensorflow as tf

    labels=masked_lm_ids,
    predictions=masked_lm_predictions,
    weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
    values=masked_lm_example_loss, weights=masked_lm_weights)
tensorflow.metrics.mean
4,978
import tensorflow as tf """ src = batch_of_data["feature"] if self._trg_lang_tag_position in ["src", "source"]: src = tf.concat([tf.expand_dims(batch_of_data["trg_lang"], axis=1), src], axis=1) if self._with_src_lang_tag: src = tf.concat([tf.expand_dims(batch_of_data["src_lang"], axis=1), src], axis=1) input_dict = {"src": src, "src_length": deduce_text_length(src, self._multilingual_dp.meta["pad_id"], self._multilingual_dp.meta["padding_mode"])} if self._trg_lang_tag_position in ["trg", "target"]: target_bos = batch_of_data["trg_lang"] else: target_bos = tf.tile([tf.convert_to_tensor( self._multilingual_dp.meta["bos_id"], dtype=tf.int64)], [tf.shape(src)[0]]) if mode == compat.ModeKeys.INFER: input_dict["trg_input"] = target_bos else: input_dict["trg"] = batch_of_data["label"] input_dict["trg_length"] = deduce_text_length(batch_of_data["label"], self._multilingual_dp.meta["pad_id"], self._multilingual_dp.meta["padding_mode"]) input_dict["trg_input"] = tf.concat([tf.expand_dims(target_bos, axis=1), batch_of_data["label"][:, :-1]], axis=1) return input_dict def get_data_postprocess_fn(self, data_status, **kwargs) -> callable: if data_status == compat.DataStatus.PROJECTED:
tensorflow.shape
4,979
import tensorflow as tf

self.IRK_alpha = weights[0:-1, :]
self.IRK_beta = weights[-1:, :]
self.IRK_times = tmp[q**2 + q:]

# tf placeholders and graph
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                             log_device_placement=True))

self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))
self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))
self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))
self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))
self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q))  # dummy variable for fwd_gradients
self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q))  # dummy variable for fwd_gradients

self.U0_pred = self.net_U0(self.x0_tf)  # N0 x q
self.U1_pred = self.net_U1(self.x1_tf)  # N1 x q

self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \
            tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred))
tensorflow.placeholder
4,980
import tensorflow as tf

c, ei, ev, v, n_cs, n_vs, n_cands, cands, best_cands, cand_scores = batch

pred_scores = policy['model']((c, ei, ev, v,
                               tf.reduce_sum(n_cs, keepdims=True),
                               tf.reduce_sum(n_vs, keepdims=True)),
                              tf.convert_to_tensor(False))
tensorflow.reduce_sum
4,981
import tensorflow as tf

grad, = tf.gradients(loss, x)
r = old_div(grad * loss, tf.reduce_sum(tf.square(grad)))
tensorflow.square
4,982
import tensorflow as tf

    z = tf.py_func(my_func, [x, y], [tf.float32])
    self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))

# array
with self.test_session():
    x = tf.constant([1.0, 2.0], tf.float64)
    y = tf.constant([2.0, 3.0], tf.float64)
    z = tf.py_func(my_func, [x, y], [tf.float64])
    self.assertAllEqual(
        z[0].eval(),
        my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))

# a bit exotic type (complex64)
with self.test_session():
    x = tf.constant(1 + 2j, tf.complex64)
    y = tf.constant(3 + 4j, tf.complex64)
    z, = tf.py_func(my_func, [x, y], [tf.complex64])
    self.assertAllClose(z.eval(), my_func(1 + 2j, 3 + 4j))

# a bit exotic function (rfft)
with self.test_session():
    x = tf.constant([1., 2., 3., 4.], tf.float32)

    def rfft(x):
        return np.fft.rfft(x).astype(np.complex64)

    y, = tf.py_func(rfft, [x], [tf.complex64])
    self.assertAllClose(y.eval(), np.fft.rfft([1., 2., 3., 4.]))

# returns a python literal.
with self.test_session():
    def literal(x):
tensorflow.constant
4,983
from tensorflow.python.ops import array_ops

else:
    loss_unweighted = array_ops.reshape(loss_unweighted, shape=(-1,))
tensorflow.python.ops.array_ops.reshape
4,984
import tensorflow as tf

def build_generator(self, image, reuse=False, name='generator'):
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False

        """U-Net Generator"""
        def lrelu(x, alpha, name='lrelu'):
            with tf.variable_scope(name):
                return tf.nn.relu(x) - alpha * tf.nn.relu(-x)

        def instance_norm(x, name='instance_norm'):
            with tf.variable_scope(name):
                if reuse:
                    tf.get_variable_scope().reuse_variables()
                else:
                    assert tf.get_variable_scope().reuse is False
tensorflow.variable_scope
4,985
import tensorflow as tf

            bert_config.initializer_range))
    input_tensor = modeling.layer_norm(input_tensor)

# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
    "output_bias",
    shape=[bert_config.vocab_size],
    initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)

label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
tensorflow.zeros_initializer
4,986
import tensorflow as tf

blk_shape = tf.shape(blk_indices)
blk_indices_ = tf.reshape(blk_indices, [-1, 3])
ksize = tf.shape(w)

# Calculate the block strides.
bstrides = _calc_block_strides(blk_shape, ksize, strides)

# Calculate the output size.
x_shape = tf.shape(x)
out_shape = calc_out_size_4d(x_shape, ksize, strides, padding)

# Pad input.
x_ = _pad_input(
    x, ksize, strides, padding,
    bsize=[1, blk_shape[1], blk_shape[2], 1], bstrides=bstrides)

# Convolution when number of indices is larger than zero.
tensorflow.shape
4,987
import tensorflow as tf

    For the scale (gamma), a ones initializer is used by default.

    Args:
      var_name: name of variable as a string.
    """
    if var_name not in self._initializers:
        if var_name == self.GAMMA:
            self._initializers[self.GAMMA] = tf.ones_initializer()
        elif var_name == self.BETA:
            self._initializers[self.BETA] = tf.zeros_initializer()

def _build_statistics_variance(self, input_batch, reduction_indices, use_batch_stats):
    """Builds the statistics part of the graph when using moving variance.
tensorflow.ones_initializer
4,988
import tensorflow as tf

    # Step-wise contrastive loss
    pred1, pred2 = tf.split(pred, 2, axis=0)
    tgt1, tgt2 = tf.split(tgt, 2, axis=0)
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)

    loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small) + margin)
    loss = tf.reduce_mean(loss)
    return loss

def contra_step_lossV4(pred, tgt):
    # 50*50
    # Step-wise contrastive loss
    even = [2 * i for i in range(25)]
    odd = [2 * i + 1 for i in range(25)]
    pred1 = tf.gather(pred, even)
    pred2 = tf.gather(pred, odd)
tensorflow.reduce_mean
4,989
import tensorflow as tf

def data_format():
    return "channels_first" if tfe.num_gpus() else "channels_last"

def random_dataset():
    batch_size = 64
    images = tf.random_normal([batch_size, 784])
    labels = tf.random_uniform([batch_size], minval=0, maxval=10, dtype=tf.int32)
    return tf.data.Dataset.from_tensors((images, labels))

def train(defun=False):
    model = mnist.create_model(data_format())
    if defun:
        model.call = tfe.defun(model.call)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    dataset = random_dataset()
    with tf.device(device()):
tensorflow.data.Dataset.from_tensors
4,990
import tensorflow as tf

d = tf.data.Dataset.from_tensor_slices({
    "input_ids":
        tf.constant(
            all_input_ids, shape=[num_examples, seq_length],
tensorflow.constant
4,991
import tensorflow as tf

def bucket_distance(self, distances):
    """
    Places the given values (designed for distances) into 10 semi-logscale
    buckets: [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
    """
    logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances)) / math.log(2))) + 3
    use_identity = tf.to_int32(distances <= 4)
    combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
    return tf.clip_by_value(combined_idx, 0, 9)

def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb,
                               top_antecedent_offsets, top_span_speaker_ids, genre_emb):
    k = util.shape(top_span_emb, 0)
tensorflow.to_int32
4,992
import tensorflow as tf

    )
trainable_vars = tf.trainable_variables()
kernels = [
    v for v in trainable_vars
    if ('weights' in v.name or 'kernel' in v.name)
    and 'depthwise_weights' not in v.name
]
for K in kernels:
    x = tf.multiply(weight_decay, tf.nn.l2_loss(K))
    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, x)

class RestoreMovingAverageHook(tf.train.SessionRunHook):

    def __init__(self, model_dir):
        super(RestoreMovingAverageHook, self).__init__()
        self.model_dir = model_dir
tensorflow.add_to_collection
4,993
import tensorflow as tf

features = []
for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
        tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)
    features.append(feature)
return features

def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    processors = {
        "faq": FAQProcessor,
    }

    tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                  FLAGS.init_checkpoint)

    if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
        raise ValueError(
            "At least one of `do_train`, `do_eval` or `do_predict' must be True.")
tensorflow.logging.set_verbosity
4,994
from tensorflow.contrib.distributions.python.ops import distribution_util

@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

@distribution_util.AppendDocstring(_poisson_sample_note)
def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_cdf(self, x):
    return math_ops.log(self.cdf(x))

@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
    x = self._assert_valid_sample(x, check_integer=False)
    return math_ops.igammac(math_ops.floor(x + 1), self.rate)

def _log_normalization(self):
    return self.rate

def _log_unnormalized_prob(self, x):
    x = self._assert_valid_sample(x, check_integer=True)
    return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)
tensorflow.contrib.distributions.python.ops.distribution_util.AppendDocstring
4,995
import tensorflow as tf

    dropout=1 - config.keep_prob if is_training else 0)
params_size_t = self._cell.params_size()
self._rnn_params = tf.get_variable(
    "lstm_params",
    initializer=tf.random_uniform(
        [params_size_t], -config.init_scale, config.init_scale),
    validate_shape=False)
c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
             tf.float32)
h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
             tf.float32)
self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)
outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training)
outputs = tf.transpose(outputs, [1, 0, 2])
tensorflow.zeros
4,996
import tensorflow as tf

tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)

# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp')  # (B,T) shape
key_masks = mask  # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings)  # [B, 1, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas')  # (B,T) shape

# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
# output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
output = facts * tf.expand_dims(alphas, -1)
output = tf.reshape(output, tf.shape(facts))
# output = output / (facts.get_shape().as_list()[-1] ** 0.5)
if not return_alphas:
    return output
else:
tensorflow.nn.softmax
4,997
import tensorflow as tf

        sv.stop()

def _build_model(self):
    """Build the TensorFlow graph."""
    image_size = self.model_conf.get_image_size()
    data_type = tf.float32
    input_data_type = tf.float32
    input_nchan = 3
    tf.set_random_seed(1234)
    np.random.seed(4321)
    phase_train = not (FLAGS.eval or FLAGS.forward_only)

    log_fn('Generating model')
    losses = []
    device_grads = []
    all_logits = []
tensorflow.set_random_seed
4,998
import tensorflow as tf

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(Total_loss)

correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# val_dir = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_images_20170908/'
# annotations = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_annotations_20170908.json'

# a DataFlow you implement to produce [tensor1, tensor2, ..] lists from whatever sources:
tensorflow.cast
4,999