Dataset columns:

  seed      string (length 25 to 2.89k)  - code snippet exercising the API
  seed_api  string (length 14 to 102)    - fully qualified API symbol used in the seed
  index     int64  (0 to 14.8k)          - row index
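Each record below pairs a code snippet (seed) with the fully qualified TensorFlow symbol it exercises (seed_api) and a running row index. As a minimal sketch of how such a table could be consumed, assuming it is published as a Hugging Face dataset (the dataset path below is a hypothetical placeholder, not taken from this page):

```python
# Minimal sketch, assuming the rows are available as a Hugging Face dataset.
# "org/seed-api-dataset" is a hypothetical placeholder path, not a real identifier.
from datasets import load_dataset

ds = load_dataset("org/seed-api-dataset", split="train")

# Peek at the first few records; each one carries the three columns described above.
for row in ds.select(range(3)):
    print(row["index"], row["seed_api"])
    print(row["seed"][:80], "...")
```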
import tensorflow as tf with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)] dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
tensorflow.constant
5,100
import tensorflow as tf "moving_mean", shape=self._mean_shape, collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES], initializer=tf.zeros_initializer(), trainable=False) self._moving_second_moment = tf.get_variable(
tensorflow.zeros_initializer
5,101
import tensorflow as tf

activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)

projection = linear(
    input=activation,
    input_size=dialogue_state_size,
    output_size=action_templates_vocabulary_length,
    name='linear_projection_3_predictions_action'
)
self.predictions_action = tf.nn.softmax(projection, name="softmax_output_prediction_action")

# argument prediction
# first encode the decoded action template and the true action template
choice = tf.floor(tf.random_uniform([1], self.use_inputs_prob, 1 + self.use_inputs_prob, tf.float32))
prediction_action_argmax = tf.stop_gradient(tf.argmax(self.predictions_action, 1))
predicted_action_templates_embedding = embedding(
    input=prediction_action_argmax,
    length=action_templates_vocabulary_length,
    size=action_templates_embedding_size,
    name='action_templates_embedding'
)
true_action_template_embedding = tf.gather(predicted_action_templates_embedding.embedding_table, actions_template)
predicted_action_templates_embedding = tf.stop_gradient(predicted_action_templates_embedding)

action_templates_embedding = choice * true_action_template_embedding + (1.0 - choice) * predicted_action_templates_embedding
tensorflow.random_uniform
5,102
from tensorflow.python.ops import control_flow_ops input_data=input_data, input_h=input_h, input_c=input_c, params=params) all_grads = gradients_impl.gradients( [output, output_h, output_c], [params, input_data, input_h, input_c]) training_op = control_flow_ops.group(*all_grads) self._BenchmarkOp(training_op, "cudnn_lstm %s %s" % (config_name, self._GetConfigDesc(config))) def benchmarkTfRNNLSTMTraining(self): test_configs = self._GetTestConfig() for config_name, config in test_configs.items():
tensorflow.python.ops.control_flow_ops.group
5,103
import tensorflow as tf nms_scores_expected1 = tf.constant([1.0, 0.85], dtype=tf.float32) nms_classes_expected1 = tf.constant([1, 2], dtype=tf.int32)
tensorflow.constant
5,104
import tensorflow as tf with tf.variable_scope(name) as scope: kernel = self.variable('weights', [kernel_size, kernel_size, output_channels, input_channels], initializer, regularizer=tf.contrib.layers.l2_regularizer(0.0005)) deconv = tf.nn.conv2d_transpose(bottom, kernel, output_shape, [1, stride, stride, 1], padding='SAME') biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0))
tensorflow.nn.conv2d_transpose
5,105
import tensorflow as tf return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.PREDICT, predictions=predictions, export_outputs={ "output": tf.estimator.export.PredictOutput(export_out) }) def _normalize_body_output(self, body_out): if isinstance(body_out, tuple):
tensorflow.estimator.export.PredictOutput
5,106
import tensorflow as tf

embeddings = c2v.GetEmbeddings(self.x)
self._inputs = [tf.squeeze(input_, [1]) for input_ in
                tf.split(1, max_sequence_len, embeddings)]

# Need to prepare a mask to zero out the padding symbols.
# Make a batch_size x max_sequence_len matrix where each
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])

# Make a matrix where each row contains [0, 1, ..., max_sequence_len]
r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])

self.lengths_transposed = lengths_transposed
self.lengths_tiled = lengths_tiled
self.range_row = range_row
self.range_tiled = range_tiled

# Use the logical operations to create a mask
indicator = tf.less(range_tiled, lengths_tiled + 1)  # i.e. where seq len is less than index
trim = np.ones(indicator.get_shape())
trim[:, 0] = 0  # ignore start symbol
indicator = tf.logical_and(indicator, trim.astype(bool))
self.indicator = indicator
tensorflow.tile
5,107
import tensorflow as tf if not reduce_instance_dims: raise NotImplementedError('Per-key elementwise reduction not supported') with tf.compat.v1.name_scope('mean_and_var_per_key'): x = tf.cast(x, output_dtype)
tensorflow.compat.v1.name_scope
5,108
from tensorflow.python.framework import constant_op from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops.control_flow_ops import with_dependencies from tensorflow.python.training import session_run_hook def _streaming_sum(scalar_tensor): """Create a sum metric and update op.""" sum_metric = framework.local_variable(constant_op.constant(0.0)) sum_update = sum_metric.assign_add(scalar_tensor) return sum_metric, sum_update class _InitializeClustersHook(session_run_hook.SessionRunHook): """Initializes clusters or waits for cluster initialization."""
tensorflow.python.framework.constant_op.constant
5,109
import tensorflow as tf tf.expand_dims(k, 0), util.shape(context_outputs, 0), True) # [1, k] top_span_indices.set_shape([1, None]) top_span_indices = tf.squeeze(top_span_indices, 0) # [k] top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k] top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k] top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb] top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k] top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k] top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices) # [k] top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]
tensorflow.gather
5,110
import tensorflow as tf

# we run the policy before we update the parameters.
# The larger local steps is, the lower is the variance in our policy gradients estimate
# on the one hand; but on the other hand, we get less frequent parameter updates, which
# slows down learning. In this code, we found that making local steps be much
# smaller than 20 makes the algorithm more difficult to tune and to get to work.
self.runner = RunnerThread(env, pi, 20)

grads = tf.gradients(self.loss, pi.var_list)

tf.summary.scalar("model/policy_loss", pi_loss / bs)
tf.summary.scalar("model/value_loss", vf_loss / bs)
tf.summary.scalar("model/entropy", entropy / bs)
tf.summary.image("model/state", pi.x)
tf.summary.scalar("model/grad_global_norm", tf.global_norm(grads))
tf.summary.scalar("model/var_global_norm", tf.global_norm(pi.var_list))
self.summary_op = tf.summary.merge_all()

grads, _ = tf.clip_by_global_norm(grads, 40.0)

# copy weights from the parameter server to the local model
self.sync = tf.group(*[v1.assign(v2) for v1, v2 in zip(pi.var_list, self.network.var_list)])

grads_and_vars = list(zip(grads, self.network.var_list))
self.inc_step = self.global_step.assign_add(tf.shape(pi.x)[0])

# each worker has a different set of adam optimizer parameters
tensorflow.global_norm
5,111
import tensorflow as tf res = sess.run([mem]) self.assertEqual(1, len(res)) self.assertEqual((2, 2), res[0].shape) def testEmbeddingRNNDecoder(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) _, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] dec, mem = tf.nn.seq2seq.embedding_rnn_decoder( dec_inp, enc_state, cell, num_symbols=4, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) res = sess.run([mem]) self.assertEqual(1, len(res)) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape)
tensorflow.nn.seq2seq.embedding_rnn_decoder
5,112
import tensorflow as tf

# DeepFool: only the targeted attack is implemented
def deepfool(x, loss=None, bounds=(0, 1)):
    (clip_min, clip_max) = bounds
    grad, = tf.gradients(loss, x)
    r = old_div(grad * loss, tf.reduce_sum(tf.square(grad)))
    # the goal is to make the loss decrease
    adv_x = x - r
tensorflow.gradients
5,113
import tensorflow as tf loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params):
tensorflow.reshape
5,114
from tensorflow.python.framework import ops as _ops name=name) return result _ops.RegisterShape("ResourceInitializedOp")(None) _resource_using_op_outputs = [""]
tensorflow.python.framework.ops.RegisterShape
5,115
import tensorflow as tf """ idx_iter_beg = int(self.nb_iters_train * FLAGS.ws_iter_ratio_beg) idx_iter_end = int(self.nb_iters_train * FLAGS.ws_iter_ratio_end) base = tf.cast(self.global_step - idx_iter_beg, tf.float32) / (idx_iter_end - idx_iter_beg) base = tf.minimum(1.0, tf.maximum(0.0, base)) prune_ratio_dyn = prune_ratio_fnl * (1.0 - tf.pow(1.0 - base, FLAGS.ws_prune_ratio_exp)) return prune_ratio_dyn
tensorflow.maximum
5,116
import tensorflow as tf hyper_b_final = tf.layers.dense(inputs=hyper_b_final_l1, units=1, activation=None, use_bias=False, name='hyper_b_final') # First layer w1 = tf.abs(tf.matmul(state, hyper_w_1)) b1 = tf.matmul(state, hyper_b_1) w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer]) # reshape into batch of matrices b1_reshaped = tf.reshape(b1, [-1, 1, n_h_mixer])
tensorflow.matmul
5,117
import tensorflow as tf # Save and reload one Variable named "var1" in the same file. # The cached readers should know to re-read the file. self._SaveAndLoad("var1", 1.1, 2.2, save_path) def testGPU(self): if not tf.test.is_built_with_cuda(): return save_path = os.path.join(self.get_temp_dir(), "gpu") with tf.Session("", graph=tf.Graph()) as sess: with sess.graph.device("/gpu:0"): v0_1 = tf.Variable(123.45) save = tf.train.Saver({"v0": v0_1}) tf.initialize_all_variables().run() save.save(sess, save_path) with tf.Session("", graph=tf.Graph()) as sess:
tensorflow.Graph
5,118
import tensorflow as tf if args.cuda: config = tf.ConfigProto() config.gpu_options.allow_growth = True else: config = tf.ConfigProto(device_count={'GPU': 0}) os.environ["CUDA_VISIBLE_DEVICES"] = "-1" data_class = MultiTurnDialog.load_class(args.dataset)
tensorflow.ConfigProto
5,119
import tensorflow as tf def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf def get_activation(activation_string): """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`.
tensorflow.pow
5,120
import tensorflow as tf if self.use_bias: self.bias = self.add_weight(name="bias", shape=(np.prod(self.out_modes),), initializer=self.bias_initializer, trainable=True) super(TTLayerDense, self).build(input_shape) def call(self, input_): dim = self.order out = tf.reshape(input_, [-1, np.prod(self.inp_modes)]) self.image_max_size = max(self.image_max_size, np.prod(self.inp_modes)) out = tf.transpose(out, [1, 0]) for i in range(dim): out = tf.reshape(out, [self.mat_ranks[i] * self.inp_modes[i], -1]) out = tf.matmul(self.mat_cores[i], out) out = tf.reshape(out, [self.out_modes[i], -1]) out = tf.transpose(out, [1, 0]) out = tf.reshape(out, [-1, np.prod(self.out_modes)]) # self.image_max_size = max(self.image_max_size, np.prod([val.value for val in out.get_shape()[1:]])) if self.use_bias: out = tf.add(out, self.bias, name='out') if self.activation is not None: out = self.activation(out) return out def compute_output_shape(self, input_shape): return (input_shape[0], np.prod(self.out_modes))
tensorflow.reshape
5,121
import tensorflow as tf and type `tf.int64`. For binary classification, the standard \ sigmoid function is used for prediction, and the class labels are \ `{0, 1}`. """ logits = self._encoder(inputs, sequence_length, dtype, mode) num_classes = self._hparams.num_classes is_binary = num_classes == 1 is_binary = is_binary or (num_classes <= 0 and logits.shape[1] == 1) if is_binary: pred = tf.greater(logits, 0) logits = tf.reshape(logits, [-1]) else: pred = tf.argmax(logits, 1) pred = tf.cast(tf.reshape(pred, [-1]), tf.int64) self._built = True return logits, pred @property def trainable_variables(self): """The list of trainable variables of the module. """ if not self._built: raise TexarError( "Attempting to access trainable_variables before module %s "
tensorflow.argmax
5,122
import tensorflow as tf self.sess.run(tf.global_variables_initializer()) # Tensorboard if summary_dir is not None: self.writer = tf.summary.FileWriter(summary_dir) tf.summary.scalar('Loss/Policy', loss_pg) tf.summary.scalar('Loss/Value', loss_vf) tf.summary.scalar('Loss/Entropy', loss_entropy)
tensorflow.summary.FileWriter
5,123
import tensorflow as tf class trainwork(object): def __init__(self): with tf.variable_scope('scop'): self.w1=tf.get_variable('w1', [4096,2048],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w2=tf.get_variable('w2', [2048,3072],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w3=tf.get_variable('w3', [3072,512],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w4=tf.get_variable('w4', [512,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.b1 = tf.get_variable('b1', [2048],initializer=tf.constant_initializer(0.0)) self.b2 = tf.get_variable('b2', [3072],initializer=tf.constant_initializer(0.0)) self.b3 = tf.get_variable('b3', [512],initializer=tf.constant_initializer(0.0)) self.b4 = tf.get_variable('b4', [classnum],initializer=tf.constant_initializer(0.0)) def inference(self,images): images=tf.cast(images,tf.float32)/255.0
tensorflow.constant_initializer
5,124
import tensorflow as tf self.initial_dlatents = np.zeros((self.batch_size, 18, 512)) model.components.synthesis.run(self.initial_dlatents, randomize_noise=randomize_noise, minibatch_size=self.batch_size, custom_inputs=[partial(create_variable_for_generator, batch_size=batch_size), partial(create_stub, batch_size=batch_size)], structure='fixed') self.sess = tf.get_default_session() self.graph = tf.get_default_graph() self.dlatent_variable = next(v for v in tf.global_variables() if 'learnable_dlatents' in v.name) self.set_dlatents(self.initial_dlatents) self.generator_output = self.graph.get_tensor_by_name('G_synthesis_1/_Run/concat/concat:0') self.generated_image = tflib.convert_images_to_uint8(self.generator_output, nchw_to_nhwc=True, uint8_cast=False) self.generated_image_uint8 = tf.saturate_cast(self.generated_image, tf.uint8) def reset_dlatents(self): self.set_dlatents(self.initial_dlatents) def set_dlatents(self, dlatents):
tensorflow.global_variables
5,125
import tensorflow as tf embeddings = tf.get_variable( name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args) embed = tf.nn.embedding_lookup(embeddings, self.inputs)
tensorflow.nn.embedding_lookup
5,126
import tensorflow as tf sess.run(zero_var.initializer) sess.run(ones_var.initializer) print(sess.run(zero_var)) print(sess.run(ones_var)) zero_similar = tf.Variable(tf.zeros_like(zero_var)) ones_similar = tf.Variable(tf.ones_like(ones_var)) sess.run(ones_similar.initializer) sess.run(zero_similar.initializer) print(sess.run(ones_similar)) print(sess.run(zero_similar)) fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1))
tensorflow.ones_like
5,127
import tensorflow as tf chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
tensorflow.where
5,128
from tensorflow.python.eager import context class DummyVariableStore(object): @contextlib.contextmanager def as_default(self): yield def create_eager_var_store(): if context.in_eager_mode(): return variable_scope.EagerVariableStore() else: return DummyVariableStore() def scheduled_sampling(hparams, problem_hparams, dp, sharded_logits, losses, sharded_features, transformed_features, model):
tensorflow.python.eager.context.in_eager_mode
5,129
import tensorflow as tf images,labels=tf.train.batch([image,label],batch_size=batch_size) return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size]) class trainwork(object): def __init__(self): with tf.variable_scope('scop'): self.w1=tf.get_variable('w1', [4096,1024],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w2=tf.get_variable('w2', [1024,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.b1 = tf.get_variable('b1', [1024],initializer=tf.constant_initializer(0.0)) self.b2 = tf.get_variable('b2', [classnum],initializer=tf.constant_initializer(0.0)) def inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1) out = tf.matmul(l1, self.w2)+self.b2 return out def test_inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1) out = tf.matmul(l1, self.w2)+self.b2 return out def valid_inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1
tensorflow.nn.relu
5,130
import tensorflow as tf with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(0.0) twos = tf.Variable([0.0, 0.0, 0.0]) # Saver with no arg, defaults to 'all variables'. save = tf.train.Saver()
tensorflow.Variable
5,131
import tensorflow as tf num_shards=FLAGS.num_shards, per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 )) return run_config def build_image_serving_input_receiver_fn(shape, dtype=tf.float32): """Returns a input_receiver_fn for raw images during serving.""" def _preprocess_image(encoded_image): """Preprocess a single raw image.""" image = tf.image.decode_image(encoded_image, channels=shape[-1]) image.set_shape(shape) return tf.cast(image, dtype) def serving_input_receiver_fn(): image_bytes_list = tf.placeholder( shape=[None], dtype=tf.string, ) images = tf.map_fn( _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype) return tf.estimator.export.TensorServingInputReceiver( features=images, receiver_tensors=image_bytes_list) return serving_input_receiver_fn
tensorflow.cast
5,132
import tensorflow as tf 'Input layer does not contain zero weights, so apply CQAT instead.') centroids_mask = None centroids, lookup = get_unique(weights) num_centroids = tf.size(centroids) if self.preserve_sparsity: sparsity_mask = tf.math.divide_no_nan(weights, weights) zero_idx = tf.argmin(tf.abs(centroids), axis=-1) centroids_mask = 1.0 - tf.one_hot(zero_idx, num_centroids) result = {SPARSITY_MASK: sparsity_mask} # Prepare clustering variables for the Keras graph when clusters # exist, assuming we do not use number_of_clusters larger than 1024 if num_centroids > 1024:
tensorflow.abs
5,133
import tensorflow as tf

if num_classes == 2:
    q = tf.nn.sigmoid(q_logits)
    p = tf.nn.sigmoid(p_logits)
    kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +
          tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))
else:
    q = tf.nn.softmax(q_logits)
    p = tf.nn.softmax(p_logits)
    kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)

num_labels = tf.reduce_sum(weights)
num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)

kl.get_shape().assert_has_rank(2)
weights.get_shape().assert_has_rank(1)
tensorflow.nn.softmax
5,134
import tensorflow as tf model._task.ApplyExponentialMovingAverage(model.ema) with tf.variable_scope('', reuse=True):
tensorflow.variable_scope
5,135
import tensorflow as tf W = weight_variable([1, 1, 1, FLAGS.feats_per_layer, FLAGS.feats_per_layer]) else: bottom = out # l (not l + 1) because from previous layer W = weight_variable([1, FLAGS.conv_kernel, FLAGS.conv_kernel, FLAGS.feats_per_layer, FLAGS.feats_per_layer]) b = bias_variable([FLAGS.feats_per_layer]) Wx_b = tf.nn.conv3d(bottom, W, strides=[1,1,1,1,1], padding='VALID') + b out = tf.nn.relu(Wx_b) shape = out.get_shape() print('conv{}'.format(l+1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with tf.variable_scope('pool'): bottom = out if l == num_layers - 1 and FLAGS.total_pool: kernel_size = bottom.get_shape()[2] out = tf.nn.max_pool3d(bottom, ksize=[1,1, kernel_size, kernel_size,1], strides=[1,1,1,1,1], padding='VALID') else: out = tf.nn.max_pool3d(bottom, ksize=[1,1, FLAGS.pool_kernel, FLAGS.pool_kernel,1], strides=[1,1,FLAGS.pool_stride,FLAGS.pool_stride,1], padding='VALID') shape = out.get_shape() print('pool{}'.format(l + 1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with tf.variable_scope('scale'):
tensorflow.variable_scope
5,136
import tensorflow as tf shape = control_flow_ops.with_dependencies([rank_assertions[i]], tf.shape(image))
tensorflow.shape
5,137
import tensorflow as tf Calibrate input of shape (-1, w, h, ch) to (-1, w_out, h_out, ch_out), assuming (w, h) / (w_out, h_out) is power of 2 ''' # Downsample with factorized reduction downsample_no = 0 while w > w_out or h > h_out: downsample_no += 1 with tf.variable_scope('downsample_{}x'.format(downsample_no)): X = tf.nn.relu(X) X = self._add_factorized_reduction(X, w, h, ch, ch_out, is_train=is_train) ch = ch_out w >>= 1 h >>= 1 # If channel counts finally don't match, convert channel counts with 1x1 conv
tensorflow.nn.relu
5,138
import tensorflow as tf img_h = img_h_batch[i] img_w = img_w_batch[i] inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w]) tower_grads = [] biases_regularizer = tf.no_regularizer weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY) with tf.variable_scope(tf.get_variable_scope()): for i in range(num_gpu): with tf.device('/gpu:%d' % i): with tf.name_scope('tower_%d' % i): with slim.arg_scope( [slim.model_variable, slim.variable], device='/device:CPU:0'): with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], weights_regularizer=weights_regularizer, biases_regularizer=biases_regularizer, biases_initializer=tf.constant_initializer(0.0)): gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(self.get_gtboxes_and_label,
tensorflow.name_scope
5,139
import tensorflow as tf tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config) def add_train_stats(model, hparams): with tf.variable_scope("stats") as scope: for i in range(hparams.tacotron_num_gpus): tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i]) tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tensorflow.variable_scope
5,140
import tensorflow as tf num_in_channels = conv_block[0][3] # Get first base conv layer from list. first_base_conv_layer = base_conv_layer_block[0] # Build first layer with bigger tensor. base_conv_tensors = [ first_base_conv_layer( inputs=tf.zeros( shape=[1] + conv_block[0][0:2] + [num_in_channels], dtype=tf.float32 ) ) ] # Now build the rest of the base conv block layers, store in list.
tensorflow.zeros
5,141
import tensorflow as tf t = m / m0 u = 1 - t * t argus_t_ge_1 = m * tf.pow(u, p) * tf.exp(c * u) return tf.maximum(tf.zeros_like(m), argus_t_ge_1, name="argus_pdf")
tensorflow.pow
5,142
import tensorflow as tf masked_lm_mean_loss = tf.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) next_sentence_log_probs = tf.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]) next_sentence_predictions = tf.argmax(
tensorflow.reshape
5,143
from tensorflow.python.ops import array_ops Returns: ndims: Scalar number of dimensions associated with a `Tensor`. """ with self._name_scope(name, values=[x]): x = ops.convert_to_tensor(x, name="x") ndims = x.get_shape().ndims if ndims is None: return array_ops.rank(x, name="ndims") return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims") def get_sample_ndims(self, x, name="get_sample_ndims"): """Returns number of dimensions corresponding to iid draws ("sample"). Args: x: `Tensor`.
tensorflow.python.ops.array_ops.rank
5,144
import tensorflow as tf test_image_batch,test_label_batch=get_test_batch(test_image,test_label,testnum) test_inf=work.test_inference(test_image_batch) test_labels=tf.one_hot(test_label_batch,classnum) test_pre = tf.reshape(test_inf, [testnum, classnum]) correct_prediction=tf.equal(tf.argmax(test_inf,1),tf.argmax(test_labels,1))
tensorflow.one_hot
5,145
import tensorflow as tf

mask = mask[:h // grid * grid, :w // grid * grid, :]
image = np.expand_dims(image, 0)
mask = np.expand_dims(mask, 0)
input_image = np.concatenate([image, mask], axis=2)

sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
    input_image = tf.constant(input_image, dtype=tf.float32)
    output = model.build_server_graph(FLAGS, input_image)
    output = (output + 1.) * 127.5
    output = tf.reverse(output, [-1])
    output = tf.saturate_cast(output, tf.uint8)
    # load pretrained model
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(checkpoint_dir, from_name)
        assign_ops.append(tf.assign(var, var_value))
    sess.run(assign_ops)
    result = sess.run(output)
tf.reset_default_graph()
return result[0][:, :, ::-1]
tensorflow.saturate_cast
5,146
import tensorflow as tf _EVAL_FEATURE_MAP = { movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string), movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string), rconst.DUPLICATE_MASK: tf.FixedLenFeature([], dtype=tf.string) }
tensorflow.FixedLenFeature
5,147
import tensorflow as tf x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0) x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0) y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0) y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0) bboxes = tf.concat([x1, y1, x2, y2], axis=1)
tensorflow.minimum
5,148
import tensorflow as tf def _split_string(string): """Splits a byte string into an array of character bytes.""" text = tf.compat.as_text(string) ret = np.empty(len(text), dtype=np.object) for i, char in enumerate(text): ret[i] = tf.compat.as_bytes(char) return ret def vocabulary(filename, max_size=None, num_oov_buckets=1):
tensorflow.compat.as_bytes
5,149
import tensorflow as tf def _spherical_harmonics_normalization(l, m, var_type=tf.float64): l = tf.cast(l, dtype=var_type) m = tf.cast(m, dtype=var_type) numerator = (2.0 * l + 1.0) * factorial(l - tf.abs(m)) denominator = 4.0 * np.pi * factorial(l + tf.abs(m)) return tf.sqrt(numerator / denominator)
tensorflow.abs
5,150
import tensorflow as tf """ Test RNN graph single layer """ def test_rnn_kstep(test_data_x,test_data_y, preds, rnn_outputs, g, checkpoint, input_prob, output_prob, state_prob, num_test, kstep = 3): with tf.Session() as sess: sess.run(tf.global_variables_initializer()) result= {} "read the trained graph" g['saver'].restore(sess, checkpoint) losses = [] for step_num in range(kstep): k=step_num+1
tensorflow.global_variables_initializer
5,151
import tensorflow as tf if not context.in_eager_mode(): recent_output.set_shape([None, None, None, 1]) padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]]) features["targets"] = padded
tensorflow.pad
5,152
import tensorflow as tf sess.run(tf.global_variables_initializer()) ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) saver.restore(sess, ckpt_state.model_checkpoint_path)
tensorflow.logging.info
5,153
import tensorflow as tf var = grad_and_vars[0][1] grad_and_var = (grad, var) average_grads.append(grad_and_var) return average_grads def binary_mask(shape, p=0.7): samples = tf.random_uniform(shape, minval=0.0, maxval=1.0) mask = tf.less_equal(samples, p) return tf.cast(mask, tf.float32) def weighted_arithmetic_mean(w, x): numer = tf.reduce_sum(w*x)
tensorflow.random_uniform
5,154
import tensorflow as tf # boundaries as float for all numeric types. bucket_dtype = tf.float32 with tf.compat.v1.name_scope(name, 'quantiles'): if weights is None:
tensorflow.compat.v1.name_scope
5,155
import tensorflow as tf embedding_size=2, output_size=3) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 3), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testEmbeddingAttentionSeq2Seq(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)] dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) # Test with state_is_tuple=False.
tensorflow.nn.rnn_cell.BasicLSTMCell
5,156
import tensorflow as tf num_acts = self.act_space # Calculate U self.worker_lstm = SingleStepLSTM(tf.expand_dims(self.z, [0]), size=num_acts * self.k, step_size=tf.shape(self.obs)[:1]) flat_logits = self.worker_lstm.output self.worker_vf = self.build_value(flat_logits) U = tf.reshape(flat_logits, [-1, num_acts, self.k]) # Calculate w cut_g = tf.stop_gradient(self.g) cut_g = tf.expand_dims(cut_g, [1]) gstack = tf.concat([self.prev_g, cut_g], axis=1) self.last_c_g = gstack[:, 1:] # print self.last_c_g gsum = tf.reduce_sum(gstack, axis=1)
tensorflow.reshape
5,157
import tensorflow as tf class InputOnlyProblem(problem_module.Problem): def hparams(self, defaults, model_hparams): hp = defaults hp.modality = {"inputs": modalities.SymbolModality} hp.vocab_size = {"inputs": 2} problem = InputOnlyProblem(False, False) p_hparams = problem.get_hparams() self.assertIsInstance(p_hparams.modality["inputs"], modalities.SymbolModality) self.assertLen(p_hparams.modality, 1) @tf.contrib.eager.run_test_in_graph_and_eager_modes() def testProblemHparamsTargetOnlyModality(self): class TargetOnlyProblem(problem_module.Problem): def hparams(self, defaults, model_hparams): hp = defaults hp.modality = {"targets": modalities.SymbolModality} hp.vocab_size = {"targets": 3} problem = TargetOnlyProblem(False, False) p_hparams = problem.get_hparams() self.assertIsInstance(p_hparams.modality["targets"], modalities.SymbolModality)
tensorflow.contrib.eager.run_test_in_graph_and_eager_modes
5,158
import tensorflow as tf conv.get_shape()) else: self.top_layer = conv self.top_size = num_out_channels biased = self.batch_norm(**self.batch_norm_config) if activation == 'relu': conv1 = tf.nn.relu(biased) elif activation == 'linear' or activation is None: conv1 = biased elif activation == 'tanh': conv1 = tf.nn.tanh(biased) else:
tensorflow.nn.relu
5,159
import tensorflow as tf self.lr = self.learning_rate if self.optim_type == 'adagrad': self.optimizer = tf.train.AdagradOptimizer(self.lr) elif self.optim_type == 'adam': self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
tensorflow.train.AdagradOptimizer
5,160
import tensorflow as tf """Verbosity level for summary ops. Pass 0 to disable both summaries and checkpoints.""") tf.flags.DEFINE_integer('save_summaries_steps', 0, """How often to save summaries for trained models. Pass 0 to disable summaries.""") tf.flags.DEFINE_integer('save_model_secs', 0, """How often to save trained models. Pass 0 to disable checkpoints""") tf.flags.DEFINE_string('train_dir', None, """Path to session checkpoints.""") tf.flags.DEFINE_string('eval_dir', '/tmp/tf_cnn_benchmarks/eval', """Directory where to write eval event logs.""") tf.flags.DEFINE_string('pretrain_dir', None, """Path to pretrained session checkpoints.""") tf.flags.DEFINE_string('result_storage', None, """Specifies storage option for benchmark results.
tensorflow.flags.DEFINE_string
5,161
import tensorflow as tf _, variance = tf.nn.moments(tf.reshape(var,[-1]),axes=[0]) normal = tf.distributions.Normal(loc=0.0, scale=tf.sqrt(variance)/10) white_noise = normal.sample(var.get_shape()) update_opts.append(var.assign(var + white_noise)) self.random_update_op = tf.group(update_opts) #apply clipping def _clip_gradients(self, grads_and_vars, grad_clipping_tuple): clipping_method, clipping_kwargs = grad_clipping_tuple
tensorflow.group
5,162
import tensorflow as tf else: q = tf.nn.softmax(q_logits) p = tf.nn.softmax(p_logits) kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1) num_labels = tf.reduce_sum(weights) num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels) kl.get_shape().assert_has_rank(2) weights.get_shape().assert_has_rank(1) loss = tf.identity(tf.reduce_sum(tf.expand_dims(weights, -1) * kl) / num_labels, name='kl') return loss
tensorflow.equal
5,163
import tensorflow as tf with tf.variable_scope(scope): nin = x.get_shape()[1].value w = tf.get_variable( "w", [nin, nh], initializer=self.ortho_init(init_scale)) b = tf.get_variable( "b", [nh], initializer=tf.constant_initializer(init_bias)) return tf.matmul(x, w) + b def ortho_init(self, scale=1.0): def _ortho_init(shape, dtype, partition_info=None): # lasagne ortho init for tf shape = tuple(shape)
tensorflow.matmul
5,164
import tensorflow as tf tf.equal(image_rank, 3), ['Wrong rank for tensor %s [expected] [actual]', image_list[i].name, 3, image_rank]) rank_assertions.append(rank_assert) image_shape = control_flow_ops.with_dependencies( [rank_assertions[0]], tf.shape(image_list[0])) image_height = image_shape[0] image_width = image_shape[1] crop_size_assert = tf.Assert( tf.logical_and( tf.greater_equal(image_height, crop_height), tf.greater_equal(image_width, crop_width)),
tensorflow.shape
5,165
import tensorflow as tf if t == 0: x = self._word_embedding(inputs=tf.fill([tf.shape(features)[0]], self._start)) else: x = self._word_embedding(inputs=sampled_word, reuse=True) context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0)) alpha_list.append(alpha) if self.selector: context, beta = self._selector(context, h, reuse=(t!=0)) beta_list.append(beta) with tf.variable_scope('lstm', reuse=(t!=0)): _, (c, h) = lstm_cell(inputs=tf.concat(axis=1, values=[x, context]), state=[c, h]) logits = self._decode_lstm(x, h, context, reuse=(t!=0)) sampled_word = tf.argmax(logits, 1) sampled_word_list.append(sampled_word) alphas = tf.transpose(tf.stack(alpha_list), (1, 0, 2)) # (N, T, L) betas = tf.transpose(tf.squeeze(beta_list), (1, 0)) # (N, T) sampled_captions = tf.transpose(tf.stack(sampled_word_list), (1, 0)) # (N, max_len) return alphas, betas, sampled_captions
tensorflow.variable_scope
5,166
import tensorflow as tf # Trainable parameters mask = tf.equal(mask, tf.ones_like(mask))
tensorflow.ones_like
5,167
import tensorflow as tf propensity_list = tf.unstack(propensity, axis=1) # Compute propensity weights pw_list = [] for i in range(len(propensity_list)): pw_i = propensity_list[0] / propensity_list[i] pw_list.append(pw_i) propensity_weights = tf.stack(pw_list, axis=1) if self.hparams.max_propensity_weight > 0: propensity_weights = tf.clip_by_value(propensity_weights, clip_value_min=0, clip_value_max=self.hparams.max_propensity_weight) return propensity_weights def click_weighted_softmax_cross_entropy_loss(self, output, labels, propensity_weights, name=None): """Computes listwise softmax loss with propensity weighting. Args:
tensorflow.clip_by_value
5,168
import tensorflow as tf "mean", [dim], tf.constant_initializer(0.), trainable=False) step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False) if scale: gamma = variable_on_cpu("gamma", [dim], tf.constant_initializer(1.)) beta = variable_on_cpu("beta", [dim], tf.constant_initializer(0.)) # choose the appropriate moments
tensorflow.constant_initializer
5,169
import tensorflow as tf if inputs.get_shape().ndims != 2: raise ValueError('inputs must be of size batch_size * batch_sentence_length') self.inputs = inputs with tf.variable_scope(name): self.embeddings = tf.get_variable( name='embeddings', shape=(vocabulary_size, embedding_size), initializer=embeddings_initializer, dtype=LayersConfig.tf_dtype, **(embeddings_kwargs or {})
tensorflow.get_variable
5,170
import tensorflow as tf return tf.nn.softmax(logits) preds = GetWordPred(wvsum) z = tf.tile(tf.reshape(tf.reduce_sum(preds,1),[-1,1]), [1, out_vocab_size]) self.preds, self.z = preds, z self.probs = tf.div(preds, z) #normalize
tensorflow.reduce_sum
5,171
import tensorflow as tf

# sigma=sigma_rpn=3, dim=[1, 2, 3]
def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):
    sigma_2 = sigma ** 2
    box_diff = bbox_pred - bbox_targets
    in_box_diff = bbox_inside_weights * box_diff  # rows belonging to the foreground are non-zero; all other rows are zero
    abs_in_box_diff = tf.abs(in_box_diff)
    # decide which positions get weight 1 (including positions that are already 0, i.e. non-foreground)
    # and which positions get weight 0
    smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
    # smooth L1 function (slightly different from the paper)
    in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
    out_loss_box = bbox_outside_weights * in_loss_box
    loss_box = tf.reduce_mean(tf.reduce_sum(
        out_loss_box,
        axis=dim
tensorflow.less
5,172
from tensorflow.python.ops import control_flow_ops if clones_gradients: if summarize_gradients: # Add summaries to the gradients. summaries |= set(_add_gradients_summaries(clones_gradients)) # Create gradient updates. grad_updates = optimizer.apply_gradients(clones_gradients, global_step=global_step) update_ops.append(grad_updates) update_op = tf.group(*update_ops) train_op = control_flow_ops.with_dependencies([update_op], total_loss, name='train_op') else: clones_losses = [] regularization_losses = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) for clone in clones: with tf.name_scope(clone.scope): clone_loss = _gather_clone_loss(clone, len(clones), regularization_losses) if clone_loss is not None:
tensorflow.python.ops.control_flow_ops.with_dependencies
5,173
import tensorflow as tf input_tensor = tf.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions.
tensorflow.matmul
5,174
import tensorflow as tf self.hparams.block_dim ], initializer=tf.initializers.variance_scaling(distribution="uniform"))
tensorflow.initializers.variance_scaling
5,175
from tensorflow.contrib.layers.python.layers import feature_column for i in range(4) ] linear_features.append( feature_column.sparse_column_with_hash_bucket( 'dummy_sparse_column', hash_bucket_size=100))
tensorflow.contrib.layers.python.layers.feature_column.sparse_column_with_hash_bucket
5,176
import tensorflow as tf eval_config.batch_size = 1 eval_config.num_steps = 1 train_graph = tf.Graph() eval_graph = tf.Graph() infer_graph = tf.Graph() with train_graph.as_default(): initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
tensorflow.Graph
5,177
import tensorflow as tf with tf.variable_scope("input_info", reuse=False): tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph)) tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate)) tf.summary.scalar('advantage', tf.reduce_mean(adv)) tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph)) if self.full_tensorboard_log: tf.summary.histogram('rewards', self.reward_ph) tf.summary.histogram('learning_rate', self.learning_rate) tf.summary.histogram('advantage', adv) tf.summary.histogram('action_probability', self.mu_ph) if tf_util.is_image(self.observation_space): tf.summary.image('observation', train_model.obs_ph) else: tf.summary.histogram('observation', train_model.obs_ph) trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha, epsilon=self.rprop_epsilon) _opt_op = trainer.apply_gradients(grads) # so when you call _train, you first do the gradient step, then you apply ema
tensorflow.summary.histogram
5,178
import tensorflow as tf print("Setting up image reader...") train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir) print(len(train_records)) print(len(valid_records)) print("Setting up dataset reader") image_options = {'resize': True, 'resize_size': IMAGE_SIZE} if FLAGS.mode == 'train': train_dataset_reader = dataset.BatchDatset(train_records, image_options) validation_dataset_reader = dataset.BatchDatset(valid_records, image_options) sess = tf.Session() print("Setting up Saver...") saver = tf.train.Saver() # create two summary writers to show training loss and validation loss in the same graph # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph) validation_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/validation') sess.run(tf.global_variables_initializer()) ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir) if ckpt and ckpt.model_checkpoint_path: saver.restore(sess, ckpt.model_checkpoint_path) print("Model restored...") if FLAGS.mode == "train": for itr in xrange(MAX_ITERATION):
tensorflow.train.Saver
5,179
import tensorflow as tf # The autoencoder network def encoder(x, reuse=False, supervised=False): """ Encode part of the autoencoder. :param x: input to the autoencoder :param reuse: True -> Reuse the encoder variables, False -> Create or search of variables before creating :param supervised: True -> returns output without passing it through softmax, False -> returns output after passing it through softmax. :return: tensor which is the classification output and a hidden latent variable of the autoencoder. """ if reuse: tf.get_variable_scope().reuse_variables() with tf.name_scope('Encoder'): e_dense_1 = tf.nn.relu(dense(x, input_dim, n_l1, 'e_dense_1')) e_dense_2 = tf.nn.relu(dense(e_dense_1, n_l1, n_l2, 'e_dense_2')) latent_variable = dense(e_dense_2, n_l2, z_dim, 'e_latent_variable') cat_op = dense(e_dense_2, n_l2, n_labels, 'e_label') if not supervised: softmax_label = tf.nn.softmax(logits=cat_op, name='e_softmax_label') else: softmax_label = cat_op return softmax_label, latent_variable
tensorflow.get_variable_scope
5,180
import tensorflow as tf self.gen_out_rot_loss = self.get_loss(self.gen_out_pred, self.rot_label_pl, gen_out_end_points) #classification loss #need to fix if self.ms_task: with tf.variable_scope('mixed'): #add fake pc as a rotation class num_to_add = int(max(self.batch_size/self.num_angles, 1)) idx = tf.range(0, self.batch_size, 1) idx = tf.random_shuffle(idx)[0:num_to_add] self.fake_to_add = tf.gather(self.generator_out, idx) self.mixed_pc = tf.concat([self.real_pc_rotated, self.fake_to_add], 0) self.mixed_label = tf.concat([self.rot_label_pl, tf.constant(self.num_angles, shape = (num_to_add,))], axis = 0) mixed_idx = tf.range(0, self.mixed_label.get_shape().as_list()[0], 1) mixed_idx = tf.random_shuffle(mixed_idx)[0:self.batch_size] self.mixed_pc = tf.gather(self.mixed_pc, mixed_idx) self.mixed_label = tf.gather(self.mixed_label, mixed_idx) self.mixed_pred, mixed_end_points = self.get_pred(self.mixed_pc) self.mixed_loss = self.get_loss(self.mixed_pred, self.mixed_label, mixed_end_points) with tf.variable_scope('discriminator') as scope: self.real_prob, self.real_logit = self.discriminator(self.real_pc_rotated, scope=scope, **disc_kwargs) self.synthetic_prob, self.synthetic_logit = self.discriminator(self.gen_out_rotated, reuse=True, scope=scope, **disc_kwargs) # Compute WGAN losses self.loss_d = tf.reduce_mean(self.synthetic_logit) - tf.reduce_mean(self.real_logit) # comparing rotated fake and real images self.loss_g = -tf.reduce_mean(self.synthetic_logit) # Add rotation loss
tensorflow.gather
5,181
import tensorflow as tf if rank > 2: # reshape back to time dimension out = tf.reshape(out, shape=original_tensor_shape) return out @layer def dropout_layer(tensor, keep_prob=1.0, **opts): keep_prob = _global_keep_prob(keep_prob) out = tf.nn.dropout(tensor, keep_prob=keep_prob) return out # TODO: should i normalize? @layer def word_dropout_layer(tensor, keep_prob=1.0, **opts): keep_prob = _global_keep_prob(keep_prob) rank = _rank(tensor) assert rank == 3, "Use embedding lookup layer"
tensorflow.nn.dropout
5,182
from tensorflow.python.framework import ops """Implementation of Perturbed gold Gradient Descent, i.e., FedDane optimizer""" def __init__(self, learning_rate=0.001, mu=0.01, use_locking=False, name="PGD"): super(PerGodGradientDescent, self).__init__(use_locking, name) self._lr = learning_rate self._mu = mu # Tensor versions of the constructor arguments, created in _prepare(). self._lr_t = None self._mu_t = None def _prepare(self): self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate") self._mu_t = ops.convert_to_tensor(self._mu, name="prox_mu") def _create_slots(self, var_list): # Create slots for the global solution. for v in var_list: self._zeros_slot(v, "vstar", self._name) self._zeros_slot(v, "gold", self._name) def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
tensorflow.python.framework.ops.convert_to_tensor
5,183
import tensorflow as tf random_tensor = (1.0 - self._dropout_keep_prob + tf.random_uniform(tf.shape(confidence_scores))) binary_tensor = -50.0 * tf.floor(random_tensor) csshape = confidence_scores.get_shape() self.cs = tf.nn.softmax(tf.constant(1.0, shape=csshape)) # The final prediction is the average of the predictions for each word # weighted by the individual confidence/utility scores.
tensorflow.constant
5,184
import tensorflow as tf if hparams.mode == tf.estimator.ModeKeys.PREDICT: layer_shape = common_layers.shape_list(layer) if hparams.full_latent_tower: rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits]) else: rand = tf.random_uniform(layer_shape[:-3] + [
tensorflow.random_uniform
5,185
import tensorflow as tf # now we wait for all workers to finish # we create an empty dataset and wait # until we collected as many observations in it # as there were points in the batch all_new_data = Dataset( tf.zeros((0, initial_data.query_points.shape[1]), tf.float64), tf.zeros((0, initial_data.observations.shape[1]), tf.float64), ) while len(all_new_data) < num_workers: # this line blocks the process until new data is available in the queue new_data = oq.get() print(f"Process {pid}: Main : received data {new_data}", flush=True) new_data = Dataset( query_points=tf.constant(new_data[0], dtype=tf.float64), observations=tf.constant(new_data[1], dtype=tf.float64), ) all_new_data = all_new_data + new_data # tell Trieste of new batch of observations sync_bo.tell(all_new_data) finally: terminate_processes(observer_processes) stop = timeit.default_timer() # Collect the observations, compute the running time sync_lp_observations = ( sync_bo.to_result().try_get_final_dataset().observations - SCALED_BRANIN_MINIMUM
tensorflow.constant
5,186
import tensorflow as tf def contra_traj_lossV9(pred, tgt, horizon=12, margin=1): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1]) tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = tf.cast(tgt_dif > 0, tf.bool) # tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = tf.where(geq, pred_dif, -pred_dif) loss = tf.maximum(0., margin-pred_posi_dif) cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32) final_loss = tf.reduce_mean(loss) return final_loss, cstr_pct def contra_traj_lossV4(pred, tgt, horizon=12, resample=1, hard_ratio=1.0): horizon_pred = horizon_sumV1(pred, horizon) horizon_tgt = horizon_sumV1(tgt, horizon) pred_flat = tf.reshape(horizon_pred, [-1]) tgt_flat = tf.reshape(horizon_tgt, [-1]) batch = tf.stack([pred_flat, tgt_flat], 1) sample_func = sample_pair(batch) def sample_compute(_): pairs = sample_func()
tensorflow.reduce_mean
5,187
import tensorflow as tf "event_shape=(), " "dtype=float16)") chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly") self.assertEqual( str(chi2), "tfp.distributions.Chi2(" "\"silly/\", " # What a silly name that is! "batch_shape=(2,), " "event_shape=(), " "dtype=float32)") # There's no notion of partially known shapes in eager mode, so exit # early. if tf.executing_eagerly(): return exp = tfd.Exponential(rate=tf.placeholder_with_default( input=1., shape=None)) self.assertEqual( str(exp), "tfp.distributions.Exponential(\"Exponential/\", " # No batch shape. "event_shape=(), " "dtype=float32)") def testStrWorksCorrectlyMultivariate(self): mvn_static = tfd.MultivariateNormalDiag(
tensorflow.executing_eagerly
5,188
from tensorflow.python.framework import ops reduction_indices=[dim,], keep_dims=True) mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None, name or 'mean_cosine_distance') mean_distance = math_ops.sub(1.0, mean_distance) update_op = math_ops.sub(1.0, update_op) if metrics_collections: ops.add_to_collections(metrics_collections, mean_distance) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_distance, update_op @deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask') def streaming_percentage_less(values, threshold, ignore_mask=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the percentage of values less than the given threshold. The `streaming_percentage_less` function creates two local variables,
tensorflow.python.framework.ops.add_to_collections
5,189
import tensorflow as tf num_layers = [10,10,4,4] elif depth == 49: num_layers = [16,16,10,6] else: raise ValueError('depth=%g is a not a valid setting!' % depth) # input tensors self.input_x = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_x") self.input_tags = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_tags") self.input_deps = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_dependency") self.input_head = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_head") self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y") self.is_training = tf.placeholder(tf.bool) initializer = tf.contrib.layers.variance_scaling_initializer() # Embedding Lookup 16 with tf.device('/cpu:0'), tf.name_scope("embedding"): if use_he_uniform: self.embedding_W = tf.get_variable(name='lookup_W', shape=[num_quantized_chars, embedding_size], initializer=tf.contrib.layers.variance_scaling_initializer()) else:
tensorflow.placeholder
5,190
import tensorflow as tf return loss def compute_error_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0): geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2)
tensorflow.cast
5,191
import tensorflow as tf cpn_backbone = cpn.xt_cascaded_pyramid_net def keypoint_model_fn(features, labels, mode, params): targets = labels['targets'] shape = labels['shape'] classid = labels['classid'] key_v = labels['key_v'] isvalid = labels['isvalid'] norm_value = labels['norm_value'] cur_batch_size = tf.shape(features)[0] #features= tf.ones_like(features) with tf.variable_scope(params['model_scope'], default_name=None, values=[features], reuse=tf.AUTO_REUSE): pred_outputs = cpn_backbone(features, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], params['heatmap_size'], (mode == tf.estimator.ModeKeys.TRAIN), params['data_format']) if params['data_format'] == 'channels_last': pred_outputs = [tf.transpose(pred_outputs[ind], [0, 3, 1, 2], name='outputs_trans_{}'.format(ind)) for ind in list(range(len(pred_outputs)))] score_map = pred_outputs[-1] pred_x, pred_y = get_keypoint(features, targets, score_map, params['heatmap_size'], params['train_image_size'], params['train_image_size'], (params['model_scope'] if 'all' not in params['model_scope'] else '*'), clip_at_zero=True, data_format=params['data_format']) # this is important!!! targets = 255. * targets blur_list = [1., 1.37, 1.73, 2.4, None]#[1., 1.5, 2., 3., None]
tensorflow.variable_scope
5,192
import tensorflow as tf criterion = criterion_map[FLAGS.model.loss] loss_mod = MultiStepLoss(model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step) loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay) shadow_loss_mods = [MultiStepLoss(shadow_model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step) for shadow_model in shadow_models] for shadow_loss_mod in shadow_loss_mods: shadow_loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay) algo = TRPO(vfn=vfn, policy=policy, dim_state=dim_state, dim_action=dim_action, **FLAGS.TRPO.as_dict()) advtask = ADVTASK(dim_state, dim_action, policy, vfn, warmup_policy, warmup_vfn, task, alpha=alpha, beta=beta, nsample=nsample, atype=atype) tf.get_default_session().run(tf.global_variables_initializer()) print ("norm params:", normalizers_parameters) print ("norm_copy params:", normalizers_copy_parameters) norm_before = tf.get_default_session().run(normalizers_parameters) print ("norm_before:", norm_before) assert FLAGS.algorithm != 'MF', "don't support model free for now"
tensorflow.get_default_session
5,193
import tensorflow as tf f1 = tf.reduce_sum(half(masked, 0), 2) / tf.reduce_sum(half(mask, 0)) f2 = tf.reduce_sum(half(masked, 1), 2) / tf.reduce_sum(half(mask, 1)) return tf.concat([x, f1, f2], 1) def batch_norm(x, train, name, decay=0.99, epsilon=1e-5): shape = x.get_shape().as_list() with tf.variable_scope(name): beta = tf.get_variable('beta', [shape[-1]], initializer=tf.constant_initializer(0.)) gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02)) pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False) pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False) if pop_mean not in tf.moving_average_variables():
tensorflow.variable_scope
5,194
import tensorflow as tf w (tf.Variable): Weight matrix. b (tf.Variable): Bias vector. y_ (tf.placeholder): Input result vector. cross_entropy (tf.Operation): Final layer of network. cross_entropy_grads (tf.Operation): Gradient computation. sess (tf.Session): Session used for training. variables (TensorFlowVariables): Extracted variables and methods to manipulate them. """ def __init__(self, shape): """Creates a LinearModel object.""" x = tf.placeholder(tf.float32, [None, shape[0]]) w = tf.Variable(tf.zeros(shape)) b = tf.Variable(tf.zeros(shape[1])) self.x = x self.w = w self.b = b y = tf.nn.softmax(tf.matmul(x, w) + b) y_ = tf.placeholder(tf.float32, [None, shape[1]]) self.y_ = y_ cross_entropy = tf.reduce_mean( -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]) ) self.cross_entropy = cross_entropy self.cross_entropy_grads = tf.gradients(cross_entropy, [w, b])
tensorflow.zeros
5,195
import tensorflow as tf facenet.load_model(args.model_dir) # Get input and output tensors images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0") embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0") phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0") # Run forward pass to calculate embeddings
tensorflow.get_default_graph
5,196
import tensorflow as tf o_me2 = tf.concat([o_d2, o_c3], 3) # Skip connection o_d3 = self.general_deconv2d(o_me2, self.base_number_of_features * 2, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_3') o_me3 = tf.concat([o_d3, o_c2], 3) # Skip connection o_d4 = self.general_deconv2d(o_me3, self.base_number_of_features, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_4')
tensorflow.concat
5,197
import tensorflow as tf encode = tf.placeholder(tf.int32, shape=[None], name="encode") decode = tf.placeholder(tf.int32, shape=[decode_max_length + 2], name="decode")
tensorflow.placeholder
5,198
import tensorflow as tf in1 = tf.placeholder(tf_input_dtype, [ None, ] + tu.shape_to_tf_shape(input_shape), "TENSOR_INPUT1") # If the input is a string, then convert each string to the # equivalent float value. if tf_input_dtype == tf.string: in0 = tf.strings.to_number(in0, tf.int32) in1 = tf.strings.to_number(in1, tf.int32) add = tf.add(in0, in1, "ADD") sub = tf.subtract(in0, in1, "SUB") # Cast or convert result to the output dtype.
tensorflow.strings.to_number
5,199