seed        stringlengths   25 .. 2.89k
seed_api    stringlengths   14 .. 102
index       int64           0 .. 14.8k
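The three columns above define the schema of the records that follow: seed is a TensorFlow code snippet (string, 25 to 2.89k characters), seed_api is the fully qualified TensorFlow API the snippet exercises (string, 14 to 102 characters), and index is an integer row id (0 to 14.8k). Each record is listed below as three consecutive lines in the order seed, seed_api, index. As a minimal sketch of that record layout, assuming Python; the class name SeedRecord is illustrative and not part of the dataset, and the example values are taken (seed truncated) from the first record below:

    from dataclasses import dataclass

    @dataclass
    class SeedRecord:
        """One record of this dump: a code snippet, the API it exercises, and a row index."""
        seed: str       # TensorFlow code snippet (string, 25 .. 2.89k characters)
        seed_api: str   # fully qualified API name (string, 14 .. 102 characters)
        index: int      # row index (0 .. 14.8k)

    # Illustrative instance built from the first record below (seed truncated for brevity).
    example = SeedRecord(
        seed="import tensorflow as tf ...",
        seed_api="tensorflow.name_scope",
        index=2700,
    )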
import tensorflow as tf normalizer_fn=tf.contrib.layers.batch_norm, normalizer_params={"is_training": self.train}): self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"]) self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None) # Compute loss with tf.name_scope("loss"): self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y)) # Optimizer with tf.name_scope("training_op"): self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss) # Perf metrics with tf.name_scope("accuracy"): prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1)) self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
tensorflow.name_scope
2,700
import tensorflow as tf "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature( [max_predictions_per_seq], tf.int64 ), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature( [max_predictions_per_seq], tf.float32 ), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length,
tensorflow.constant
2,701
import tensorflow as tf network = resnet_model.imagenet_resnet_v2( resnet_size=18, num_classes=class_num, mode='se', data_format=None) inputs= network(inputs=inputs, is_training=training) feat = tf.nn.l2_normalize(inputs, 1, 1e-10, name='feat') inputs = tf.layers.dense(inputs=inputs, units=class_num) # inputs = tf.layers.dense(inputs=feat, units=class_num) inputs = tf.identity(inputs, 'final_dense')
tensorflow.layers.dense
2,702
import tensorflow as tf shape = (config.batch_size,) + config.input_shape self.model = revnet.RevNet(config=config) self.x = tf.random_normal(shape=shape, dtype=tf.float64) self.t = tf.random_uniform(
tensorflow.random_normal
2,703
import tensorflow as tf learning_rate = tf.placeholder(tf.float32, name='learning_rate') logits, feat = resnet_model_fn(x, training=training_flag) cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits)) Focal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5)) l2_loss = weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()]) Center_loss, Centers = center_loss(feat, tf.cast(label, dtype=tf.int32), 0.95, class_num) Total_loss = cost + l2_loss optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True) # Batch norm requires update_ops to be added as a train_op dependency.
tensorflow.nn.l2_loss
2,704
import tensorflow as tf """ vec_pairs = tf.gather(vecs, gather_inds) vec_len = int(vec_pairs.get_shape()[2]) * 2 vec_pairs = tf.reshape(vec_pairs, [-1, vec_len]) return vec_pairs
tensorflow.reshape
2,705
import tensorflow as tf step_size=tf.shape(self.obs)[:1]) g_hat = self.manager_lstm.output self.g = tf.nn.l2_normalize(g_hat, dim=1) self.manager_vf = self.build_value(g_hat) def build_worker(self): with tf.variable_scope('worker'): num_acts = self.act_space # Calculate U self.worker_lstm = SingleStepLSTM(tf.expand_dims(self.z, [0]), size=num_acts * self.k, step_size=tf.shape(self.obs)[:1]) flat_logits = self.worker_lstm.output self.worker_vf = self.build_value(flat_logits) U = tf.reshape(flat_logits, [-1, num_acts, self.k]) # Calculate w cut_g = tf.stop_gradient(self.g) cut_g = tf.expand_dims(cut_g, [1]) gstack = tf.concat([self.prev_g, cut_g], axis=1) self.last_c_g = gstack[:, 1:]
tensorflow.shape
2,706
import tensorflow as tf return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.) #cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred) # Create a tensor named cross_entropy for logging purposes. tf.identity(cross_entropy, name='cross_entropy_loss') tf.summary.scalar('cross_entropy_loss', cross_entropy) loc_loss = tf.cond(n_positives > 0., lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.), lambda: tf.zeros_like(location_pred)) #loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets)) loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1)) loc_loss = tf.identity(loc_loss, name='location_loss') tf.summary.scalar('location_loss', loc_loss) tf.losses.add_loss(loc_loss) # Add weight decay to the loss. We exclude the batch norm variables because # doing so leads to a small improvement in accuracy. loss = cross_entropy + loc_loss + params['weight_decay'] * tf.add_n(
tensorflow.stop_gradient
2,707
import tensorflow as tf # the convolutions def make_convolutions(inp): with tf.variable_scope('CNN') as scope: convolutions = []
tensorflow.variable_scope
2,708
from tensorflow.python.framework import ops if weights is not None: weights = math_ops.to_float(weights) values = math_ops.mul(values, weights) value_tensor = array_ops.identity(count) update_op = state_ops.assign_add(count, math_ops.reduce_sum(values)) if metrics_collections: ops.add_to_collections(metrics_collections, value_tensor) if updates_collections: ops.add_to_collections(updates_collections, update_op) return value_tensor, update_op def _streaming_true_positives(predictions, labels, weights=None, metrics_collections=None, updates_collections=None, name=None): """Sum the weights of true_positives.
tensorflow.python.framework.ops.add_to_collections
2,709
from tensorflow.python.platform import gfile def _create_tfrecord_dataset(tmpdir): if not gfile.Exists(tmpdir): gfile.MakeDirs(tmpdir) data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
tensorflow.python.platform.gfile.MakeDirs
2,710
import tensorflow as tf self.w = tf.get_variable('w', [k_h, k_w, out_dim, input_dim], initializer=tf.random_normal_initializer(stddev=stddev))
tensorflow.random_normal_initializer
2,711
import tensorflow as tf cell_fw = GetCell() with tf.variable_scope('bw'): cell_bw = GetCell() rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs, dtype=tf.float32, sequence_length=self.seq_lens) if proj_size: out_size = 2 * proj_size else: out_size = 2 * hidden_size super(TweetSeqModel, self)._DoPredictions(out_size, rnnout, class_weights=weights) self.cost = tf.reduce_mean(self.example_weights * self._xent) class CharSeqModel(object): #formerly TweetSeqModel """ Treats each document (tweet) as a single "word," which is fed through c2v, and the output "embedding" sized to be a vector of language predictions. """ def __init__(self, out_vocab_size=None, batch_size=10, model_params=None, c2v=None, max_sequence_len=None, dropout_keep_prob=None, weights=None):
tensorflow.reduce_mean
2,712
import tensorflow as tf flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)]) flat_inputs = tf.nn.embedding_lookup(embeddings, flat_inputs) encoder_inputs_ = tf.reshape(flat_inputs, tf.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value])) if pos_embeddings is not None: pos_inputs_ = tf.range(time_steps, dtype=tf.int32) pos_inputs_ = tf.nn.embedding_lookup(pos_embeddings, pos_inputs_) pos_inputs_ = tf.tile(tf.expand_dims(pos_inputs_, axis=0), [batch_size, 1, 1]) encoder_inputs_ = tf.concat([encoder_inputs_, pos_inputs_], axis=2) if other_inputs is not None: encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2) if encoder.use_dropout: noise_shape = [1, time_steps, 1] if encoder.pervasive_dropout else [batch_size, time_steps, 1] encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.word_keep_prob, noise_shape=noise_shape) size = tf.shape(encoder_inputs_)[2] noise_shape = [1, 1, size] if encoder.pervasive_dropout else [batch_size, time_steps, size] encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.embedding_keep_prob, noise_shape=noise_shape)
tensorflow.concat
2,713
import tensorflow as tf img_h4 = lrelu(linear(tf.nn.dropout(tf.reshape(img_h3, [self.batch_size, -1]), keep_prob), featsize, 'h4_lin')) img_z = lrelu(linear(tf.nn.dropout(img_h4, keep_prob), featsize, 'hz_lin')) return img_h0, img_h1, img_h2, img_h3, img_h4, img_z with tf.variable_scope("conv") as scope: srcimg_h0, srcimg_h1, srcimg_h2, srcimg_h3, srcimg_h4, srcimg_z = encode(srcimg) scope.reuse_variables() tgtimg_h0, tgtimg_h1, tgtimg_h2, tgtimg_h3, tgtimg_h4, tgtimg_z = encode(tgtimg) tgtctx_h0, tgtctx_h1, tgtctx_h2, tgtctx_h3, tgtctx_h4, tgtctx_z = encode(tgtctx) with tf.variable_scope("translate") as scope: trans_h0 = lrelu(linear(tf.nn.dropout(tf.concat([srcimg_z, tgtctx_z], 1), keep_prob), featsize, 'trans_h0')) trans_z = linear(tf.nn.dropout(trans_h0, keep_prob), featsize, 'trans_z') self.translated_z = trans_z s_h, s_w = self.output_height, self.output_width s_h0, s_h1, s_h2, s_h3 = \ int(s_h/ns0), int(s_h/ns0/ns1), int(s_h/ns0/ns1/ns2), int(s_h/ns0/ns1/ns2/ns3) s_w0, s_w1, s_w2, s_w3 = \ int(s_w/ns0), int(s_w/ns0/ns1), int(s_w/ns0/ns1/ns2), int(s_w/ns0/ns1/ns2/ns3) def decode(z, skip_h3, skip_h2, skip_h1, skip_h0):
tensorflow.concat
2,714
from tensorflow.python.ops import gradients_impl input_c=input_c, params=params) all_grads = gradients_impl.gradients( [output, output_h, output_c],
tensorflow.python.ops.gradients_impl.gradients
2,715
import tensorflow as tf cost =tf.reduce_mean(tf.square(y-readout_action)) train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
tensorflow.train.AdamOptimizer
2,716
import tensorflow as tf A tensor. """ zero = _to_tensor(0., x.dtype.base_dtype) inf = _to_tensor(np.inf, x.dtype.base_dtype) x = tf.clip_by_value(x, zero, inf) return tf.sqrt(x) def var(x, axis=None, keepdims=False): """Variance of a tensor, alongside the specified axis.
tensorflow.sqrt
2,717
from tensorflow.python.client import graph_util return [orig_input_shape] @ops.RegisterStatistics("Conv2D", "flops") def _calc_conv_flops(graph, node): """Calculates the compute resources needed for Conv2D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_height = int(filter_shape[0]) filter_width = int(filter_shape[1]) filter_in_depth = int(filter_shape[2]) output_count = np.prod(output_shape.as_list()) return ops.OpStats("flops", (output_count * filter_in_depth * filter_height * filter_width * 2)) @ops.RegisterStatistics("Conv2D", "weight_parameters")
tensorflow.python.client.graph_util.tensor_shape_from_node_def_name
2,718
import tensorflow as tf next_sentence_log_probs, next_sentence_labels): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = tf.argmax( masked_lm_log_probs, axis=-1, output_type=tf.int32) masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = tf.reshape(masked_lm_ids, [-1]) masked_lm_weights = tf.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = tf.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions,
tensorflow.reshape
2,719
import tensorflow as tf total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, optimizer) output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: if task_name not in ["sts-b", "cola"]: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean( values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } elif task_name == "sts-b": def metric_fn(per_example_loss, label_ids, logits, is_real_example): """Compute Pearson correlations for STS-B.""" # Display labels and predictions concat1 = contrib_metrics.streaming_concat(logits) concat2 = contrib_metrics.streaming_concat(label_ids) # Compute Pearson correlation pearson = contrib_metrics.streaming_pearson_correlation(
tensorflow.metrics.mean
2,720
import tensorflow as tf X = tf.nn.leaky_relu(X, 0.2) X = self.conv('d_1', X, 512, size=1, stride=1, padding="SAME") X = tf.nn.leaky_relu(X, 0.2) X = self.conv('d_2', X, 512, size=1, stride=1, padding="SAME")
tensorflow.nn.leaky_relu
2,721
import tensorflow as tf dxt = tf.stack(dxt_list) xt = tf.stack(states) num = (1 - self.alpha) * dxt + tf.tensordot(self.alpha * dxt , tf.transpose( tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity,self.Dale_rec)), axes=1) * \ tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt)) denom = dxt # sum over hidden units num = tf.reduce_sum(tf.square(num), axis=2) denom = tf.reduce_sum(tf.square(denom), axis=2) bounded = tf.where(tf.greater(denom, 1e-20), tf.div(num, 1.0 * denom), tf.ones_like(num)) nelems = tf.reduce_mean(tf.where(tf.greater(denom, 1e-20), 1.0 * tf.ones_like(num), 1.0 * tf.zeros_like(num)), axis=1) # sum mean over each batch by time steps Omega = tf.square(bounded - 1.0) Omega = tf.reduce_sum(tf.reduce_mean(Omega, axis=1)) / (1.0 * tf.reduce_sum(nelems)) out = tf.gradients(Omega, self.W_rec) out[0] = tf.Print(out[0], [out[0], self.W_rec, Omega], "omega grads") out[0] = tf.verify_tensor_all_finite(out[0], "dead omega grad") return out, test
tensorflow.ones_like
2,722
import tensorflow as tf candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts) # [num_words, max_span_width] candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1)) # [num_words, max_span_width] candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices)) # [num_words, max_span_width] flattened_candidate_mask = tf.reshape(candidate_mask, [-1]) # [num_words * max_span_width] candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask) # [num_candidates] candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask) # [num_candidates]
tensorflow.reshape
2,723
import tensorflow as tf #Construct carlini adversarial samples model_carlini_adv = models_carlini(hps) #Construct predictions image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size, num_channel])############MNIST and CIFAR10 are different ar here adv_image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size, num_channel])############MNIST and CIFAR10 are different ar here predict = tf.placeholder(tf.float32,shape=[hps.batch_size, 10])
tensorflow.placeholder
2,724
import tensorflow as tf # 2. Loss function, training/eval ops if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL: loss = tf.losses.mean_squared_error(labels, predictions) train_op = tf.contrib.layers.optimize_loss( loss = loss, global_step = tf.train.get_global_step(), learning_rate = 0.01, optimizer = "SGD") eval_metric_ops = { "rmse": tf.metrics.root_mean_squared_error(labels, predictions) }
tensorflow.train.get_global_step
2,725
import tensorflow as tf model = linear_regression.LinearModel() with tf.device(device()): optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1) # Perform burn-in. linear_regression.fit(model, burn_in_dataset, optimizer)
tensorflow.train.GradientDescentOptimizer
2,726
import tensorflow as tf # Ensure maxnorm constraints are initially satisfied entity_init = dense_maxnorm(entity_init, self.maxnorm) rel_init = dense_maxnorm(rel_init, self.maxnorm) self.entity_embedding_vars = tf.Variable(entity_init) self.rel_embedding_vars = tf.Variable(rel_init) # Embedding layer for each (head, rel, tail) triple being fed in as input head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input) tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input) rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input) # Reshape rel_embed into square D x D matrices rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size)) # Reshape head_embed and tail_embed to be suitable for the matrix multiplication head_embed_row = tf.expand_dims(head_embed, 1) # embeddings as row vectors tail_embed_col = tf.expand_dims(tail_embed, 2) # embeddings as column vectors
tensorflow.nn.embedding_lookup
2,727
import tensorflow as tf eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False) param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False) # Unmodified Q.
tensorflow.constant_initializer
2,728
import tensorflow as tf weighted_average = tf.zeros(shape=tf.stack([batch_size, 0])) weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]]) return weighted_average, weights def average_attention(hidden_states, encoder_input_length, *args, **kwargs): # attention with fixed weights (average of all hidden states) lengths = tf.to_float(tf.expand_dims(encoder_input_length, axis=1)) mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1]) weights = tf.to_float(mask) / lengths weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1) return weighted_average, weights def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs): weights = tf.one_hot(encoder_input_length - 1, tf.shape(hidden_states)[1]) weights = tf.to_float(weights) weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1) return weighted_average, weights
tensorflow.expand_dims
2,729
import tensorflow as tf # Write images to disk. image_write_ops = None if FLAGS.write_to_disk: image_write_ops = tf.write_file( '%s/%s'% (FLAGS.eval_dir, 'conditional_gan.png'), tf.image.encode_png(data_provider.float_image_to_uint8( reshaped_img[0]))) # For unit testing, use `run_eval_loop=False`. if not run_eval_loop: return tf.contrib.training.evaluate_repeatedly( FLAGS.checkpoint_dir, hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir), tf.contrib.training.StopAfterNEvalsHook(1)], eval_ops=image_write_ops, max_number_of_evaluations=FLAGS.max_number_of_evaluations) def _get_generator_inputs(num_images_per_class, num_classes, noise_dims): # Since we want a grid of numbers for the conditional generator, manually # construct the desired class labels. num_images_generated = num_images_per_class * num_classes noise = tf.random_normal([num_images_generated, noise_dims]) labels = [lbl for lbl in range(num_classes) for _ in range(num_images_per_class)]
tensorflow.contrib.training.SummaryAtEndHook
2,730
import tensorflow as tf (total_loss, per_example_loss, logits) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else:
tensorflow.train.init_from_checkpoint
2,731
import tensorflow as tf stride = encoder.maxout_stride k = tf.to_int32(tf.ceil(time_steps / stride) * stride) - time_steps # TODO: simpler pad = tf.zeros([batch_size, k, tf.shape(encoder_inputs_)[2]]) encoder_inputs_ = tf.concat([encoder_inputs_, pad], axis=1) encoder_inputs_ = tf.nn.pool(encoder_inputs_, window_shape=[stride], pooling_type='MAX', padding='VALID', strides=[stride]) encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / stride)) if encoder.highway_layers: x = encoder_inputs_ for j in range(encoder.highway_layers): size = x.shape[2].value
tensorflow.ceil
2,732
import tensorflow as tf stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1]) # 3 x (bx2) coor = tf.reshape(tf.matmul(xys, stn), [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2]) coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords') # b h w 2 sampled = ImageSample('warp', [image, coor], borderMode='constant') return sampled with argscope([Conv2D, FullyConnected], nl=tf.nn.relu): with tf.variable_scope('STN1'): sampled1 = get_stn(image) with tf.variable_scope('STN2'): sampled2 = get_stn(image) # For visualization in tensorboard with tf.name_scope('visualization'): padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1) transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1) stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz') tf.summary.image('visualize', tf.expand_dims(stacked, -1), max_outputs=30) sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat') logits = (LinearWrap(sampled) .FullyConnected('fc1', out_dim=256, nl=tf.nn.relu) .FullyConnected('fc2', out_dim=128, nl=tf.nn.relu)
tensorflow.name_scope
2,733
import tensorflow as tf size. Also randomly apply horizontal flip. Args: example: An example dict containing an image and a label. random_crop_pad: By how many pixels should the image be padded on each side before cropping. Returns: An example with the same label and an augmented version of the image. """ image, label = example['image'], example['label'] image = tf.image.random_flip_left_right(image) image_shape = tf.shape(image) image = tf.pad( image, [[random_crop_pad, random_crop_pad], [random_crop_pad, random_crop_pad], [0, 0]], mode='REFLECT') image = tf.image.random_crop(image, image_shape) return {'image': image, 'label': label} def auto_augmentation(example, dataset_name): """Applies the AutoAugment policy found for the dataset. AutoAugment: Learning Augmentation Policies from Data
tensorflow.pad
2,734
import tensorflow as tf # Now the computation. with tf.variable_scope(scope or "Linear"): matrix = tf.get_variable("Matrix", [total_arg_size, output_size]) if len(args) == 1: res = tf.matmul(args[0], matrix)
tensorflow.get_variable
2,735
import tensorflow as tf assert data_format in ['NHWC', 'NCHW'] axis = [1, 2] if data_format == 'NHWC' else [2, 3] return tf.reduce_mean(input_tensor=inputdata, axis=axis, name=name) @staticmethod def layernorm(inputdata, epsilon=1e-5, use_bias=True, use_scale=True,
tensorflow.reduce_mean
2,736
import tensorflow as tf self._create_params() arc_seq_1, entropy_1, log_prob_1, c, h = self._build_sampler(use_bias=True) arc_seq_2, entropy_2, log_prob_2, _, _ = self._build_sampler(prev_c=c, prev_h=h) self.sample_arc = (arc_seq_1, arc_seq_2) self.sample_entropy = entropy_1 + entropy_2 self.sample_log_prob = log_prob_1 + log_prob_2 def _create_params(self): initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1) with tf.variable_scope(self.name, initializer=initializer): with tf.variable_scope("lstm"): self.w_lstm = [] for layer_id in range(self.lstm_num_layers): with tf.variable_scope("layer_{}".format(layer_id)): w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size]) self.w_lstm.append(w) self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
tensorflow.variable_scope
2,737
import tensorflow as tf channel_2: output of second channel (i.e. branch_2), tensor of size [batch_size, 192] label: Tensor of shape [batch_size] margin: Margin of the contrastive loss Returns: loss: scalar float Tensor """ ######################## # PUT YOUR CODE HERE # ######################## D = (tf.reduce_sum((channel_1 - channel_2)**2, reduction_indices=1))**0.5 zeros = tf.fill(tf.shape(D), 0.0) # loss = 0.5*(label*(D**2.) + (1-label) * (tf.reduce_max([zeros, margin - D], reduction_indices=0))**2) loss = label*(D**2) + (1-label) * (tf.reduce_max([zeros, margin - D**2], 0)) ######################## # END OF YOUR CODE # ######################## return loss
tensorflow.reduce_max
2,738
import tensorflow as tf from object_detection.core import losses from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.protos import train_pb2 NUMBER_OF_CLASSES = 2 def get_input_function(): """A function to get test inputs. Returns an image with one box.""" image = tf.random_uniform([32, 32, 3], dtype=tf.float32) key = tf.constant('image_000000') class_label = tf.random_uniform( [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32) box_label = tf.random_uniform( [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32) return { fields.InputDataFields.image: image, fields.InputDataFields.key: key, fields.InputDataFields.groundtruth_classes: class_label,
tensorflow.random_uniform
2,739
import tensorflow as tf import time import numpy as np import tensorflow as tf import random from tensorflow.contrib import slim from npu_bridge.estimator import npu_ops from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig tf.app.flags.DEFINE_integer('input_size', 512, '') tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '') tf.app.flags.DEFINE_integer('num_readers', 16, '') tf.app.flags.DEFINE_float('learning_rate', 0.0001, '') tf.app.flags.DEFINE_integer('max_steps', 100000, '') tf.app.flags.DEFINE_integer('loss_scale', 1024, '') tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '') tf.app.flags.DEFINE_string('gpu_list', '1', '') tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '') tf.app.flags.DEFINE_boolean('restore', False, 'whether to resotre from checkpoint') tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tensorflow.app.flags.DEFINE_integer
2,740
import tensorflow as tf head_size: The size of head. initializer: Kernel initializer. activation: Actication function. name: The name scope of this layer. Returns: float logits Tensor. """ input_shape = get_shape_list(input_tensor) num_attention_heads= input_shape[2] with tf.variable_scope(name): w = tf.get_variable( name="kernel", shape=[num_attention_heads * head_size, hidden_size], initializer=initializer) w = tf.reshape(w, [num_attention_heads, head_size, hidden_size]) b = tf.get_variable( name="bias", shape=[hidden_size], initializer=tf.zeros_initializer) ret = tf.einsum("BFND,NDH->BFH", input_tensor, w) ret += b
tensorflow.variable_scope
2,741
from tensorflow.python.ops import math_ops predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) squared_error = math_ops.square(labels - predictions) return streaming_mean(squared_error, weights, metrics_collections, updates_collections, name or 'mean_squared_error')
tensorflow.python.ops.math_ops.square
2,742
import tensorflow as tf l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1) l2 = tf.matmul(l1, self.w2)+self.b2 l2=tf.nn.relu(l2) l3=tf.matmul(l2, self.w3)+self.b3 l3=tf.nn.relu(l3) out=tf.matmul(l3, self.w4)+self.b4 return out def softmax_loss(self,predicts,labels): predicts=tf.nn.softmax(predicts) labels=tf.one_hot(labels,classnum) loss=-tf.reduce_sum(labels*tf.log(predicts)) return loss def optimer(self,loss,lr=0.001): train_step=tf.train.GradientDescentOptimizer(lr).minimize(loss) return train_step path=r'C:\JC\test\train_model.ckpt' image,label=getinputs(r'C:\JC\tfrecord\64_shuffle/train.tfrecords') test_image,test_label=getinputs(r'C:\JC\tfrecord\64_shuffle/test.tfrecords') valid_image,valid_label= getinputs(r'C:\JC\tfrecord\64_shuffle\validation.tfrecords')
tensorflow.log
2,743
import tensorflow as tf """ ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets)) SmoothL1(x) = 0.5 * (sigma * x)^2, if |x| < 1 / sigma^2 |x| - 0.5 / sigma^2, otherwise """ sigma2 = sigma * sigma inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets)) smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32) smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2) smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2) smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign), tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0)))) outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result) return outside_mul def xdet_model_fn(features, labels, mode, params): """Our model_fn for ResNet to be used with our Estimator.""" num_anchors_list = labels['num_anchors_list']
tensorflow.abs
2,744
import tensorflow as tf cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight cost = tf.where(cost > 0, cost, 0, name='value') assert_op = tf.Assert(tf.is_finite(cost), [cost]) with tf.control_dependencies([assert_op]): barrier = tf.no_op(name) return cost
tensorflow.control_dependencies
2,745
import tensorflow as tf for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = tf.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
tensorflow.shape
2,746
import tensorflow as tf return X def _add_op_dynamic(self, cell_inputs, blocks, input_idx, op, w, h, ch, is_train=False): ni = len(cell_inputs + blocks) inputs = tf.stack(cell_inputs + blocks, axis=0) op_map = self._get_op_map() # Build output for each available operation X = inputs[input_idx]
tensorflow.stack
2,747
import tensorflow as tf saver.save(sess, FLAGS.checkpoint_path + 'model.ckpt', global_step=global_step) if step % FLAGS.save_summary_steps == 0: _, tl, summary_str = sess.run([train_op, total_loss, summary_op], feed_dict={input_images: images, input_score_maps: score_maps, input_geo_maps: geo_maps, input_training_masks: training_masks}) summary_writer.add_summary(summary_str, global_step=step) print("Final Train Accuracy", tl) E2Etime = time.time() - start1 print("E2E Training Duration sec", E2Etime) print("avg time per step", avg_time_per_step1) print("FPS {:.2f}".format(sum(performs)/len(performs))) if __name__ == '__main__': tf.app.run()
tensorflow.app.run
2,748
import tensorflow as tf mean, variance = tf.nn.moments(inp, axes=[0, 1, 2], shift=moving_mean) mean_op = moving_mean.assign(decay * moving_mean + (1 - decay) * mean) var_op = moving_variance.assign(decay * moving_variance + (1 - decay) * variance) assert(phase in ['train', 'test']) if phase == 'train': with tf.control_dependencies([mean_op, var_op]): return tf.nn.batch_normalization(inp, mean, variance, offset, scale, 0.01, name='norm') else: return tf.nn.batch_normalization(inp, moving_mean, moving_variance, offset, scale, 0.01, name='norm') def pool(inp, name, kind, size, stride, padding='SAME'): assert kind in ['max', 'avg']
tensorflow.nn.batch_normalization
2,749
import tensorflow as tf grid = tf.concat([d_t_flat, y_t_flat, x_t_flat, ones], 0) return grid def _transform(theta, input_dim, out_size, z_near, z_far): with tf.variable_scope('_transform'): num_batch = input_dim.get_shape().as_list()[0] num_channels = input_dim.get_shape().as_list()[4] theta = tf.reshape(theta, (-1, 4, 4)) theta = tf.cast(theta, 'float32') out_depth = out_size[0] out_height = out_size[1] out_width = out_size[2] grid = _meshgrid(out_depth, out_height, out_width, z_near, z_far) grid = tf.expand_dims(grid, 0) grid = tf.reshape(grid, [-1]) grid = tf.tile(grid, tf.stack([num_batch])) grid = tf.reshape(grid, tf.stack([num_batch, 4, -1])) # Transform A x (x_t', y_t', 1, d_t)^T -> (x_s, y_s, z_s, 1). t_g = tf.matmul(theta, grid) z_s = tf.slice(t_g, [0, 0, 0], [-1, 1, -1]) y_s = tf.slice(t_g, [0, 1, 0], [-1, 1, -1]) x_s = tf.slice(t_g, [0, 2, 0], [-1, 1, -1]) z_s_flat = tf.reshape(z_s, [-1]) y_s_flat = tf.reshape(y_s, [-1]) x_s_flat = tf.reshape(x_s, [-1])
tensorflow.expand_dims
2,750
import tensorflow as tf init_op = tf.initialize_all_variables() # from http://stackoverflow.com/a/35907755/1199693 config = tf.ConfigProto(graph_options=tf.GraphOptions( # optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L2))) # L2 werkt niet (wrs eruit gehaald) optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1))) # start session with tf.Session(config=config) as sess: # Merge all the summaries and write them out to /tmp/mnist_logs (by default) summarize_merged = tf.merge_all_summaries()
tensorflow.OptimizerOptions
2,751
import tensorflow as tf activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None): """ Fully connected layer with non-linear operation. Args: inputs: 2-D tensor BxN num_outputs: int Returns: Variable tensor of size B x num_outputs. """ with tf.variable_scope(scope) as sc: num_input_units = inputs.get_shape()[-1].value weights = _variable_with_weight_decay('weights', shape=[num_input_units, num_outputs], use_xavier=use_xavier, stddev=stddev, wd=weight_decay) outputs = tf.matmul(inputs, weights) biases = _variable_on_cpu('biases', [num_outputs], tf.constant_initializer(0.0)) outputs = tf.nn.bias_add(outputs, biases) if bn:
tensorflow.variable_scope
2,752
import tensorflow as tf samples = samples.stack() # put batch_size as first dimension outputs = tf.transpose(outputs, perm=(1, 0, 2)) weights = tf.transpose(weights, perm=(1, 0, 2)) states = tf.transpose(states, perm=(1, 0, 2)) attns = tf.transpose(attns, perm=(1, 0, 2)) samples = tf.transpose(samples) return outputs, weights, states, attns, samples, get_logits, initial_data
tensorflow.transpose
2,753
import tensorflow as tf graph=tf.get_default_graph()) as f: sess.run(tf.global_variables_initializer())
tensorflow.global_variables_initializer
2,754
import tensorflow as tf ''' total_loss = self.loss # add regularizers in case there are any if len(self.regularizers)>0: total_loss += tf.add_n(self.regularizers, name="regularization") # 1st part of minimize: compute_gradient self.grads_and_vars = self._optimizer.compute_gradients(total_loss) # clip gradients
tensorflow.add_n
2,755
import tensorflow as tf if full_tensorboard_log: tf.summary.histogram('rewards', rew_t_ph) tf.summary.histogram('importance_weights', importance_weights_ph) if tf_util.is_image(obs_phs[0]): tf.summary.image('observation', obs_phs[0]) elif len(obs_phs[0].shape) == 1: tf.summary.histogram('observation', obs_phs[0]) optimize_expr = optimizer.apply_gradients(gradients) summary = tf.summary.merge_all()
tensorflow.summary.histogram
2,756
from tensorflow.contrib.layers.python.layers import feature_column dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3), dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1)) input_fn = test_data.iris_input_logistic_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) self._assertSingleClassMetrics(metrics) def benchmarkMultiClass(self): iris = base.load_iris() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, linear_feature_columns=(bucketized_feature,), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3)) input_fn = test_data.iris_input_multiclass_fn
tensorflow.contrib.layers.python.layers.feature_column.real_valued_column
2,757
import tensorflow as tf save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True) tf.initialize_all_variables().run()
tensorflow.initialize_all_variables
2,758
import tensorflow as tf features, filters=depth, rate=rate, weight_decay=weight_decay, scope=scope) else: aspp_features = slim.conv2d( features, depth, 3, rate=rate, scope=scope) branch_logits.append(aspp_features) # Merge branch logits. concat_logits = tf.concat(branch_logits, 3) concat_logits = slim.conv2d( concat_logits, depth, 1, scope=_CONCAT_PROJECTION_SCOPE) concat_logits = slim.dropout( concat_logits, keep_prob=0.9, is_training=is_training, scope=_CONCAT_PROJECTION_SCOPE + '_dropout') return concat_logits, end_points
tensorflow.concat
2,759
import tensorflow as tf def viz3(name, a, b, c): with tf.name_scope(name): im = tf.concat([a, b, c], axis=3)
tensorflow.name_scope
2,760
import tensorflow as tf test_device = "gpu:0" if tfe.num_gpus() else "cpu:0" with tf.device(test_device):
tensorflow.device
2,761
import tensorflow as tf # Create towers, i.e. copies of the model for each GPU, # with their own loss and gradients. tower_losses = [] tower_gradvars = [] tower_preds = [] tower_metrics = [] for i in range(self.n_gpus): worker = '/gpu:{}'.format(i) device_setter = tf.train.replica_device_setter( worker_device=worker, ps_device='/cpu:0', ps_tasks=1) with tf.name_scope('{}_{}'.format(mode, i)) as scope: with tf.device(device_setter): net_outputs = self._model(shards[i], mode, **self.config) if mode == Mode.TRAIN: loss = self._loss(net_outputs, shards[i], **self.config) loss += tf.reduce_sum( tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tensorflow.train.replica_device_setter
2,762
import tensorflow as tf num_written_lines = 0 tf.logging.info("***** Predict results *****")
tensorflow.logging.info
2,763
import tensorflow.contrib.graph_editor as ge bwd_inputs = [t for op in bwd_ops for t in op.inputs] # list of tensors in forward graph that is in input to bwd graph ts_filtered = list(set(bwd_inputs).intersection(ts_all)) debug_print("Using tensors %s", ts_filtered) # try two slightly different ways of getting bottlenecks tensors # to checkpoint for ts in [ts_filtered, ts_all]: # get all bottlenecks in the graph bottleneck_ts = [] for t in ts: b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops)) f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops)) # check that there are not shortcuts b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all) f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all) if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all): bottleneck_ts.append(t) # we have a bottleneck! else: debug_print("Rejected bottleneck candidate and ops %s", [t] + list(set(ts_all) - set(b_inp) - set(f_inp))) # success? or try again without filtering? if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found! break
tensorflow.contrib.graph_editor.get_forward_walk_ops
2,764
from tensorflow.python.framework import ops """ with ops.op_scope([value], name, "MaxPool") as name: value = ops.convert_to_tensor(value, name="input") return gen_nn_ops._max_pool(value, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) ops.RegisterShape("Relu")(common_shapes.unchanged_shape) ops.RegisterShape("Relu6")(common_shapes.unchanged_shape) ops.RegisterShape("Elu")(common_shapes.unchanged_shape) ops.RegisterShape("Softplus")(common_shapes.unchanged_shape) ops.RegisterShape("Softsign")(common_shapes.unchanged_shape) @ops.RegisterShape("ReluGrad") @ops.RegisterShape("Relu6Grad") @ops.RegisterShape("EluGrad") @ops.RegisterShape("SoftplusGrad") @ops.RegisterShape("SoftsignGrad") def _BinaryElementwiseShape(op): """Returns same shape as both inputs to op.
tensorflow.python.framework.ops.RegisterShape
2,765
from tensorflow.python.layers import convolutional as conv_layers if input_layer is None: input_layer = self.top_layer if num_channels_in is None: num_channels_in = self.top_size name = 'conv' + str(self.counts['conv']) self.counts['conv'] += 1 with tf.variable_scope(name): strides = [1, d_height, d_width, 1] if self.data_format == 'NCHW': strides = [strides[0], strides[3], strides[1], strides[2]] if mode != 'SAME_RESNET': conv = conv_layers.conv2d( input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width], padding=mode, data_format=self.channel_pos, use_bias=False) else: # Special padding mode for ResNet models if d_height == 1 and d_width == 1: conv = conv_layers.conv2d( input_layer,
tensorflow.python.layers.convolutional.conv2d
2,766
import tensorflow as tf W = tf.gather(W, unused_indices, axis=0) W = tf.reshape(W, (1, 1, num_out_blocks * block_ch, block_ch))
tensorflow.reshape
2,767
import tensorflow as tf lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size']) lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size']) lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw) output_fw, _ = lstm_cell_fw(t, dtype=tf.float32,
tensorflow.contrib.rnn.TimeReversedFusedRNN
2,768
import tensorflow as tf def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1] ) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder( input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4 ): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = {
tensorflow.gather
2,769
import tensorflow as tf # this is useful to reduce the number of parameters, and # to use the output embeddings for output projection (tie_embeddings parameter) output_ = dense(output_, decoder.embedding_size, use_bias=False, name='softmax0') if decoder.tie_embeddings and (decoder.pred_embed_proj or decoder.pred_deep_layer): bias = get_variable('softmax1/bias', shape=[decoder.vocab_size]) output_ = tf.matmul(output_, tf.transpose(embedding)) + bias else: output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1') return output_ if decoder.use_dropout: # FIXME: why no pervasive dropout here? initial_state = tf.nn.dropout(initial_state, keep_prob=decoder.initial_state_keep_prob) with tf.variable_scope(scope_name): activation_fn = None if decoder.initial_state == 'linear' else tf.nn.tanh if decoder.initial_state == 'trained': initial_state = get_variable(shape=[cell_state_size], name='initial_state') initial_state = tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1]) elif decoder.initial_state == 'zero': initial_state = tf.zeros(shape=[batch_size, cell_state_size]) elif decoder.layer_norm: initial_state = dense(initial_state, cell_state_size, use_bias=False, name='initial_state_projection') initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=activation_fn, scope='initial_state_layer_norm') else: initial_state = dense(initial_state, cell_state_size, use_bias=True, name='initial_state_projection', activation=activation_fn)
tensorflow.variable_scope
2,770
import tensorflow as tf capacity=1000 + 3 * batch_size, # Ensures a minimum amount of shuffling of examples. min_after_dequeue=1000) return images, sparse_labels def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) def conv_scale(x, W): return tf.nn.conv3d(x, W, strides=[1,1,1,1,1], padding='VALID') def inference(x): """ Creates a model with pooling across space and scales. Always we have a conv-relu-spatial_pool-scale_pool x N layers structure with one fully connected layer on top.
tensorflow.constant
2,771
import tensorflow as tf [1, 1]], dtype=tf.float32) mask4 = tf.constant([[0, 0],
tensorflow.constant
2,772
import tensorflow as tf def testParetoShapeBroadcast(self): scale = tf.constant([[3., 2.]]) concentration = tf.constant([[4.], [5.], [6.]]) pareto = tfd.Pareto(concentration, scale)
tensorflow.constant
2,773
import tensorflow as tf parallel_iterations=1) arc_seq = loop_outputs[-3].stack() arc_seq = tf.reshape(arc_seq, [-1]) entropy = tf.reduce_sum(loop_outputs[-2]) log_prob = tf.reduce_sum(loop_outputs[-1]) last_c = loop_outputs[-7] last_h = loop_outputs[-6] return arc_seq, entropy, log_prob, last_c, last_h def build_trainer(self, child_model): child_model.build_valid_rl() self.valid_acc = (tf.to_float(child_model.valid_shuffle_acc) / tf.to_float(child_model.batch_size)) self.reward = self.valid_acc if self.entropy_weight is not None: self.reward += self.entropy_weight * self.sample_entropy self.sample_log_prob = tf.reduce_sum(self.sample_log_prob) self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False) baseline_update = tf.assign_sub( self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward)) with tf.control_dependencies([baseline_update]): self.reward = tf.identity(self.reward)
tensorflow.to_float
2,774
import tensorflow as tf bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1])
tensorflow.zeros_initializer
2,775
import tensorflow as tf """ expected_num_anchors = 0 actual_num_anchors = 0 for num_anchors_per_location, feature_map_shape, anchors in zip( self.num_anchors_per_location(), feature_map_shape_list, anchors_list): expected_num_anchors += (num_anchors_per_location * feature_map_shape[0] * feature_map_shape[1]) actual_num_anchors += anchors.num_boxes() return tf.assert_equal(expected_num_anchors, actual_num_anchors)
tensorflow.assert_equal
2,776
import tensorflow as tf 'num_cpu_threads', 0, 'The number of cpu cores used to train.') tf.app.flags.DEFINE_float( 'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
tensorflow.app.flags.DEFINE_float
2,777
import tensorflow as tf # BN when training update = 1.0 - decay update_mu = mu.assign_sub(update * (mu - batch_mean)) update_sigma = sigma.assign_sub(update * (sigma - batch_var)) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_sigma) mean, var = tf.cond(self.train_flag, lambda: (batch_mean, batch_var), lambda: (mu, sigma)) bn = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5) tf.add_to_collection('debug_layers', bn) return bn
tensorflow.nn.batch_normalization
2,778
import tensorflow as tf w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer()) b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0)) h1 = tf.nn.relu(tf.matmul(x, w1) + b1) with tf.variable_scope('layer2'): w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer()) b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0)) h2 = tf.nn.relu(tf.matmul(h1, w2) + b2) with tf.variable_scope('layer3'): w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer()) b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0)) y = tf.matmul(h2, w3) + b3 # losses cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y)) # optimizer optimizer = tf.train.GradientDescentOptimizer(args.lr) # define one-step train ops train_op = optimizer.minimize(cross_entropy) return x, y, gt, train_op if __name__ == "__main__": max_train_step = args.max_train_step batch_size = args.batch_size mnist = input_data.read_data_sets(args.data_dir, one_hot=True) x, y, gt, train_op = model() # create saver saver = tf.train.Saver() if os.path.exists('./mnist'):
tensorflow.train.GradientDescentOptimizer
2,779
import tensorflow as tf batch_mean, batch_var = tf.nn.moments(x, range(len(shape) - 1)) update_mean = tf.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean)) update_var = tf.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var)) with tf.control_dependencies([update_mean, update_var]): return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon) def func2(): # execute at test time return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon) return tf.cond(train, func1, func2) def average_gradients(tower_grads): average_grads = [] for grad_and_vars in zip(*tower_grads): grads = []
tensorflow.nn.batch_normalization
2,780
import tensorflow as tf flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1])
tensorflow.range
2,781
import tensorflow as tf perturbed_deterministic_actions = tf.argmax(perturbable_policy.q_values, axis=1) deterministic_actions = tf.argmax(policy.q_values, axis=1)
tensorflow.argmax
2,782
import tensorflow as tf if self.weight_decay > 0: wd_loss = regularize_cost(self.weight_decay_pattern, tf.contrib.layers.l2_regularizer(self.weight_decay), name='l2_regularize_loss') add_moving_summary(loss, wd_loss) total_cost = tf.add_n([loss, wd_loss], name='cost') else: total_cost = tf.identity(loss, name='cost') add_moving_summary(total_cost) if self.loss_scale != 1.: logger.info("Scaling the total loss by {} ...".format(self.loss_scale)) return total_cost * self.loss_scale else: return total_cost
tensorflow.identity
2,783
import tensorflow as tf initializer=lambdas_initializer, collections=variables_collections, trainable=trainable, dual_rate_factor=dual_rate_factor) # Create biases with shape [1, num_labels, num_anchors]. biases = tf.contrib.framework.model_variable( name='biases', shape=[1, num_labels, num_anchors], dtype=logits.dtype, initializer=tf.zeros_initializer(), collections=variables_collections, trainable=trainable) # Maybe create label_priors. label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections) label_priors = tf.reshape(label_priors, [1, num_labels, 1]) # Expand logits, labels, and weights to shape [batch_size, num_labels, 1]. logits = tf.expand_dims(logits, 2) labels = tf.expand_dims(labels, 2) weights = tf.expand_dims(weights, 2) # Calculate weighted loss and other outputs. The log(2.0) term corrects for # logloss not being an upper bound on the indicator function. loss = weights * losses_utils.weighted_surrogate_loss( labels, logits + biases, surrogate_type=surrogate_type, positive_weights=1.0 + lambdas * (1.0 - precision_values),
tensorflow.reshape
2,784
import tensorflow as tf temp = tempfile.NamedTemporaryFile(prefix='ReadFileTest') open(temp.name, 'wb').write(contents) with self.test_session(): read = tf.read_file(temp.name) self.assertEqual([], read.get_shape()) self.assertEqual(read.eval(), contents) def _subset(self, files, indices): return set(tf.compat.as_bytes(files[i].name) for i in range(len(files)) if i in indices) def testMatchingFiles(self): cases = ['ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH', 'AB4DEF.GH', 'ABDEF.GH', 'XYZ'] files = [tempfile.NamedTemporaryFile(prefix=c) for c in cases]
tensorflow.compat.as_bytes
2,785
import tensorflow as tf tf.where( tf.less(tf.random_uniform([2 * expected_num_elements]), 0.5))), 1)
tensorflow.random_uniform
2,786
import tensorflow as tf # Generator loss generator_g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_g_fake), logits=d_g_fake)) generator_c_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_fake), logits=d_c_fake)) generator_loss = generator_c_loss + generator_g_loss # Supervised Encoder Loss supervised_encoder_loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=y_input, logits=encoder_output_label_)) all_variables = tf.trainable_variables() dc_g_var = [var for var in all_variables if 'dc_g_' in var.name] dc_c_var = [var for var in all_variables if 'dc_c_' in var.name] en_var = [var for var in all_variables if 'e_' in var.name] # Optimizers autoencoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1).minimize(autoencoder_loss) discriminator_g_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1).minimize(dc_g_loss, var_list=dc_g_var) discriminator_c_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1).minimize(dc_c_loss, var_list=dc_c_var)
tensorflow.trainable_variables
2,787
import tensorflow as tf )) return loss_box def _add_losses(self, sigma_rpn=3.0): with tf.variable_scope('loss_' + self._tag): # RPN, class loss rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2]) rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1]) # get the indices of the foreground and background anchors rpn_select = tf.where(tf.not_equal(rpn_label, -1)) rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2]) rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
tensorflow.reshape
2,788
import tensorflow as tf if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1] # Trainable parameters w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1)
tensorflow.random_normal
2,789
import tensorflow as tf 1) w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) *
tensorflow.expand_dims
2,790
import tensorflow as tf return output @staticmethod def lrelu(inputdata, name, alpha=0.2): """ :param inputdata: :param alpha: :param name: :return: """ with tf.variable_scope(name): return tf.nn.relu(inputdata) - alpha * tf.nn.relu(-inputdata)
tensorflow.variable_scope
2,791
import tensorflow as tf from neorl.rl.baselines.shared.runners import AbstractEnvRunner from neorl.rl.baselines.shared.policies import ActorCriticPolicy, RecurrentActorCriticPolicy # Filter tensorflow version warnings import os # https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints/40426709 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'} import warnings # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning warnings.simplefilter(action='ignore', category=FutureWarning) warnings.simplefilter(action='ignore', category=Warning) import tensorflow as tf tf.get_logger().setLevel('INFO') tf.autograph.set_verbosity(0) import logging tf.get_logger().setLevel(logging.ERROR) # For ACER def get_by_index(input_tensor, idx): """ Return the input tensor, offset by a certain value :param input_tensor: (TensorFlow Tensor) The input tensor :param idx: (int) The index offset :return: (TensorFlow Tensor) the offset tensor """
tensorflow.autograph.set_verbosity
2,792
import tensorflow as tf ] alpha_fixed_progress = [ sess.run( networks._discriminator_alpha(block_id, tf.constant(1.2, tf.float32))) for block_id in range(1, 5) ]
tensorflow.constant
2,793
import tensorflow as tf model :dict, containing the model's parameters, loss function, independent variables, and dependent variables """ np.random.seed(1024) # define the independent and dependent variables x = tf.placeholder(tf.float64, shape=[None, dimension], name='x') ## writing the predicted values in matrix form greatly speeds things up y = tf.placeholder(tf.float64, shape=[None, 1], name="y") # define the parameter estimates and predictions betaPred = tf.Variable(np.random.random([dimension, 1])) yPred = tf.matmul(x, betaPred, name="y_pred") # define the loss function loss = tf.reduce_mean(tf.square(yPred - y)) model = {"loss_function": loss, "independent_variable": x, "dependent_variable": y, "prediction": yPred, "model_params": betaPred} return model def createSummaryWriter(logPath): """ Check whether the given path already exists; if it does, delete the old logs, then create the summary writer object. Args
tensorflow.square
2,794
import tensorflow as tf n_tokens = vocab.size embed_dim = int(embedding_op.shape[2]) embeddings = np.zeros((n_tokens, embed_dim), dtype=DTYPE) config = tf.ConfigProto(allow_soft_placement=True) with tf.Session(config=config) as sess: sess.run(tf.global_variables_initializer()) for k in range(n_tokens): token = vocab.id_to_word(k) char_ids = batcher.batch_sentences([[token]])[0, 1, :].reshape( 1, 1, -1) embeddings[k, :] = sess.run(
tensorflow.Session
2,795
import tensorflow.contrib.slim as slim def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects): return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \ gtboxes_and_label_r[:int(num_objects), :].astype(np.float32) def main(self): with tf.Graph().as_default() as graph, tf.device('/cpu:0'): num_gpu = len(cfgs.GPU_GROUP.strip().split(',')) global_step = slim.get_or_create_global_step() lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu) tf.summary.scalar('lr', lr) optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM) r3det_dcl = build_whole_network.DetectionNetworkR3DetDCL(cfgs=self.cfgs, is_training=True)
tensorflow.contrib.slim.get_or_create_global_step
2,796
import tensorflow as tf return tf.where( tf.less( tf.random_uniform(common_layers.shape_list(sampled_targets)), hparams.scheduled_sampling_gold_mixin_prob), gold_targets, sampled_targets) def sampled_results(): """Generate scheduled sampling results.""" sampled_targets = dp(sample, sharded_logits) new_targets = dp(mix_gold_sampled, sharded_features["targets"], sampled_targets) new_features = transformed_features with tf.variable_scope(tf.get_variable_scope(), reuse=True): with tf.variable_scope(target_modality.name): new_features["targets"] = target_modality.targets_bottom_sharded( new_targets, dp) with tf.variable_scope("body"): body_outputs, losses = model.model_fn_sharded(new_features) if not isinstance(losses, dict): # If it's a single extra loss. losses = {"extra": losses} with tf.variable_scope(target_modality.name): new_sharded_logits = target_modality.top_sharded( body_outputs, sharded_features["targets"], dp) if "training" not in losses:
tensorflow.get_variable_scope
2,797
from tensorflow.contrib.tpu.python.tpu import tpu_estimator if use_tpu: # TPU host call. Important: need to be called before remove_summaries() if hparams.tpu_enable_host_call: host_call = t2t_model.create_host_call(hparams.model_dir) else: host_call = None t2t_model.remove_summaries() return tpu_estimator.TPUEstimatorSpec( mode=tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op, host_call=host_call, training_hooks=[restore_hook, saver_hook]) else: return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op,
tensorflow.contrib.tpu.python.tpu.tpu_estimator.TPUEstimatorSpec
2,798
import tensorflow as tf global_step = slim.get_or_create_global_step() lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu) tf.summary.scalar('lr', lr)
tensorflow.summary.scalar
2,799