Columns:
  seed      string, lengths 59 to 2.16k
  seed_api  string, lengths 14 to 101
  index     int64, range 0 to 523
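Each row below pairs a code snippet (seed) with the fully qualified TensorFlow API it exercises (seed_api) plus an integer row index. As a minimal sketch of consuming rows shaped this way, the following assumes the records are exported as JSON Lines with exactly those three fields; the file name records.jsonl and the helper iter_rows are hypothetical and not part of the dataset itself.

# Minimal sketch, assuming a JSON Lines export with the fields
# "seed", "seed_api", and "index". The path "records.jsonl" is hypothetical.
import json

def iter_rows(path="records.jsonl"):
    """Yield (seed, seed_api, index) tuples from a JSONL export."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            yield row["seed"], row["seed_api"], row["index"]

if __name__ == "__main__":
    for seed, seed_api, index in iter_rows():
        # Quick sanity check: the snippet should mention the API's short name.
        short_name = seed_api.rsplit(".", 1)[-1]
        print(index, seed_api, short_name in seed)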
import tensorflow.contrib.layers as layers out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu) out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64): with tf.variable_scope(scope, reuse=reuse): out = img_in gauss_initializer = initializers.xavier_initializer(uniform=False) # stddev = 1/n with tf.variable_scope("convnet"): out = layers.convolution2d( out, num_outputs=num_filters, kernel_size=8, stride=4, activation_fn=tf.nn.relu, weights_initializer=gauss_initializer, trainable=False) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False): with tf.variable_scope(scope, reuse=reuse): out = img_in out = layers.flatten(out) # stddev = 1/n, where n = number of inputs gauss_initializer = initializers.xavier_initializer(uniform=False) with tf.variable_scope("action_value"): out = layers.fully_connected(
tensorflow.contrib.layers.flatten
300
from tensorflow.contrib.learn.python.learn.graph_actions import infer
predictions = {'predictions': predictions}
# TODO(ipolosukhin): Support batching
return infer(checkpoint_path, predictions, feed_dict=feed_dict)
tensorflow.contrib.learn.python.learn.graph_actions.infer
301
import tensorflow as tf
self.w1=tf.get_variable('w1', [4096,1024],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.w2=tf.get_variable('w2', [1024,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d())
tensorflow.contrib.layers.xavier_initializer_conv2d
302
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder } items_to_handlers = { 'image': tfexample_decoder.Image(), 'label': tfexample_decoder.Tensor('image/class/label'), } decoder = tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers) return dataset.Dataset( data_sources=data_sources, reader=io_ops.TFRecordReader, decoder=decoder,
tensorflow.contrib.slim.python.slim.data.tfexample_decoder.TFExampleDecoder
303
import tensorflow as tf Returns: Integer representation of this number. """ x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits]))) x_labels = [] for i in range(num_bits): x_labels.append(x_l[:, i] * tf.to_int32(base)**tf.to_int32(i)) res = sum(x_labels) return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1])) def int_to_bit(self, x_int, num_bits, base=2): """Turn x_int representing numbers into a bitwise (lower-endian) tensor.
tensorflow.to_int32
304
import tensorflow as tf self.Y = tf.placeholder(tf.float32, (None, None, n_mels * resampled)) self.decoder_inputs = tf.concat((tf.zeros_like(self.Y[:, :1, :]), self.Y[:, :-1, :]), 1) self.decoder_inputs = self.decoder_inputs[:, :, -n_mels:] self.Z = tf.placeholder(tf.float32, (None, None, fourier_window_size // 2 + 1)) batch_size = tf.shape(self.X)[0] seq_lens = tf.count_nonzero(tf.reduce_sum(self.decoder_inputs, -1), 1, dtype=tf.int32) + 1 def cells(reuse=False): return tf.contrib.rnn.DropoutWrapper( tf.nn.rnn_cell.LSTMCell( size_layers, initializer=tf.orthogonal_initializer(), reuse=reuse ), state_keep_prob=dropout, output_keep_prob=dropout, ) def attention(encoder_out, seq_len, reuse=False): attention_mechanism = tf.contrib.seq2seq.LuongAttention( num_units=size_layers, memory=encoder_out, memory_sequence_length=seq_len ) return tf.contrib.seq2seq.AttentionWrapper(
tensorflow.orthogonal_initializer
305
import tensorflow.contrib.graph_editor as ge debug_print("fwd_ops: %s", fwd_ops) # exclude ops with no inputs fwd_ops = [op for op in fwd_ops if op.inputs] # don't recompute xs, remove variables xs_ops = _to_ops(xs) fwd_ops = [op for op in fwd_ops if not op in xs_ops] fwd_ops = [op for op in fwd_ops if not '/assign' in op.name] fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name] fwd_ops = [op for op in fwd_ops if not '/read' in op.name] ts_all = ge.filter_ts(fwd_ops, True) # get the tensors ts_all = [t for t in ts_all if '/read' not in t.name] ts_all = set(ts_all) - set(xs) - set(ys) # construct list of tensors to checkpoint during forward pass, if not # given as input if type(checkpoints) is not list: if checkpoints == 'collection': checkpoints = tf.get_collection('checkpoints') elif checkpoints == 'speed':
tensorflow.contrib.graph_editor.filter_ts
306
import tensorflow as tf # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
tensorflow.zeros_initializer
307
import tensorflow as tf
hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS)
tensorflow.string_to_hash_bucket_fast
308
from tensorflow.python.ops import math_ops """ with variable_scope.variable_scope(name, 'mean_iou', [predictions, labels]): # Check if shape is compatible. predictions.get_shape().assert_is_compatible_with(labels.get_shape()) # Local variable to accumulate the predictions in the confusion matrix. cm_dtype = dtypes.int64 if weights is not None else dtypes.float64 total_cm = _create_local('total_confusion_matrix', shape=[num_classes, num_classes], dtype=cm_dtype) # Cast the type to int64 required by confusion_matrix_ops. predictions = math_ops.to_int64(predictions) labels = math_ops.to_int64(labels) num_classes = math_ops.to_int64(num_classes) # Flatten the input if its rank > 1. predictions_rank = predictions.get_shape().ndims if predictions_rank > 1: predictions = array_ops.reshape(predictions, [-1]) labels_rank = labels.get_shape().ndims if labels_rank > 1:
tensorflow.python.ops.math_ops.to_int64
309
import tensorflow as tf
ones = tf.ones_like(attention_mask[:1])
mask = (tf.matrix_band_part(ones, -1, 0))
tensorflow.matrix_band_part
310
from tensorflow.contrib.metrics.python.ops import metric_ops def _accuracy_metric(predictions, labels, weights=None): threshold_predictions = math_ops.to_float( math_ops.greater_equal(predictions, threshold)) return metric_ops.streaming_accuracy( predictions=threshold_predictions, labels=labels, weights=weights) return _accuracy_metric
tensorflow.contrib.metrics.python.ops.metric_ops.streaming_accuracy
311
from tensorflow.python.ops import math_ops def _log_prob(self, x): x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if self.validate_args else [], x) return (self.alpha * math_ops.log(self.beta) - math_ops.lgamma(self.alpha) - (self.alpha + 1.) * math_ops.log(x) - self.beta / x) def _prob(self, x): return math_ops.exp(self._log_prob(x))
tensorflow.python.ops.math_ops.log
312
import tensorflow as tf token_type_table = tf.get_variable( name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) # This vocab will be small so we always do one-hot here, since it is always # faster for a small vocabulary. flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable( name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) # Since the position embedding table is a learned variable, we create it # using a (long) sequence length `max_position_embeddings`. The actual # sequence length might be shorter than this, for faster training of # tasks that do not have long sequences. # # So `full_position_embeddings` is effectively an embedding table # for position [0, 1, 2, ..., max_position_embeddings-1], and the current # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
tensorflow.assert_less_equal
313
from tensorflow.python.ops import math_ops ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ with variable_scope.variable_scope( name, 'true_positives', [predictions, labels]): predictions.get_shape().assert_is_compatible_with(labels.get_shape()) is_true_positive = math_ops.logical_and(math_ops.equal(labels, 1), math_ops.equal(predictions, 1)) return _count_condition(is_true_positive, weights, metrics_collections, updates_collections) def _streaming_false_positives(predictions, labels, weights=None, metrics_collections=None, updates_collections=None, name=None): """Sum the weights of false positives.
tensorflow.python.ops.math_ops.equal
314
from tensorflow.contrib.distributions.python.ops import distribution_util sample_shape: `Tensor` (1D, `int32`). name: `String`. The name to give this op. Returns: x: `Tensor`. Input transposed/reshaped to `S+B+E`. """ with self._name_scope(name, values=[x, sample_shape]): x = ops.convert_to_tensor(x, name="x") sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape") x = distribution_util.rotate_transpose(x, shift=1) if self._is_all_constant_helper(self.batch_ndims, self.event_ndims): if self._batch_ndims_is_0 or self._event_ndims_is_0: b = ((min(-2, -1 - self._event_ndims_static),) if self._batch_ndims_is_0 else ()) e = (-1,) if self._event_ndims_is_0 else () x = array_ops.squeeze(x, squeeze_dims=b + e) _, batch_shape, event_shape = self.get_shape(x) else:
tensorflow.contrib.distributions.python.ops.distribution_util.rotate_transpose
315
from tensorflow.python.ops import check_ops self.alpha.get_shape(), self.beta.get_shape()) def _event_shape(self): return constant_op.constant([], dtype=dtypes.int32) def _get_event_shape(self): return tensor_shape.scalar() def _sample_n(self, n, seed=None): """See the documentation for tf.random_gamma for more details.""" return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta, dtype=self.dtype, seed=seed) def _log_prob(self, x): x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if self.validate_args else [], x) return (self.alpha * math_ops.log(self.beta) - math_ops.lgamma(self.alpha) - (self.alpha + 1.) * math_ops.log(x) - self.beta / x) def _prob(self, x): return math_ops.exp(self._log_prob(x)) def _log_cdf(self, x): return math_ops.log(self._cdf(x)) def _cdf(self, x): x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
tensorflow.python.ops.check_ops.assert_positive
316
from tensorflow.contrib.losses.python.losses import loss_ops def __init__(self, label_name, weight_column_name): def loss_fn(logits, target): check_shape_op = control_flow_ops.Assert( math_ops.less_equal(array_ops.rank(target), 2), ["target's shape should be either [batch_size, 1] or [batch_size]"]) with ops.control_dependencies([check_shape_op]): target = array_ops.reshape( target, shape=[array_ops.shape(target)[0], 1]) return loss_ops.hinge_loss(logits, target) super(_BinarySvmTargetColumn, self).__init__( loss_fn=loss_fn, n_classes=2, label_name=label_name, weight_column_name=weight_column_name) def logits_to_predictions(self, logits, proba=False):
tensorflow.contrib.losses.python.losses.loss_ops.hinge_loss
317
from tensorflow.python.framework import ops array_size = array_ops.shape_internal(array, optimize=False)[0] maybe_reallocate_op = control_flow_ops.cond( new_size > array_size, reallocate, control_flow_ops.no_op) with ops.control_dependencies([maybe_reallocate_op]): append_values_op = array[size:new_size].assign(batch_values) with ops.control_dependencies([append_values_op]): update_op = size.assign(new_size) if metrics_collections: ops.add_to_collections(metrics_collections, value) if updates_collections: ops.add_to_collections(updates_collections, update_op) return value, update_op # pylint: enable=invalid-slice-index
tensorflow.python.framework.ops.add_to_collections
318
import tensorflow as tf def _unique_chars(filename): """Returns the used alphabet as an array of strings.""" counts = collections.Counter() with tf.gfile.Open(filename) as file_: for line in file_: counts.update(_split_string(line)) alphabet = [k for (k, _) in counts.most_common(max_size)] alphabet.sort() return np.asarray(alphabet, dtype=np.object) chars, = tf.py_func(_unique_chars, [filename], [tf.string]) char_to_id = tf.contrib.lookup.index_table_from_tensor( chars, num_oov_buckets=num_oov_buckets) id_to_char = tf.contrib.lookup.index_to_string_table_from_tensor(chars, " ") return char_to_id, id_to_char def characters(filename, batch_size, sequence_size): """Returns a dataset of characters from the given file.""" def _to_chars(line): """string scalar -> Dataset of characters (string scalars).""" chars, = tf.py_func(_split_string, [line + "\n"], [tf.string]) chars.set_shape([None]) return tf.data.Dataset.from_tensor_slices(chars) return (tf.data.TextLineDataset([filename])
tensorflow.contrib.lookup.index_to_string_table_from_tensor
319
import tensorflow as tf shape = [batch_size, passage_length, extended_vsize] shape = tf.cast(shape, tf.int64) attn_dist = tf.reshape(attn_dist, shape=[-1]) # [batch_size*passage_length] one_hot_spare_rep = tf.SparseTensor(indices=indices, values=attn_dist, dense_shape=shape) # [batch_size, passage_length, extended_vsize] if passage_mask is not None: passage_mask = tf.expand_dims(passage_mask, axis=-1) one_hot_spare_rep = one_hot_spare_rep * passage_mask one_hot_spare_rep = tf.sparse_reduce_sum(one_hot_spare_rep, axis=1) # [batch_size, extended_vsize] vocab_dist = tf.add(vocab_dist, one_hot_spare_rep) if self.options.add_first_word_prob_for_phrase: vocab_dist = tf.nn.softmax(vocab_dist) # normalize return vocab_dist # [batch_size, extended_vsize] def linear(args, output_size, bias=True, bias_start=0.0, scope=None): if args is None or (isinstance(args, (list, tuple)) and not args): raise ValueError("`args` must be specified")
tensorflow.sparse_reduce_sum
320
from tensorflow.python.ops import clip_ops
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm."""
  gradients, variables = zip(*grads_and_vars)
  clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
  return list(zip(clipped_gradients, variables))

def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
tensorflow.python.ops.clip_ops.clip_by_global_norm
321
import tensorflow as tf if len(variables) == 0: return [] if semver.match(tf.__version__, '<1.0.0'): init_flag = sess.run( tf.pack([tf.is_variable_initialized(v) for v in variables])) else: init_flag = sess.run( tf.stack([tf.is_variable_initialized(v) for v in variables])) return [v for v, f in zip(variables, init_flag) if not f]
tensorflow.is_variable_initialized
322
import tensorflow as tf
if gpu_idx == 0:
  update = tf.assign(num_error_rate, num_error_rate + 1.)
tensorflow.assign
323
from tensorflow.python.ops import parsing_ops image = array_ops.expand_dims(image, 0) image = image_ops.resize_bilinear(image, [height, width]) return array_ops.squeeze(image, [0]) def _create_tfrecord_dataset(tmpdir): if not gfile.Exists(tmpdir): gfile.MakeDirs(tmpdir) data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1) keys_to_features = { 'image/encoded': parsing_ops.FixedLenFeature( shape=(), dtype=dtypes.string, default_value=''), 'image/format': parsing_ops.FixedLenFeature( shape=(), dtype=dtypes.string, default_value='jpeg'), 'image/class/label': parsing_ops.FixedLenFeature( shape=[1], dtype=dtypes.int64, default_value=array_ops.zeros( [1], dtype=dtypes.int64)) }
tensorflow.python.ops.parsing_ops.FixedLenFeature
324
from tensorflow.python.training import moving_averages update_mean_op = moving_averages.assign_moving_average( variable=self._moving_mean, value=mean, decay=self._decay_rate, name="update_moving_mean").op update_second_moment_op = moving_averages.assign_moving_average( variable=self._moving_second_moment, value=second_moment, decay=self._decay_rate, name="update_moving_second_moment").op return update_mean_op, update_second_moment_op
tensorflow.python.training.moving_averages.assign_moving_average
325
import tensorflow as tf assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward)) # Add episode indices. with tf.control_dependencies(control_inputs=assignments): num_episodes = tf.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int')) assignment = tf.assign( ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes], value=tf.boolean_mask(tensor=indices, mask=terminal) ) # Increment episode count. with tf.control_dependencies(control_inputs=(assignment,)): assignment = tf.assign_add(ref=self.episode_count, value=num_episodes)
tensorflow.boolean_mask
326
import tensorflow as tf
self.generator_output = self.graph.get_tensor_by_name('G_synthesis_1/_Run/concat/concat:0')
self.generated_image = tflib.convert_images_to_uint8(self.generator_output, nchw_to_nhwc=True, uint8_cast=False)
self.generated_image_uint8 = tf.saturate_cast(self.generated_image, tf.uint8)
tensorflow.saturate_cast
327
from tensorflow.python.framework import tensor_shape @ops.RegisterShape("Pow") @ops.RegisterShape("Sub") def _BroadcastShape(op): """Common shape function for binary operators that broadcast their inputs.""" shape_x = op.inputs[0].get_shape() shape_y = op.inputs[1].get_shape() if shape_x.ndims is None or shape_y.ndims is None: return [tensor_shape.unknown_shape()] # To compute the broadcasted dimensions, we zip together shape_x and shape_y, # and pad with 1 to make them the same length. broadcasted_dims = reversed(list(six.moves.zip_longest( reversed(shape_x.dims), reversed(shape_y.dims), fillvalue=tensor_shape.Dimension(1)))) # Next we combine the dimensions according to the numpy broadcasting rules. # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html return_dims = [] for (dim_x, dim_y) in broadcasted_dims: if dim_x.value is None or dim_y.value is None: # One or both dimensions is unknown. If either dimension is greater than # 1, we assume that the program is correct, and the other dimension will # be broadcast to match it. # TODO(mrry): If we eliminate the shape checks in C++, we must still # assert that the unknown dim is either 1 or the same as the known dim. if dim_x.value is not None and dim_x.value > 1: return_dims.append(dim_x) elif dim_y.value is not None and dim_y.value > 1:
tensorflow.python.framework.tensor_shape.Dimension
328
from tensorflow.python.ops import sparse_ops with ops.name_scope( name, 'expand_and_tile', (tensor, multiple, dim)) as scope: # Sparse. if isinstance(tensor, ops.SparseTensorValue): tensor = ops.SparseTensor.from_value(tensor) if isinstance(tensor, ops.SparseTensor): if dim < 0: expand_dims = array_ops.reshape( array_ops.size(tensor.shape) + dim, [1]) else: expand_dims = [dim] expanded_shape = array_ops.concat( 0, (array_ops.slice(tensor.shape, [0], expand_dims), [1], array_ops.slice(tensor.shape, expand_dims, [-1])), name='expanded_shape') expanded = sparse_ops.sparse_reshape( tensor, shape=expanded_shape, name='expand') if multiple == 1: return expanded return sparse_ops.sparse_concat( dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope) # Dense. expanded = array_ops.expand_dims( tensor, dim if (dim >= 0) else (dim - 1), name='expand') if multiple == 1: return expanded ones = array_ops.ones_like(array_ops.shape(tensor)) tile_multiples = array_ops.concat( 0, (ones[:dim], (multiple,), ones[dim:]), name='multiples')
tensorflow.python.ops.sparse_ops.sparse_reshape
329
from tensorflow.python.ops import array_ops return sparse_ops.sparse_concat( dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope) # Dense. expanded = array_ops.expand_dims( tensor, dim if (dim >= 0) else (dim - 1), name='expand') if multiple == 1: return expanded ones = array_ops.ones_like(array_ops.shape(tensor)) tile_multiples = array_ops.concat( 0, (ones[:dim], (multiple,), ones[dim:]), name='multiples') return array_ops.tile(expanded, tile_multiples, name=scope) def sparse_average_precision_at_k(predictions, labels, k): """Computes average precision@k of predictions with respect to sparse labels. From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula for each row is: AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items
tensorflow.python.ops.array_ops.tile
330
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder } items_to_handlers = { 'image': tfexample_decoder.Image(), 'label': tfexample_decoder.Tensor('image/class/label'), } decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
tensorflow.contrib.slim.python.slim.data.tfexample_decoder.Tensor
331
from tensorflow.python.ops import check_ops `None`. Returns: Masked weights if `mask` and `weights` are not `None`, weights equivalent to `mask` if `weights` is `None`, and otherwise `weights`. Raises: ValueError: If `weights` and `mask` are not `None` and have mismatched shapes. """ if mask is not None: check_ops.assert_type(mask, dtypes.bool) if weights is None: weights = array_ops.ones_like(mask, dtype=dtypes.float32) weights = math_ops.cast(math_ops.logical_not(mask), weights.dtype) * weights return weights def _safe_div(numerator, denominator, name): """Divides two values, returning 0 if the denominator is <= 0.
tensorflow.python.ops.check_ops.assert_type
332
import tensorflow as tf
# LinearOperatorLowerTriangular has an assert_non_singular method that
# is called by the Bijector.
# However, cholesky() ignores the upper triangular part, so we do need
# to separately assert symmetric.
scale_tril = tf.cholesky(covariance_matrix)
super(MultivariateNormalFullCovariance, self).__init__(
    loc=loc,
    scale_tril=scale_tril,
tensorflow.cholesky
333
import tensorflow as tf real_logits = discriminator_fn(real_data) if isinstance(real_logits, (list, tuple)): real_logits = real_logits[0] real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=real_logits, labels=tf.ones_like(real_logits))) fake_logits = discriminator_fn(fake_data) if isinstance(fake_logits, (list, tuple)):
tensorflow.ones_like
334
import tensorflow as tf self.logger.info("applying optimize %s" % self.optim_type) trainable_vars = tf.trainable_variables() if self.config.clip_weight: # clip_weight tvars = tf.trainable_variables() grads = tf.gradients(self.loss, tvars) grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.config.max_norm_grad) grad_var_pairs = zip(grads, tvars) self.train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad') else: self.train_op = self.optimizer.minimize(self.loss) def _attention(self, output, name='attn', reuse=None):
tensorflow.clip_by_global_norm
335
import tensorflow as tf if FLAGS.do_serve: def serving_input_fn(): with tf.variable_scope("foo"): feature_spec = { "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), } serialized_tf_example = tf.placeholder(dtype=tf.string, shape=[None], name='input_example_tensor') receiver_tensors = {'examples': serialized_tf_example} features = tf.parse_example(serialized_tf_example, feature_spec) return tf.estimator.export.ServingInputReceiver(features, receiver_tensors) estimator._export_to_tpu = False # this is important path = estimator.export_savedmodel('export_t', serving_input_fn) print(path) if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run()
tensorflow.parse_example
336
from tensorflow.contrib.layers.python.layers import feature_column def _assertCommonMetrics(self, metrics): estimator_test_utils.assert_in_range(_ITERS, _ITERS + 5, 'global_step', metrics) estimator_test_utils.assert_in_range(0.9, 1.0, 'accuracy', metrics) estimator_test_utils.assert_in_range(0.0, 0.2, 'loss', metrics) self.report_benchmark( iters=metrics['global_step'], extras={k: v for k, v in metrics.items() if k in _METRIC_KEYS}) def benchmarkMatrixData(self): iris = test_data.prepare_iris_data_for_logistic_regression() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=tempfile.mkdtemp(), linear_feature_columns=(bucketized_feature,), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3)) input_fn = test_data.iris_input_logistic_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
tensorflow.contrib.layers.python.layers.feature_column.real_valued_column
337
from tensorflow.contrib import metrics as contrib_metrics
# Compute Pearson correlation
pearson = contrib_metrics.streaming_pearson_correlation(
tensorflow.contrib.metrics.streaming_pearson_correlation
338
import tensorflow as tf lambda: tf.scatter_nd( tf.stack([range_head, head_org_idx], -1), attn_result, [bs, sl+1, hn]) ) range_unhead = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_unhead]) scatter_pooling = tf.cond( tf.equal(sl_unhead, 0), lambda: tf.zeros([bs, sl+1, hn], tf.float32), lambda: tf.scatter_nd( tf.stack([range_unhead, unhead_org_idx], -1), pooling_result, [bs, sl+1, hn]) )
tensorflow.equal
339
from tensorflow.python.framework import ops rnorm_var = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0) runif_var = tf.random_uniform([row_dim, col_dim], minval=0, maxval=4) print(sess.run(rnorm_var)) print(sess.run(runif_var)) ops.reset_default_graph() sess = tf.Session() my_var = tf.Variable(tf.zeros([1,20])) merged = tf.summary.merge_all() writer = tf.summary.FileWriter("./logs", graph=sess.graph) initialize_op = tf.global_variables_initializer()
tensorflow.python.framework.ops.reset_default_graph
340
from tensorflow.python.ops import array_ops seq_length = config["seq_length"] with ops.Graph().as_default(), ops.device("/device:GPU:0"): inputs = seq_length * [ array_ops.zeros([batch_size, num_units], dtypes.float32) ] initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
tensorflow.python.ops.array_ops.zeros
341
from tensorflow.contrib.framework import deprecated_arg_values
      hooks=hooks)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE,
tensorflow.contrib.framework.deprecated_arg_values
342
from tensorflow.contrib.learn.python.learn.datasets import base # Reduce the size of original data by a factor of 1000. base.shrink_csv(train_path, 1000) base.shrink_csv(test_path, 1000) train_path = train_path.replace('train.csv', 'train_small.csv') test_path = test_path.replace('test.csv', 'test_small.csv') else: module_path = os.path.dirname(__file__) train_path = os.path.join(module_path, 'data', 'text_train.csv') test_path = os.path.join(module_path, 'data', 'text_test.csv') train = base.load_csv_without_header( train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0) test = base.load_csv_without_header( test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0) return base.Datasets(train=train, validation=None, test=test)
tensorflow.contrib.learn.python.learn.datasets.base.load_csv_without_header
343
import tensorflow as tf
alpha = 0.1
val = tf.constant([[2, 3], [1, 4]], dtype=tf.float32)
l1 = tf.contrib.layers.l1_regularizer(alpha)(val)
l2 = tf.contrib.layers.l2_regularizer(alpha)(val)
tensorflow.contrib.layers.l1_regularizer
344
import tensorflow as tf lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold)) # Put everything together. deterministic_actions = tf.argmax(q_values_perturbed, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
tensorflow.stack
345
import tensorflow as tf with tf.variable_scope(tf.get_variable_scope(), reuse=True if i else None): outputs_to_scales_to_logits = multi_scale_logits( images, model_options=model_options, image_pyramid=[image_scale], is_training=False, fine_tune_batch_norm=False) if add_flipped_images: with tf.variable_scope(tf.get_variable_scope(), reuse=True): outputs_to_scales_to_logits_reversed = multi_scale_logits( tf.reverse_v2(images, [2]), model_options=model_options, image_pyramid=[image_scale], is_training=False, fine_tune_batch_norm=False) for output in sorted(outputs_to_scales_to_logits): scales_to_logits = outputs_to_scales_to_logits[output] logits = tf.image.resize_bilinear( scales_to_logits[_MERGED_LOGITS_SCOPE], tf.shape(images)[1:3],
tensorflow.reverse_v2
346
from tensorflow.python.ops import array_ops @property def alpha(self): """Shape parameter.""" return self._alpha @property def beta(self): """Scale parameter.""" return self._beta def _batch_shape(self): return array_ops.broadcast_dynamic_shape( array_ops.shape(self.alpha), array_ops.shape(self.beta)) def _get_batch_shape(self): return array_ops.broadcast_static_shape( self.alpha.get_shape(), self.beta.get_shape()) def _event_shape(self): return constant_op.constant([], dtype=dtypes.int32) def _get_event_shape(self): return tensor_shape.scalar() def _sample_n(self, n, seed=None):
tensorflow.python.ops.array_ops.shape
347
from tensorflow.python.ops import math_ops return math_ops.igammac(self.alpha, self.beta / x) @distribution_util.AppendDocstring( """This is defined to be ``` entropy = alpha - log(beta) + log(Gamma(alpha)) + (1-alpha)digamma(alpha) ``` where digamma(alpha) is the digamma function.""") def _entropy(self): return (self.alpha + math_ops.log(self.beta) + math_ops.lgamma(self.alpha) - (1. + self.alpha) * math_ops.digamma(self.alpha)) @distribution_util.AppendDocstring( """The mean of an inverse gamma distribution is `beta / (alpha - 1)`, when `alpha > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception will be raised rather than returning `NaN`""") def _mean(self): mean = self.beta / (self.alpha - 1.) if self.allow_nan_stats: nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype()) return array_ops.where( self.alpha > 1., mean, array_ops.fill(self.batch_shape(), nan, name="nan")) else: return control_flow_ops.with_dependencies([
tensorflow.python.ops.math_ops.digamma
348
from tensorflow.python.ops import array_ops # batch_ndims to get the event start dim. event_start = array_ops.where(
tensorflow.python.ops.array_ops.where
349
from tensorflow.python.ops import math_ops update = math_ops.div( tp_update, math_ops.add(tp_update, fp_update), name='update')
tensorflow.python.ops.math_ops.add
350
import tensorflow as tf enable_pretrain = tf.cast( tf.greater_equal(global_step, FLAGS.first_pretrain_steps), tf.float32) loss = src_loss * tf.stop_gradient(loss_weights) * enable_pretrain loss += dst_loss + l2_loss return tf.identity(loss), src_loss, dst_loss def train_model_fn(features, labels, mode, params): # pylint: disable=unused-argument """Defines the model function.""" target_num_classes = FLAGS.target_num_classes global_step = tf.train.get_global_step() src_features, src_labels = features['src'], tf.cast(labels['src'], tf.int64) finetune_features = features['finetune'] target_features = features['target'] num_classes = FLAGS.src_num_classes finetune_one_hot_labels = tf.one_hot( tf.cast(labels['finetune'], tf.int64), target_num_classes) target_one_hot_labels = tf.one_hot( tf.cast(labels['target'], tf.int64), target_num_classes) with tf.variable_scope('rl_controller') as rl_scope: # It creates a `rl_scope` which will be used for ops. pass
tensorflow.cast
351
import tensorflow as tf if forward_only: str_summary_type = 'eval' loss_summ = tf.summary.scalar("{0}_loss".format(str_summary_type), mean_cost) acc_summ = tf.summary.scalar("{0}_accuracy".format(str_summary_type), accuracy) merged = tf.summary.merge([loss_summ, acc_summ]) return mean_cost, accuracy, y_pred, merged else: return mean_cost, accuracy, y_pred def training(self, cost): optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate) # train_op = optimizer.minimize(cost) trainables = tf.trainable_variables() grads = tf.gradients(cost, trainables) grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.clip_norm) capped_gvs = zip(grads, trainables) train_op = optimizer.apply_gradients(capped_gvs) return train_op @staticmethod def seq_length(data): used = tf.sign(tf.reduce_max(tf.abs(data), axis=2)) length = tf.reduce_sum(used, axis=1) length = tf.cast(length, tf.int64) return length
tensorflow.trainable_variables
352
import tensorflow as tf inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, mtype * 2 + 0].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 0]) print(mtype, fig_obj_count, 0) values = tf.math.sign(tf.nn.relu(interpolated + self.tol)) inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1]) print(mtype, fig_obj_count, 1) if mtype == 1: values = sdf_values inter = tf.reshape(values, [self.resolution, self.resolution,
tensorflow.reduce_max
353
import tensorflow as tf pert - prev_pert for pert, prev_pert in zip(perturbations, previous_perturbations) ] applied = self.apply_step(variables=variables, deltas=perturbation_deltas) with tf.control_dependencies(control_inputs=(applied,)): perturbed_loss = fn_loss(**arguments) direction = tf.sign(x=(unperturbed_loss - perturbed_loss)) deltas = [ delta + direction * perturbation for delta, perturbation in zip(deltas, perturbations) ] return deltas, perturbations
tensorflow.sign
354
import tensorflow as tf elif encoder.attn_norm_fun == 'sigmoid': weights = tf.nn.sigmoid(e) elif encoder.attn_norm_fun == 'max': weights = tf.one_hot(tf.argmax(e, -1), depth=tf.shape(e)[1]) else: e -= tf.reduce_max(e, axis=1, keep_dims=True) T = encoder.attn_temperature or 1.0 exp = tf.exp(e / T) * mask weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True) weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1) return weighted_average, weights def no_attention(state, hidden_states, *args, **kwargs):
tensorflow.reduce_sum
355
from tensorflow.python.framework import random_seed self._config.training_worker_session_startup_stagger_secs) if sleep_secs: logging.info('Waiting %d secs before starting task %d.', sleep_secs, self._config.task) time.sleep(sleep_secs) # Device allocation device_fn = device_fn or self._device_fn with ops.Graph().as_default() as g, g.device(device_fn): random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, targets = input_fn() self._check_inputs(features, targets) train_op, loss_op = self._get_train_ops(features, targets) return train( graph=g, output_dir=self._model_dir, train_op=train_op, loss_op=loss_op,
tensorflow.python.framework.random_seed.set_random_seed
356
from tensorflow.python.ops import array_ops def _move_tensors(tensors, device): """Moves a list of tensors to a device by concatenating/splitting them.""" # Reset the device setting to avoid weird interactions with device merging # logic. with ops.device(None): if all(tensor.shape == tensor_shape.scalar() for tensor in tensors): with ops.device(tensors[0].device): values = array_ops.stack(tensors) with ops.device(device): return array_ops.unstack(values) else: with ops.device(tensors[0].device): sizes = array_ops.stack( [array_ops.shape(tensor)[0] for tensor in tensors]) values = array_ops.concat(tensors, axis=0)
tensorflow.python.ops.array_ops.stack
357
from tensorflow.contrib import seq2seq if decoder_fn is None: outputs, final_state = tf.nn.dynamic_rnn(cell, tensor, sequence_length=sequence_length, initial_state=initial_state, dtype=tf.float32) final_context_state = None else: # TODO: turn off sequence_length? outputs, final_state, final_context_state = seq2seq.dynamic_rnn_decoder( cell, decoder_fn, inputs=None, sequence_length=sequence_length) if return_final_state: return final_state else: return outputs
tensorflow.contrib.seq2seq.dynamic_rnn_decoder
358
from tensorflow.python.framework import ops output_count = np.prod(output_shape.as_list()) return ops.OpStats("flops", (output_count * filter_in_depth * filter_height * filter_width * 2)) @ops.RegisterStatistics("Conv2D", "weight_parameters") def _calc_conv_weight_params(graph, node): """Calculates the on-disk size of the weights for Conv2D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined()
tensorflow.python.framework.ops.RegisterStatistics
359
import tensorflow as tf
if reduction_mode == 'max':
  print('USING MAX POOLING FOR REDUCTION!')
  vecs_reduced = tf.segment_max(vecs, segment_inds)
elif reduction_mode == 'mean':
tensorflow.segment_max
360
import tensorflow as tf class EagerLinearRegressionBenchmark(tf.test.Benchmark): def benchmarkEagerLinearRegression(self): num_epochs = 10 num_batches = 200 batch_size = 64 dataset = linear_regression.synthetic_dataset( w=tf.random_uniform([3, 1]), b=tf.random_uniform([1]), noise_level=0.01, batch_size=batch_size, num_batches=num_batches) burn_in_dataset = dataset.take(10)
tensorflow.random_uniform
361
from tensorflow.python.ops import math_ops return self._logits_to_predictions(logits, proba=True) def _logits_to_predictions(self, logits, proba=False): if self._n_classes < 2: return array_ops.reshape(logits, [-1]) if self._n_classes == 2: logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits]) if proba: return nn.softmax(logits) else: return math_ops.argmax(logits, 1) def _get_feature_ops_from_example(self, examples_batch): column_types = layers.create_dict_for_parse_example( (self._get_linear_feature_columns() or []) + (self._get_dnn_feature_columns() or [])) features = parsing_ops.parse_example(examples_batch, column_types) return features def _num_label_columns(self): return 1 if self._n_classes <= 2 else self._n_classes
tensorflow.python.ops.math_ops.argmax
362
import tensorflow as tf with self.test_session(): qdist = distributions.QuantizedDistribution( base_dist_cls=distributions.Normal, lower_cutoff=0., upper_cutoff=None, mu=tf.zeros( batch_shape, dtype=tf.float32), sigma=tf.ones( batch_shape, dtype=tf.float32)) samps = qdist.sample_n(n=5000, seed=42) samps_v = samps.eval() # With lower_cutoff = 0, the interval j=0 is (-infty, 0], which holds 1/2
tensorflow.ones
363
from tensorflow.python.framework import ops returned_dims.append(dim) return [tensor_shape.TensorShape(returned_dims)] @ops.RegisterShape("SegmentMax") @ops.RegisterShape("SegmentMean") @ops.RegisterShape("SegmentMin") @ops.RegisterShape("SegmentProd") @ops.RegisterShape("SegmentSum") def _SegmentReductionShape(op): """Common shape function for segment reduction ops.""" data_shape = op.inputs[0].get_shape() segment_ids_shape = op.inputs[1].get_shape() segment_ids_shape.assert_has_rank(1) return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
tensorflow.python.framework.ops.RegisterShape
364
import tensorflow as tf same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c] non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1] pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c] dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1] top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1] loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k]
tensorflow.reduce_any
365
import tensorflow as tf .FullyConnected('fc2', out_dim=128, nl=tf.nn.relu) .FullyConnected('fct', out_dim=19, nl=tf.identity)()) tf.nn.softmax(logits, name='prob') cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector') summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error')) wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss), name='regularize_loss') summary.add_moving_summary(cost, wd_cost) self.cost = tf.add_n([wd_cost, cost], name='cost') def _get_optimizer(self): lr = tf.get_variable('learning_rate', initializer=5e-4, trainable=False) opt = tf.train.AdamOptimizer(lr, epsilon=1e-3) return optimizer.apply_grad_processors( opt, [ gradproc.ScaleGradient(('STN.*', 0.1)), gradproc.SummaryGradient()]) def get_data(isTrain): ds = dataset.Mnist('train' if isTrain else 'test')
tensorflow.add_n
366
from tensorflow.python.framework import op_def_registry as _op_def_registry result = _op_def_lib.apply_op("UnpackPath", path=path, path_values=path_values, name=name) return result def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "HardRoutingFunction"
tensorflow.python.framework.op_def_registry.register_op_list
367
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder shape=(), dtype=dtypes.string, default_value='jpeg'), 'image/class/label': parsing_ops.FixedLenFeature( shape=[1], dtype=dtypes.int64, default_value=array_ops.zeros( [1], dtype=dtypes.int64)) } items_to_handlers = { 'image': tfexample_decoder.Image(), 'label': tfexample_decoder.Tensor('image/class/label'), } decoder = tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers) return dataset.Dataset( data_sources=data_sources, reader=io_ops.TFRecordReader,
tensorflow.contrib.slim.python.slim.data.tfexample_decoder.Image
368
import tensorflow as tf
self.U1_pred = self.net_U1(self.x1_tf) # N1 x q
self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \
            tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred))
self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
tensorflow.square
369
from tensorflow.python.ops import math_ops array_ops.zeros((), dtype=dtypes.int32, name="zero"),) if self._is_all_constant_helper(size, *start_sum): start = sum(tensor_util.constant_value(s) for s in start_sum) stop = start + tensor_util.constant_value(size) return ops.convert_to_tensor( list(range(start, stop)), dtype=dtypes.int32, name=name) else: start = sum(start_sum) return math_ops.range(start, start + size) sample_ndims = self.get_sample_ndims(x, name=name) return (make_dims((), sample_ndims, name="sample_dims"), make_dims((sample_ndims,), self.batch_ndims, name="batch_dims"), make_dims((sample_ndims, self.batch_ndims), self.event_ndims, name="event_dims")) def get_shape(self, x, name="get_shape"):
tensorflow.python.ops.math_ops.range
370
import tensorflow as tf
self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
tensorflow.is_finite
371
from tensorflow.python.platform import gfile """ with gfile.Open(filename, 'wb') as f: f.write(pickle.dumps(self)) @classmethod def restore(cls, filename): """Restores vocabulary processor from given file. Args: filename: Path to file to load from. Returns: VocabularyProcessor object. """ with gfile.Open(filename, 'rb') as f: return pickle.loads(f.read())
tensorflow.python.platform.gfile.Open
372
import tensorflow as tf
class BatchedSpMDT:
    def __init__(self):
        self.b_module = tf.load_op_library('./batched.so')
tensorflow.load_op_library
373
from tensorflow.python.framework import ops dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. """ with ops.op_scope([value, filter, output_shape], name, "conv2d_transpose") as name: value = ops.convert_to_tensor(value, name="value") filter = ops.convert_to_tensor(filter, name="filter") if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]): raise ValueError( "input channels does not match filter's input channels, " "{} != {}".format(value.get_shape()[3], filter.get_shape()[3])) output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape") if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)): raise ValueError("output_shape must have shape (4,), got {}" .format(output_shape_.get_shape())) if isinstance(output_shape, (list, np.ndarray)):
tensorflow.python.framework.ops.convert_to_tensor
374
from tensorflow.python.ops import array_ops return reduction_indices else: return range(0, array_ops.rank(x))
tensorflow.python.ops.array_ops.rank
375
from tensorflow.contrib import slim } if is_training is not None: batch_norm_params["is_training"] = is_training # Set weight_decay for weights in Conv and DepthSepConv layers. weights_init = tf.keras.initializers.glorot_normal() regularizer = contrib_layers.l2_regularizer(weight_decay) if regularize_depthwise: depthwise_regularizer = regularizer else: depthwise_regularizer = None with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], weights_initializer=weights_init, activation_fn=tf.nn.relu6, normalizer_fn=normalizer_fn, ): with slim.arg_scope([slim.batch_norm], **batch_norm_params): with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer): with slim.arg_scope( [slim.separable_conv2d], weights_regularizer=depthwise_regularizer ) as sc:
tensorflow.contrib.slim.arg_scope
376
from tensorflow.python.ops import control_flow_ops batch_size = math_ops.minimum(values_size, max_size - size) perm = [axis] + [n for n in range(ndim) if n != axis] batch_values = array_ops.transpose(values, perm)[:batch_size] def reallocate(): next_size = _next_array_size(new_size) next_shape = array_ops.pack([next_size] + fixed_shape) new_value = array_ops.zeros(next_shape, dtype=values.dtype) old_value = array.value() assign_op = state_ops.assign(array, new_value, validate_shape=False) with ops.control_dependencies([assign_op]): copy_op = array[:size].assign(old_value[:size]) # return value needs to be the same dtype as no_op() for cond with ops.control_dependencies([copy_op]): return control_flow_ops.no_op() new_size = size + batch_size array_size = array_ops.shape_internal(array, optimize=False)[0] maybe_reallocate_op = control_flow_ops.cond( new_size > array_size, reallocate, control_flow_ops.no_op) with ops.control_dependencies([maybe_reallocate_op]): append_values_op = array[size:new_size].assign(batch_values) with ops.control_dependencies([append_values_op]): update_op = size.assign(new_size) if metrics_collections: ops.add_to_collections(metrics_collections, value) if updates_collections:
tensorflow.python.ops.control_flow_ops.no_op
377
from tensorflow.python.framework import op_def_library as _op_def_library
_text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
tensorflow.python.framework.op_def_library.OpDefLibrary
378
import tensorflow as tf match that is neither positive or negative. """ matched_gt_boxes, matched_gt_classes, matched_gt_indices, matched_iou, _ = ( box_ops.box_matching(boxes, gt_boxes, gt_classes)) positive_matches = tf.greater( matched_iou, self._config_dict['foreground_iou_threshold']) negative_matches = tf.logical_and( tf.greater_equal( matched_iou, self._config_dict['background_iou_low_threshold']), tf.less( matched_iou, self._config_dict['background_iou_high_threshold'])) ignored_matches = tf.logical_and( tf.less(matched_iou, 0.0), tf.greater_equal( matched_iou, self._config_dict['background_iou_high_threshold']))
tensorflow.greater_equal
379
from tensorflow.contrib.learn.python.learn.datasets import base TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' TEST_IMAGES = 't10k-images-idx3-ubyte.gz' TEST_LABELS = 't10k-labels-idx1-ubyte.gz' VALIDATION_SIZE = 5000 local_file = base.maybe_download(TRAIN_IMAGES, train_dir, SOURCE_URL + TRAIN_IMAGES) train_images = extract_images(local_file) local_file = base.maybe_download(TRAIN_LABELS, train_dir, SOURCE_URL + TRAIN_LABELS) train_labels = extract_labels(local_file, one_hot=one_hot) local_file = base.maybe_download(TEST_IMAGES, train_dir, SOURCE_URL + TEST_IMAGES) test_images = extract_images(local_file) local_file = base.maybe_download(TEST_LABELS, train_dir, SOURCE_URL + TEST_LABELS) test_labels = extract_labels(local_file, one_hot=one_hot) validation_images = train_images[:VALIDATION_SIZE] validation_labels = train_labels[:VALIDATION_SIZE] train_images = train_images[VALIDATION_SIZE:] train_labels = train_labels[VALIDATION_SIZE:] train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape) validation = DataSet(validation_images,
tensorflow.contrib.learn.python.learn.datasets.base.maybe_download
380
from tensorflow.python.ops import array_ops super(_MultiClassTargetColumn, self).__init__( loss_fn=loss_fn, num_label_columns=1 if n_classes == 2 else n_classes, label_name=label_name, weight_column_name=weight_column_name, problem_type=ProblemType.CLASSIFICATION) def logits_to_predictions(self, logits, proba=False): if self.num_label_columns == 1: logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1) if proba: return nn.softmax(logits) else: return math_ops.argmax(logits, 1) def _default_eval_metrics(self): if self._num_label_columns == 1:
tensorflow.python.ops.array_ops.zeros_like
381
from tensorflow.python.ops import math_ops broadcast_weights = math_ops.mul( weights, array_ops.ones_like(average_precision), name='broadcast_weights') batch_max = math_ops.reduce_sum(broadcast_weights, name='batch_max') max_update = state_ops.assign_add(max_var, batch_max, name='update') with ops.name_scope(None, 'total', (average_precision,)) as total_scope: total_var = contrib_variables.local_variable( array_ops.zeros([], dtype=dtypes.float64), name=total_scope) batch_total = math_ops.reduce_sum(average_precision, name='batch_total') total_update = state_ops.assign_add(total_var, batch_total, name='update') # Divide total by max to get mean, for both vars and the update ops. mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean') update = _safe_scalar_div(total_update, max_update, name=scope) if metrics_collections:
tensorflow.python.ops.math_ops.reduce_sum
382
from tensorflow.python.ops import array_ops if self._batch_ndims_is_0 or self._event_ndims_is_0: b = ((min(-2, -1 - self._event_ndims_static),) if self._batch_ndims_is_0 else ()) e = (-1,) if self._event_ndims_is_0 else () x = array_ops.squeeze(x, squeeze_dims=b + e) _, batch_shape, event_shape = self.get_shape(x) else: s = (x.get_shape().as_list() if x.get_shape().is_fully_defined() else array_ops.shape(x)) batch_shape = array_ops.slice(s, (1,), (self.batch_ndims,)) # Since sample_dims=1 and is left-most, we add 1 to the number of # batch_ndims to get the event start dim. event_start = array_ops.where( self._batch_ndims_is_0, 2, 1 + self.batch_ndims) event_shape = array_ops.slice(s, (event_start,), (self.event_ndims,)) new_shape = array_ops.concat(0, (sample_shape, batch_shape, event_shape)) x = array_ops.reshape(x, shape=new_shape) return x @contextlib.contextmanager def _name_scope(self, name=None, values=None): """Helper function to standardize op scope.""" with ops.name_scope(self.name): with ops.name_scope(name, values=( (values or []) + [self.batch_ndims, self.event_ndims])) as scope: yield scope def _is_all_constant_helper(self, *args):
tensorflow.python.ops.array_ops.slice
383
from tensorflow.python.ops import nn def _get_optimizer(optimizer): if callable(optimizer): return optimizer() else: return optimizer def _add_hidden_layer_summary(value, tag): summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value)) summary.histogram("%s_activation" % tag, value) def _dnn_model_fn(features, labels, mode, params, config=None): """Deep Neural Net model_fn. Args: features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`). labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
tensorflow.python.ops.nn.zero_fraction
384
import tensorflow as tf filename = os.path.join(test_dir, "metafile") with self.test_session(): # Creates a graph. v0 = tf.Variable(10.0, name="v0") var = tf.Variable(tf.constant(0, dtype=tf.int64)) count_up_to = var.count_up_to(3) input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue") qr = tf.train.QueueRunner(input_queue, [count_up_to]) tf.initialize_all_variables() # Creates a saver. save = tf.train.Saver({"v0": v0}) # Adds a set of collections.
tensorflow.FIFOQueue
385
from tensorflow.python.ops import array_ops event_shape = distribution_util.pick_vector( self._event_ndims_is_0, (1,), event_shape) batch_shape = distribution_util.pick_vector( self._batch_ndims_is_0, (1,), batch_shape) new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape)) x = array_ops.reshape(x, shape=new_shape) x = distribution_util.rotate_transpose(x, shift=-1) return x, sample_shape
tensorflow.python.ops.array_ops.concat
386
import tensorflow as tf
elif model_str == 'gcn_vae':
    opt = OptimizerVAE(preds=model.reconstructions,
                       labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                   validate_indices=False), [-1]),
tensorflow.sparse_tensor_to_dense
387
import tensorflow as tf
hazard_ratio = tf.exp(y_pred)
cumsum_hazard_ratio = tf.cumsum(hazard_ratio)
tensorflow.cumsum
388
import tensorflow.contrib.rnn as rnn
lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias = 1.0)
outputs, _ = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)
tensorflow.contrib.rnn.static_rnn
389
import tensorflow as tf end_loss = focal_loss(tf.nn.softmax(self.logits2, -1), end_label) self.loss = tf.reduce_mean(start_loss + end_loss) self.logger.info("loss type %s" % self.config.loss_type) self.all_params = tf.trainable_variables() if self.config.l2_norm is not None: self.logger.info("applying l2 loss") variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables) self.loss += l2_loss if self.config.decay is not None: self.var_ema = tf.train.ExponentialMovingAverage(self.config.decay) ema_op = self.var_ema.apply(tf.trainable_variables()) with tf.control_dependencies([ema_op]): self.loss = tf.identity(self.loss)
tensorflow.contrib.layers.apply_regularization
390
import tensorflow as tf return pred_x, pred_y def gaussian_blur(inputs, inputs_filters, sigma, data_format, name=None): with tf.name_scope(name, "gaussian_blur", [inputs]): data_format_ = 'NHWC' if data_format=='channels_last' else 'NCHW' if data_format_ == 'NHWC':
tensorflow.name_scope
391
from tensorflow.python import debug as tf_debug if FLAGS.use_hvd: hooks.append(hvd.BroadcastGlobalVariablesHook(0)) if hvd.rank() == -1: #if debug, set 0 CLIDebugHook = tf_debug.LocalCLIDebugHook(ui_type='readline') CLIDebugHook.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan) hooks.append(CLIDebugHook) if FLAGS.profile and hvd.rank() == 0:
tensorflow.python.debug.LocalCLIDebugHook
392
import tensorflow as tf
for i in range(num_dims):
  curr_core_shape = (1, shape[i], shape[i], 1)
  tt_cores[i] = tf.reshape(tf.eye(shape[i], dtype=dtype), curr_core_shape)
tensorflow.eye
393
from tensorflow.python.framework import tensor_shape return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)] elif 0 <= dimension and dimension < input_shape.ndims: returned_shape = [] for i, dim in enumerate(input_shape.dims): if i != dimension: returned_shape.append(dim) return [tensor_shape.TensorShape(returned_shape)] else: raise ValueError( "dimension (%d) must be in the range [0, %d), where %d is the number " "of dimensions in the input" % (dimension, input_shape.ndims, input_shape.ndims))
tensorflow.python.framework.tensor_shape.TensorShape
394
from tensorflow.contrib.framework import deprecated_args `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ is_below_threshold = math_ops.to_float(math_ops.less(values, threshold)) return streaming_mean(is_below_threshold, _mask_weights(ignore_mask, weights), metrics_collections, updates_collections, name or 'percentage_below_threshold') @deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask') def streaming_mean_iou(predictions, labels, num_classes, ignore_mask=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Calculate per-step mean Intersection-Over-Union (mIOU).
tensorflow.contrib.framework.deprecated_args
395
from tensorflow.python.framework import tensor_shape """Shape function for UnsortedSegmentSum.""" data_shape = op.inputs[0].get_shape() segment_ids_shape = op.inputs[1].get_shape() mid = segment_ids_shape.ndims if mid is None: return [tensor_shape.unknown_shape()] else: num_segments = tensor_util.ConstantValue(op.inputs[2]) return [tensor_shape.TensorShape([num_segments]).concatenate( data_shape[mid:])]
tensorflow.python.framework.tensor_shape.unknown_shape
396
from tensorflow.python.saved_model import loader self.session = None self.graph = None self.feed_tensors = None self.fetch_tensors = None def process(self, inputs): # Create a session for every worker only once. The session is not # pickleable, so it can't be created at the DoFn constructor. if not self.session: self.graph = ops.Graph() with self.graph.as_default(): self.session = tf.Session() metagraph_def = loader.load( self.session, {self.meta_tag}, self.model_dir) signature_def = metagraph_def.signature_def[self.meta_signature] # inputs self.feed_tensors = { k: self.graph.get_tensor_by_name(v.name) for k, v in signature_def.inputs.items() } # outputs/predictions self.fetch_tensors = {
tensorflow.python.saved_model.loader.load
397
import tensorflow as tf kernels = [ v for v in trainable_vars if ('weights' in v.name or 'kernel' in v.name) and 'depthwise_weights' not in v.name ] for K in kernels: x = tf.multiply(weight_decay, tf.nn.l2_loss(K)) tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, x) class RestoreMovingAverageHook(tf.train.SessionRunHook): def __init__(self, model_dir): super(RestoreMovingAverageHook, self).__init__()
tensorflow.add_to_collection
398
import tensorflow as tf bounds = [] for v in variables: key = v.name[:v.name.find(':')] lower, upper = constraint[key] bounds.append((lower, upper)) max_steps = 1000 status_every = 1 # Create an optimizer with the desired parameters. opt = tf.contrib.opt.ScipyOptimizerInterface(nll, options={'maxiter': max_steps, # 'disp': True, # 'tol': 1e-20, 'maxls': 10, }, # inequalities=inequalities, # method='SLSQP' # supports inequalities # method='BFGS', bounds=bounds, var_list=variables, # supply with bounds to match order! tol=1e-14,
tensorflow.contrib.opt.ScipyOptimizerInterface
399