Columns:
  seed     — string, length 59 to 2.16k characters: a TensorFlow code snippet
  seed_api — string, length 14 to 101 characters: the fully qualified TensorFlow API exercised by the snippet
  index    — int64, values 0 to 523
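For orientation, each entry below is one record with these three fields. A hypothetical Python literal for the first record (index 200) might look like the following; the field names follow the column header above, and the seed string is elided here because the full snippet appears in the row itself:

row = {
    "seed": "...",                          # full TensorFlow snippet, shown expanded in the row below
    "seed_api": "tensorflow.not_equal",     # API the snippet exercises
    "index": 200,
}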
import tensorflow as tf

decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0])
decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32)
decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0])
decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32)
# Adjust event vals according to representation
decoded = tf.where(tf.not_equal(decoded, 0), decoded+shift, decoded)
decoded_u = tf.where(tf.not_equal(decoded_u, 0), decoded_u+shift, decoded_u)
# Set default vals
decoded = tf.where(tf.equal(decoded, 0), def_val, decoded)
decoded_u = tf.where(tf.equal(decoded_u, 0), def_val, decoded_u)
tensorflow.not_equal
200
import tensorflow as tf

lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
outputs_fw, (hidden_fw, output_fw) = lstm_cell_fw(t, dtype=tf.float32, sequence_length=nwords)
if bidirectional:
    lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
    lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
    outputs_bw, (hidden_bw, output_bw) = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)
    outputs = tf.concat([outputs_fw, outputs_bw], axis=-1)
    hidden = tf.concat([hidden_fw, hidden_bw], axis=-1)
    output = tf.concat([output_fw, output_bw], axis=-1)
else:
    outputs = outputs_fw
    hidden = hidden_fw
    output = output_fw
outputs = tf.transpose(outputs, perm=[1, 0, 2])
return (outputs, hidden, output)
tensorflow.transpose
201
import tensorflow as tf if self.norm_type == 'layer': norm_net = tf.contrib.layers.layer_norm(net, center=True, scale=True, activation_fn=activation_fn) elif self.norm_type == 'batch':
tensorflow.contrib.layers.layer_norm
202
import tensorflow as tf } placeholders.update({ 'adj_mats_%d,%d,%d' % (i, j, k): tf.sparse_placeholder(tf.float32) for i, j in edge_types for k in range(edge_types[i,j])})
tensorflow.sparse_placeholder
203
from tensorflow.python.ops import variables as vars_ "Got %s." % str(optimizer)) # All trainable variables, if specific variables are not specified. if variables is None: variables = vars_.trainable_variables() # Compute gradients. gradients = opt.compute_gradients(
tensorflow.python.ops.variables.trainable_variables
204
import tensorflow as tf self._cost = tf.reduce_sum(loss) self._final_state = state if not is_training: return self._lr = tf.Variable(0.0, trainable=False) tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars), config.max_grad_norm) optimizer = tf.train.GradientDescentOptimizer(self._lr) self._train_op = optimizer.apply_gradients(
tensorflow.Variable
205
import tensorflow as tf

def directional_attention_with_selections(
        rep_tensor, rep_mask, dep_selection, head_selection, direction=None, hn=None, keep_unselected=True,
        scope=None, keep_prob=1., is_train=None, wd=0., activation='elu'):
    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    org_ivec = rep_tensor.get_shape().as_list()[2]
    ivec = hn or org_ivec
    with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'):
        # non-linear
        rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation, False, wd, keep_prob, is_train)
        # ensure the selection is right
        dep_selection = tf.logical_and(rep_mask, dep_selection)
        head_selection = tf.logical_and(rep_mask, head_selection)
        rep_dep_tensor, rep_dep_mask, dep_org_idx = reduce_data_rep_max_len(rep_map, dep_selection)
        rep_head_tensor, rep_head_mask, head_org_idx = reduce_data_rep_max_len(rep_map, head_selection)
        sl_dep, sl_head = tf.shape(rep_dep_tensor)[1], tf.shape(rep_head_tensor)[1]
        if keep_unselected:
            unhead_selection = tf.logical_and(rep_mask, tf.logical_not(head_selection))
            rep_unhead_tensor, rep_unhead_mask, unhead_org_idx = reduce_data_rep_max_len(rep_map, unhead_selection)
            sl_unhead = tf.shape(rep_unhead_tensor)[1]
        attn_result = tf.cond(
            tf.equal(sl_head, 0),
            lambda: tf.zeros([bs, 0, hn], tf.float32),
            lambda: self_attention_for_selected_head(
tensorflow.logical_and
206
import tensorflow as tf

    self.embeddings,
    self.inputs,
    name='word_embeddings',
)
# Zero out embeddings of pad value
masks = tf.not_equal(self.inputs, pad_value, name='masks')
word_embeddings *= tf.cast(
    tf.expand_dims(masks, axis=-1),
    # tf.float32,
    dtype=LayersConfig.tf_dtype,
)
sum_word_embeddings = tf.reduce_sum(word_embeddings, axis=1)
# Count number of non-padding words in each sentence
sentence_lengths = tf.count_nonzero(
    masks,
    axis=1,
    keep_dims=True,
    # dtype=tf.float32,
    dtype=LayersConfig.tf_dtype,
    name='sentence_lengths',
)
sentence_embeddings = tf.divide(
    sum_word_embeddings,
    sentence_lengths + 1e-8,  # Add epsilon to avoid dividing by 0
    name='sentence_embeddings')
tensorflow.count_nonzero
207
from tensorflow.python.ops import math_ops

def get_eval_ops(self, features, logits, targets, metrics=None):
    loss = self.loss(logits, targets, features)
    result = {"loss": metrics_lib.streaming_mean(loss)}
    # Adds default metrics.
    if metrics is None:
        # TODO(b/29366811): This currently results in both an "accuracy" and an
        # "accuracy/threshold_0.500000_mean" metric for binary classification.
        metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy}
    predictions = math_ops.sigmoid(logits)
    targets_float = math_ops.to_float(targets)
    default_metrics = self._default_eval_metrics()
    for metric_name, metric_op in default_metrics.items():
        result[metric_name] = metric_op(predictions, targets_float)
    class_metrics = {}
    proba_metrics = {}
    for name, metric_op in six.iteritems(metrics):
        if isinstance(name, tuple):
tensorflow.python.ops.math_ops.sigmoid
208
import tensorflow as tf

def lstm_network(input, scope='lstm_network'):
    with tf.variable_scope(scope):
        # tf.nn.rnn_cell
        lstm_cell1 = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer1, forget_bias=1.0)
        lstm_cell2 = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer2, forget_bias=1.0)
        lstm_cells = tf.contrib.rnn.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True)
        # tf.nn.rnn_cell
        # lstm_cell1 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer1, forget_bias=1.0)
        # lstm_cell2 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer2, forget_bias=1.0)
        #lstm_cells = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True)
tensorflow.contrib.rnn.MultiRNNCell
209
import tensorflow as tf

    x_ += x * (1. - diag_mask)
    # Finally, gather everything into a lower triangular matrix.
    L_ = tf.gather(x_, tril_mask)
    return [L_, tf.transpose(L_)]

tmp = tf.scan(fn, L_flat, initializer=init)
if isinstance(tmp, (list, tuple)):
    # TensorFlow 0.10 now returns a tuple of tensors.
    L, LT = tmp
else:
tensorflow.scan
210
from tensorflow.python.ops import sparse_ops hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY) # Check actual hashed output to prevent unintentional hashing changes. expected_out = self._sparse_tensor([[83]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_hashed_output_v1_has_collision(self): """Tests the old version of the fingerprint concatenation has collisions. """ # The last 10 bits of 359 and 1024+359 are identical. # As a result, all the crosses collide. t1 = constant_op.constant([[359], [359 + 1024]]) t2 = constant_op.constant([list(range(10)), list(range(10))]) cross = sparse_feature_cross_op.sparse_feature_cross( [t2, t1], hashed_output=True, num_buckets=1024) cross_dense = sparse_ops.sparse_tensor_to_dense(cross) with session.Session(): values = cross_dense.eval() self.assertTrue(numpy.equal(values[0], values[1]).all()) def test_hashed_output_v2_has_no_collision(self): """Tests the new version of the fingerprint concatenation has no collisions. """ # Although the last 10 bits of 359 and 1024+359 are identical. # As a result, all the crosses shouldn't collide. t1 = constant_op.constant([[359], [359 + 1024]]) t2 = constant_op.constant([list(range(10)), list(range(10))]) cross = sparse_feature_cross_op.sparse_feature_cross( [t2, t1], hashed_output=True,
tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense
211
from tensorflow.python.framework import sparse_tensor dnn_hidden_units=(3, 3)) input_fn = test_data.iris_input_multiclass_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) self._assertCommonMetrics(metrics) def benchmarkPartitionedVariables(self): def _input_fn(): features = { 'language': sparse_tensor.SparseTensor( values=('en', 'fr', 'zh'), indices=((0, 0), (0, 1), (2, 0)), dense_shape=(3, 2)) } labels = constant_op.constant(((1,), (0,), (0,))) return features, labels # The given hash_bucket_size results in variables larger than the # default min_slice_size attribute, so the variables are partitioned. sparse_feature = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=2e7)
tensorflow.python.framework.sparse_tensor.SparseTensor
212
import tensorflow as tf

FLAGS = flags.FLAGS

# augmentation functions

# augment
def random_crop_and_resize(images, ratio=0.8):
    b, h, w, c = images.get_shape().as_list()
    ch, cw = map(lambda x: int(x * ratio), (h, w))
    crop = tf.random_crop(images, size=[b, ch, cw, 3])
    crop = tf.image.resize(crop, [h, w])
    return crop

def random_apply(fn, image, prob=1.):
    b, *_ = image.get_shape().as_list()
    chance = tf.less(tf.random_uniform([b], 0, 1.0), prob)
    return tf.where(chance, fn(image), tf.identity(image))
tensorflow.random_crop
213
from tensorflow.python.ops import math_ops mean_average_precision: Scalar `float64` `Tensor` with the mean average precision values. update: `Operation` that increments variables appropriately, and whose value matches `metric`. """ default_name = _at_k_name('average_precision', k) with ops.name_scope(name, default_name, (predictions, labels)) as scope: # Calculate per-example average precision, and apply weights. average_precision = sparse_average_precision_at_k( predictions=predictions, labels=labels, k=k) if weights is not None: weights = math_ops.to_double(weights) average_precision = math_ops.mul(average_precision, weights) # Create accumulation variables and update ops for max average precision and # total average precision. with ops.name_scope(None, 'max', (average_precision,)) as max_scope: # `max` is the max possible precision. Since max for any row is 1.0: # - For the unweighted case, this is just the number of rows. # - For the weighted case, it's the sum of the weights broadcast across # `average_precision` rows. max_var = contrib_variables.local_variable(
tensorflow.python.ops.math_ops.to_double
214
from tensorflow.python.framework import ops if tensor_dtype is None: if not inputs or not isinstance(inputs, (list, tuple)): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, ops.Tensor) for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape")
tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices
215
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops test_configs = self._GetTestConfig() for config_name, config in test_configs.items(): config = test_configs[config_name] num_layers = config["num_layers"] num_units = config["num_units"] batch_size = config["batch_size"] seq_length = config["seq_length"] with ops.Graph().as_default(), ops.device("/device:GPU:0"): model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units) params_size_t = model.params_size() input_data = variables.Variable( array_ops.ones([seq_length, batch_size, num_units])) input_h = variables.Variable( array_ops.ones([num_layers, batch_size, num_units])) input_c = variables.Variable( array_ops.ones([num_layers, batch_size, num_units])) params = variables.Variable(
tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops.CudnnLSTM
216
import tensorflow as tf

    Evaluate the quality of the logits at predicting the label
    '''
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
    correct = tf.cast(correct, tf.int32)
tensorflow.arg_max
217
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig() # Because we did not start a distributed cluster, we need to pass an # empty ClusterSpec, otherwise the device_setter will look for # distributed jobs, such as "/job:ps" which are not present. config._cluster_spec = server_lib.ClusterSpec({}) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=(sparse_feature,), dnn_feature_columns=(embedding_feature,), dnn_hidden_units=(3, 3), config=config) metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate(
tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined.DNNLinearCombinedClassifier
218
from tensorflow.python.layers import core as core_layers def dropout(self, keep_prob=0.5, input_layer=None): if input_layer is None: input_layer = self.top_layer else: self.top_size = None name = 'dropout' + str(self.counts['dropout']) with tf.variable_scope(name): if not self.phase_train: keep_prob = 1.0 dropout = core_layers.dropout(input_layer, keep_prob) self.top_layer = dropout return dropout def batch_norm(self, input_layer=None, **kwargs): """Adds a Batch Normalization layer.""" if input_layer is None: input_layer = self.top_layer else:
tensorflow.python.layers.core.dropout
219
import tensorflow as tf def build_loss(self): cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1]) dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1) gcut = tf.stop_gradient(self.g) mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001 dcos = dot / mag manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)
tensorflow.norm
220
import tensorflow as tf ds = ds.apply( tf.data.experimental.map_and_batch( lambda fname, label: (mapper(tf.read_file(fname)), label), batch_size=batch_size,
tensorflow.read_file
221
import tensorflow.contrib.graph_editor as ge # get all bottlenecks in the graph bottleneck_ts = [] for t in ts: b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops)) f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops)) # check that there are not shortcuts b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)
tensorflow.contrib.graph_editor.get_backward_walk_ops
222
from tensorflow.contrib.layers.python.layers import utils # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_variance_op = utils.smart_cond( is_training, build_update_ops, build_no_ops,
tensorflow.contrib.layers.python.layers.utils.smart_cond
223
from tensorflow.python.ops import array_ops with ops.device(device): return array_ops.unstack(values)
tensorflow.python.ops.array_ops.unstack
224
import tensorflow.contrib.graph_editor as ge scope_name = str(micros) op_list = [] with tf.name_scope(scope_name): yield op_list g = tf.get_default_graph() op_list.extend(ge.select_ops(scope_name+"/.*", graph=g)) def _to_op(tensor_or_op): if hasattr(tensor_or_op, "op"): return tensor_or_op.op return tensor_or_op
tensorflow.contrib.graph_editor.select_ops
225
import tensorflow as tf

def func1():
    # execute at training time
    batch_mean, batch_var = tf.nn.moments(x, range(len(shape) - 1))
    update_mean = tf.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean))
    update_var = tf.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var))
    with tf.control_dependencies([update_mean, update_var]):
        return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)
tensorflow.assign_sub
226
import tensorflow as tf try: t_vars = tf.global_variables()
tensorflow.global_variables
227
import tensorflow as tf

# For Imitation Learning Part
# self.bc_loss = 0.5 * tf.reduce_mean(tf.contrib.keras.backend.categorical_crossentropy(self.optimal_actions_onehot,self.policy))
# self.next_loc_loss_il = 0.2 * tf.reduce_sum(tf.sqrt(tf.square(self.next_loc_mean[:-1,:] - self.il_nextloc)))
# self.imitation_loss = self.bc_loss #+ self.next_loc_loss_il

# Get gradients from local network using local losses and
# normalize the gradients using clipping
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope+'/qvalues')
self.gradients = tf.gradients(self.loss, local_vars)
self.var_norms = tf.global_norm(local_vars)
grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, GRAD_CLIP)

# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GLOBAL_NET_SCOPE+'/qvalues')
self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))

# now the gradients for imitation loss
# self.i_gradients = tf.gradients(self.imitation_loss, local_vars)
# self.i_var_norms = tf.global_norm(local_vars)
tensorflow.global_norm
228
from tensorflow.python.client import device_lib learning_starts=50000, learning_freq=4, frame_history_len=4, target_update_freq=10000, grad_norm_clipping=10 ) env.close() def get_available_gpus(): from tensorflow.python.client import device_lib local_device_protos = device_lib.list_local_devices() return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU'] def set_global_seeds(i): try: import tensorflow as tf except ImportError: pass else: tf.set_random_seed(i)
tensorflow.python.client.device_lib.list_local_devices
229
from tensorflow.python.ops import array_ops if labels_rank > 1: labels = array_ops.reshape(labels, [-1])
tensorflow.python.ops.array_ops.reshape
230
import tensorflow as tf

    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False

layers = tf.keras.layers

def parse(line):
    """Parse a line from the colors dataset."""
    # Each line of the dataset is comma-separated and formatted as
    #    color_name, r, g, b
    # so `items` is a list [color_name, r, g, b].
    items = tf.string_split([line], ",").values
    rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
    # Represent the color name as a one-hot encoded character sequence.
    color_name = items[0]
    chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
    # The sequence length is needed by our RNN.
    length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
    return rgb, chars, length

def maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here.

    Args:
        filename: string, name of the file in the directory.
tensorflow.string_to_number
231
from tensorflow.python.framework import ops loss_vec, array_ops.reshape(weight_tensor, shape=(-1,))) return math_ops.div( math_ops.reduce_sum(loss_vec), math_ops.to_float(math_ops.reduce_sum(weight_tensor)), name="loss") def _get_linear_vars(self): if self._get_linear_feature_columns(): return ops.get_collection(self._linear_weight_collection) return [] def _get_linear_training_ops(self, linear_grads, linear_vars): if self._get_linear_feature_columns(): self._linear_optimizer = self._get_optimizer( self._linear_optimizer, default_optimizer="Ftrl",
tensorflow.python.framework.ops.get_collection
232
import tensorflow as tf gtboxes_and_label_q, num_objects, img_h, img_w]) tower_grads = [] biases_regularizer = tf.no_regularizer weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY) with tf.variable_scope(tf.get_variable_scope()): for i in range(num_gpu): with tf.device('/gpu:%d' % i): with tf.name_scope('tower_%d' % i): with slim.arg_scope( [slim.model_variable, slim.variable],
tensorflow.get_variable_scope
233
from tensorflow.contrib.learn.python.learn import ops def test_softmax_classifier(self): with self.cached_session() as session: features = array_ops.placeholder(dtypes.float32, [None, 3]) labels = array_ops.placeholder(dtypes.float32, [None, 2]) weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]]) biases = constant_op.constant([0.2, 0.3]) class_weight = constant_op.constant([0.1, 0.9]) prediction, loss = ops.softmax_classifier(features, labels, weights, biases, class_weight) self.assertEqual(prediction.get_shape()[1], 2) self.assertEqual(loss.get_shape(), []) value = session.run(loss, {features: [[0.2, 0.3, 0.2]], labels: [[0, 1]]}) self.assertAllClose(value, 0.55180627) def test_embedding_lookup(self):
tensorflow.contrib.learn.python.learn.ops.softmax_classifier
234
from tensorflow.python.framework import op_def_registry def _get_node_def(op): return op._node_def # pylint: disable=protected-access def _get_op_def(op): # pylint: disable=protected-access if hasattr(op, "_sig"): return getattr(op, "_sig") else: return op_def_registry.get_registered_ops()[op.type] # pylint: enable=protected-access def _is_in_placeholders(op, func_arg_placeholders): return op.values() and (op.values()[0].name in func_arg_placeholders) def _create_input_dict(function_graph, func_arg_placeholders): """Create a mapping from graph tensor names to function tensor names.""" input_dict = {}
tensorflow.python.framework.op_def_registry.get_registered_ops
235
import tensorflow as tf

                break
        if not mute:
            tf.logging.info('Finished evaluation')
        if max_iterations:
            pbar.close()

        # List of dicts to dict of lists
        metrics = dict(zip(metrics[0], zip(*[m.values() for m in metrics])))
        metrics = {m: np.nanmean(metrics[m], axis=0) for m in metrics}
        return metrics

    def _checkpoint_var_search(self, checkpoint_path):
        reader = tf.train.NewCheckpointReader(checkpoint_path)
        saved_shapes = reader.get_variable_to_shape_map()
        model_names = tf.model_variables()  # Used by tf.slim layers
        if not len(tf.model_variables()):
            model_names = tf.global_variables()  # Fallback when slim is not used
        model_names = set([v.name.split(':')[0] for v in model_names])
        checkpoint_names = set(saved_shapes.keys())
        found_names = model_names & checkpoint_names
        missing_names = model_names - checkpoint_names
        shape_conflicts = set()
        restored = []
        with tf.variable_scope('', reuse=True):
            for name in found_names:
                # print(tf.global_variables())
                # print(name, name in model_names, name in checkpoint_names)
                var = tf.get_variable(name)
tensorflow.model_variables
236
from tensorflow.python.ops import math_ops ops should be added to. name: An optional variable_scope name. Returns: percentage: A tensor representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately. Raises: ValueError: If `ignore_mask` is not `None` and its shape doesn't match `values`, or if `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ is_below_threshold = math_ops.to_float(math_ops.less(values, threshold)) return streaming_mean(is_below_threshold, _mask_weights(ignore_mask, weights), metrics_collections, updates_collections, name or 'percentage_below_threshold') @deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask') def streaming_mean_iou(predictions, labels, num_classes, ignore_mask=None, weights=None, metrics_collections=None, updates_collections=None,
tensorflow.python.ops.math_ops.less
237
from tensorflow.python.ops import variable_scope v = variable_scope.get_variable("v", [options.attention_vec_size]) v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0) w_c = None if options.use_coverage: with variable_scope.variable_scope("coverage"): w_c = variable_scope.get_variable("w_c", [options.attention_vec_size]) w_c = tf.expand_dims(tf.expand_dims(w_c, axis=0), axis=0) # For each step, dec_input => lstm_output => vocab_score wordidx_t = decoder_inputs[0] # [batch_size] int32
tensorflow.python.ops.variable_scope.get_variable
238
import tensorflow as tf trainnum = tf.placeholder(tf.int32) validnum = tf.placeholder(tf.int32) learnrate = tf.placeholder(tf.float32) def getinputs(path): filename_queue=tf.train.string_input_producer([path]) reader=tf.TFRecordReader() _,serialized_example=reader.read(filename_queue) features=tf.parse_single_example(serialized_example, features={ 'label':tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string),
tensorflow.TFRecordReader
239
from tensorflow.python.training import training as train * `learning_rate` and `learning_rate_decay_fn` are supplied, but no `global_step` is available. * `gradients` is empty. """ loss = ops.convert_to_tensor(loss) contrib_framework.assert_scalar(loss) if global_step is None: global_step = train.get_global_step() else: train.assert_global_step(global_step) with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]): # Update ops take UPDATE_OPS collection if not provided. if update_ops is None: update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS)) # Make sure update ops are ran before computing loss. if update_ops: loss = control_flow_ops.with_dependencies(list(update_ops), loss)
tensorflow.python.training.training.assert_global_step
240
from tensorflow.python.ops import nn which fall into the top `k` predictions. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `recall_at_k`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `ignore_mask` is not `None` and its shape doesn't match `predictions`, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k)) return streaming_mean(in_top_k, _mask_weights(ignore_mask, weights), metrics_collections, updates_collections, name or _at_k_name('recall', k)) # TODO(ptucker): Validate range of values in labels? @deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask') def streaming_sparse_recall_at_k(predictions,
tensorflow.python.ops.nn.in_top_k
241
from tensorflow.contrib import framework as contrib_framework supervisor_is_chief=(self._config.task == 0), supervisor_master=self._config.master, feed_fn=feed_fn, max_steps=steps, fail_on_nan_loss=fail_on_nan_loss) def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None): if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'): return checkpoint_path = saver.latest_checkpoint(self._model_dir) eval_dir = os.path.join(self._model_dir, 'eval') with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, targets = input_fn() self._check_inputs(features, targets) eval_dict = self._get_eval_ops(features, targets, metrics or self._get_default_metric_functions()) eval_results, _ = evaluate( graph=g, output_dir=eval_dir, checkpoint_path=checkpoint_path, eval_dict=eval_dict, global_step_tensor=global_step, supervisor_master=self._config.master, feed_fn=feed_fn, max_steps=steps)
tensorflow.contrib.framework.create_global_step
242
from tensorflow.python.ops import variable_scope as vs _FuncGraph overrides ops.Graph's create_op() so that we can keep track of every inputs into every op created inside the function. If any input is from other graphs, we keep track of it in self.capture and substitue the input with a place holder. Each captured input's corresponding place holder is converted into a function argument and the caller passes in the captured tensor. """ def __init__(self, *args, **kwargs): super(_FuncGraph, self).__init__(*args, **kwargs) self._building_function = True self._outer_graph = ops.get_default_graph() self._vscope = vs.get_variable_scope() self._old_custom_getter = self._vscope.custom_getter self._captured = {} self.extra_inputs = [] self.extra_args = [] self.extra_vars = [] def getvar(self, getter, name, shape=None, dtype=None, initializer=None, trainable=True,
tensorflow.python.ops.variable_scope.get_variable_scope
243
import tensorflow as tf tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img) loss_dict = outputs[-1] total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu) if i == num_gpu - 1: regularization_losses = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) # weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses()) total_losses = total_losses + tf.add_n(regularization_losses) tf.get_variable_scope().reuse_variables()
tensorflow.get_collection
244
from tensorflow.python.platform import googletest # the `y` value at the input and the `y` value at the baseline. expected_val = y_input_val[0] - y_baseline_val[0] # Calculate the integrated gradients attribution of the input. ig = integrated_gradients.IntegratedGradients(graph, sess, y[0], x) mask = ig.GetMask(x_value=x_input_val[0], feed_dict={}, x_baseline=x_baseline_val[0], x_steps=1000) # Verify the result. self.assertAlmostEqual(expected_val, mask.sum(), places=3) if __name__ == '__main__': googletest.main()
tensorflow.python.platform.googletest.main
245
from tensorflow.python.ops import math_ops thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] (tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op, fp_update_op) = _tp_fn_tn_fp(predictions, labels, thresholds, weights) assert array_ops.squeeze(fp).get_shape().as_list()[0] == num_thresholds def compute_sensitivity_at_specificity(name): specificities = math_ops.div(tn, tn + fp + kepsilon) tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0) tf_index = math_ops.cast(tf_index, dtypes.int32) # Now, we have the implicit threshold, so compute the sensitivity: return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + kepsilon,
tensorflow.python.ops.math_ops.div
246
import tensorflow as tf ignored_matches, tf.less(
tensorflow.less
247
import tensorflow as tf

sess.run(zero_var.initializer)
sess.run(ones_var.initializer)
print(sess.run(zero_var))
print(sess.run(ones_var))

zero_similar = tf.Variable(tf.zeros_like(zero_var))
ones_similar = tf.Variable(tf.ones_like(ones_var))
sess.run(ones_similar.initializer)
sess.run(zero_similar.initializer)
print(sess.run(ones_similar))
print(sess.run(zero_similar))

fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1))
sess.run(fill_var.initializer)
print(sess.run(fill_var))

const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9]))
const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim]))
sess.run(const_var.initializer)
sess.run(const_fill_var.initializer)
print(sess.run(const_var))
print(sess.run(const_fill_var))

linear_var = tf.Variable(tf.linspace(start=0.0, stop=1.0, num=3))  # Generates [0.0, 0.5, 1.0] includes the end
sequence_var = tf.Variable(tf.range(start=6, limit=15, delta=3))  # Generates [6, 9, 12] doesn't include the end
tensorflow.fill
248
from tensorflow.python.layers import convolutional as conv_layers strides = [1, d_height, d_width, 1] if self.data_format == 'NCHW': strides = [strides[0], strides[3], strides[1], strides[2]] if mode != 'SAME_RESNET': conv = conv_layers.conv2d( input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width],
tensorflow.python.layers.convolutional.conv2d
249
import tensorflow as tf # Prediction operation prediction = tf.sigmoid(model_output)
tensorflow.sigmoid
250
from tensorflow.python.framework import constant_op class OpsTest(test.TestCase): """Ops tests.""" def test_softmax_classifier(self): with self.cached_session() as session: features = array_ops.placeholder(dtypes.float32, [None, 3]) labels = array_ops.placeholder(dtypes.float32, [None, 2]) weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]]) biases = constant_op.constant([0.2, 0.3]) class_weight = constant_op.constant([0.1, 0.9]) prediction, loss = ops.softmax_classifier(features, labels, weights, biases, class_weight) self.assertEqual(prediction.get_shape()[1], 2) self.assertEqual(loss.get_shape(), []) value = session.run(loss, {features: [[0.2, 0.3, 0.2]], labels: [[0, 1]]}) self.assertAllClose(value, 0.55180627)
tensorflow.python.framework.constant_op.constant
251
from tensorflow.python.training import training

            self._target_column.num_label_columns)],
        array_ops.reshape(centered_bias, [-1]))
    return centered_bias

def _centered_bias_step(self, targets, features):
    centered_bias = ops.get_collection(self._centered_bias_weight_collection)
    batch_size = array_ops.shape(targets)[0]
    logits = array_ops.reshape(
        array_ops.tile(centered_bias[0], [batch_size]),
        [batch_size, self._target_column.num_label_columns])
    loss = self._target_column.loss(logits, targets, features)
    # Learn central bias by an optimizer. 0.1 is a conservative lr for a single
    # variable.
    return training.AdagradOptimizer(0.1).minimize(loss, var_list=centered_bias)

def _logits(self, features, is_training=False):
    linear_feature_columns = self._get_linear_feature_columns()
    dnn_feature_columns = self._get_dnn_feature_columns()
    if not (linear_feature_columns or dnn_feature_columns):
        raise ValueError("Either linear_feature_columns or dnn_feature_columns "
                         "should be defined.")
    if linear_feature_columns and dnn_feature_columns:
        logits = (self._linear_logits(features, is_training) +
                  self._dnn_logits(features, is_training))
    elif dnn_feature_columns:
tensorflow.python.training.training.AdagradOptimizer
252
from tensorflow.python.training import training as train loss = ops.convert_to_tensor(loss) contrib_framework.assert_scalar(loss) if global_step is None: global_step = train.get_global_step() else: train.assert_global_step(global_step)
tensorflow.python.training.training.get_global_step
253
from tensorflow.python.ops import math_ops moving_average_variable, value, decay, zero_debias=False) # quicker adaptation at the beginning if global_step is not None: n = math_ops.cast(global_step, dtypes.float32) decay = math_ops.minimum(decay, n / (n + 1.)) # update averages mean = moving_average("mean", log_norm, decay) sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
tensorflow.python.ops.math_ops.minimum
254
import tensorflow as tf rnn_inputs = tf.nn.bias_add(tf.matmul(feats_all, rnn_proj_w), rnn_proj_b) rnn_inputs = tf.reshape(rnn_inputs, [batch_size, rnn_nunroll, rnn_size]) rnn_inputs = tf.split(rnn_inputs, rnn_nunroll, axis=1) rnn_inputs = [tf.squeeze(input_, [1]) for input_ in rnn_inputs] if rnn_cell_type == 'rnn': cell_fn = tf.nn.rnn_cell.BasicRNNCell
tensorflow.squeeze
255
import tensorflow as tf

output, state = update(state, input_, context, input_symbol)
output_ = generate(output, input_, context)

argmax = lambda: tf.argmax(output_, 1)
target = lambda: inputs.read(time + 1)
softmax = lambda: tf.squeeze(tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1), axis=1)

use_target = tf.logical_and(time < time_steps - 1, tf.random_uniform([]) >= feed_previous)
predicted_symbol = tf.case([
    (use_target, target),
    (tf.logical_not(feed_argmax), softmax)],
    default=argmax)  # default case is useful for beam-search

predicted_symbol.set_shape([None])
predicted_symbol = tf.stop_gradient(predicted_symbol)

input_ = embed(predicted_symbol)
pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id])

samples = samples.write(time, predicted_symbol)
attns = attns.write(time, context)
weights = weights.write(time, new_weights)
tensorflow.logical_not
256
from tensorflow.contrib.rnn.python.ops import core_rnn multi_cell = rnn_cell.MultiRNNCell( [cell() for _ in range(num_layers)]) outputs, final_state = core_rnn.static_rnn( multi_cell, inputs, dtype=dtypes.float32) trainable_variables = ops.get_collection(
tensorflow.contrib.rnn.python.ops.core_rnn.static_rnn
257
import tensorflow as tf

      is_dynamic_rnn: Use dynamic_rnn or not.

    Returns:
      A tuple containing:
      - Input tensor of the restored model.
      - Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
      - new session of the restored model.
    """
    model_dir = tempfile.mkdtemp()
    saver.save(sess, model_dir)

    # Reset the graph.
    tf.reset_default_graph()
    x, prediction, output_class = self.buildModel(lstm_layer, is_dynamic_rnn)

    new_sess = tf.compat.v1.Session(config=CONFIG)
    saver = tf.train.Saver()
    saver.restore(new_sess, model_dir)
    return x, prediction, output_class, new_sess

  def getInferenceResult(self, x, output_class, sess):
    """Get inference result given input tensor and output tensor.

    Args:
      x: The input tensor.
      output_class: The output tensor.
      sess: Current session.
tensorflow.reset_default_graph
258
import tensorflow as tf log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = ( tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims( tf.expand_dims(inv_timescales, 0), 0)) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]]) return signal def embedding_lookup(input_ids, vocab_size, embedding_size=128,
tensorflow.cos
259
from tensorflow.contrib.opt.python.training import variable_clipping_optimizer with ops.device(device): yield else: yield def _setupDense(self, is_distributed, dtype): with self._maybeWithDevice("/job:ps" if is_distributed else None): var0 = variables.Variable([[0.0, 1.0], [2.0, 3.0]], dtype=dtype) var1 = variables.Variable([4.0, 5.0], dtype=dtype) with self._maybeWithDevice("/job:worker" if is_distributed else None): grads0 = constant_op.constant([[0.1, 0.1], [0.1, 0.1]], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) sgd = gradient_descent.GradientDescentOptimizer(3.0) clip_opt = variable_clipping_optimizer.VariableClippingOptimizer( sgd, {var0: [1]}, 2.0) update_op = clip_opt.apply_gradients( list(zip([grads0, grads1], [var0, var1]))) variables.global_variables_initializer().run() return var0, var1, update_op def _assertDenseCorrect(self, var0, var1, update_op): # Fetch params to validate initial values self.assertAllCloseAccordingToType([[0.0, 1.0], [2.0, 3.0]], var0.eval()) self.assertAllCloseAccordingToType([4.0, 5.0], var1.eval())
tensorflow.contrib.opt.python.training.variable_clipping_optimizer.VariableClippingOptimizer
260
from tensorflow.python.ops import array_ops # Check that we got integer for classification. if not target.dtype.is_integer: raise ValueError("Target's dtype should be integer " "Instead got %s." % target.dtype) # sparse_softmax_cross_entropy_with_logits requires [batch_size] target. if len(target.get_shape()) == 2: target = array_ops.squeeze(target, squeeze_dims=[1]) loss_vec = nn.sparse_softmax_cross_entropy_with_logits( labels=target, logits=logits) return loss_vec
tensorflow.python.ops.array_ops.squeeze
261
from tensorflow.python.training import saver as saver_lib def every_n_step_end(self, step, outputs): super(ValidationMonitor, self).every_n_step_end(step, outputs) # TODO(mdan): The use of step below is probably misleading. # The code should probably use the step from the checkpoint, because # that's what is being evaluated. if self._estimator is None: raise ValueError("Missing call to set_estimator.") # Check that we are not running evaluation on the same checkpoint. latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir) if latest_path is None: logging.debug("Skipping evaluation since model has not been saved yet " "at step %d.", step) return False if latest_path is not None and latest_path == self._latest_path: logging.debug("Skipping evaluation due to same checkpoint %s for step %d " "as for step %d.", latest_path, step, self._latest_path_step)
tensorflow.python.training.saver.latest_checkpoint
262
from tensorflow.python.framework import ops

  """Moves a list of tensors to a device by concatenating/splitting them."""
  # Reset the device setting to avoid weird interactions with device merging
  # logic.
  with ops.device(None):
    if all(tensor.shape == tensor_shape.scalar() for tensor in tensors):
      with ops.device(tensors[0].device):
        values = array_ops.stack(tensors)
      with ops.device(device):
        return array_ops.unstack(values)
    else:
      with ops.device(tensors[0].device):
        sizes = array_ops.stack(
            [array_ops.shape(tensor)[0] for tensor in tensors])
        values = array_ops.concat(tensors, axis=0)
      with ops.device(device):
        sizes = array_ops.unstack(sizes)
        return list(array_ops.split(values, sizes, axis=0))


def _scheduled_stamp_resource_op_runner(batch, stamp):
  """Runs a batch operation on a stamped resource."""
  if not batch:
    return
  arg_keys = set(batch[0].args.keys())
  grouped_args = collections.OrderedDict()
  resource_handles = []
  # Check that the set of arguments is the same across all the scheduled ops.
  for op in batch:
tensorflow.python.framework.ops.device
263
import tensorflow as tf trg_len = tf.shape(attention_weights)[1] src_indices = tf.tile(tf.reshape(tf.range(src_len), shape=[1, 1, src_len]), [batch_size, trg_len, 1]) trg_indices = tf.tile(tf.reshape(tf.range(trg_len), shape=[1, trg_len, 1]), [batch_size, 1, src_len]) source_length = encoder_input_length[0]
tensorflow.range
264
import tensorflow as tf

        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })

    if FLAGS.contrast_norm == 'areafactor':
        image = tf.decode_raw(features['image_raw'], tf.float32)
    else:
        image = tf.decode_raw(features['image_raw'], tf.uint8)
        image = tf.cast(image, tf.float32) * (1. / 255)

    image.set_shape(np.prod([FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size]))
    image = tf.reshape(image, [FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size, 1])
    image = image - 0.5
tensorflow.decode_raw
265
from tensorflow.python.ops import state_ops

    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)
    vstar = self.get_slot(var, "vstar")
    gold = self.get_slot(var, "gold")  # gold is not sparse

    v_diff = state_ops.assign(vstar, mu_t * (var - vstar), use_locking=self._use_locking)

    with ops.control_dependencies([v_diff]):  # run v_diff operation before scatter_add
        scaled_grad = scatter_add(vstar, indices, grad)
    var_update = state_ops.assign_sub(var, lr_t * (scaled_grad + gold))

    return control_flow_ops.group(*[var_update, ])

def _apply_sparse(self, grad, var):
    # sparse grad (only for the shakespeare model)
    return self._apply_sparse_shared(
        grad.values, var, grad.indices,
        lambda x, i, v: state_ops.scatter_add(x, i, v))

def set_params(self, cog, avg_gradient, client):
    with client.model.graph.as_default():
tensorflow.python.ops.state_ops.assign_sub
266
from tensorflow.python.ops import image_ops from tensorflow.contrib.slim.python.slim.data import tfexample_decoder from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import image_ops from tensorflow.python.ops import io_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.platform import gfile from tensorflow.python.platform import test def _resize_image(image, height, width): image = array_ops.expand_dims(image, 0) image = image_ops.resize_bilinear(image, [height, width]) return array_ops.squeeze(image, [0]) def _create_tfrecord_dataset(tmpdir): if not gfile.Exists(tmpdir): gfile.MakeDirs(tmpdir) data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1) keys_to_features = { 'image/encoded': parsing_ops.FixedLenFeature(
tensorflow.python.ops.image_ops.resize_bilinear
267
from tensorflow.python.ops import state_ops old_value = array.value() assign_op = state_ops.assign(array, new_value, validate_shape=False)
tensorflow.python.ops.state_ops.assign
268
from tensorflow.python.ops import random_ops def validateKolmogorovSmirnov(self, shape, mean, stddev, minval, maxval, seed=1618): try: import scipy.stats # pylint: disable=g-import-not-at-top tf.set_random_seed(seed) with self.test_session(use_gpu=self._use_gpu): samples = random_ops.parameterized_truncated_normal(shape, mean, stddev, minval, maxval).eval() assert (~np.isnan(samples)).all() minval = max(mean - stddev * 10, minval) maxval = min(mean + stddev * 10, maxval) dist = scipy.stats.norm(loc=mean, scale=stddev) cdf_min = dist.cdf(minval) cdf_max = dist.cdf(maxval) def truncated_cdf(x):
tensorflow.python.ops.random_ops.parameterized_truncated_normal
269
import tensorflow as tf encode = tf.placeholder(tf.int32, shape=[None], name="encode") decode = tf.placeholder(tf.int32, shape=[decode_max_length + 2], name="decode") weight = tf.placeholder(tf.float32, shape=[decode_max_length + 1], name="weight") queue = tf.PaddingFIFOQueue(capacity = capacity, dtypes = [tf.int32, tf.int32, tf.float32], shapes = [[None], [decode_max_length + 2], [decode_max_length + 1]],
tensorflow.PaddingFIFOQueue
270
from tensorflow.python.ops import logging_ops Returns: Numpy array of predicted probabilities. """ return self._infer_model(x=x, input_fn=input_fn, batch_size=batch_size) def _get_train_ops(self, features, targets): """See base class.""" global_step = variables.get_global_step() assert global_step loss = self._loss( self._logits(features), targets, self._get_weight_tensor(features)) logging_ops.scalar_summary("loss", loss) linear_vars = self._get_linear_vars() dnn_vars = self._get_dnn_vars() grads = gradients.gradients(loss, dnn_vars + linear_vars) dnn_grads = grads[0:len(dnn_vars)] linear_grads = grads[len(dnn_vars):] train_ops = self._get_linear_training_ops( linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads, dnn_vars)
tensorflow.python.ops.logging_ops.scalar_summary
271
import tensorflow as tf self._on_training_finish(sess) except KeyboardInterrupt: self._on_training_abort(sess) def inference(self, max=10^6): self.fetch_datasets() self.build_ae_model() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # nut.print_model_info() # nut.list_checkpoint_vars(self.get_latest_checkpoint().replace(EMB_SUFFIX, '')) self.saver = tf.train.Saver() self._restore_model(sess) # nut.print_model_info()
tensorflow.Session
272
import tensorflow as tf block_v_size, block_dim], initializer=tf.uniform_unit_scaling_initializer()) hparams.bottleneck = functools.partial(
tensorflow.uniform_unit_scaling_initializer
273
import tensorflow as tf 'warmup_constant':warmup_constant, } def _norm(x, g=None, b=None, e=1e-5, axis=[1]): u = tf.reduce_mean(x, axis=axis, keep_dims=True) s = tf.reduce_mean(tf.square(x-u), axis=axis, keep_dims=True) x = (x - u) * tf.rsqrt(s + e) if g is not None and b is not None: x = x*g + b return x def norm(x, scope, axis=[-1]):
tensorflow.rsqrt
274
from tensorflow.python.framework import tensor_util """ with self._name_scope(name, values=[x]): def make_dims(start_sum, size, name): """Closure to make dims range.""" start_sum = start_sum if start_sum else ( array_ops.zeros((), dtype=dtypes.int32, name="zero"),) if self._is_all_constant_helper(size, *start_sum): start = sum(tensor_util.constant_value(s) for s in start_sum) stop = start + tensor_util.constant_value(size) return ops.convert_to_tensor( list(range(start, stop)), dtype=dtypes.int32, name=name) else: start = sum(start_sum) return math_ops.range(start, start + size) sample_ndims = self.get_sample_ndims(x, name=name) return (make_dims((), sample_ndims, name="sample_dims"),
tensorflow.python.framework.tensor_util.constant_value
275
import tensorflow as tf "mean", [dim], tf.constant_initializer(0.), trainable=False) step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False) if scale: gamma = variable_on_cpu("gamma", [dim], tf.constant_initializer(1.)) beta = variable_on_cpu("beta", [dim], tf.constant_initializer(0.)) # choose the appropriate moments if train: used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm") cur_mean, cur_var = used_mean, used_var if bn_lag > 0.: used_mean -= (1. - bn_lag) * (used_mean - tf.stop_gradient(mean)) used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var)) used_mean /= (1. - bn_lag**(step + 1)) used_var /= (1. - bn_lag**(step + 1)) else: used_mean, used_var = mean, var cur_mean, cur_var = used_mean, used_var # normalize res = (input_ - used_mean) / tf.sqrt(used_var + epsilon) # de-normalize if scale:
tensorflow.stop_gradient
276
import tensorflow as tf

TIMESERIES_INPUT_LAYER = 'rawdata'
TIMESERIES_COL = '{}_input'.format(TIMESERIES_INPUT_LAYER)
# In each sequence, column index 0 to N_INPUTS - 1 are features, and column index N_INPUTS to SEQ_LEN are labels
N_OUTPUTS = 1
N_INPUTS = SEQ_LEN - N_OUTPUTS
LSTM_SIZE = 3  # number of hidden layers in each of the LSTM cells

# Read data and convert to needed format
def read_dataset(filename, mode, batch_size):
    def _input_fn():
        # Provide the ability to decode a CSV
        def decode_csv(line):
            # all_data is a list of scalar tensors
            all_data = tf.decode_csv(line, record_defaults = DEFAULTS)
            inputs = all_data[:len(all_data) - N_OUTPUTS]  # first N_INPUTS values
            labels = all_data[len(all_data) - N_OUTPUTS:]  # last N_OUTPUTS values

            # Convert each list of rank R tensors to one rank R+1 tensor
            inputs = tf.stack(inputs, axis = 0)
            labels = tf.stack(labels, axis = 0)

            # Convert input R+1 tensor into a feature dictionary of one R+1 tensor
            features = {TIMESERIES_COL: inputs}
            return features, labels
tensorflow.decode_csv
277
import tensorflow as tf

def conv_3d_op(
        self,
        data,
        weights,
        strides,
        symmetric_weights=False,
        dilations=None):
    """3D convolutions for hgru."""
    if dilations is None:
        dilations = [1, 1, 1, 1, 1]
    w_shape = [int(w) for w in weights.get_shape()]
    if len(w_shape) > 1 and int(w_shape[-2]) > 1:
        # Full convolutions
        if symmetric_weights:
            g = tf.get_default_graph()
            with g.gradient_override_map({'Conv3D': 'SymmetricConv3D'}):
                activities = tf.nn.conv3d(
                    data,
                    weights,
                    strides,
                    padding=self.padding)
                # TODO (jk): removed dilations=dilations to accommodate r1.4
        else:
            activities = tf.nn.conv3d(
                data,
                weights,
                strides,
                padding=self.padding)
            # TODO (jk): removed dilations=dilations to accommodate r1.4
tensorflow.get_default_graph
278
import tensorflow as tf

argpar = tf.Variable(argpar_num, name="argpar", dtype=tf.float64)
m0 = tf.constant(m0_num, name="m0", dtype=tf.float64)
vdict['argpar'] = argpar

# RooArgusBG argus("argus","Argus PDF",mes,m0,argpar) ;
def argus_pdf(m, m0, c, p=0.5):
    t = m / m0
    u = 1 - t * t
    argus_t_ge_1 = m * tf.pow(u, p) * tf.exp(c * u)
    return tf.maximum(tf.zeros_like(m), argus_t_ge_1, name="argus_pdf")

# // --- Construct signal+background PDF ---
# RooRealVar nsig("nsig","#signal events",200,0.,10000) ;
# RooRealVar nbkg("nbkg","#background events",800,0.,10000) ;
nsig = tf.Variable(200, name="nsig", dtype=tf.float64)
nbkg = tf.Variable(800, name="nbkg", dtype=tf.float64)
tensorflow.pow
279
import tensorflow as tf return x assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape" indices = x.indices while indices.shape.ndims < x.values.shape.ndims: indices = tf.expand_dims(indices, -1) return tf.scatter_nd(indices, x.values, x.dense_shape) # partial derivatives to xs (usually the params of the neural net) d_xs_new = dv[len(checkpoints_other):] for j in range(len(xs)):
tensorflow.scatter_nd
280
from tensorflow.contrib.learn.python.learn.estimators import run_config self._export_dir_base = tempfile.mkdtemp() + "export/" gfile.MkDir(self._export_dir_base) def testFitAndEvaluateDontThrowException(self): learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 learner_config.constraints.max_tree_depth = 1 model_dir = tempfile.mkdtemp() config = run_config.RunConfig() classifier = estimator.GradientBoostedDecisionTreeClassifier( learner_config=learner_config, num_trees=1, examples_per_layer=3, model_dir=model_dir, config=config,
tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig
281
from tensorflow.python.ops import variables if device is not None: with ops.device(device): yield else: yield def _setupDense(self, is_distributed, dtype): with self._maybeWithDevice("/job:ps" if is_distributed else None): var0 = variables.Variable([[0.0, 1.0], [2.0, 3.0]], dtype=dtype) var1 = variables.Variable([4.0, 5.0], dtype=dtype) with self._maybeWithDevice("/job:worker" if is_distributed else None): grads0 = constant_op.constant([[0.1, 0.1], [0.1, 0.1]], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) sgd = gradient_descent.GradientDescentOptimizer(3.0) clip_opt = variable_clipping_optimizer.VariableClippingOptimizer( sgd, {var0: [1]}, 2.0)
tensorflow.python.ops.variables.Variable
282
from tensorflow.contrib.learn.python.learn.estimators import test_data 'dummy_sparse_column', hash_bucket_size=100)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=tempfile.mkdtemp(), linear_feature_columns=linear_features, dnn_feature_columns=cont_features, dnn_hidden_units=(3, 3)) metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate( input_fn=_input_fn, steps=100) self._assertSingleClassMetrics(metrics) def benchmarkCustomOptimizer(self): iris = test_data.prepare_iris_data_for_logistic_regression() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=tempfile.mkdtemp(), linear_feature_columns=(bucketized_feature,), linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3), dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
tensorflow.contrib.learn.python.learn.estimators.test_data.prepare_iris_data_for_logistic_regression
283
import tensorflow as tf for i in range(len(self.grads_and_vars)): self.grads.append(self.grads_and_vars[i][0]); self.vars.append(self.grads_and_vars[i][1]); self.grads=self.grads[-1*NUM_VARS:]; self.vars=self.vars[-1*NUM_VARS:]; self.train_op = self.optimizer.apply_gradients( self.grads_and_vars, global_step=tf.contrib.framework.get_global_step()) def predict(self, state, sess=None): sess = sess or tf.get_default_session() state=featurize_state(state); return sess.run(self.action, { self.state: [state] })[0]
tensorflow.contrib.framework.get_global_step
284
import tensorflow as tf scale = tf.constant([2., 3., 4.]) concentration = tf.constant([2.] * batch_size) pareto = tfd.Pareto(concentration, scale, validate_args=True) with self.assertRaisesOpError("not in the support"): x = tf.placeholder_with_default(input=[2., 3., 3.], shape=[3]) log_prob = pareto.log_prob(x) self.evaluate(log_prob) with self.assertRaisesOpError("not in the support"): x = tf.placeholder_with_default(input=[2., 2., 5.], shape=[3]) log_prob = pareto.log_prob(x) self.evaluate(log_prob) with self.assertRaisesOpError("not in the support"): x = tf.placeholder_with_default(input=[1., 3., 5.], shape=[3]) log_prob = pareto.log_prob(x) self.evaluate(log_prob) def testParetoLogPdfMultidimensional(self):
tensorflow.placeholder_with_default
285
import tensorflow as tf

# TODO: move to ops
def _rank(x):
    return len(x.get_shape())

def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
    random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
    binary_mask = tf.floor(random_tensor)
    if normalize:
        binary_mask = tf.reciprocal(keep_prob) * binary_mask
    return binary_mask

def _global_keep_prob(keep_prob):
    keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)
    keep_prob = tf.cond(_phase, lambda: keep_prob, lambda: keep_prob * 0.0 + 1.0)
    return keep_prob

def layer(func):
tensorflow.reciprocal
286
import tensorflow as tf optimizer = tf.train.GradientDescentOptimizer(self._lr) self._train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=tf.contrib.framework.get_or_create_global_step()) self._new_lr = tf.placeholder(
tensorflow.contrib.framework.get_or_create_global_step
287
import tensorflow as tf

self.mu = tf.layers.dense(l_a, num_action, tf.nn.tanh, kernel_initializer=w_init, name='mu')  # estimated action value
self.sigma = tf.layers.dense(l_a, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma')  # estimated variance

# wrap output
self.mu = self.mu * action_bound[1]; self.sigma = self.sigma + 1e-4

# get action from distribution
self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
self.action = tf.squeeze(self.normal_dist.sample(1), axis=0); self.action = tf.clip_by_value(self.action, action_bound[0], action_bound[1])

# Loss and train op
self.loss = -self.normal_dist.log_prob(self.a_his) * self.target
# Add cross entropy cost to encourage exploration
self.loss -= entropy_beta * self.normal_dist.entropy()
tensorflow.contrib.distributions.Normal
288
import tensorflow as tf tf.set_random_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() decoder_theta = mdl._MakeDecoderTheta(theta=mdl.theta, input_batch=None) mdl.BProp() self.assertEqual(decoder_theta, mdl.theta.decoder) def testFProp(self): with self.session(use_gpu=False): tf.set_random_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() tf.global_variables_initializer().run() test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval()) actual_var_names = [_.name for _ in tf.all_variables()] print('all vars \n', '\n'.join(actual_var_names)) expected_var_names = [
tensorflow.set_random_seed
289
import tensorflow as tf #print(np.shape(up1)) up2 = common_deconv2d(up1,self.gf*4,name='up2') # 16x16 -> 32x32 up3 = common_deconv2d(up2,self.gf*2,name='up3') # 32x32 -> 64x64 up4 = common_deconv2d(up3,self.gf,name='up4') # 64x64 -> 128x128 out_img = tf.contrib.layers.conv2d_transpose(up4,self.channels,kernel_size=4,stride=2,padding='SAME',activation_fn=tf.nn.tanh) # 128x128 -> 256x256 #print('out_img',(np.shape(out_img))) return out_img
tensorflow.contrib.layers.conv2d_transpose
290
from tensorflow.contrib.metrics.python.ops import metric_ops if weights is None: return None return math_ops.to_float(weights) def _labels_streaming_mean(unused_predictions, labels, weights=None): return metric_ops.streaming_mean(labels, weights=weights) def _predictions_streaming_mean(predictions, unused_labels, weights=None): return metric_ops.streaming_mean(predictions, weights=weights) def _streaming_auc(predictions, labels, weights=None): return metric_ops.streaming_auc( predictions, labels, weights=_float_weights_or_none(weights)) def _accuracy_at_threshold(threshold):
tensorflow.contrib.metrics.python.ops.metric_ops.streaming_mean
291
import tensorflow as tf

    pred1, pred2 = tf.split(pred, 2, axis=0)
    tgt1, tgt2 = tf.split(tgt, 2, axis=0)
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)
    loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
    loss = tf.reduce_mean(loss)
    return loss

def contra_step_lossV3(pred, tgt, margin=1.0):
    # Step-wise contrastive loss
    pred1, pred2 = tf.split(pred, 2, axis=0)
    tgt1, tgt2 = tf.split(tgt, 2, axis=0)
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tensorflow.reduce_mean
292
import tensorflow as tf # Apply the token-preprocessors. if token_preprocess_fns is not None: for token_preprocess_fn in token_preprocess_fns: dataset = token_preprocess_fn(dataset, training) if debug_print_examples: def print_examples_and_shapes(x): if np.random.uniform() < debug_print_examples_rate: tf.print( { 'inputs_shape': tf.size(x['inputs']), 'targets_shape': tf.size(x['targets']), 'inputs': x['inputs'], 'targets': x['targets'], }, output_stream=logging.info) return x dataset = dataset.map(print_examples_and_shapes) return dataset
tensorflow.size
293
from tensorflow.python.layers import pooling as pooling_layers k_height, k_width, d_height=2, d_width=2, mode='VALID', input_layer=None, num_channels_in=None): """Construct an average pooling layer.""" if input_layer is None: input_layer = self.top_layer else: self.top_size = num_channels_in name = 'apool' + str(self.counts['apool']) self.counts['apool'] += 1 pool = pooling_layers.average_pooling2d( input_layer, [k_height, k_width], [d_height, d_width], padding=mode, data_format=self.channel_pos, name=name) self.top_layer = pool return pool def reshape(self, shape, input_layer=None): if input_layer is None: input_layer = self.top_layer self.top_layer = tf.reshape(input_layer, shape) self.top_size = shape[-1] # HACK This may not always work return self.top_layer
tensorflow.python.layers.pooling.average_pooling2d
294
import tensorflow as tf policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd)) return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2)) def clip_logits(logits, config): logits_clip = getattr(config, "logits_clip", 0.) if logits_clip > 0: min_logit = tf.reduce_min(logits) return tf.minimum(logits - min_logit, logits_clip) else: return logits @registry.register_model class FeedForwardCategoricalPolicy(PolicyBase): """Feed-forward categorical."""
tensorflow.reduce_min
295
from tensorflow.python.ops import math_ops # "accuracy/threshold_0.500000_mean" metric for binary classification. metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy} predictions = math_ops.sigmoid(logits) targets_float = math_ops.to_float(targets) default_metrics = self._default_eval_metrics() for metric_name, metric_op in default_metrics.items():
tensorflow.python.ops.math_ops.to_float
296
import tensorflow as tf Omega = tf.square(bounded - 1.0) Omega = tf.reduce_sum(tf.reduce_mean(Omega, axis=1)) / (1.0 * tf.reduce_sum(nelems)) out = tf.gradients(Omega, self.W_rec) out[0] = tf.Print(out[0], [out[0], self.W_rec, Omega], "omega grads") out[0] = tf.verify_tensor_all_finite(out[0], "dead omega grad") return out, test def sussillo_reg(self):
tensorflow.verify_tensor_all_finite
297
from tensorflow.contrib.learn.python.learn import ops self.assertEqual(prediction.get_shape()[1], 2) self.assertEqual(loss.get_shape(), []) value = session.run(loss, {features: [[0.2, 0.3, 0.2]], labels: [[0, 1]]}) self.assertAllClose(value, 0.55180627) def test_embedding_lookup(self): d_embed = 5 n_embed = 10 ids_shape = (2, 3, 4) embeds = np.random.randn(n_embed, d_embed) ids = np.random.randint(0, n_embed, ids_shape) with self.cached_session(): embed_np = embeds[ids] embed_tf = ops.embedding_lookup(embeds, ids).eval() self.assertEqual(embed_np.shape, embed_tf.shape) self.assertAllClose(embed_np, embed_tf) def test_categorical_variable(self): random_seed.set_random_seed(42) with self.cached_session() as sess: cat_var_idx = array_ops.placeholder(dtypes.int64, [2, 2]) embeddings = ops.categorical_variable( cat_var_idx, n_classes=5, embedding_size=10, name="my_cat_var") sess.run(variables.global_variables_initializer()) emb1 = sess.run(embeddings, feed_dict={cat_var_idx.name: [[0, 1], [2, 3]]})
tensorflow.contrib.learn.python.learn.ops.embedding_lookup
298
from tensorflow.contrib.slim.python.slim import queues width = 280 with self.cached_session(): provider = dataset_data_provider.DatasetDataProvider( _create_tfrecord_dataset(dataset_dir)) [image] = provider.get(['image']) [label] = provider.get(['label']) image = _resize_image(image, height, width) with session.Session('') as sess: with queues.QueueRunners(sess): image, label = sess.run([image, label]) self.assertListEqual([height, width, 3], list(image.shape)) self.assertListEqual([1], list(label.shape)) def testConflictingRecordKeyItem(self): dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(), 'tfrecord_dataset')) with self.cached_session():
tensorflow.contrib.slim.python.slim.queues.QueueRunners
299