seed      string  lengths 25 to 2.89k
seed_api  string  lengths 14 to 102
index     int64   range 0 to 14.8k
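Each record below spans three lines: the seed snippet, the seed_api it exercises, and the row index. A minimal sketch of reading records in this schema, assuming (hypothetically) that they are exported as JSON Lines with exactly these three fields; the file name seeds.jsonl and the SeedRow/load_rows names are illustrative, not part of the dataset:

import json
from dataclasses import dataclass
from typing import List

@dataclass
class SeedRow:
    seed: str      # code snippet exercising one API call (25 to 2.89k chars)
    seed_api: str  # fully qualified API name, e.g. "tensorflow.reshape"
    index: int     # row id, observed range 0 to 14.8k

def load_rows(path: str = "seeds.jsonl") -> List[SeedRow]:
    # One JSON object per line, keyed by the three column names above.
    rows = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            obj = json.loads(line)
            rows.append(SeedRow(obj["seed"], obj["seed_api"], int(obj["index"])))
    return rows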
from tensorflow.python.ops import init_ops

  return list(zip(clipped_gradients, variables))


def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.cast(global_step, dtypes.float32)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
tensorflow.python.ops.init_ops.zeros_initializer
3,100
import tensorflow as tf

    param_noise_filter_func = default_param_noise_filter

    update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
    update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
    reset_ph = tf.placeholder(tf.bool, (), name="reset")

    eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
    param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01),
tensorflow.placeholder
3,101
import tensorflow as tf

    ivec = hn or org_ivec
    with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'):
        # non-linear
        rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation,
                                 False, wd, keep_prob, is_train)
        # ensure the selection is right
        dep_selection = tf.logical_and(rep_mask, dep_selection)
        head_selection = tf.logical_and(rep_mask, head_selection)
        rep_dep_tensor, rep_dep_mask, dep_org_idx = reduce_data_rep_max_len(rep_map, dep_selection)
        rep_head_tensor, rep_head_mask, head_org_idx = reduce_data_rep_max_len(rep_map, head_selection)
        sl_dep, sl_head = tf.shape(rep_dep_tensor)[1], tf.shape(rep_head_tensor)[1]

        if keep_unselected:
tensorflow.logical_and
3,102
import tensorflow as tf

                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
tensorflow.logging.info
3,103
import tensorflow as tf

  Returns
  -------
  A tensor with the variance of elements of `x`.
  """
  axis = _normalize_axis(axis, get_ndim(x))
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, tf.float32)
  m = tf.reduce_mean(x, axis=axis, keep_dims=True)
  devs_squared = tf.square(x - m)
  return tf.reduce_mean(devs_squared, axis=axis, keep_dims=keepdims)


def euclidean_distance(test, support, max_dist_sq=20):
  """Computes pairwise euclidean distances between provided tensors

  TODO(rbharath): BROKEN! THIS DOESN'T WORK!
tensorflow.square
3,104
import tensorflow as tf

    tf.train.import_meta_graph(filename)

    # Writes wrong contents to the file.
    tf.train.write_graph(saver.as_saver_def(), os.path.dirname(filename),
                         os.path.basename(filename))

    with self.test_session(graph=tf.Graph()):
      # Import should fail.
      with self.assertRaisesWithPredicateMatch(
          IOError, lambda e: "Cannot parse file"):
        tf.train.import_meta_graph(filename)
      # Deletes the file
      gfile.Remove(filename)
      with self.assertRaisesWithPredicateMatch(
          IOError, lambda e: "does not exist"):
        tf.train.import_meta_graph(filename)

  def testSliceVariable(self):
tensorflow.train.import_meta_graph
3,105
import tensorflow as tf

def save_model_params(sess, file_path):
    # assert file_path.endswith('.npz')
    param_dict = {v.name: v.eval(sess) for v in tf.global_variables()}
    np.savez_compressed(file_path, **param_dict)


def load_model_params(net, param_dict, sess, ignore_missing=False):
    # Iterate (name, value) pairs; iterating the dict itself would yield keys only.
    for param_name, param_data in param_dict.items():
        with tf.variable_scope(param_name, reuse=True):
            try:
                var = tf.get_variable(param_name)
                sess.run(var.assign(param_data))
            except ValueError:
                if not ignore_missing:
                    raise


def prepare_model(model_name, use_pretrained, pretrained_model_file_path):
    kwargs = {'pretrained': use_pretrained}
    net = get_model(model_name, **kwargs)
    input_image_size = net.in_size[0] if hasattr(net, 'in_size') else 224
tensorflow.get_variable
3,106
import tensorflow as tf

    out = tf.reshape(tensor, shape=shape)
    return out


@layer
def dense_layer(tensor, hidden_dims, weight=None, bias=None, **opts):
    original_tensor_shape = tf.shape(tensor)
    in_dim = int(tensor.get_shape()[-1])

    rank = _rank(tensor)
    if rank > 2:
        # -- time distributed dense
        tensor = tf.reshape(tensor, shape=(-1, in_dim))

    name = opts.get("name", "")

    if weight is None:
        initializer = tf.contrib.layers.xavier_initializer(uniform=True)
        weight = tf.get_variable("{}_dense_W".format(name),
                                 initializer=initializer(shape=(in_dim, hidden_dims)))
    if bias is None:
        bias = tf.get_variable("{}_dense_b".format(name), initializer=tf.zeros(shape=hidden_dims))

    out = tf.add(tf.matmul(tensor, weight), bias)
tensorflow.reshape
3,107
import tensorflow as tf

        self.is_training_pl = tf.placeholder(tf.bool, shape=(), name='is_training_pl')
        self.bn_decay = train_rotation_prediction.get_bn_decay(batch)
        self.get_pred = partial(self.model_pred.get_model,
                                is_training=self.is_training_pl,
                                bn_decay=self.bn_decay,
                                num_angles=self.num_angles,
                                use_input_trans=self.use_input_trans,
                                use_feature_trans=self.use_feature_trans)
        self.get_loss = partial(self.model_pred.get_loss,
                                use_trans_loss=self.use_trans_loss)

        with tf.variable_scope(name):
            self.noise = tf.placeholder(tf.float32, shape=[self.batch_size, self.noise_dim], name='noise')  # Noise vector.
            self.real_pc = tf.placeholder(tf.float32, shape=[self.batch_size] + self.n_output, name='real_pc')  # Ground-truth.

            with tf.variable_scope('rotation'):
                self.rot_label_pl = tf.placeholder(tf.int32, shape=self.batch_size, name='rot_label_pl')
                self.real_pc_rotated = self.rotate_n_angles(self.real_pc, self.rot_label_pl)
                self.real_pc_pred, real_pc_end_points = self.get_pred(self.real_pc_rotated)
                self.real_pc_rot_loss = self.get_loss(self.real_pc_pred, self.rot_label_pl, real_pc_end_points)

            with tf.variable_scope('generator'):
                self.generator_out = self.generator(self.noise, self.n_output, **gen_kwargs)
tensorflow.variable_scope
3,108
import tensorflow as tf

    all_features.update(self._context_keys_to_features)
    all_features.update(self._sequence_keys_to_features)

    # Reshape non-sparse elements just once:
    for k, value in all_features.items():
      if isinstance(value, tf.FixedLenFeature):
        example[k] = tf.reshape(example[k], value.shape)

    if not items:
      items = self._items_to_handlers.keys()

    outputs = []
tensorflow.reshape
3,109
import tensorflow as tf

        tgt_small = tf.where(geq, tgt2, tgt1)
        pred_larg = tf.where(geq, pred1, pred2)
        pred_small = tf.where(geq, pred2, pred1)

        loss = tf.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small)))
        loss = tf.reduce_mean(loss)

        return loss
tensorflow.maximum
3,110
import tensorflow as tf

def train():
  """Train CIFAR-10 for a number of steps."""
  with tf.Graph().as_default(), tf.device('/cpu:0'):
    # Create a variable to count the number of train() calls. This equals the
    # number of batches processed * FLAGS.num_gpus.
    global_step = tf.get_variable(
        'global_step', [],
        initializer=tf.constant_initializer(0), trainable=False)

    # Calculate the learning rate schedule.
    num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
                             FLAGS.batch_size)
    decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)

    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    cifar10.LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)

    # Create an optimizer that performs gradient descent.
    opt = tf.train.GradientDescentOptimizer(lr)

    # Calculate the gradients for each model tower.
    tower_grads = []
    with tf.variable_scope(tf.get_variable_scope()):
      for i in xrange(FLAGS.num_gpus):
        with tf.device('/gpu:%d' % i):
          with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
tensorflow.train.exponential_decay
3,111
import tensorflow as tf

    save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
    # Save and reload one Variable named "var0".
    self._SaveAndLoad("var0", 0.0, 1.0, save_path)
    # Save and reload one Variable named "var1" in the same file.
    # The cached readers should know to re-read the file.
    self._SaveAndLoad("var1", 1.1, 2.2, save_path)

  def testGPU(self):
    if not tf.test.is_built_with_cuda():
      return
    save_path = os.path.join(self.get_temp_dir(), "gpu")
    with tf.Session("", graph=tf.Graph()) as sess:
      with sess.graph.device("/gpu:0"):
        v0_1 = tf.Variable(123.45)
        save = tf.train.Saver({"v0": v0_1})
        tf.initialize_all_variables().run()
        save.save(sess, save_path)

    with tf.Session("", graph=tf.Graph()) as sess:
      with sess.graph.device("/gpu:0"):
        v0_2 = tf.Variable(543.21)
        save = tf.train.Saver({"v0": v0_2})
        tf.initialize_all_variables().run()
        self.assertAllClose(543.21, v0_2.eval())
        save.restore(sess, save_path)
        self.assertAllClose(123.45, v0_2.eval())

  def testVariables(self):
    save_path = os.path.join(self.get_temp_dir(), "variables")
    with tf.Session("", graph=tf.Graph()) as sess:
tensorflow.initialize_all_variables
3,112
import tensorflow as tf

    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_test_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_valid_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
            self.w1 = tf.get_variable('w1', [4096, 2048], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w2 = tf.get_variable('w2', [2048, 3072], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w3 = tf.get_variable('w3', [3072, 512], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w4 = tf.get_variable('w4', [512, classnum], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.b1 = tf.get_variable('b1', [2048], initializer=tf.constant_initializer(0.0))
            self.b2 = tf.get_variable('b2', [3072], initializer=tf.constant_initializer(0.0))
            self.b3 = tf.get_variable('b3', [512], initializer=tf.constant_initializer(0.0))
            self.b4 = tf.get_variable('b4', [classnum], initializer=tf.constant_initializer(0.0))

    def inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
tensorflow.contrib.layers.xavier_initializer_conv2d
3,113
import tensorflow as tf

    n_valid = len(vaY)
    n_batch_train = n_batch * n_gpu
    n_updates_total = (n_train // n_batch_train) * n_iter

    X_train = tf.placeholder(tf.int32, [n_batch_train, 2, n_ctx, 2])
    M_train = tf.placeholder(tf.float32, [n_batch_train, 2, n_ctx])
    X = tf.placeholder(tf.int32, [None, 2, n_ctx, 2])
    M = tf.placeholder(tf.float32, [None, 2, n_ctx])
tensorflow.placeholder
3,114
import tensorflow as tf

    # In matrix multiplication mode, the block patch should be the same as the kernel size.
    assert_shape = tf.assert_equal(
        tf.stack([blk_shape[1], blk_shape[2]]),
        tf.stack([ksize[0], ksize[1]]),
        message='Expect blk_indices.shape[1] == w.shape[0] and blk_indices.shape[2] == w.shape[1].')
tensorflow.stack
3,115
import tensorflow as tf

    return 1 - (tf.to_float(is_training) * dropout_rate)

  def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):
    k = util.shape(top_span_emb, 0)
    top_span_range = tf.range(k)  # [k]
    antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0)  # [k, k]
    antecedents_mask = antecedent_offsets >= 1  # [k, k]
    fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0)  # [k, k]
tensorflow.range
3,116
from tensorflow.python.ops import nn

    return features
  return {"": features}


def _get_optimizer(optimizer):
  if callable(optimizer):
    return optimizer()
  else:
    return optimizer


def _add_hidden_layer_summary(value, tag):
  summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
  summary.histogram("%s_activation" % tag, value)


def _dnn_model_fn(features, labels, mode, params, config=None):
  """Deep Neural Net model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
tensorflow.python.ops.nn.zero_fraction
3,117
import tensorflow as tf

            logger.error("error msg = {}, error type = {}, error file = {}, error line = {}".format(e, exc_type, fname, exc_tb.tb_lineno))
            raise RuntimeError("Error in CIFAR10CNN construction regarding the checkpoints and model directories!")

    ###################
    ## build_model() ##
    ###################
    def build_model(self):
        """
        Build the custom CNN for the CIFAR-10 dataset.
        """
        # The input data holders (cf. shapes after prepa)
        self.X = tf.compat.v1.placeholder(tf.float32,
                                          shape=(None,
                                                 self.config.data["image_size"],
                                                 self.config.data["image_size"],
                                                 self.config.data["num_channels"]),
                                          name="X")  # ex. (50000, 32, 32, 3)
        self.y = tf.compat.v1.placeholder(tf.int32,
                                          shape=(None, self.config.data["num_categories"]),
                                          name="y")  # ex. (50000, 10)
        self.train = tf.compat.v1.placeholder(tf.bool)

        # The CNN architecture = conv/pool layers + flatten layer + connected layers
        with tf.name_scope("cnn"):
            # a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
            self.conv1 = tf.layers.conv2d(self.X,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
tensorflow.compat.v1.placeholder
3,118
import tensorflow as tf

    labels_0_n = isu.map_labels_to_0_to_n(labels)
    expected_labels_0_n = tf.constant([[-1, 0, 1],
tensorflow.constant
3,119
import tensorflow as tf

      x = tf.zeros([1000000], dtype=np.float32)
      y = tf.py_func(lambda x: x + 1, [x], [tf.float32])
tensorflow.py_func
3,120
import tensorflow as tf

    def __call__(self, x, update=False):
        with tf.variable_scope(self.name) as scope:
            if not update:
                new_coeff = 1. / (self.batch_size + 1.)
                old_coeff = 1. - new_coeff
                new_mean = tf.reduce_mean(x, [1, 2], keep_dims=True)
                new_mean_sq = tf.reduce_mean(tf.square(x), [1, 2], keep_dims=True)
                mean = new_coeff * new_mean + old_coeff * self.mean
                mean_sq = new_coeff * new_mean_sq + old_coeff * self.mean_sq
                out = tf.nn.relu(self._normalize(x, mean, mean_sq, "live"))
            # Update the mean and mean_sq when passing the reference data
            else:
                self.mean = tf.reduce_mean(x, [0, 1, 2], keep_dims=True)
                self.mean_sq = tf.reduce_mean(tf.square(x), [0, 1, 2], keep_dims=True)
                out = tf.nn.relu(self._normalize(x, self.mean, self.mean_sq, "reference"))
        return out

    def _normalize(self, x, mean, mean_sq, message):
        # make sure this is called with a variable scope
        shape = x.get_shape().as_list()
        assert len(shape) == 4
        self.gamma = safe_get("gamma", [shape[-1]],
                              initializer=tf.random_normal_initializer(1., 0.02))
        gamma = tf.reshape(self.gamma, [1, 1, 1, -1])
        self.beta = safe_get("beta", [shape[-1]],
                             initializer=tf.constant_initializer(0.))
        beta = tf.reshape(self.beta, [1, 1, 1, -1])
tensorflow.square
3,121
import tensorflow as tf

                            'Number of steps between logging results to the console and saving summaries (default: %(default)d)')
tf.app.flags.DEFINE_integer('save-model', 1000,
tensorflow.app.flags.DEFINE_integer
3,122
import tensorflow as tf

  label_ids = tf.reshape(label_ids, [-1])

  one_hot_labels = tf.one_hot(
      label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

  per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
  loss = tf.reshape(per_example_loss, [-1, tf.shape(positions)[1]])
  # TODO: dynamic gather from per_example_loss
  return loss
tensorflow.shape
3,123
import tensorflow as tf

            os.makedirs(d)
            print("Folder created:", d)

    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            facenet.load_model(args.model_dir)
tensorflow.Session
3,124
import tensorflow as tf

                                           trainable=False)

        # ------------------------------------------------
        # Network loss
        # ------------------------------------------------
        self.predictions, self.states = self.compute_predictions()
        self.error = self.mean_square_error()
        self.loss = self.error + self.regularization()

    # regularized loss function
    def reg_loss(self):
        return self.mean_square_error() + self.regularization()

    # mean squared error
    def mean_square_error(self):
        return tf.reduce_mean(tf.square(self.output_mask * (self.predictions - self.y)))

    # regularizations
    def regularization(self):
        reg = 0

        # L1 weight regularization
        reg += self.L1_in * tf.reduce_mean(tf.abs(self.W_in) * self.input_Connectivity)
        reg += self.L1_rec * tf.reduce_mean(tf.abs(self.W_rec) * self.rec_Connectivity)
        if self.dale_ratio:
            reg += self.L1_out * tf.reduce_mean(tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out))
        else:
            reg += self.L1_out * tf.reduce_mean(tf.abs(self.W_out) * self.output_Connectivity)

        # L2 weight regularization
tensorflow.square
3,125
import tensorflow as tf

      self.head_scores = util.projection(context_outputs, 1)  # [num_words, 1]
      span_head_scores = tf.gather(self.head_scores, span_indices)  # [k, max_span_width, 1]
      span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2)  # [k, max_span_width, 1]
      span_head_scores += tf.log(span_mask)  # [k, max_span_width, 1]
      span_attention = tf.nn.softmax(span_head_scores, 1)  # [k, max_span_width, 1]
      span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1)  # [k, emb]
      span_emb_list.append(span_head_emb)

    span_emb = tf.concat(span_emb_list, 1)  # [k, emb]
    return span_emb  # [k, emb]
tensorflow.reduce_sum
3,126
import tensorflow as tf

    request.model_spec.signature_name = 'serving_default'  # This is correct (default constant).
    request.inputs['input'].CopyFrom(make_tensor_proto(input_data, shape=input_data.shape))

    # Boiler-Plate
    response = stub.Predict(request, timeout)
    result = response.outputs['output']
    print(tf.make_ndarray(result))
tensorflow.make_ndarray
3,127
import tensorflow as tf

                                           inp=[gtboxes_and_label_r, -1],
                                           Tout=tf.float32)
        gtboxes_and_label_r_ = tf.reshape(gtboxes_and_label_r_, [-1, 6])
tensorflow.reshape
3,128
from tensorflow.python.framework import ops

import string
import requests
import io
from zipfile import ZipFile
from tensorflow.contrib import learn
from tensorflow.python.framework import ops
ops.reset_default_graph()

# Start a graph session
sess = tf.Session()

# Check if data was downloaded, otherwise download it and save for future use
tensorflow.python.framework.ops.reset_default_graph
3,129
import tensorflow as tf

    order_m.append(order)
  return (tf.convert_to_tensor(value=degree_l),
          tf.convert_to_tensor(value=order_m))


def _evaluate_legendre_polynomial_pmm_eval(m, x):
  pmm = tf.pow(1.0 - tf.pow(x, 2.0), tf.cast(m, dtype=x.dtype) / 2.0)
  ones = tf.ones_like(m)
  pmm *= tf.cast(
      tf.pow(-ones, m) * double_factorial(2 * m - 1),
      dtype=pmm.dtype)
  return pmm
tensorflow.ones_like
3,130
import tensorflow as tf

    candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1))  # [num_words, max_span_width]
    candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices))  # [num_words, max_span_width]
    flattened_candidate_mask = tf.reshape(candidate_mask, [-1])  # [num_words * max_span_width]
    candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask)  # [num_candidates]
    candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask)  # [num_candidates]
    candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]), flattened_candidate_mask)  # [num_candidates]
tensorflow.reshape
3,131
from tensorflow.python.framework import ops

    sp_indices = [sp_m.indices for sp_m in sp_matrices]
    sp_values = [sp_m.values for sp_m in sp_matrices]
    sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]

    return self.b_module.bspmdt(sp_ids=sp_indices, sp_values=sp_values, sp_shape=sp_shape,
                                rhs=dense_matrices, adjoint_a=adjoint_a, adjoint_b=adjoint_b)


b_module = tf.load_op_library('./batched.so')

@ops.RegisterGradient("Bspmdt")
def _bspmdt_grad(op, *grad):
  """Gradients for the dense tensors in the SparseTensorDenseMatMul ops.

  Args:
    op: the Bspmdt ops
    grads: the incoming gradients
tensorflow.python.framework.ops.RegisterGradient
3,132
import tensorflow as tf

                num_objects = num_objects_batch[i]
                num_objects = tf.cast(tf.reshape(num_objects, [-1, ]), tf.float32)

                img_h = img_h_batch[i]
                img_w = img_w_batch[i]

                inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w])

        tower_grads = []
        biases_regularizer = tf.no_regularizer
        weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)

        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(num_gpu):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('tower_%d' % i):
                        with slim.arg_scope(
                                [slim.model_variable, slim.variable],
                                device='/device:CPU:0'):
                            with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,
                                                 slim.conv2d_transpose, slim.separable_conv2d,
                                                 slim.fully_connected],
                                                weights_regularizer=weights_regularizer,
                                                biases_regularizer=biases_regularizer,
                                                biases_initializer=tf.constant_initializer(0.0)):
tensorflow.get_variable_scope
3,133
import tensorflow as tf

def mlp_dropout(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None, dropout_rate=0):
    for h in hidden_sizes[:-1]:
        x = tf.layers.dense(x, units=h, activation=activation)
        x = tf.layers.dropout(x, rate=dropout_rate, training=True)
    x = tf.layers.dropout(x, rate=dropout_rate, training=True)
    return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)

def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
    for h in hidden_sizes[:-1]:
        x = tf.layers.dense(x, units=h, activation=activation)
    return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)

def get_vars(scope):
    return [x for x in tf.global_variables() if scope in x.name]

def count_vars(scope):
    v = get_vars(scope)
    return sum([np.prod(var.shape.as_list()) for var in v])
tensorflow.layers.dense
3,134
from tensorflow.python.framework import ops

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    # For SparseTensor, calculate separate count for each row.
    if isinstance(labels, (ops.SparseTensor, ops.SparseTensorValue)):
      labels_sizes = set_ops.set_size(labels)
      return math_ops.minimum(labels_sizes, k, name=scope)
tensorflow.python.framework.ops.name_scope
3,135
import tensorflow as tf

  """
  with tf.name_scope(name):
    weight_l1_t = tf.convert_to_tensor(weight_l1, dtype=var.dtype.base_dtype, name='weight_l1')
    weight_l2_t = tf.convert_to_tensor(weight_l2, dtype=var.dtype.base_dtype, name='weight_l2')

    reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(var)), name='value_l1')
    reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(var), name='value_l2')
    return tf.add(reg_l1, reg_l2, name='value')


def l1_regularizer(scale, name='l1_regularizer'):
  """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.
tensorflow.add
3,136
import tensorflow as tf

    b = tf.get_variable("b", [n_state], initializer=tf.constant_initializer(0))
    return _norm(x, g, b, axis=axis)

def dropout(x, pdrop, train):
    if train and pdrop > 0:
        x = tf.nn.dropout(x, 1-pdrop)
    return x

def mask_attn_weights(w):
    n = shape_list(w)[-1]
    b = tf.matrix_band_part(tf.ones([n, n]), -1, 0)
    b = tf.reshape(b, [1, 1, n, n])
    w = w*b + -1e9*(1-b)
    return w

def _attn(q, k, v, train=False, scale=False):
    # w = [-1, head, n_ctx, n_ctx]
    w = tf.matmul(q, k)

    if scale:
        n_state = shape_list(v)[-1]
        w = w*tf.rsqrt(tf.cast(n_state, tf.float32))
tensorflow.reshape
3,137
import tensorflow as tf

            if self.train_data['ties'] == 'breslow':
                s = cumsum_hazard_ratio[trisk[-1]]
                logL += tf.log(s) * d
            elif self.train_data['ties'] == 'efron':
                s = cumsum_hazard_ratio[trisk[-1]]
                r = cumsum_hazard_ratio[tfail[-1]] - (0 if tfail[0] == 0 else cumsum_hazard_ratio[tfail[0]-1])
                for j in range(d):
                    logL += tf.log(s - j * r / d)
            else:
                raise NotImplementedError('tie breaking method not recognized')
        # negative average log-likelihood
        observations = tf.reduce_sum(y_true)
        return logL / observations

    def _Metrics_CI(self, label_true, y_pred):
        """
        Compute the concordance-index value.

        Parameters:
            label_true: dict, like {'e': event, 't': time}, Observation and Time in survival analysis.
            y_pred: np.array, predictive proportional risk of network.

        Returns:
tensorflow.reduce_sum
3,138
from tensorflow.contrib.framework import deprecated_args

  in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
  return streaming_mean(in_top_k,
                        _mask_weights(ignore_mask, weights),
                        metrics_collections,
                        updates_collections,
                        name or _at_k_name('recall', k))


# TODO(ptucker): Validate range of values in labels?
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_sparse_recall_at_k(predictions,
                                 labels,
                                 k,
                                 class_id=None,
                                 ignore_mask=None,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
tensorflow.contrib.framework.deprecated_args
3,139
import tensorflow as tf

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5)
        summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        # Training and validation
tensorflow.global_variables_initializer
3,140
import tensorflow as tf

    "initialize the variables"
    sess.run(tf.global_variables_initializer())

    raw_data_yp = np.insert(raw_data_y, 0, 0, axis=0)[:-1]
    val_data_yp = np.insert(val_data_y, 0, 0, axis=0)[:-1]

    "see the trainable variables"
    # print("The trainable variables are:")
    variable_names = [v.name for v in tf.trainable_variables()]
    variable_shapes = [v.get_shape() for v in tf.trainable_variables()]
    parameter_num = 0
    for name, shape in zip(variable_names, variable_shapes):
        # print('{}\nShape: {}'.format(name, shape))
        parameter_num += shape[0]*shape[1] if np.size(shape) > 1 else shape[0]

    "train the graph"
    training_losses = []
tensorflow.trainable_variables
3,141
import tensorflow as tf

    in_channels = x.get_shape()[-1].value

    with tf.variable_scope(layer_name):
tensorflow.variable_scope
3,142
import tensorflow as tf

  def testFProp(self):
    with self.session(use_gpu=False):
      tf.set_random_seed(93820985)
      p = self._testParams()
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      tf.global_variables_initializer().run()
      test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())
      actual_var_names = [_.name for _ in tf.all_variables()]
      print('all vars \n', '\n'.join(actual_var_names))
      expected_var_names = [
          'global_step:0',
          'test_mdl/enc/conv_L0/w/var:0',
          'test_mdl/enc/conv_L0/beta/var:0',
          'test_mdl/enc/conv_L0/gamma/var:0',
          'test_mdl/enc/conv_L0/moving_mean/var:0',
          'test_mdl/enc/conv_L0/moving_variance/var:0',
          'test_mdl/enc/conv_L1/w/var:0',
          'test_mdl/enc/conv_L1/beta/var:0',
          'test_mdl/enc/conv_L1/gamma/var:0',
          'test_mdl/enc/conv_L1/moving_mean/var:0',
tensorflow.all_variables
3,143
import tensorflow as tf

    saver = tf.train.Saver()

    #-----------------Initialize the cost and gradients---------------------------------------------------------
    costs = []  # store the cost for different epochs
    cost = obj([an, hn], betan, Y, activation)

    #-----------------Initialize the optimizer-----------------------------------------------------------------
    # Implement an exponential learning rate decay every 1000 epochs
    # Implement a dynamical learning rate
    global_step = tf.Variable(0., trainable=False)
    rate = tf.train.exponential_decay(starter_learning, global_step, 500, 0.9)  # exponential learning rate decay
    # rate = starter_learning

    tvars = tf.trainable_variables()  # list of trainable variables
    Npar = flatten(tvars).get_shape()[1]  # total number of parameters in the network
    print('there are:', Npar, 'parameters in the network')

    optimizer = tf.train.AdamOptimizer(learning_rate=rate)  # initialize Adam optimizer
    grads_var = optimizer.compute_gradients(cost, tvars)  # get gradients layer by layer; note that this function returns the pair (grads, var)
    grads = [grads_var[i][0] for i in range(len(grads_var))]  # extract the gradients

    min = optimizer.apply_gradients(grads_and_vars=grads_var, global_step=global_step)  # apply the gradients to look for critical points

    gradients_and_par = []  # store gradients and training parameters for different epochs
    hessians = []           # store the hessian for different epochs
    residuals = []          # store the value of the residuals for different epochs
tensorflow.trainable_variables
3,144
import tensorflow as tf

        tmp = np.float32(np.loadtxt('../../Utilities/IRK_weights/Butcher_IRK%d.txt' % (q), ndmin=2))
        weights = np.reshape(tmp[0:q**2+q], (q+1, q))
        self.IRK_alpha = weights[0:-1, :]
        self.IRK_beta = weights[-1:, :]
        self.IRK_times = tmp[q**2+q:]

        # tf placeholders and graph
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                                     log_device_placement=True))

        self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))
        self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))
        self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))
        self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))
        self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q))  # dummy variable for fwd_gradients
        self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q))  # dummy variable for fwd_gradients

        self.U0_pred = self.net_U0(self.x0_tf)  # N0 x q
        self.U1_pred = self.net_U1(self.x1_tf)  # N1 x q

        self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \
                    tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred))

        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
                                                                method='L-BFGS-B',
                                                                options={'maxiter': 50000,
tensorflow.placeholder
3,145
import tensorflow as tf

                                                   lambda: tf.shape(x)))

    x = tf.placeholder(dtype=tf.int32, shape=[1])
    # None would fire an exception were it actually executed.
    self.assertFalse(normal._is_scalar_helper(x.get_shape, lambda: None))
    self.assertFalse(normal._is_scalar_helper(lambda: tf.TensorShape(None),
                                              lambda: tf.shape(x)))

    # Test case 3.
    x = tf.placeholder(dtype=tf.int32)
    is_scalar = normal._is_scalar_helper(x.get_shape, lambda: tf.shape(x))
    self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
    self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))


if __name__ == '__main__':
  tf.test.main()
tensorflow.placeholder
3,146
import tensorflow as tf

        self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
        self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)
        self.dropout = tf.placeholder(tf.float32, name="dropout")
        self.global_step = tf.Variable(0, name="global_step", trainable=False)

        """
        :description: The embedding layer, question and passage share embeddings
tensorflow.Variable
3,147
import tensorflow as tf

            std = tf.sqrt(var + self.epsilon)
            return [tf.assign(self.g, 1/std), tf.assign(self.b, -1.*mu/std)]

        require_init = tf.reduce_any(tf.is_nan(self.g))
        init_ops = tf.cond(require_init, _init, lambda: [self.g, self.b])
tensorflow.is_nan
3,148
import tensorflow as tf

        optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
        r3det_gwd = build_whole_network.DetectionNetworkR3DetGWD(cfgs=self.cfgs,
                                                                 is_training=True)

        with tf.name_scope('get_batch'):
            if cfgs.IMAGE_PYRAMID:
                shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
                shortside_len = tf.random_shuffle(shortside_len_list)[0]
            else:
                shortside_len = cfgs.IMG_SHORT_SIDE_LEN
tensorflow.constant
3,149
import tensorflow as tf

        elif self.hparams.loss_func == 'click_weighted_log_loss':
            self.loss_func = self.click_weighted_log_loss
        elif self.hparams.loss_func == 'click_weighted_pairwise_loss':
            self.loss_func = self.click_weighted_pairwise_loss
        else:  # softmax loss without weighting
            self.loss_func = self.softmax_loss

        # Compute rank loss
        reshaped_train_labels = tf.transpose(tf.convert_to_tensor(train_labels))  # reshape from [rank_list_size, ?] to [?, rank_list_size]
        self.propensity_weights = self.get_normalized_weights(self.logits_to_prob(self.propensity))
        self.rank_loss = self.loss_func(train_output, reshaped_train_labels, self.propensity_weights)
        pw_list = tf.unstack(self.propensity_weights, axis=1)  # Compute propensity weights
        self.click_metrics = self.click_loglikelihood(reshaped_train_labels,
                                                      self.propensity, train_output)
        tf.summary.scalar('click_metrics', self.click_metrics, collections=['train'])
        for i in range(len(pw_list)):
tensorflow.convert_to_tensor
3,150
import tensorflow as tf

def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    shape = input_.get_shape().as_list()

    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias
tensorflow.constant_initializer
3,151
import tensorflow as tf

      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
tensorflow.to_int32
3,152
import tensorflow as tf

import os
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list

if not tf.gfile.Exists(FLAGS.checkpoint_path):
    tf.gfile.MkDir(FLAGS.checkpoint_path)
else:
    if not FLAGS.restore:
tensorflow.gfile.MkDir
3,153
import tensorflow as tf

            out += aux
        out = activation(out)
        if dropout > 0:
            out = tf.layers.dropout(out, rate=dropout, training=training)

    if sum(dim[2]) > 2:
        out = deconv2d(out, [2*dim[0], dim[1], dim[2]], scope="%s_conv_out" % scope,
                       training=training, ema=ema, init=init)
    else:
tensorflow.layers.dropout
3,154
import tensorflow as tf

        # should we be taking the mean elem-wise by batch? i think this is a big bug
        avg_neg_log_lhood = tf.reduce_sum(neg_log_lhoods) / tf.reduce_sum(target_weights)
tensorflow.reduce_sum
3,155
import tensorflow as tf

            inter_block_attn_output = tf.reduce_sum(self_attn_result * inter_block_soft, 2)  # bs,bn,vec

        with tf.variable_scope('self_attn_inter_block'):
            inter_block_attn_output_mask = tf.cast(tf.ones([bs, bn], tf.int32), tf.bool)
            block_ct_res = directional_attention_with_dense(
                inter_block_attn_output, inter_block_attn_output_mask, direction, 'disa',
                keep_prob, is_train, wd, activation
            )  # [bs,bn,vec]
            block_ct_res_tile = tf.tile(tf.expand_dims(block_ct_res, 2), [1, 1, bl, 1])  # [bs,bn,vec]->[bs,bn,bl,vec]

        with tf.variable_scope('combination'):
            # input: 1.rep_map[bs,bn,bl,vec]; 2.self_attn_result[bs,bn,bl,vec]; 3.rnn_res_tile[bs,bn,bl,vec]
            rep_tensor_with_ct = tf.concat([rep_map, self_attn_result, block_ct_res_tile], -1)  # [bs,bn,bl,3vec]
            new_context_and_gate = linear(rep_tensor_with_ct, 2 * ivec, True, 0.,
                                          'linear_new_context_and_gate', False, wd, keep_prob, is_train)  # [bs,bn,bl,2vec]
            new_context, gate = tf.split(new_context_and_gate, 2, 3)  # bs,bn,bl,vec
            if activation == "relu":
tensorflow.expand_dims
3,156
import tensorflow as tf

      class_predictions_with_background = ops.position_sensitive_crop_regions(
          class_feature_map,
          boxes=tf.reshape(proposal_boxes, [-1, self._box_code_size]),
          box_ind=get_box_indices(proposal_boxes),
          crop_size=self._crop_size,
          num_spatial_bins=self._num_spatial_bins,
          global_pool=True)
      class_predictions_with_background = tf.squeeze(
          class_predictions_with_background, squeeze_dims=[1, 2])
      class_predictions_with_background = tf.reshape(
          class_predictions_with_background,
          [batch_size * num_boxes, 1, total_classes])

    return {BOX_ENCODINGS: box_encodings,
            CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background}
tensorflow.reshape
3,157
import tensorflow as tf

                else self.config['eval_batch_size']
        shards = {d: tf.unstack(v, num=batch_size*self.n_gpus, axis=0)
                  for d, v in data.items()}
        shards = [{d: tf.stack(v[i::self.n_gpus]) for d, v in shards.items()}
                  for i in range(self.n_gpus)]
tensorflow.stack
3,158
import tensorflow as tf

        Args:
            params: dict, user passed parameters.

        Returns:
            List of tensors from base `Conv2D` layers.
        """
        with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
            # Get conv block layer properties.
            conv_block = params["discriminator_base_conv_blocks"][0]

            # The base conv block is always the 0th one.
            base_conv_layer_block = self.conv_layer_blocks[0]
tensorflow.variable_scope
3,159
import tensorflow as tf

      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
tensorflow.logging.info
3,160
import tensorflow as tf

    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      keep_checkpoint_max=20,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  model_fn = model_fn_builder(
      bert_config=bert_config,
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=FLAGS.num_train_steps,
tensorflow.contrib.tpu.TPUConfig
3,161
import tensorflow as tf

    def create_int_feature(values):
      f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
      return f

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature([feature.label_id])
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])

    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()


def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
tensorflow.train.Features
3,162
import tensorflow as tf

  Parameters
  ----------
  test: tf.Tensor
    Of shape (n_test, n_feat)
  support: tf.Tensor
    Of shape (n_support, n_feat)

  Returns
  -------
  tf.Tensor:
    Of shape (n_test, n_support)
  """
  rnorm_test = tf.rsqrt(
      tf.reduce_sum(tf.square(test), 1, keep_dims=True)) + 1e-7
  rnorm_support = tf.rsqrt(
      tf.reduce_sum(tf.square(support), 1, keep_dims=True)) + 1e-7
  test_normalized = test * rnorm_test
  support_normalized = support * rnorm_support

  # Transpose for mul
  support_normalized_t = tf.transpose(support_normalized, perm=[1, 0])
  g = tf.matmul(test_normalized, support_normalized_t)  # Gram matrix
  return g


def elu(x, alpha=1.):
  """Exponential linear unit.

  Parameters
  ----------
tensorflow.square
3,163
import tensorflow as tf

    # casting to float on GPU ensures lower data transfer times.
    if lander:
      obs_t_float = self.obs_t_ph
      obs_tp1_float = self.obs_tp1_ph
    else:
      obs_t_float = tf.cast(self.obs_t_ph, tf.float32) / 255.0
      obs_tp1_float = tf.cast(self.obs_tp1_ph, tf.float32) / 255.0

    # Here, you should fill in your own code to compute the Bellman error. This requires
    # evaluating the current and next Q-values and constructing the corresponding error.
    # TensorFlow will differentiate this error for you, you just need to pass it to the
    # optimizer. See assignment text for details.
    # Your code should produce one scalar-valued tensor: total_error
tensorflow.cast
3,164
import tensorflow as tf

        alphas = tf.transpose(tf.stack(alpha_list), (1, 0, 2))  # (N, T, L)
tensorflow.stack
3,165
import tensorflow as tf

    if use_tpu:

      def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

      scaffold_fn = tpu_scaffold
    else:
      tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

  output_spec = None
  if mode == tf.estimator.ModeKeys.TRAIN:
tensorflow.logging.info
3,166
import tensorflow as tf

def get_test_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_valid_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
            self.w1 = tf.get_variable('w1', [4096, 2048], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w2 = tf.get_variable('w2', [2048, 3072], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w3 = tf.get_variable('w3', [3072, 512], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w4 = tf.get_variable('w4', [512, classnum], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.b1 = tf.get_variable('b1', [2048], initializer=tf.constant_initializer(0.0))
            self.b2 = tf.get_variable('b2', [3072], initializer=tf.constant_initializer(0.0))
            self.b3 = tf.get_variable('b3', [512], initializer=tf.constant_initializer(0.0))
            self.b4 = tf.get_variable('b4', [classnum], initializer=tf.constant_initializer(0.0))

    def inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2) + self.b2
tensorflow.contrib.layers.xavier_initializer_conv2d
3,167
import tensorflow as tf

    :param white: bool
    :return: N x R or R x N x N
    """
    logger.debug("base conditional")
    # compute kernel stuff
    num_func = tf.shape(f)[1]  # R
    Lm = tf.cholesky(Kmm)

    # Compute the projection matrix A
    A = tf.matrix_triangular_solve(Lm, Kmn, lower=True)
tensorflow.shape
3,168
import tensorflow as tf

from lottery_ticket.foundations import model_fc
from lottery_ticket.foundations import paths
from lottery_ticket.foundations import pruning
from lottery_ticket.foundations import save_restore
from lottery_ticket.foundations import trainer
from lottery_ticket.foundations.experiment_base import ExperimentBase
from lottery_ticket.mnist_fc import constants


class Experiment(ExperimentBase):
  def __init__(self, trial):
    self.output_dir = paths.trial(
        paths.experiment(constants.EXPERIMENT_PATH, 'big_two_layer'), trial)

  def train_once(self, iteration, presets=None, masks=None):
    tf.reset_default_graph()
    sess = tf.Session()
    dataset = dataset_mnist.DatasetMnist(
        constants.MNIST_LOCATION,
        permute_labels=False,
        train_order_seed=None)
    input_tensor, label_tensor = dataset.placeholders
    hyperparameters = {'layers': [(1000, tf.nn.relu), (500, tf.nn.relu), (10, None)]}
    model = model_fc.ModelFc(hyperparameters, input_tensor, label_tensor,
                             presets=presets, masks=masks)
    params = {
        'test_interval': 100,
        'save_summaries': True,
        'save_network': True,
    }
    return trainer.train(
tensorflow.Session
3,169
import tensorflow as tf

def batched_matrix_vector_multiply(vector, matrix):
    """ computes x^T A in mini-batches. """
    vector_batch_as_matricies = tf.expand_dims(vector, [1])
    mult_result = tf.matmul(vector_batch_as_matricies, matrix)
    squeezed_result = tf.squeeze(mult_result, [1])
    return squeezed_result

def euclidean_loss_layer(a, b, multiplier=100.0, use_l1=False, eps=0.01):
    """ Math: out = (action - mlp_out)'*precision*(action-mlp_out)
              = (u-uhat)'*A*(u-uhat)"""
    multiplier = tf.constant(multiplier, dtype='float')  # for bc #10000
    uP = a*multiplier - b*multiplier
    if use_l1:
        return tf.reduce_mean(eps*tf.square(uP) + tf.abs(uP))
    return tf.reduce_mean(tf.square(uP))

def conv2d(img, w, b, strides=[1, 1, 1, 1], is_dilated=False):
    if is_dilated:
        layer = tf.nn.atrous_conv2d(img, w, rate=2, padding='SAME') + b
    else:
        layer = tf.nn.conv2d(img, w, strides=strides, padding='SAME') + b
    return layer

def dropout(layer, keep_prob=0.9, is_training=True, name=None, selu=False):
    if selu:
        return dropout_selu(layer, 1.0 - keep_prob, name=name, training=is_training)
    if is_training:
        return tf.nn.dropout(layer, keep_prob=keep_prob, name=name)
    else:
tensorflow.square
3,170
import tensorflow as tf

    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
        bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
tensorflow.constant_initializer
3,171
from tensorflow.python.framework import ops

  Args:
    x: A `Tensor` of type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.is_integer:
    return x
  else:
    return floor(x + 0.5, name=name)
tensorflow.python.framework.ops.convert_to_tensor
3,172
import tensorflow as tf

      shape=[],
      dtype=tf.int64,
      initializer=tf.initializers.zeros(),
      trainable=False,
      collections=[tf.GraphKeys.GLOBAL_VARIABLES])
  return global_step


def get_src_train_op(loss):  # pylint: disable=unused-argument
  """Returns the source training op."""
  global_step = tf.train.get_global_step()
  src_learning_rate = FLAGS.learning_rate
  src_learning_rate = tf.train.piecewise_constant(
      global_step,
      [800,],
      [FLAGS.learning_rate, FLAGS.learning_rate * 0.1])
  optimizer = tf.train.MomentumOptimizer(
      learning_rate=src_learning_rate,
      momentum=0.9,
      use_nesterov=True
  )
tensorflow.train.get_global_step
3,173
import tensorflow as tf

        #     dtype=tf.float32,
        #     initializer=tf.contrib.layers.xavier_initializer(),
        #     regularizer=tf.contrib.layers.l2_regularizer(self.params["regularizer_rate"]),
        #     trainable=True
        # )
        embeddings = tf.nn.embedding_lookup(embedding_variable, word_ids)
        return embeddings

    def dropout_layer(self, data):
        training = self.mode == tf.estimator.ModeKeys.TRAIN
tensorflow.nn.embedding_lookup
3,174
from tensorflow.contrib.learn.python.learn import session_run_hook

    monitor_fetches.extend(monitor_requests)
    if monitor_fetches:
      request["monitors"] = dict(
          zip(monitor_fetches,
              [_as_graph_element(f) for f in monitor_fetches]))

    return session_run_hook.SessionRunArgs(request)

  def after_run(self, run_context, run_values):
    result = run_values.results[
        "monitors"] if "monitors" in run_values.results else {}
tensorflow.contrib.learn.python.learn.session_run_hook.SessionRunArgs
3,175
import tensorflow as tf

    # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
    facts = tf.concat(facts, 2)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])

    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
    d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
    output = d_layer_2_all
    return output
tensorflow.shape
3,176
import tensorflow as tf

      if reduce_fn is None:
        scalar = args[i][0]
      else:
        scalar = reduce_fn(args[i])
      with tf.contrib.summary.record_summaries_every_n_global_steps(
          100, global_step=step):
        tf.contrib.summary.scalar(prefix + name, scalar, step=step)
tensorflow.contrib.summary.record_summaries_every_n_global_steps
3,177
import tensorflow as tf

                        initializer=tf.constant_initializer(0.0))
    b = tf.get_variable(name='bias', shape=[out_nodes],
                        initializer=tf.constant_initializer(0.0))
    # batch?
    flat_x = tf.reshape(x, [-1, size])
    x = tf.nn.bias_add(tf.matmul(flat_x, w), b)
    x = tf.nn.relu(x)
    return x

def lstm():
    '''
    Build LSTM cell
    '''
tensorflow.nn.relu
3,178
import tensorflow as tf

  # For training, we want a lot of parallel reading and shuffling.
  # For eval, we want no shuffling and parallel reading doesn't matter.
  if is_training:
    d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
    d = d.repeat()
    d = d.shuffle(buffer_size=len(input_files))
tensorflow.constant
3,179
import tensorflow as tf

    return res + bias_term


def _clip_and_normalize(word_probs, epsilon):
    '''
    word_probs: 1D tensor of [vsize]
    '''
    word_probs = tf.clip_by_value(word_probs, epsilon, 1.0 - epsilon)
    # scale preds so that the class probas of each sample sum to 1
    return word_probs / tf.reduce_sum(word_probs, axis=-1, keep_dims=True)

def CE_loss(word_probs, answers, loss_weights):
    '''
    word_probs: [batch_size, max_dec_steps, vocab]
    answers: [batch_size, max_dec_steps]
tensorflow.reduce_sum
3,180
import tensorflow as tf

        pairwise_improvement_bool = tf.cast(pairwise_improvement > 0, pairwise_improvement.dtype)
        self.pairwise_improvement_bool = pairwise_improvement_bool

        metrics.append(tf.summary.scalar('training/avg_dist', mean_dist))
        metrics.append(tf.summary.scalar('training/pred_dist', mean_pred_error))
        metrics.append(tf.summary.scalar('training/improvement', improvement))
        metrics.append(tf.summary.scalar('training/improvement_abs', tf.nn.relu(improvement)))
        metrics.append(tf.summary.histogram('training/improvement_abs_hist', nut.nan_to_zero(improvement)))
tensorflow.summary.scalar
3,181
import tensorflow as tf

      @dynamic_batching.batch_fn
      def f(_):
        return tf.constant(1)

      output = f(tf.constant([1]))

      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(coord=coord)

      with self.assertRaises(tf.errors.CancelledError):
        session.run(output)

    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 'Output shape must have a batch dimension'):
tensorflow.train.start_queue_runners
3,182
import tensorflow as tf

      self.assertAllClose(np.asarray([4.828314, 4.828314]), res)

  def testModelWithBucketsScopeAndLoss(self):
    """Test that variable scope reuse is not reset after model_with_buckets."""
    classes = 10
    buckets = [(4, 4), (8, 8)]

    with self.test_session():
      # Here comes a sample Seq2Seq model using GRU cells.
      def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
        """Example sequence-to-sequence model that uses GRU cells."""
        def GRUSeq2Seq(enc_inp, dec_inp):
          cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
                                             state_is_tuple=True)
          return tf.nn.seq2seq.embedding_attention_seq2seq(
              enc_inp, dec_inp, cell, num_encoder_symbols=classes,
              num_decoder_symbols=classes, embedding_size=24)
        targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
        return tf.nn.seq2seq.model_with_buckets(
            enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
            per_example_loss=per_example_loss)

      # Now we construct the copy model.
      inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
tensorflow.nn.rnn_cell.GRUCell
3,183
import tensorflow as tf

                       'predicting a single box per class per location.')

    batch_size = tf.shape(proposal_boxes)[0]
    num_boxes = tf.shape(proposal_boxes)[1]

    def get_box_indices(proposals):
      proposals_shape = proposals.get_shape().as_list()
      if any(dim is None for dim in proposals_shape):
        proposals_shape = tf.shape(proposals)
      ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
      multiplier = tf.expand_dims(
          tf.range(start=0, limit=proposals_shape[0]), 1)
      return tf.reshape(ones_mat * multiplier, [-1])

    net = image_features
    with slim.arg_scope(self._conv_hyperparams):
      net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth')
tensorflow.ones
3,184
import tensorflow as tf

sess_soft = tf.Session(config=config)

# 4. When using a CPU, TensorFlow by default occupies most of the CPU memory. While that is
#    often what we want, we can also allocate GPU memory more carefully. When TensorFlow holds
#    on to GPU memory, we can, if necessary, set the GPU memory growth option so that the GPU
#    allocation grows slowly toward the maximum limit.
config.gpu_options.allow_growth = True
sess_grow = tf.Session(config=config)

# 5. To hard-limit the percentage of GPU memory TensorFlow may use, set
#    per_process_gpu_memory_fraction via the config.
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)

# 6. Sometimes we want the code to be robust enough to decide how many GPUs it is appropriate
#    to run on. TensorFlow has a built-in function to detect this. It is useful when we want
#    the code to exploit GPU compute power when GPU memory allows, and to assign specific
#    operations to the GPU.
if tf.test.is_built_with_cuda():
    pass

# 7. We may want to assign specific operations to the GPU. The sample code below performs some
#    simple computations and assigns them to the main CPU and two secondary GPUs.
with tf.device('/cpu:0'):
    a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
    b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])

    with tf.device('/gpu:0'):
        c = tf.matmul(a, b)
        c = tf.reshape(c, [-1])

    with tf.device('/gpu:1'):
        d = tf.matmul(b, a)
        flat_d = tf.reshape(d, [-1])

    combined = tf.multiply(c, flat_d)
print(sess.run(combined))
tensorflow.device
3,185
import tensorflow as tf

          num_decoder_symbols=5, embedding_size=2,
          output_projection=(w, b))
      sess.run([tf.global_variables_initializer()])
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 2), res[0].shape)

      # Test that previous-feeding model ignores inputs after the first.
      dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
      with tf.variable_scope("other"):
        d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
            enc_inp, dec_inp2, cell, num_encoder_symbols=2,
            num_decoder_symbols=5, embedding_size=2,
            feed_previous=tf.constant(True))
      sess.run([tf.global_variables_initializer()])
      tf.get_variable_scope().reuse_variables()
      d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
          enc_inp, dec_inp, cell, num_encoder_symbols=2,
          num_decoder_symbols=5, embedding_size=2, feed_previous=True)
      d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
          enc_inp, dec_inp2, cell, num_encoder_symbols=2,
          num_decoder_symbols=5, embedding_size=2, feed_previous=True)
      res1 = sess.run(d1)
      res2 = sess.run(d2)
      res3 = sess.run(d3)
tensorflow.constant
3,186
import tensorflow as tf

            last_backward = encoder_outputs_[:, 0, cell_output_size:]
            indices = tf.stack([tf.range(batch_size), encoder_input_length_ - 1], axis=1)
            last_forward = tf.gather_nd(encoder_outputs_[:, :, :cell_output_size], indices)
            last_forward.set_shape([None, cell_output_size])

            if encoder.final_state == 'concat_last':  # concats last states of all backward layers (full LSTM states)
                encoder_state_ = tf.concat(encoder_states_, axis=1)
            elif encoder.final_state == 'average':
                mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32)
                mask = tf.expand_dims(mask, axis=2)
                encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1)
            elif encoder.final_state == 'average_inputs':
                mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32)
                mask = tf.expand_dims(mask, axis=2)
                encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1)
            elif encoder.bidir and encoder.final_state == 'last_both':
                encoder_state_ = tf.concat([last_forward, last_backward], axis=1)
tensorflow.shape
3,187
import tensorflow as tf

    with tf.variable_scope("discriminator_z_projection", reuse=tf.AUTO_REUSE) as scope:
      k1 = tf.get_variable("kernel1", [dim, dim * 4])
      k2 = tf.get_variable("kernel2", [dim * 4, dim])
      z_proj = tf.matmul(tf.nn.leaky_relu(tf.matmul(latents, k1), name=scope.name), k2)
      z_proj = z_proj / tf.reshape(tf.norm(z_proj, ord=2, axis=-1), [bs, 1])
      return z_proj

  def create_loss(self, features, labels, params, is_training=True):
tensorflow.norm
3,188
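The projection above divides each row by its L2 norm. A minimal sketch of the same normalization with tf.norm, using keepdims instead of the explicit tf.reshape (older 1.x releases spell the argument keep_dims):

import tensorflow as tf

z = tf.random_normal([4, 8])
# Row-wise L2 norms, kept as a [4, 1] column so the division broadcasts;
# the epsilon guards against a zero-norm row.
norms = tf.norm(z, ord=2, axis=-1, keepdims=True)
z_unit = z / (norms + 1e-12)

with tf.Session() as sess:
    print(sess.run(tf.norm(z_unit, axis=-1)))  # all ~1.0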
import tensorflow as tf

initialization:
    Orthogonal weights: tf.initializers.orthogonal()
    Xavier:             tf.contrib.layers.xavier_initializer(seed=1)
'''
tf.set_random_seed(1)  # fixes the seed of the random number generator
parameters = {}
L = len(layers)  # number of layers in the network
first = activation[0]  # activation of the first layer
if first == 'esp':
    train = True
    init = tf.random_normal_initializer(stddev=stbeta)
    # init = tf.ones_initializer()
else:
    train = False
    init = tf.ones_initializer()
for l in range(1, L):
    parameters['w' + str(l)] = tf.get_variable(
        'w' + str(l), [layers[l], layers[l - 1]], dtype='float64',
        initializer=tf.contrib.layers.xavier_initializer(seed=1))
    parameters['b' + str(l)] = tf.get_variable(
        'b' + str(l), [layers[l], 1], dtype='float64',
        initializer=tf.zeros_initializer())
    parameters['beta' + str(l)] = tf.get_variable(
        'beta' + str(l), [layers[l], 1], dtype='float64',
        initializer=init, trainable=train)
    assert parameters['w' + str(l)].shape == (layers[l], layers[l - 1])
tensorflow.random_normal_initializer
3,189
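A standalone sketch of the two initializers named in the docstring above, plus a frozen (non-trainable) parameter like the beta variables; the variable names and shapes are illustrative:

import tensorflow as tf

w_xavier = tf.get_variable(
    'w_xavier', [4, 3], dtype='float64',
    initializer=tf.contrib.layers.xavier_initializer(seed=1))
w_ortho = tf.get_variable(
    'w_ortho', [4, 3], dtype='float64',
    initializer=tf.initializers.orthogonal())
beta = tf.get_variable(
    'beta', [4, 1], dtype='float64',
    initializer=tf.ones_initializer(), trainable=False)  # excluded from gradients

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w_ortho))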
import tensorflow as tf output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1)
tensorflow.nn.dropout
3,190
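The record above is the usual softmax classifier head. A hedged, self-contained sketch completing it with the standard negative-log-likelihood loss (batch size, hidden size, and label count are made up):

import tensorflow as tf

batch, hidden, num_labels = 8, 16, 3
output_layer = tf.random_normal([batch, hidden])
labels = tf.constant([0, 1, 2, 0, 1, 2, 0, 1])

output_weights = tf.get_variable('output_weights', [num_labels, hidden])
output_bias = tf.get_variable('output_bias', [num_labels],
                              initializer=tf.zeros_initializer())

logits = tf.nn.bias_add(
    tf.matmul(output_layer, output_weights, transpose_b=True), output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)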
import tensorflow as tf bounds = [] for v in variables: key = v.name[:v.name.find(':')] lower, upper = constraint[key] bounds.append((lower, upper)) max_steps = 1000 status_every = 1 # Create an optimizer with the desired parameters. opt = tf.contrib.opt.ScipyOptimizerInterface(nll, options={'maxiter': max_steps, # 'disp': True, # 'tol': 1e-20, 'maxls': 10, }, # inequalities=inequalities, # method='SLSQP' # supports inequalities # method='BFGS', bounds=bounds, var_list=variables, # supply with bounds to match order! tol=1e-14,
tensorflow.contrib.opt.ScipyOptimizerInterface
3,191
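A minimal end-to-end sketch of ScipyOptimizerInterface on a toy loss. Note that var_to_bounds is the documented way to pass per-variable box constraints; the snippet above instead forwards a raw `bounds` list through to SciPy.

import tensorflow as tf

x = tf.Variable(5.0)
loss = tf.square(x - 1.0)

opt = tf.contrib.opt.ScipyOptimizerInterface(
    loss, method='L-BFGS-B', var_to_bounds={x: (0.0, 10.0)})

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    opt.minimize(sess)   # runs SciPy, then writes the result back into x
    print(sess.run(x))   # ~1.0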
import tensorflow as tf # bce_loss_list = [] # for pred_ind in list(range(len(pred_outputs))): # bce_loss_list.append(tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred_outputs[pred_ind], labels=targets_list[pred_ind]/255., name='loss_{}'.format(pred_ind)), name='loss_mean_{}'.format(pred_ind))) # mse_loss = tf.multiply(params['mse_weight'] / params['num_stacks'], tf.add_n(bce_loss_list), name='mse_loss') # tf.summary.scalar('mse', mse_loss) # tf.losses.add_loss(mse_loss) # Add weight decay to the loss. We exclude the batch norm variables because # doing so leads to a small improvement in accuracy. loss = mse_loss + params['weight_decay'] * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name]) total_loss = tf.identity(loss, name='total_loss') tf.summary.scalar('loss', total_loss) if mode == tf.estimator.ModeKeys.EVAL: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, predictions=predictions, eval_metric_ops=metrics) if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_or_create_global_step()
tensorflow.trainable_variables
3,192
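A small self-contained sketch of the weight-decay pattern above: sum the L2 norms of the trainable variables while skipping batch-norm parameters, then add the penalty to the base loss. The toy graph is an assumption for illustration.

import tensorflow as tf

x = tf.random_normal([4, 8])
h = tf.layers.dense(x, 8, name='dense')
h = tf.layers.batch_normalization(h, training=True)
base_loss = tf.reduce_mean(tf.square(h))

weight_decay = 1e-4
# Exclude batch-norm betas/gammas from the L2 penalty, as in the record above.
l2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()
               if 'batch_normalization' not in v.name])
total_loss = tf.identity(base_loss + weight_decay * l2, name='total_loss')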
from tensorflow.contrib.learn.python.learn.io import data_feeder def _get_input_fn(x, y, batch_size): # TODO(ipoloshukin): Remove this when refactor of data_feeder is done if hasattr(x, 'create_graph') and hasattr(y, 'create_graph'): def input_fn(): return x.create_graph(), y.create_graph() return input_fn, None df = data_feeder.setup_train_data_feeder(x, y, n_classes=None, batch_size=batch_size) return df.input_builder, df.get_feed_dict_fn() def _get_predict_input_fn(x, batch_size): # TODO(ipoloshukin): Remove this when refactor of data_feeder is done
tensorflow.contrib.learn.python.learn.io.data_feeder.setup_train_data_feeder
3,193
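A hypothetical stand-in for the contrib data_feeder (numpy_input_fn here is our own helper, not a library API): the same closure pattern as the record above, wrapping in-memory arrays into an input_fn that rebuilds constant tensors at graph-construction time.

import numpy as np
import tensorflow as tf

def numpy_input_fn(x, y):
    def input_fn():
        return tf.constant(x), tf.constant(y)
    return input_fn

input_fn = numpy_input_fn(np.arange(4, dtype=np.float32),
                          np.arange(4, dtype=np.float32) * 2.0)
features, labels = input_fn()
with tf.Session() as sess:
    print(sess.run([features, labels]))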
import tensorflow as tf

if use_scale:
    # 'channnel' in the original was a typo for 'channel'
    gamma = tf.get_variable('gamma', [channel],
                            initializer=tf.constant_initializer(1.0))
    gamma = tf.reshape(gamma, new_shape)
else:
    pass  # branch truncated in the original excerpt
tensorflow.reshape
3,194
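The reshape in the record above turns a per-channel vector into a shape that broadcasts over the other axes. A concrete NHWC sketch (shapes are illustrative):

import tensorflow as tf

x = tf.random_normal([2, 4, 4, 3])         # NHWC, 3 channels
channel = x.get_shape().as_list()[-1]
new_shape = [1, 1, 1, channel]             # broadcastable over N, H, W

gamma = tf.get_variable('gamma', [channel],
                        initializer=tf.constant_initializer(1.0))
scaled = x * tf.reshape(gamma, new_shape)  # per-channel scaling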
import tensorflow as tf """ with tf.variable_scope(scope, reuse=reuse): observations_ph = make_obs_ph("observation") stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) q_values = q_func(observations_ph.get(), num_actions, scope="q_func") deterministic_actions = tf.argmax(q_values, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) def act(ob, stochastic=True, update_eps=-1): return _act(ob, stochastic, update_eps) return act
tensorflow.stack
3,195
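The core of the act function above is epsilon-greedy action selection. A stripped-down sketch with fixed epsilon and random Q-values standing in for the real q_func:

import tensorflow as tf

num_actions, batch_size, eps = 4, 3, 0.1
q_values = tf.random_normal([batch_size, num_actions])

deterministic = tf.argmax(q_values, axis=1)
random_actions = tf.random_uniform([batch_size], minval=0,
                                   maxval=num_actions, dtype=tf.int64)
# With probability eps per example, take the random action instead.
chose_random = tf.random_uniform([batch_size], 0, 1) < eps
actions = tf.where(chose_random, random_actions, deterministic)

with tf.Session() as sess:
    print(sess.run(actions))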
import tensorflow as tf """ with tf.variable_scope('_interpolate'):
tensorflow.variable_scope
3,196
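A tiny sketch of what variable_scope buys you in this style of code: the same variable can be fetched again by scope and name with reuse=True.

import tensorflow as tf

with tf.variable_scope('_interpolate'):
    v = tf.get_variable('v', [1], initializer=tf.zeros_initializer())
with tf.variable_scope('_interpolate', reuse=True):
    v_again = tf.get_variable('v', [1])
assert v is v_again  # same underlying variable, resolved by scope + name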
import tensorflow as tf self.features = features self.labels = labels self.mode = mode self.params = params def input_layer(self): # data = np.loadtxt(self.params['vocab'], dtype=np.unicode, encoding=None) data = self.params["vocab_data"] mapping_strings = tf.Variable(data) vocab_words = tf.contrib.lookup.index_table_from_tensor( mapping_strings, num_oov_buckets=1 ) # Word Embeddings words = tf.identity(self.features["words"], name="input_words") word_ids = vocab_words.lookup(words) # # raw_nwords = tf.identity(features['words_len'], name='input_words_len') # nwords = tf.feature_column.input_layer({'words_len': raw_nwords}, params['words_len_feature_columns']) # nwords = tf.reshape(nwords, [-1]) # nwords = tf.to_int32(nwords) # words = features['words'] # words = tf.convert_to_tensor(words) # # nwords = features['words_len'] # nwords = tf.convert_to_tensor(nwords)
tensorflow.identity
3,197
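A self-contained sketch of the vocabulary lookup in the record above; lookup tables need tf.tables_initializer() before use, and out-of-vocabulary words fall into the extra OOV bucket. The vocabulary and inputs here are made up.

import tensorflow as tf

vocab = tf.constant(['the', 'cat', 'sat'])
table = tf.contrib.lookup.index_table_from_tensor(vocab, num_oov_buckets=1)
words = tf.identity(tf.constant([['the', 'dog', 'sat']]), name='input_words')
word_ids = table.lookup(words)

with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(word_ids))  # [[0 3 2]]; 'dog' lands in the OOV bucket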
import tensorflow as tf param_eta = tf.placeholder(dtype=tf.float32, shape=[], name="param_eta") param_omega = tf.placeholder(dtype=tf.float32, shape=[], name="param_omega") old_entropy = tf.placeholder(dtype=tf.float32, shape=[], name="old_entropy") varphis = tf.placeholder(dtype=tf.float32, shape=[None, None], name="varphis") Kt = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Kt") prec = tf.placeholder(dtype=tf.float32, shape=[None, None], name="prec") Waa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Waa") Wsa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Wsa") wa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="wa") # varphis = ext.new_tensor( # 'varphis', # ndim=2,
tensorflow.placeholder
3,198
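Placeholders like those above are filled in at run time via feed_dict; a scalar shape [] and a rank-2 shape [None, None] accept a Python float and any 2-D array respectively:

import tensorflow as tf

param_eta = tf.placeholder(dtype=tf.float32, shape=[], name='param_eta')
varphis = tf.placeholder(dtype=tf.float32, shape=[None, None], name='varphis')
scaled = param_eta * tf.reduce_sum(varphis)

with tf.Session() as sess:
    print(sess.run(scaled, feed_dict={param_eta: 0.5,
                                      varphis: [[1.0, 2.0], [3.0, 4.0]]}))  # 5.0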
import tensorflow as tf alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1-alpha_t) losses = tf.reduce_sum(-alpha_t * tf.pow(1. - predictions_pt, gamma) * tf.log(predictions_pt+epsilon),
tensorflow.pow
3,199
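The record above is cut off mid-call. A self-contained sketch of the standard focal loss it appears to implement; the sigmoid activation and the final reduction axis are assumptions, since they are not visible in the excerpt.

import tensorflow as tf

def focal_loss(onehot_labels, logits, alpha=0.25, gamma=2.0, epsilon=1e-8):
    predictions = tf.nn.sigmoid(logits)
    # p_t: the predicted probability of the true class for each entry.
    predictions_pt = tf.where(tf.equal(onehot_labels, 1.0),
                              predictions, 1.0 - predictions)
    # alpha for positives, 1 - alpha for negatives.
    alpha_base = alpha * tf.ones_like(onehot_labels)
    alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_base, 1.0 - alpha_base)
    losses = tf.reduce_sum(
        -alpha_t * tf.pow(1.0 - predictions_pt, gamma)
        * tf.log(predictions_pt + epsilon), axis=-1)
    return tf.reduce_mean(losses)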