Columns: seed (string, length 25–2.89k) · seed_api (string, length 14–102) · index (int64, 0–14.8k)
import tensorflow as tf
				"logits_ratio":0.5,
				"logits_decay_rate":0.999,
				"distillation":['mdd'],
				"feature_ratio":0.5,
				"feature_ratio_decay":"constant",
				"feature_decay_rate":0.999,
				"kd_type":"kd",
				"scope":scope
			})))

		# get teacher logits
		teacher_logit = tf.log(features["label_probs"]+1e-10)/kargs.get("temperature", 2.0) # log_softmax logits
		student_logit = tf.nn.log_softmax(logits /kargs.get("temperature", 2.0)) # log_softmax logits

		distillation_features = {
			"student_logits_tensor":student_logit,
			"teacher_logits_tensor":teacher_logit,
			"student_feature_tensor":model.get_pooled_output(),
			"teacher_feature_tensor":features["distillation_feature"],
			"student_label":tf.ones_like(label_ids, dtype=tf.int32),
			"teacher_label":tf.zeros_like(label_ids, dtype=tf.int32),
tensorflow.log
11,100
import tensorflow as tf
        return h

    with tf.variable_scope("self_attention"):
        with tf.variable_scope("f"):
            f = convolution(
tensorflow.variable_scope
11,101
import tensorflow as tf
  """
  t_rank = tf.rank(t)
tensorflow.rank
11,102
import tensorflow as tf
            lambda: param_noise_scale.assign(param_noise_scale * 1.01),
            lambda: param_noise_scale.assign(param_noise_scale / 1.01),
        )
        return update_scale_expr

    # Functionality to update the threshold for parameter space noise.
    update_param_noise_thres_expr = param_noise_threshold.assign(
        tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph,
                lambda: param_noise_threshold))

    # Put everything together.
    perturbed_deterministic_actions = tf.argmax(perturbable_policy.q_values, axis=1)
    deterministic_actions = tf.argmax(policy.q_values, axis=1)
    batch_size = tf.shape(policy.obs_ph)[0]
    n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
    random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
    chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
    perturbed_stochastic_actions = tf.where(chose_random, random_actions, perturbed_deterministic_actions)
    stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

    perturbed_output_actions = tf.cond(stochastic_ph, lambda: perturbed_stochastic_actions,
                                       lambda: deterministic_actions)
    output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
    update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
    updates = [
        update_eps_expr,
tensorflow.shape
11,103
import tensorflow as tf
    label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
    input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids')
tensorflow.placeholder
11,104
import tensorflow as tf
import sys
import tensorflow as tf
import memory_util

memory_util.vlog(1)
sess = tf.Session()
with sess.as_default():
    tensor = tf.range(10)
    print_op = tf.print("tensors:", tensor, {'2': tensor * 2},
                        output_stream=sys.stderr)
    with tf.control_dependencies([print_op]):
        tripled_tensor = tensor * 3
    with memory_util.capture_stderr() as stderr:
        print(sess.run(tripled_tensor))
    print(stderr.getvalue())
tensorflow.range
11,105
import tensorflow as tf
                current_data = provider.rotate_tensor_by_label(current_data, current_label, self.graph)
            elif self.num_angles == 32:
                current_data = provider.rotate_tensor_by_label_32(current_data, current_label, self.graph)
            elif self.num_angles == 54:
                current_data = provider.rotate_tensor_by_label_54(current_data, current_label, self.graph)
            elif self.num_angles:  # sunflower distribution
                current_data = provider.rotate_point_by_label_n(current_data, current_label, self.graph,
                                                                self.num_angles, use_tensor=True)
            else:
                raise(NotImplementedError())
            current_data = tf.convert_to_tensor(current_data)
        return current_data
tensorflow.convert_to_tensor
11,106
import tensorflow as tf
          step=global_step)
      tf.contrib.summary.scalar(
          'fast_rcnn_class_loss', tf.reduce_mean(fast_rcnn_class_loss),
          step=global_step)
      tf.contrib.summary.scalar(
          'fast_rcnn_box_loss', tf.reduce_mean(fast_rcnn_box_loss),
          step=global_step)
      if params['include_mask']:
        tf.contrib.summary.scalar(
            'mask_loss', tf.reduce_mean(mask_loss), step=global_step)
tensorflow.reduce_mean
11,107
import tensorflow as tf
    self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
                        (np_image + 5) * 2)


class PadInputDataToStaticShapesFnTest(test_case.TestCase):

  def test_pad_images_boxes_and_classes(self):
    input_tensor_dict = {
        fields.InputDataFields.image:
            tf.placeholder(tf.float32, [None, None, 3]),
        fields.InputDataFields.groundtruth_boxes:
            tf.placeholder(tf.float32, [None, 4]),
        fields.InputDataFields.groundtruth_classes:
            tf.placeholder(tf.int32, [None, 3]),
        fields.InputDataFields.true_image_shape:
            tf.placeholder(tf.int32, [3]),
        fields.InputDataFields.original_image_spatial_shape:
            tf.placeholder(tf.int32, [2])
    }
    padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
        tensor_dict=input_tensor_dict,
        max_num_boxes=3,
        num_classes=3,
        spatial_image_shape=[5, 6])

    self.assertAllEqual(
        padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
        [5, 6, 3])
    self.assertAllEqual(
        padded_tensor_dict[fields.InputDataFields.true_image_shape]
tensorflow.placeholder
11,108
import tensorflow as tf
"""
Various utility functions.
"""
import numpy as np
import tensorflow as tf


def batch_to_seq(h, nbatch, nsteps, flat=False):
    if flat:
        h = tf.reshape(h, [nbatch, nsteps])
    else:
        h = tf.reshape(h, [nbatch, nsteps, -1])
    return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]


def seq_to_batch(h, flat=False):
    shape = h[0].get_shape().as_list()
    if not flat:
        assert(len(shape) > 1)
        nh = h[0].get_shape()[-1].value
        return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
    else:
        return tf.reshape(tf.stack(values=h, axis=1), [-1])
tensorflow.squeeze
11,109
from tensorflow.python.ops import variable_scope
        weight_collections=[parent_scope],
        scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(hidden_units):
      with variable_scope.variable_scope(
          parent_scope + "/hiddenlayer_%d" % layer_id,
          values=[net],
          partitioner=hidden_layer_partitioner) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
tensorflow.python.ops.variable_scope.variable_scope
11,110
from tensorflow.python.framework import tensor_util
  """Shape function for the DepthwiseConv2dNativeBackpropFilter op."""
  filter_shape = tensor_util.constant_value(op.inputs[1])
  if filter_shape is not None:
    return [tensor_shape.TensorShape(filter_shape.tolist())]
  else:
    return [tensor_shape.unknown_shape(ndims=4)]


@ops.RegisterShape("DepthwiseConv2dNativeBackpropInput")
def _DepthwiseConv2dNativeBackpropInputShape(op):
  """Shape function for the DepthwiseConv2dNativeBackpropInput op."""
  input_shape = tensor_util.constant_value(op.inputs[0])
  if input_shape is not None:
    return [tensor_shape.TensorShape(input_shape.tolist())]
  else:
    return [tensor_shape.unknown_shape(ndims=4)]


@ops.RegisterShape("MaxPoolGrad")
@ops.RegisterShape("MaxPoolGradWithArgmax")
def _MaxPoolGradShape(op):
  """Shape function for the MaxPoolGrad op."""
tensorflow.python.framework.tensor_util.constant_value
11,111
import tensorflow as tf
        "output_weights",
        shape=[2, bert_config.hidden_size],
        initializer=modeling.create_initializer(bert_config.initializer_range))
    output_bias = tf.get_variable(
        "output_bias", shape=[2], initializer=tf.zeros_initializer())

    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    labels = tf.reshape(labels, [-1])

    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

  return (loss, per_example_loss, log_probs)
tensorflow.nn.log_softmax
11,112
import tensorflow as tf
    bn_decay: float or float tensor variable in [0,1]
    is_training: bool Tensor variable

  Returns:
    Variable tensor
  """
  with tf.variable_scope(scope) as sc:
    num_in_channels = inputs.get_shape()[-1].value
    kernel_shape = [kernel_size,
                    num_in_channels, num_output_channels]
    kernel = _variable_with_weight_decay('weights',
                                         shape=kernel_shape,
tensorflow.variable_scope
11,113
import tensorflow as tf
    scores = tf.where(key_masks, scores, paddings)  # [B, 1, T]

    # Scale
    # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)

    # Activation
    if softmax_stag:
        scores = tf.nn.softmax(scores)  # [B, 1, T]

    # Weighted sum
    if mode == 'SUM':
        output = tf.matmul(scores, facts)  # [B, 1, H]
        # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
    else:
tensorflow.nn.softmax
11,114
import tensorflow as tf
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
        bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
tensorflow.constant_initializer
11,115
import tensorflow as tf
  Args:
    shape: array representing the shape of the future tensor
    dtype: [tf.float32] dtype of the resulting tensor.
    name: string, name of the Op.

  Returns:
    TensorTrain object containing a TT-tensor
  """
  shape = np.array(shape)
  _validate_input_parameters(is_tensor=True, shape=shape)
  num_dims = shape.size
  tt_rank = np.ones(num_dims + 1)

  with tf.name_scope(name):
    tt_cores = num_dims * [None]
    for i in range(num_dims):
      curr_core_shape = (1, shape[i], 1)
      tt_cores[i] = tf.ones(curr_core_shape, dtype=dtype)

    return TensorTrain(tt_cores, shape, tt_rank)


def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
  """Generate TT-tensor of the given shape with all entries equal to 0.

  Args:
    shape: array representing the shape of the future tensor
    dtype: [tf.float32] dtype of the resulting tensor.
tensorflow.name_scope
11,116
import tensorflow as tf
        X = self._do_separable_conv(X, w, h, ch, filter_size=filter_size, stride=stack_stride,
                                    W_d=W_d, W_p=W_p, no_batch_norm=True)
        X = self._add_batch_norm(X, ch, offset=batch_norm_offset, scale=batch_norm_scale,
                                 no_moving_average=is_dynamic, is_train=is_train)
        X = tf.reshape(X, (-1, w // stride, h // stride, ch))  # Sanity shape check
        return X

    ####################################
    # Utils
    ####################################

    def _do_cutout(self, image, im_width, im_height, cutout_size):
        mask = tf.ones([cutout_size, cutout_size], dtype=tf.int32)
        start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32)
        start_y = tf.random.uniform(shape=(1,), minval=0, maxval=im_height, dtype=tf.int32)
        mask = tf.pad(mask, [[cutout_size + start_y[0], im_height - start_y[0]],
                             [cutout_size + start_x[0], im_width - start_x[0]]])
        mask = mask[cutout_size: cutout_size + im_height,
                    cutout_size: cutout_size + im_width]
        mask = tf.tile(tf.reshape(mask, (im_height, im_width, 1)), (1, 1, 3))
        image = tf.where(tf.equal(mask, 0), x=image, y=tf.zeros_like(image))
        return image

    def _add_drop_path(self, X, keep_prob):
        with tf.variable_scope('drop_path'):
            batch_size = tf.shape(X)[0]
tensorflow.ones
11,117
import tensorflow as tf
    else:
        act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)

    with tf.variable_scope(scope, reuse=reuse):
        # set up placeholders
        obs_t_input = U.ensure_tf_input(make_obs_ph("obs_t"))
        act_t_ph = tf.placeholder(tf.int32, [None], name="action")
        rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
        obs_tp1_input = U.ensure_tf_input(make_obs_ph("obs_tp1"))
        done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
        importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")

        # q network evaluation
        q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True)  # reuse parameters from act
tensorflow.placeholder
11,118
import tensorflow as tf
  test: tf.Tensor
    Of shape (n_test, n_feat)
  support: tf.Tensor
    Of shape (n_support, n_feat)
  max_dist_sq: float, optional
    Maximum pairwise distance allowed.

  Returns
  -------
  tf.Tensor:
    Of shape (n_test, n_support)
  """
  test = tf.expand_dims(test, 1)
  support = tf.expand_dims(support, 0)
  g = -tf.maximum(tf.reduce_sum(tf.square(test - support), 2), max_dist_sq)
  return g


def add_bias(tensor, init=None, name=None):
  """Add a bias term to a tensor.

  Parameters
  ----------
  tensor: tf.Tensor
    Variable tensor.
  init: float
tensorflow.expand_dims
11,119
import tensorflow as tf
        make_tensors_fn=lambda: {'x': tf.compat.v1.placeholder(tf.float32, (None,))},
tensorflow.compat.v1.placeholder
11,120
import tensorflow as tf
            loss += tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=captions_out[:, t]) * mask[:, t])

        if self.alpha_c > 0:
            alphas = tf.transpose(tf.stack(alpha_list), (1, 0, 2))  # (N, T, L)
            alphas_all = tf.reduce_sum(alphas, 1)  # (N, L)
            alpha_reg = self.alpha_c * tf.reduce_sum((16./196 - alphas_all) ** 2)
            loss += alpha_reg

        return loss / tf.to_float(batch_size)
tensorflow.reduce_sum
11,121
import tensorflow as tf
        self._batch_env.reset, [indices], observ_dtype, name='reset')
    observ = tf.check_numerics(observ, 'observ')
tensorflow.check_numerics
11,122
import tensorflow as tf
    confidence_mat = tf.get_variable('confidence_mat', [in_size, 1])
    confidence_scores = tf.concat(1, [tf.matmul(o_, confidence_mat)
                                      for o_ in self._inputs])

    # dropout on confidence_scores
    random_tensor = (1.0 - self._dropout_keep_prob +
                     tf.random_uniform(tf.shape(confidence_scores)))
    binary_tensor = -50.0 * tf.floor(random_tensor)
    csshape = confidence_scores.get_shape()
    self.cs = tf.nn.softmax(tf.constant(1.0, shape=csshape))

    # The final prediction is the average of the predictions for each word
    # weighted by the individual confidence/utility scores.
    wvs = tf.pack(self._inputs)
    wvs_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
                          tf.reshape(wvs, [-1, in_size]))
    wvs_weighted_reshaped = tf.reshape(wvs_weighted, wvs.get_shape())
    wvsum = tf.reduce_sum(wvs_weighted_reshaped, 0)

    pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
    pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

    # Make a prediction for each tweet.
    def GetWordPred(o_):
      logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
      return tf.nn.softmax(logits)

    preds = GetWordPred(wvsum)
tensorflow.pack
11,123
import tensorflow as tf
        a = self.sess.run(self.sample_op, {self.tfs: s})[0]
        return np.clip(a, ACTION_BOUND[0], ACTION_BOUND[1])

    def get_v(self, s):
        if s.ndim < 4:
            s = s[np.newaxis, :]
        return self.sess.run(self.v, {self.tfs: s})[0, 0]

    def load(self):
        saver = tf.train.Saver()
        saver.restore(self.sess, './model_save/params')

    def save(self):
        saver = tf.train.Saver()
        saver.save(self.sess, './model_save/params', write_meta_graph=False)


class Worker(object):
    def __init__(
            self,
            envpath,
            wid,
            retro,
            realtime_mode,
            env_seed=0,
            env_floor=0):
tensorflow.train.Saver
11,124
import tensorflow as tf
      tf.Assert(
          tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1),
                                1e-4)),
          ['The l2 norm of each label quaternion vector should be 1.']))

  with tf.name_scope(name):
    with tf.control_dependencies(assertions):
      product = tf.multiply(predictions, labels)
      internal_dot_products = tf.reduce_sum(product, [1])
      logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
tensorflow.control_dependencies
11,125
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
      learning_rate=.0003, n_warmup_iters=3)


def step(dynamics, optimizer, samples):
  loss, grads, samples, _ = l2hmc.loss_and_grads(
      dynamics, samples, loss_fn=l2hmc.compute_loss)
  optimizer.apply_gradients(zip(grads, dynamics.variables))

  return loss, samples
tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.loss_and_grads
11,126
import tensorflow as tf
  def to_json_string(self):
    """Serializes this instance to a JSON string."""
    return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"


def create_model(config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings, task_name,):
  """Creates a classification model from_scratch."""
  _true_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), dtype=tf.int32)
  with tf.variable_scope("baseline"):
    with tf.variable_scope("embeddings"):
      # Perform embedding lookup on the word ids.
      (word_embedding_output,
       output_embedding_table) = modeling.embedding_lookup(
           input_ids=input_ids,
           vocab_size=config.vocab_size,
           embedding_size=config.embedding_size,
           initializer_range=config.initializer_range,
           word_embedding_name="word_embeddings",
           use_one_hot_embeddings=use_one_hot_embeddings)
tensorflow.variable_scope
11,127
import tensorflow as tf
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    if "plugin" in args.optimizer:
        init_op = tf.group(init_op, emb_opt.initializer)
tensorflow.group
11,128
import tensorflow as tf
    else:
      train_op = None
      scaffold_fn = None

    return tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode,
        loss=total_loss,
        train_op=train_op,
        host_call=host_call,
        scaffold_fn=scaffold_fn)


def mask_rcnn_model_fn(features, labels, mode, params):
  """Mask-RCNN model."""
  with tf.variable_scope('', reuse=tf.AUTO_REUSE):
    return _model_fn(
        features,
        labels,
        mode,
        params,
        variable_filter_fn=remove_variables)
tensorflow.variable_scope
11,129
import tensorflow as tf
        update_param_noise_threshold_expr = param_noise_threshold.assign(
            tf.cond(update_param_noise_threshold_ph >= 0,
                    lambda: update_param_noise_threshold_ph,
                    lambda: param_noise_threshold))

        # Put everything together.
        deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
        batch_size = tf.shape(observations_ph.get())[0]
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
tensorflow.stack
11,130
import tensorflow as tf
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_valid_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
            self.w1 = tf.get_variable('w1', [4096, 2048], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w2 = tf.get_variable('w2', [2048, 3072], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w3 = tf.get_variable('w3', [3072, 512], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w4 = tf.get_variable('w4', [512, classnum], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.b1 = tf.get_variable('b1', [2048], initializer=tf.constant_initializer(0.0))
            self.b2 = tf.get_variable('b2', [3072], initializer=tf.constant_initializer(0.0))
            self.b3 = tf.get_variable('b3', [512], initializer=tf.constant_initializer(0.0))
            self.b4 = tf.get_variable('b4', [classnum], initializer=tf.constant_initializer(0.0))

    def inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
tensorflow.contrib.layers.xavier_initializer_conv2d
11,131
import tensorflow as tf
        transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
        stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz')
        tf.summary.image('visualize',
                         tf.expand_dims(stacked, -1),
                         max_outputs=30)

        sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')
        logits = (LinearWrap(sampled)
                  .FullyConnected('fc1', out_dim=256, nl=tf.nn.relu)
                  .FullyConnected('fc2', out_dim=128, nl=tf.nn.relu)
                  .FullyConnected('fct', out_dim=19, nl=tf.identity)())
        tf.nn.softmax(logits, name='prob')

        cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')

        wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector')
        summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))

        wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
                              name='regularize_loss')
        summary.add_moving_summary(cost, wd_cost)
        self.cost = tf.add_n([wd_cost, cost], name='cost')

    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=5e-4, trainable=False)
        opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
tensorflow.reduce_mean
11,132
from tensorflow.contrib import losses
      with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(
            target, shape=[array_ops.shape(target)[0], 1])
      return losses.hinge_loss(logits, target)

    super(_BinarySvmTargetColumn, self).__init__(
tensorflow.contrib.losses.hinge_loss
11,133
import tensorflow as tf
    elif kh == 7 and kw == 7:
        return 49.0 * 21.0 / 1024.0
    elif kh == 14 and kw == 14:
        return 196.0 * 21.0 / 4096.0
    else:
        rec = tf.cast(kw * kh, tf.float32)
        n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.))
        ns = tf.range(0., n_max)
        ns_pow = tf.pow(2., ns)
        ks = tf.round(ns_pow / rec)
        diffs = tf.math.abs(ks / ns_pow - 1 / rec)
        n = tf.argmin(diffs)
        k = ks[n]
        scale = k / tf.pow(2., tf.cast(n, tf.float32))
tensorflow.range
11,134
from tensorflow.contrib.framework.python.ops import variables as variables
  def _get_train_ops(self, features, targets):
    """See base class."""
    global_step = variables.get_global_step()
    assert global_step
    loss = self._loss(
tensorflow.contrib.framework.python.ops.variables.get_global_step
11,135
import tensorflow as tf
    """
    Virtual Batch Normalization
    """

    def __init__(self, x, name, epsilon=1e-5):
        """
        x is the reference batch
        """
        assert isinstance(epsilon, float)

        shape = x.get_shape().as_list()
        with tf.variable_scope(name) as scope:
            self.epsilon = epsilon
            self.name = name
            self.mean = tf.reduce_mean(x, [0, 1, 2], keep_dims=True)
            self.mean_sq = tf.reduce_mean(tf.square(x), [0, 1, 2], keep_dims=True)
            self.batch_size = int(x.get_shape()[0])
            assert x is not None
            assert self.mean is not None
            assert self.mean_sq is not None
            out = tf.nn.relu(self._normalize(x, self.mean, self.mean_sq, "reference"))
            self.reference_output = out

    def __call__(self, x, update=False):
        with tf.variable_scope(self.name) as scope:
            if not update:
                new_coeff = 1. / (self.batch_size + 1.)
                old_coeff = 1. - new_coeff
tensorflow.reduce_mean
11,136
from tensorflow.python.framework import ops
  def op(self):
    return self.get().op

  def _read_variable_op(self):
    if _enclosing_tpu_context() is None:
      return self._primary_var.read_value()
    v = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype)
    return v

  def read_value(self):
    return self._read_variable_op()

  def assign(self, value, use_locking=None, name=None, read_value=False):
    del use_locking
    with _handle_graph(self.handle), self._assign_dependencies():
      value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
      assign_op = gen_resource_variable_ops.assign_variable_op(
          self.handle, value_tensor, name=name)
      if read_value:
        return self._read_variable_op()
    return assign_op

  def assign_add(self, delta, use_locking=None, name=None, read_value=True):
    del use_locking
    with _handle_graph(self.handle), self._assign_dependencies():
      assign_add_op = gen_resource_variable_ops.assign_add_variable_op(
          self.handle,
          ops.convert_to_tensor(delta, dtype=self.dtype),
          name=name)
      if read_value:
tensorflow.python.framework.ops.convert_to_tensor
11,137
import tensorflow as tf
            # Create mask that can be used to gather elements from L_flat and put them
            # into a diagonal matrix.
            diag_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
            diag_mask[np.diag_indices(self.nb_actions)] = range(1, self.nb_actions + 1)

            # Add leading zero element to each element in the L_flat. We use this zero
            # element when gathering L_flat into a lower triangular matrix L.
            nb_rows = tf.shape(L_flat)[0]
            zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
            try:
                # Old TF behavior.
                L_flat = tf.concat(1, [zeros, L_flat])
            except TypeError:
                # New TF behavior
                L_flat = tf.concat([zeros, L_flat], 1)

            # Finally, process each element of the batch.
            def fn(a, x):
                x_ = tf.gather(x, diag_mask)
                return x_

            P = tf.scan(fn, L_flat, initializer=K.zeros((self.nb_actions, self.nb_actions)))
        else:
            raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
        assert P is not None
        assert K.ndim(P) == 3

        # Combine a, mu and P into a scalar (over the batches). What we compute here is
        # -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately
tensorflow.concat
11,138
import tensorflow as tf
            init_w = tf.random_normal_initializer(0., 0.01)
            init_b = tf.constant_initializer(0.01)

            net = tf.layers.dense(s, 500, activation=tf.nn.relu,
                                  kernel_initializer=init_w, bias_initializer=init_b,
                                  name='l1', trainable=trainable)
tensorflow.layers.dense
11,139
from tensorflow.python.ops import math_ops
  prev_mean_label = update_mean_label - delta_mean_label

  unweighted_batch_coresiduals = (
      (predictions - batch_mean_prediction) * (labels - batch_mean_label))
  # batch_comoment is C_B in the update equation
  if weights is None:
    batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
  else:
    batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals *
                                         weights)

  # View delta_comoment as = C_AB - C_A in the update equation above.
tensorflow.python.ops.math_ops.reduce_sum
11,140
import tensorflow as tf
    shapes[rconst.DUPLICATE_MASK] = tf.TensorShape([batch_size])

    data_generator = functools.partial(
        self.data_generator, epochs_between_evals=epochs_between_evals)
    dataset = tf.data.Dataset.from_generator(
        generator=data_generator, output_types=types,
        output_shapes=shapes)
tensorflow.data.Dataset.from_generator
11,141
import tensorflow as tf
            end = (i + 1) * cfgs.BATCH_SIZE
            img = img_batch[start:end, :, :, :]

            pretrain_zoo = PretrainModelZoo()
            if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
                img = img / tf.constant([cfgs.PIXEL_STD])

            gtboxes_and_label_h = get_horizen_minAreaRectangle(
                tf.reshape(gtboxes_and_label_batch[start:end], [-1, 9]))
tensorflow.constant
11,142
import tensorflow as tf
            prev_node = hidden_layers_node[i]
            prev_x = layer_out
        # output layers
        layer_name = 'layer_last'
        with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
            weights = tf.get_variable('weights', [prev_node, output_node],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
            self.nnweights.append(weights)
            biases = tf.get_variable('biases', [output_node],
                                     initializer=tf.constant_initializer(0.0))
            layer_out = tf.matmul(prev_x, weights) + biases
        # Output of Network
        y = layer_out
        # Global step
        with tf.variable_scope('training_step', reuse=tf.AUTO_REUSE):
            global_step = tf.get_variable("global_step", [],
                                          dtype=tf.int32,
                                          initializer=tf.constant_initializer(0),
tensorflow.constant_initializer
11,143
import tensorflow as tf
  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()

  label_list = processor.get_labels()

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        'grpc://' + os.environ['COLAB_TPU_ADDR'])

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
tensorflow.contrib.cluster_resolver.TPUClusterResolver
11,144
import tensorflow as tf
N_S = env.observation_space.shape[0]  # number of observations
N_A = env.action_space.n  # number of actions


class ACnet(object):  # this class builds both the global net and the worker nets, since they share the same structure
    def __init__(self, scope, globalAC=None, global_step=None):  # scope determines which network is built
        # global GLOBALE_STEP
        # self.global_step = GLOBALE_STEP
        if scope == GLOBAL_NET_SCOPE:  # build the global network
            with tf.variable_scope(scope):
                self.global_step = tf.get_variable("global_step", [], tf.int32,
                                                   initializer=tf.constant_initializer(0, dtype=tf.int32),
                                                   trainable=False)
                self.obs_space = N_S
                self.act_space = N_A
                self.k = 16
                self.g_dim = 256
                self.c = 10
tensorflow.variable_scope
11,145
import tensorflow as tf
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Box matcher."""

# Import libraries
import tensorflow as tf

from official.vision.beta.ops import box_ops


@tf.keras.utils.register_keras_serializable(package='Vision')
class BoxMatcher(tf.keras.layers.Layer):
  """Match boxes with groundtruth boxes."""

  def __init__(self,
               foreground_iou_threshold=0.5,
               background_iou_high_threshold=0.5,
               background_iou_low_threshold=0,
               **kwargs):
    """Initializes a box matcher.

    Args:
      foreground_iou_threshold: float, represent the IoU threshold for a box to
tensorflow.keras.utils.register_keras_serializable
11,146
import tensorflow as tf
  Args:
    x: a tensor of shape [num_samples, num_features]
    y: a tensor of shape [num_samples, num_features]
    kernel: a function which computes the kernel in MMD. Defaults to the
            GaussianKernelMatrix.

  Returns:
    a scalar denoting the squared maximum mean discrepancy loss.
  """
  with tf.name_scope(name):
    # \E{ K(x, x) } + \E{ K(y, y) } - 2 \E{ K(x, y) }
    cost = tf.reduce_mean(kernel(x, x))
    cost += tf.reduce_mean(kernel(y, y))
    cost -= 2 * tf.reduce_mean(kernel(x, y))

    # We do not allow the loss to become negative.
    cost = tf.where(cost > 0, cost, 0, name='value')
  return cost
tensorflow.name_scope
11,147
import tensorflow as tf
      of the input tensor t after padding, assuming length <= t.shape[0].

  Returns:
    padded_t: the padded tensor, whose first dimension is length. If the length
      is an integer, the first dimension of padded_t is set to length
      statically.
  """
  t_rank = tf.rank(t)
  t_shape = tf.shape(t)
  t_d0 = t_shape[0]
  pad_d0 = tf.expand_dims(length - t_d0, 0)
  pad_shape = tf.cond(
      tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
      lambda: tf.expand_dims(length - t_d0, 0))
  padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
  if not _is_tensor(length):
    padded_t = _set_dim_0(padded_t, length)
  return padded_t


def clip_tensor(t, length):
  """Clips the input tensor along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
tensorflow.expand_dims
11,148
import tensorflow as tf
    assert(len(idx.get_shape()) == 1)
    idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx
tensorflow.range
11,149
import tensorflow as tf
  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tensorflow.contrib.tpu.TPUEstimator
11,150
import tensorflow as tf
    candidate_span_emb = self.get_span_emb(flattened_head_emb, context_outputs, candidate_starts, candidate_ends)  # [num_candidates, emb]
    candidate_mention_scores = self.get_mention_scores(candidate_span_emb)  # [k, 1]
    candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1)  # [k]
tensorflow.squeeze
11,151
import tensorflow as tf
        # classification loss (crossentropy)
        # 1. compute max conf across batch for hard negative mining
        loss_class = tf.where(mask_neg,
                              1 - class_pred[:, 0][..., tf.newaxis], 0)

        # 2. hard negative mining
        loss_class = tf.reshape(loss_class, [num_batch, num_prior])
        loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING')
        loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
        mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
        num_pos_per_batch = tf.reduce_sum(
            tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
        num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
        num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
                                       tf.cast(num_prior, tf.float32) - 1)
        mask_hard_neg = tf.reshape(
            tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
            [num_batch * num_prior, 1])

        # 3. classification loss including positive and negative examples
        loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
        loss_class_mask_b = tf.broadcast_to(loss_class_mask,
                                            tf.shape(class_pred))
        filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
                                            loss_class_mask)
        filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
        filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
        loss_class = tf.keras.losses.sparse_categorical_crossentropy(
            y_true=filter_class_true, y_pred=filter_class_pred)
tensorflow.cast
11,152
import tensorflow as tf
    with tf.Session() as sess:
        """Model function for CNN."""
        features = tf.placeholder(
            tf.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE], name='features')
        labels = tf.placeholder(tf.int64, shape=[None], name='labels')
        input_layer = tf.reshape(features, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
        conv1 = tf.layers.conv2d(
            inputs=input_layer,
            filters=filter_list[0],
            kernel_size=[5, 5],
            padding="same",
            activation=tf.nn.relu)
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
        conv2 = tf.layers.conv2d(
            inputs=pool1,
            filters=filter_list[1],
            kernel_size=[5, 5],
            padding="same",
            activation=tf.nn.relu)
        pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
        # return int(np.prod(pool2.get_shape().as_list()[1:]))
        return pool2.get_shape().as_list()
tensorflow.layers.max_pooling2d
11,153
import tensorflow as tf
    return tf.transpose(split_states(x, n), [0, 2, 1, 3])

def merge_heads(x):
    # [-1, head, n_ctx, emb]
    return merge_states(tf.transpose(x, [0, 2, 1, 3]))

def conv1d(x, scope, nf, rf, w_init=tf.random_normal_initializer(stddev=0.02),
           b_init=tf.constant_initializer(0), pad='VALID', train=False):
    with tf.variable_scope(scope):
        # x = [-1, n_ctx, 512]
        nx = shape_list(x)[-1]
        # rf = 1, nx = emb, nf = 3*emb
        w = tf.get_variable("w", [rf, nx, nf], initializer=w_init)
        b = tf.get_variable("b", [nf], initializer=b_init)
        if rf == 1:
            # faster 1x1 conv
            c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf])) + b,
                           shape_list(x)[:-1] + [nf])
        else:
            # was used to train LM
            c = tf.nn.conv1d(x, w, stride=1, padding=pad) + b
        return c

def attn(x, scope, n_state, n_head, train=False, scale=False):
    assert n_state % n_head == 0
    with tf.variable_scope(scope):
        # c [-1, n_ctx, 3*emb]
        c = conv1d(x, 'c_attn', n_state*3, 1, train=train)
tensorflow.get_variable
11,154
import tensorflow as tf
    targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size]))

    crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_, labels=targets_)
    crossent = tf.reshape(crossent, tf.stack([batch_size, time_steps]))

    if rewards is not None:
        crossent *= tf.stop_gradient(rewards)

    log_perp = tf.reduce_sum(crossent * weights, axis=1)

    if average_across_timesteps:
        total_size = tf.reduce_sum(weights, axis=1)
        total_size += 1e-12  # just to avoid division by 0 for all-0 weights
        log_perp /= total_size

    cost = tf.reduce_sum(log_perp)

    if average_across_batch:
        return cost / tf.to_float(batch_size)
    else:
        return cost


def reinforce_baseline(decoder_states, reward):
    """
    Center the reward by computing a baseline reward over decoder states.

    :param decoder_states: internal states of the decoder, tensor of shape (batch_size, time_steps, state_size)
    :param reward: reward for each time step, tensor of shape (batch_size, time_steps)
    :return: reward - computed baseline, tensor of shape (batch_size, time_steps)
tensorflow.reduce_sum
11,155
from tensorflow.python.client import session
  def testSparseDistributed(self):
    worker, unused_ps = self._setupCluster()
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with session.Session(worker.target):
        var0, var1, update_op = self._setupSparse(True, dtype)
        self._assertSparseCorrect(var0, var1, update_op)
tensorflow.python.client.session.Session
11,156
import tensorflow as tf
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True

    def call(self, inputs):
        # use * instead of tf.matmul, we need broadcasting here
        output = inputs * self.kernel
        if self.use_bias:
            output = output + self.bias
        if self.activation is not None:
            output = self.activation(output)
        return output


nan2zeroLayer = Lambda(lambda x: tf.where(tf.is_nan(x), tf.zeros_like(x), x))
ColWiseMultLayer = lambda name: Lambda(lambda l: l[0] * (tf.matmul(tf.reshape(l[1], (-1, 1)),
                                                                   tf.ones((1, l[0].get_shape()[1]),
                                                                           dtype=l[1].dtype))),
                                       name=name)
tensorflow.reshape
11,157
import tensorflow as tf
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
tensorflow.contrib.tpu.TPUEstimatorSpec
11,158
import tensorflow as tf
@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def bair_robot_pushing_preprocess(dataset, training):
  """Pre-processing function that concatenates input and target frames."""
  del training

  def concat_and_add_mask(features, targets):
    """Concatenate input and output frames to form a language modeling setup."""
    inp = features['inputs']
    concat = tf.concat([inp, targets], axis=0)
    mask = tf.concat([tf.zeros_like(inp), tf.ones_like(targets)], axis=0)
    concat = tf.reshape(concat, (-1,))
    mask = tf.reshape(mask, (-1,))
    concat = tf.cast(concat, tf.int32)
    mask = tf.cast(mask, tf.float32)
    features['inputs'] = features['targets'] = concat
    features['mask'] = mask
    return features, concat
tensorflow.concat
11,159
import tensorflow as tf
    Get the cross-entropy for each example in the vector self._xent.

    Args:
      in_size: size of the hidden state vectors
      mats: list of hidden state vectors
    """
    pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
    pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

    # Make a prediction on every word.
    def GetWordPred(o_):
      logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
tensorflow.get_variable
11,160
import tensorflow as tf
  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()

  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with tf.gfile.Open(input_file, "r") as f:
      reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
      lines = []
      for line in reader:
        lines.append(line)
      return lines

  @classmethod
  def _read_csv(cls, input_file, quotechar=None):
    """Reads a comma separated value file."""
    with tf.gfile.Open(input_file, "r") as f:
tensorflow.gfile.Open
11,161
import tensorflow as tf
      string, out_string will contain a random integer casted to a string.
      Otherwise string_tensor is returned unchanged.
  """
  empty_string = tf.constant('', dtype=tf.string, name='EmptyString')
  random_source_id = tf.as_string(
      tf.random_uniform(shape=[], maxval=2 ** 63 - 1, dtype=tf.int64))
tensorflow.constant
11,162
import tensorflow as tf
        del init_params[1]
    if n_transfer == -1:
        n_transfer = 0
    else:
        n_transfer = 1 + n_transfer * 12
    sess.run([p.assign(ip) for p, ip in zip(params[:n_transfer], init_params[:n_transfer])])

    eval_mgpu_logits, eval_mgpu_clf_losses, eval_mgpu_lm_losses = mgpu_predict(X_train, M_train, Y_train)
    eval_logits, eval_clf_losses, eval_lm_losses = model(X, M, Y, train=False, reuse=True)
    eval_clf_loss = tf.reduce_mean(eval_clf_losses)
    eval_mgpu_clf_loss = tf.reduce_mean(eval_mgpu_clf_losses)

    n_updates = 0
    n_epochs = 0
    if dataset != 'stsb':
        trYt = trY
    if submit:
        save(os.path.join(save_dir, desc, 'best_params.jl'))
    best_score = 0
tensorflow.reduce_mean
11,163
import tensorflow as tf
            # Calculate w
            cut_g = tf.stop_gradient(self.g)
            cut_g = tf.expand_dims(cut_g, [1])
            gstack = tf.concat([self.prev_g, cut_g], axis=1)

            self.last_c_g = gstack[:, 1:]
            # print self.last_c_g

            gsum = tf.reduce_sum(gstack, axis=1)
            phi = tf.get_variable("phi", (self.g_dim, self.k))
            w = tf.matmul(gsum, phi)
            w = tf.expand_dims(w, [2])

            # Calculate policy and sample
            logits = tf.reshape(tf.matmul(U, w), [-1, num_acts])
            self.pi = tf.nn.softmax(logits)
            self.log_pi = tf.nn.log_softmax(logits)
            self.sample = policy_utils.categorical_sample(
                tf.reshape(logits, [-1, num_acts]), num_acts)[0, :]
tensorflow.matmul
11,164
import tensorflow as tf
        with tf.device(worker_device):
            with tf.variable_scope("local"):
                self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n)
                pi.global_step = self.global_step

            self.ac = tf.placeholder(tf.float32, [None, env.action_space.n], name="ac")
            self.adv = tf.placeholder(tf.float32, [None], name="adv")
            self.r = tf.placeholder(tf.float32, [None], name="r")

            log_prob_tf = tf.nn.log_softmax(pi.logits)
tensorflow.placeholder
11,165
from tensorflow.contrib.distributions.python.ops import distribution_util
    with self._name_scope(name, values=[x]):
      x = ops.convert_to_tensor(x, name="x")
      sample_shape, batch_shape, event_shape = self.get_shape(x)
      event_shape = distribution_util.pick_vector(
          self._event_ndims_is_0, (1,), event_shape)
      batch_shape = distribution_util.pick_vector(
          self._batch_ndims_is_0, (1,), batch_shape)
      new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape))
      x = array_ops.reshape(x, shape=new_shape)
      x = distribution_util.rotate_transpose(x, shift=-1)
tensorflow.contrib.distributions.python.ops.distribution_util.pick_vector
11,166
from tensorflow.python.framework import ops
      multi_cell = rnn_cell.MultiRNNCell(
          [cell() for _ in range(num_layers)])

      outputs, final_state = core_rnn.static_rnn(
          multi_cell, inputs, dtype=dtypes.float32)

      trainable_variables = ops.get_collection(
          ops.GraphKeys.TRAINABLE_VARIABLES)
      gradients = gradients_impl.gradients([outputs, final_state],
                                           trainable_variables)
tensorflow.python.framework.ops.get_collection
11,167
from tensorflow.python.framework import tensor_util
        "dimension (%d) must be in the range [0, %d), where %d is the number "
        "of dimensions in the input"
        % (dimension, input_shape.ndims, input_shape.ndims))


@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
  """Common shape function for reduction ops."""
  input_shape = op.inputs[0].get_shape()
  reduction_indices = tensor_util.ConstantValue(op.inputs[1])
  keep_dims = op.get_attr("keep_dims")
  if reduction_indices is None or input_shape.ndims is None:
    if keep_dims:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    else:
      return [tensor_shape.unknown_shape()]

  # Turn reduction_indices from scalar to vector if necessary
  reduction_indices = np.ravel(reduction_indices)

  for reduction_index in reduction_indices:
    if reduction_index < 0 or reduction_index >= input_shape.ndims:
      raise ValueError("Invalid reduction dimension %d for input with %d "
                       "dimensions" % (reduction_index, input_shape.ndims))
tensorflow.python.framework.tensor_util.ConstantValue
11,168
from tensorflow.python.ops import nn
    return result

  def _get_predict_ops(self, features):
    """See base class."""
    logits = self._logits(features)
    return self._logits_to_predictions(logits, proba=True)

  def _logits_to_predictions(self, logits, proba=False):
    if self._n_classes < 2:
      return array_ops.reshape(logits, [-1])

    if self._n_classes == 2:
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)

  def _get_feature_ops_from_example(self, examples_batch):
    column_types = layers.create_dict_for_parse_example(
        (self._get_linear_feature_columns() or []) +
        (self._get_dnn_feature_columns() or []))
    features = parsing_ops.parse_example(examples_batch, column_types)
    return features

  def _num_label_columns(self):
    return 1 if self._n_classes <= 2 else self._n_classes

  def _get_linear_feature_columns(self):
tensorflow.python.ops.nn.softmax
11,169
import tensorflow as tf
    :param esp:
    :return:
    """
    with tf.variable_scope(name):
        inputdata = tf.transpose(inputdata, [0, 3, 1, 2])
        n, c, h, w = inputdata.get_shape().as_list()
        group_size = min(group_size, c)
        inputdata = tf.reshape(inputdata, [-1, group_size, c // group_size, h, w])
        mean, var = tf.nn.moments(inputdata, [2, 3, 4], keep_dims=True)
        inputdata = (inputdata - mean) / tf.sqrt(var + esp)

        # per-channel gamma and beta
        gamma = tf.Variable(tf.constant(1.0, shape=[c]), dtype=tf.float32, name='gamma')
        beta = tf.Variable(tf.constant(0.0, shape=[c]), dtype=tf.float32, name='beta')
        gamma = tf.reshape(gamma, [1, c, 1, 1])
        beta = tf.reshape(beta, [1, c, 1, 1])

        # convert back as in the paper: [n, c, h, w] to [n, h, w, c]
        output = tf.reshape(inputdata, [-1, c, h, w])
        output = output * gamma + beta
        output = tf.transpose(output, [0, 2, 3, 1])

    return output
tensorflow.constant
11,170
import tensorflow as tf
################################################################################
# Convenience functions for building the ResNet model.
################################################################################
def batch_norm(inputs, training, data_format):
  """Performs a batch normalization using a standard set of parameters."""
  # We set fused=True for a significant performance boost. See
  # https://www.tensorflow.org/performance/performance_guide#common_fused_ops
  return tf.layers.batch_normalization(
      inputs=inputs, axis=1 if data_format == 'channels_first' else 3,
      momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
      scale=True, training=training, fused=True)


def fixed_padding(inputs, kernel_size, data_format):
  """Pads the input along the spatial dimensions independently of input size.
tensorflow.layers.batch_normalization
11,171
import tensorflow as tf
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
tensorflow.logging.info
11,172
from tensorflow.python.framework import ops
  mean_shape = op.inputs[1].get_shape().with_rank(1)
  var_shape = op.inputs[2].get_shape().with_rank(1)
  beta_shape = op.inputs[3].get_shape().with_rank(1)
  out_backprop_shape = op.inputs[4].get_shape().with_rank(4)
  input_shape = input_shape.merge_with(out_backprop_shape)
  vector_dim = input_shape[3]
  vector_dim = vector_dim.merge_with(mean_shape[0])
  vector_dim = vector_dim.merge_with(var_shape[0])
  vector_dim = vector_dim.merge_with(beta_shape[0])
  return [input_shape] + ([tensor_shape.vector(vector_dim)] * 4)


ops.RegisterShape("Conv2D")(common_shapes.conv2d_shape)
ops.RegisterShape("DepthwiseConv2dNative")(
    common_shapes.depthwise_conv2d_native_shape)
ops.RegisterShape("AvgPool")(common_shapes.avg_pool_shape)
ops.RegisterShape("MaxPool")(common_shapes.max_pool_shape)


@ops.RegisterShape("MaxPoolWithArgmax")
def _MaxPoolWithArgMaxShape(op):
  """Shape function for MaxPoolWithArgmax op."""
  return common_shapes.max_pool_shape(op) * 2
tensorflow.python.framework.ops.RegisterShape
11,173
import tensorflow as tf
nsig = tf.Variable(200, name="nsig", dtype=tf.float64)
nbkg = tf.Variable(800, name="nbkg", dtype=tf.float64)
tensorflow.Variable
11,174
import tensorflow as tf
        scale_init = init_scale / tf.sqrt(v_init + 1e-10)
        with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):
            x = tf.reshape(scale_init, [1, num_units]) * (x - tf.reshape(m_init, [1, num_units]))
tensorflow.reshape
11,175
from tensorflow.python.ops import math_ops
  prev_count = update_count - batch_count  # n_A in update equation

  # We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
  # batch_mean_prediction is E[x_B] in the update equation
  batch_mean_prediction = _safe_div(
      math_ops.reduce_sum(weighted_predictions), batch_count,
      'batch_mean_prediction')
  delta_mean_prediction = _safe_div(
      (batch_mean_prediction - mean_prediction) * batch_count, update_count,
      'delta_mean_prediction')
tensorflow.python.ops.math_ops.reduce_sum
11,176
import tensorflow as tf
def get_learning_rate_decay(learning_rate, global_step, params):
    if params.learning_rate_decay == "noam":
        step = tf.to_float(global_step)
        warmup_steps = tf.to_float(params.warmup_steps)
        multiplier = params.hidden_size ** -0.5
        decay = multiplier * tf.minimum((step + 1) * (warmup_steps ** -1.5),
                                        (step + 1) ** -0.5)

        return learning_rate * decay
    elif params.learning_rate_decay == "new_warmup_rsqrt_decay":
tensorflow.minimum
11,177
import tensorflow as tf
    train_op = optimizer.apply_gradients(
        zip(grads, model.trainable_variables), global_step=global_step)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(1):
tensorflow.Session
11,178
import tensorflow as tf
  def test_quantization_of_batch_of_uniforms(self):
    batch_shape = (5, 5)
    with self.test_session():
      # The uniforms are supported on [0, 10]. The qdist considers the
      # intervals
      # ... (0, 1](1, 2]...(9, 10] ...
      # with the intervals displayed above each holding 1 / 10 of the mass.
      # The qdist will be defined with no cutoffs,
      qdist = distributions.QuantizedDistribution(
          base_dist_cls=distributions.Uniform,
          lower_cutoff=None,
          upper_cutoff=None,
          a=tf.zeros(batch_shape, dtype=tf.float32),
          b=10 * tf.ones(batch_shape, dtype=tf.float32))

      # x is random integers in {-3,...,12}.
      x = self._rng.randint(-3, 13, size=batch_shape).astype(np.float32)

      # pmf
      # qdist.pmf(j) = 1 / 10 for j in {1,...,10}, and 0 otherwise,
      expected_pmf = (1 / 10) * np.ones(batch_shape)
      expected_pmf[x < 1] = 0.
tensorflow.zeros
11,179
import tensorflow as tf
          shifted_sum_x, shifted_sum_x2, shift, name="normalize_moments")
      second_moment = variance + tf.square(mean)
      return mean, variance, second_moment

    def build_moving_stats():
      return (
          tf.identity(self._moving_mean),
          tf.identity(self._moving_variance),
          tf.identity(self._moving_second_moment),
      )

    mean, variance, second_moment = utils.smart_cond(
        use_batch_stats,
        build_batch_stats,
        build_moving_stats,
    )

    return mean, variance, second_moment

  def _build_update_ops_variance(self, mean, variance, is_training):
tensorflow.identity
11,180
import tensorflow as tf
      self.assertAllEqual([[i * 2] * 5], result)

  def test_input_batch_size_should_be_one(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn
      def f(a):
        return a

      output = f(tf.constant([1, 2]))

      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(coord=coord)

      with self.assertRaises(tf.errors.CancelledError):
        session.run(output)
tensorflow.constant
11,181
import tensorflow as tf
      sess.run(v)

      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(np.int64(15), v.eval())

  def testSomeErrors(self):
    with tf.Graph().as_default():
      v0 = tf.Variable([10.0], name="v0")
      v1 = tf.Variable([20.0], name="v1")
      v2 = tf.Variable([20.0], name="v2")
      v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))

      # By default the name used for "v2" will be "v1" and raise an error.
      with self.assertRaisesRegexp(ValueError, "same name: v1"):
tensorflow.Graph
11,182
from tensorflow.python.framework import ops
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  with ops.op_scope([x, y], name, "truediv") as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if dtype is not None:
tensorflow.python.framework.ops.convert_to_tensor
11,183
import tensorflow as tf
    # of examples must be a multiple of the batch size, or else examples
    # will get dropped. So we pad with fake examples which are ignored
    # later on. These do NOT count towards the metric (all tf.metrics
    # support a per-instance weight, and these get a weight of 0.0).
    while len(eval_examples) % FLAGS.eval_batch_size != 0:
      eval_examples.append(PaddingInputExample())

    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
tensorflow.logging.info
11,184
from tensorflow.python.framework import tensor_util
    mvn = dists.MultivariateNormalDiag([mu], [sigma], validate_args=True)
    self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
    self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch))

    mvn = dists.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
    self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
    self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch))

    # We now test every codepath within the underlying is_scalar_helper
    # function.

    # Test case 1, 2.
    x = tf.placeholder(dtype=tf.int32, shape=[])
    # None would fire an exception were it actually executed.
tensorflow.python.framework.tensor_util.constant_value
11,185
import tensorflow as tf
    num_channels_in = input_dims[-1]
    filter_h, filter_w, num_channels_out = filter_dims
    stride_h, stride_w = stride_dims

    with tf.variable_scope(scope):
        conv_weight = tf.Variable(
            tf.truncated_normal([filter_h, filter_w, num_channels_in, num_channels_out],
                                stddev=0.1, dtype=tf.float32))
        conv_bias = tf.Variable(tf.zeros([num_channels_out], dtype=tf.float32))

        map = tf.nn.conv2d(input, conv_weight, strides=[1, stride_h, stride_w, 1],
                           padding=padding, dilations=dilation)

        if bias is True:
            map = tf.nn.bias_add(map, conv_bias)

        if non_linear_fn is not None:
            activation = non_linear_fn(map)
        else:
            activation = map
tensorflow.nn.conv2d
11,186
from tensorflow.python.ops import math_ops
    def compute_specificity_at_sensitivity(name):
      """Computes the specificity at the given sensitivity.

      Args:
        name: The name of the operation.

      Returns:
        The specificity using the aggregated values.
      """
      sensitivities = math_ops.div(tp, tp + fn + kepsilon)

      # We'll need to use this trick until tf.argmax allows us to specify
      # whether we should use the first or last index in case of ties.
      min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
      indices_at_minval = math_ops.equal(
          math_ops.abs(sensitivities - sensitivity), min_val)
      indices_at_minval = math_ops.to_int64(indices_at_minval)
      indices_at_minval = math_ops.cumsum(indices_at_minval)
      tf_index = math_ops.argmax(indices_at_minval, 0)
tensorflow.python.ops.math_ops.div
11,187
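A minimal sketch of math_ops.div as used in the sensitivity computation above; the counts and epsilon are illustrative:

import tensorflow as tf
from tensorflow.python.ops import math_ops

tp = tf.constant([5.0, 0.0])
fn = tf.constant([5.0, 0.0])
kepsilon = 1e-7  # keeps the ratio finite when tp + fn == 0
sensitivities = math_ops.div(tp, tp + fn + kepsilon)  # ~[0.5, 0.0]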
import tensorflow as tf

		KK = tf.matmul(K, K, transpose_b=True)
		# tf.trace was removed in TF 2.x (which tf.optimizers.Adam below implies);
		# tf.linalg.trace is the supported call
		K_trace = tf.expand_dims(tf.expand_dims(tf.linalg.trace(KK), -1), -1)
		K_loss = tf.reduce_mean(tf.abs(KK / K_trace - tf.eye(2)))

	loss_total_gen = crit_gen + rep_loss + K_loss
	gen_var = model.get_gen_vars()
	dis_var = model.dis.trainable_variables
	grads = tape.gradient([loss_total_gen, crit_dis], [gen_var, dis_var])
	return grads, [crit_dis, crit_gen, rep_loss, K_loss]

reader = datareader.DataReader(16)
model = network.RepNet()
optim = tf.optimizers.Adam(0.0001, 0.5)
saver = M.Saver(model)
saver.restore('./model/')

MAXITER = 10000
bar = tqdm(range(MAXITER+1))
for i in bar:
	batch = reader.get_next()
	grads, lss = grad_loss(batch, model)
	gen_var = model.get_gen_vars()
	dis_var = model.dis.trainable_variables
tensorflow.optimizers.Adam
11,188
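A minimal TF 2.x sketch of tf.optimizers.Adam in a tape-based step like the loop above; the model and data are stand-ins:

import tensorflow as tf

model = tf.keras.layers.Dense(1)
optim = tf.optimizers.Adam(0.0001, 0.5)  # learning_rate=1e-4, beta_1=0.5

x = tf.random.normal([16, 4])
y = tf.random.normal([16, 1])
with tf.GradientTape() as tape:
    loss = tf.reduce_mean(tf.square(model(x) - y))
grads = tape.gradient(loss, model.trainable_variables)
optim.apply_gradients(zip(grads, model.trainable_variables))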
import tensorflow as tf self._initial_state_name = util.with_prefix(self._name, "initial") self._final_state_name = util.with_prefix(self._name, "final") util.export_state_tuples(self._initial_state, self._initial_state_name) util.export_state_tuples(self._final_state, self._final_state_name) def import_ops(self): """Imports ops from collections.""" if self._is_training: self._train_op = tf.get_collection_ref("train_op")[0] self._lr = tf.get_collection_ref("lr")[0] self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params, rnn_params, base_variable_scope="Model/RNN") tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable) self._cost = tf.get_collection_ref(util.with_prefix(self._name, "cost"))[0] num_replicas = FLAGS.num_gpus if self._name == "Train" else 1 self._initial_state = util.import_state_tuples(
tensorflow.get_collection_ref
11,189
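A minimal TF 1.x sketch of tf.get_collection_ref, which returns the live list backing a graph collection (unlike tf.get_collection, which returns a copy); names are illustrative:

import tensorflow as tf

lr = tf.Variable(0.1, trainable=False, name="lr")
tf.add_to_collection("lr", lr)

ref = tf.get_collection_ref("lr")
assert ref[0] is lr                         # same objects, not copies
assert ref is tf.get_collection_ref("lr")   # same list on every call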
import tensorflow as tf with tf.variable_scope("lm_aggregation"): self.lm_weights = tf.nn.softmax(tf.get_variable("lm_scores", [lm_num_layers], initializer=tf.constant_initializer(0.0))) self.lm_scaling = tf.get_variable("lm_scaling", [], initializer=tf.constant_initializer(1.0)) flattened_lm_emb = tf.reshape(lm_emb, [num_sentences * max_sentence_length * lm_emb_size, lm_num_layers]) flattened_aggregated_lm_emb = tf.matmul(flattened_lm_emb, tf.expand_dims(self.lm_weights, 1)) # [num_sentences * max_sentence_length * emb, 1] aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length, lm_emb_size]) aggregated_lm_emb *= self.lm_scaling context_emb_list.append(aggregated_lm_emb) context_emb = tf.concat(context_emb_list, 2) # [num_sentences, max_sentence_length, emb] head_emb = tf.concat(head_emb_list, 2) # [num_sentences, max_sentence_length, emb] context_emb = tf.nn.dropout(context_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] head_emb = tf.nn.dropout(head_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length] context_outputs = self.lstm_contextualize(context_emb, text_len, text_len_mask) # [num_words, emb] num_words = util.shape(context_outputs, 0) genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]), genre) # [emb] sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1), [1, max_sentence_length]) # [num_sentences, max_sentence_length]
tensorflow.nn.dropout
11,190
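A minimal TF 1.x sketch of tf.nn.dropout in its keep_prob form, matching the usage above; shapes are illustrative:

import tensorflow as tf

emb = tf.placeholder(tf.float32, [None, 10, 300])
keep_prob = tf.placeholder_with_default(1.0, shape=[])  # feed < 1.0 only at train time
dropped = tf.nn.dropout(emb, keep_prob)  # surviving units are scaled by 1/keep_prob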
import tensorflow as tf lowering = mtf.Lowering(graph, {mesh: mesh_impl}) tf_loss = lowering.export_to_tf_tensor(loss) tf_loss = tf.to_float(tf_loss) if logits and mode != tf.estimator.ModeKeys.TRAIN: tf_logits = lowering.export_to_tf_tensor(logits) if mode == tf.estimator.ModeKeys.TRAIN: tf_update_ops = [lowering.lowered_operation(op) for op in update_ops] tf_update_ops.append(tf.assign_add(global_step, 1)) # tf.logging.info("tf_update_ops: {}".format(tf_update_ops)) train_op = tf.group(tf_update_ops) with mtf.utils.outside_all_rewrites(): # Copy master variables to slices. Must be called first. restore_hook = mtf.MtfRestoreHook(lowering) saver = tf.train.Saver( tf.global_variables(), sharded=True, max_to_keep=10, keep_checkpoint_every_n_hours=2, defer_build=False,
tensorflow.group
11,191
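A minimal TF 1.x sketch of tf.group, which bundles several ops into a single op with no outputs; the variables are stand-ins (recent TF versions also accept a plain list, as the snippet above does):

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
v = tf.Variable(0.0)
tf_update_ops = [tf.assign_add(v, 1.0), tf.assign_add(global_step, 1)]
train_op = tf.group(*tf_update_ops)  # running train_op runs every update

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)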
import tensorflow as tf

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])  # tf.array_ops is not a public module; tf.transpose is the supported call
    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    query_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
    d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
    output = d_layer_2_all
    return output
tensorflow.concat
11,192
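A minimal tf.concat sketch mirroring the attention feature construction above; the tensors are illustrative:

import tensorflow as tf

queries = tf.ones([2, 5, 8])
facts = tf.ones([2, 5, 8])
din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
print(din_all.get_shape())  # (2, 5, 32): four 8-wide features stacked on the last axis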
from tensorflow.python.framework import ops

  Args:
    value: A 4-D `Tensor` with shape `[batch, height, width, channels]` and
      type `tf.float32`.
    ksize: A list of ints that has length >= 4. The size of the window for
      each dimension of the input tensor.
    strides: A list of ints that has length >= 4. The stride of the sliding
      window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with type `tf.float32`. The max pooled output tensor.
  """
  with ops.op_scope([value], name, "MaxPool") as name:
    value = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops._max_pool(value, ksize=ksize, strides=strides,
                                padding=padding,
                                data_format=data_format,
                                name=name)


ops.RegisterShape("Relu")(common_shapes.unchanged_shape)
ops.RegisterShape("Relu6")(common_shapes.unchanged_shape)
ops.RegisterShape("Elu")(common_shapes.unchanged_shape)
ops.RegisterShape("Softplus")(common_shapes.unchanged_shape)
ops.RegisterShape("Softsign")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.op_scope
11,193
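A minimal sketch of ops.op_scope, the long-deprecated precursor of ops.name_scope seen in early TF internals; this assumes an old TF release where the (values, name, default_name) signature still exists:

from tensorflow.python.framework import ops

def scaled(value, name=None):
    with ops.op_scope([value], name, "Scaled") as scope:
        value = ops.convert_to_tensor(value, name="input")
        return value * 2.0  # ops built here are named under `scope`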
import tensorflow as tf

    acc_train = []  # store train accuracy for each epoch
    acc_test = []   # store test accuracy for each epoch

    if actL == 'sigmoid':  # accuracy score for binary class classification
        Yp = tf.greater(an, 0.5)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(Yp, tf.equal(Y, 1.0)), "float"))
    elif actL == 'esp' or actL == 'relu':  # r2 score
        norm = tf.reduce_mean(tf.squared_difference(Y, tf.reduce_mean(Y)))
        accuracy = 1 - tf.divide(tf.reduce_mean(tf.squared_difference(an, Y)), norm)
    elif actL == 'softmax':  # accuracy score for multiclass classification
        Yp = tf.sigmoid(betan * hn)
        correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
        accuracy = tf.reduce_mean(tf.cast(correct, "float"))

    # -----------------Initialize the graph and start the session-------------------------------------------------

    init = tf.global_variables_initializer()
tensorflow.squared_difference
11,194
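A minimal tf.squared_difference sketch reproducing the R²-style score above with illustrative values:

import tensorflow as tf

Y = tf.constant([1.0, 2.0, 3.0])
an = tf.constant([1.5, 2.0, 2.0])
norm = tf.reduce_mean(tf.squared_difference(Y, tf.reduce_mean(Y)))  # variance of Y
r2 = 1 - tf.divide(tf.reduce_mean(tf.squared_difference(an, Y)), norm)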
import tensorflow as tf x = tf.matmul(x, tf.nn.l2_normalize(V.initialized_value(), 0)) init_scale = .01 m_init, v_init = tf.nn.moments(x, [0]) scale_init = init_scale / tf.sqrt(v_init + 1e-10) with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]): x = tf.reshape(scale_init, [1, num_units]) * (x - tf.reshape(m_init, [1, num_units])) else: V = maybe_avg(V) g = maybe_avg(g) b = maybe_avg(b) x = tf.matmul(x, V) scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0])) x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(b, [1, num_units]) return x def sample_from_discretized_mix_logistic(l, nr_mix): """ This function is copied from https://github.com/openai/pixel-cnn/blob/master/pixel_cnn_pp/nn.py in reference to: See [Salimans et. al., 2017](https://arxiv.org/pdf/1701.05517) ([pdf](https://arxiv.org/pdf/1701.05517.pdf)) log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval
tensorflow.square
11,195
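A minimal tf.square sketch of the weight-norm scaler above (g divided by the per-unit L2 norm of V); shapes are illustrative:

import tensorflow as tf

V = tf.random_normal([128, 64])  # [in_dim, num_units]
g = tf.ones([64])
col_norms = tf.sqrt(tf.reduce_sum(tf.square(V), [0]))  # one L2 norm per column
scaler = g / col_norms  # shape [64], broadcast over rows when applied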
import tensorflow as tf new_shape.append(self.hparams.num_blocks) new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks)) c = tf.to_int32(tf.reshape(c, shape=new_shape)) h1_shape = shape_x h1_shape.append(self.hparams.hidden_size) h1 = tf.zeros(dtype=tf.float32, shape=h1_shape) c_int = self.bit_to_int( c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2) c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1) c_hot_flat = tf.reshape( c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]) h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means) h1 = tf.transpose(h1, perm=[1, 0, 2]) h1 = tf.reshape(h1, shape=h1_shape) h1_shape[0] = self.hparams.batch_size h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") res = tf.layers.dense( tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") return res def discrete_bottleneck(self, x): """Discretization bottleneck for latent variables.
tensorflow.transpose
11,196
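A minimal tf.transpose sketch of the block-axis swap used for the batched matmul above; all dimensions are illustrative:

import tensorflow as tf

c_hot_flat = tf.ones([32, 4, 256])  # [batch, num_blocks, block_v_size]
means = tf.ones([4, 256, 64])       # [num_blocks, block_v_size, hidden]
h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), means)  # [4, 32, 64]
h1 = tf.transpose(h1, perm=[1, 0, 2])                            # back to [32, 4, 64]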
import tensorflow as tf a_indices = op.inputs[0:numTensors] a_values = op.inputs[numTensors:numTensors*2] a_shape = op.inputs[numTensors*2:numTensors*3] b = op.inputs[numTensors*3] adj_a = op.get_attr("adjoint_a") adj_b = op.get_attr("adjoint_b") # gradient w.r.t. dense a_values_grads = [] b_list = [b[i] for i in range(numTensors)] b_grads = b_module.bspmm(a_indices, a_values, a_shape, grad, adjoint_a=True, adjoint_b=False) bg_row=tf.shape(b_grads[0])[0] bg_col=tf.shape(b_grads[0])[1] b_grads = tf.reshape(b_grads, (numTensors * bg_row, bg_col)) if adj_b: b_grads = [array_ops.transpose(b_g) for b_g in b_grads] for t in range(numTensors): rows = a_indices[t][:, 0] cols = a_indices[t][:, 1] parts_a = array_ops.gather(grad[t], rows if not adj_a else cols) parts_b = array_ops.gather(b_list[t] if not adj_b else array_ops.transpose(b_list[t]), cols if not adj_a else rows) a_values_grads.append(math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1))
tensorflow.shape
11,197
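A minimal tf.shape sketch: unlike the static .shape attribute, tf.shape yields the runtime shape as an int32 tensor, so it works with unknown dimensions like the batch size here (illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 7])
rows = tf.shape(x)[0]  # dynamic: resolved only when the graph runs
cols = tf.shape(x)[1]
flat = tf.reshape(x, (rows * cols,))  # works despite the unknown batch dim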
import tensorflow as tf p.task_params.Define('a', p0, '') p.task_params.Define('b', p1, '') return p def _testSampleTaskHelper(self, p): model = p.cls(p) task_to_id = {model.children['a']: 'a', model.children['b']: 'b'} task_counts = {'a': 0, 'b': 0} # initialize tensorflow graph and global step with self.session() as sess: tf.global_variables_initializer().run() global_step = sess.run(model.global_step) for _ in range(100): task = model.SampleTask(global_step) task_counts[task_to_id[task]] += 1 self.assertEqual(task_counts['a'], 83) self.assertEqual(task_counts['b'], 17) def testSampleTaskSpecifiedWithoutScheduler(self): """Expected distribution: 'a': 0.8 , 'b': 0.2.""" p = self._setUpTestSampleTask()
tensorflow.global_variables_initializer
11,198
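A minimal TF 1.x sketch of tf.global_variables_initializer, mirroring the test pattern above:

import tensorflow as tf

v = tf.Variable(3.0)
with tf.Session() as sess:
    tf.global_variables_initializer().run()  # must run before any variable is read
    print(sess.run(v))  # 3.0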
import tensorflow as tf decode_data = None, capacity = 1024, batch_size = 32, scope = None): encode = tf.placeholder(tf.int32, shape=[None], name="encode") decode = tf.placeholder(tf.int32, shape=[decode_max_length + 2], name="decode") weight = tf.placeholder(tf.float32, shape=[decode_max_length + 1], name="weight") queue = tf.PaddingFIFOQueue(capacity = capacity, dtypes = [tf.int32, tf.int32, tf.float32], shapes = [[None], [decode_max_length + 2], [decode_max_length + 1]], name = 'FIFOQueue') enqueue_op = queue.enqueue([encode, decode, weight])
tensorflow.PaddingFIFOQueue
11,199
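A minimal TF 1.x sketch of tf.PaddingFIFOQueue: a None dimension in `shapes` lets dequeue_many pad variable-length elements to the longest in the batch (values are illustrative):

import tensorflow as tf

queue = tf.PaddingFIFOQueue(capacity=8, dtypes=[tf.int32], shapes=[[None]])
enq = [queue.enqueue([tf.constant([1, 2])]),
       queue.enqueue([tf.constant([3, 4, 5])])]
batch = queue.dequeue_many(2)  # pads each element to the batch max length

with tf.Session() as sess:
    sess.run(enq)
    print(sess.run(batch))  # [[1 2 0] [3 4 5]]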