Columns:
seed: string (lengths 25 to 2.89k)
seed_api: string (lengths 14 to 102)
index: int64 (0 to 14.8k)
import tensorflow as tf

        out_bw = tf.reverse_sequence(
            out_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
        outputs.append(tf.concat([out_fw, out_bw], axis=2))
    if concat_layers:
        res = tf.concat(outputs[1:], axis=2)
    else:
        res = outputs[-1]
    return res


class ptr_net:
    def __init__(self, batch, hidden, keep_prob=1.0, is_train=None, scope="ptr_net"):
        self.gru = tf.contrib.rnn.GRUCell(hidden)
        self.batch = batch
        self.scope = scope
        self.keep_prob = keep_prob
        self.is_train = is_train
        self.dropout_mask = dropout(tf.ones(
            [batch, hidden], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train)

    def __call__(self, init, match, d, mask):
        with tf.variable_scope(self.scope):
            d_match = dropout(match, keep_prob=self.keep_prob,
                              is_train=self.is_train)
tensorflow.contrib.rnn.GRUCell
10,600
import tensorflow as tf

        max_seq_length=FLAGS.max_seq_length,
        max_predictions_per_seq=FLAGS.max_predictions_per_seq,
        is_training=True,
        batch_size=FLAGS.train_batch_size,
        use_hvd=FLAGS.use_hvd)

    if FLAGS.auto_recover:
        hooks.append(tf.data.experimental.CheckpointInputPipelineHook(estimator))

    estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=hooks)

    if FLAGS.do_eval:
        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
tensorflow.data.experimental.CheckpointInputPipelineHook
10,601
import tensorflow as tf with tf.name_scope("Valid"): valid_input = PTBInput(config=config, data=valid_data, name="ValidInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mvalid = PTBModel(is_training=False, config=config, input_=valid_input) tf.summary.scalar("Validation Loss", mvalid.cost) with tf.name_scope("Test"): test_input = PTBInput( config=eval_config, data=test_data, name="TestInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mtest = PTBModel(is_training=False, config=eval_config, input_=test_input)
tensorflow.name_scope
10,602
import tensorflow as tf

    logger.debug("Conditional: Kernel")
    num_data = tf.shape(X)[0]  # M
tensorflow.shape
10,603
import tensorflow as tf
import numpy as np
import time
import scipy.io

np.random.seed(1234)
tf.set_random_seed(1234)


class PhysicsInformedNN:
    # Initialize the class
    def __init__(self, x0, u0, x1, u1, layers, dt, lb, ub, q):
tensorflow.set_random_seed
10,604
import tensorflow as tf

        _MergeCandidates, (tokens, candidates),
        parallel_iterations=1, back_prop=False)[0]

    def Encode(self, text):
        """Converts string `text` to integer ids and the encoded string.

        Encoding includes prefixing the beginning-of-word token to each word.

        Returns:
          ids: the encoded integer ids.
          tokens: the encoded string.
        """
        words = tf.sparse.to_dense(tf.strings.split([text]), default_value='')[0]
        num_words = tf.size(words)
        ids_ta = tf.TensorArray(tf.int32, 0, dynamic_size=True)

        def _WordsToIds(i, words, ids_ta):
            encoded_ids = self._EncodeToIds(BOW_STR + words[i])
            ids_ta = ids_ta.scatter(
                tf.range(ids_ta.size(),
                         ids_ta.size() + tf.size(encoded_ids)),
                encoded_ids)
            return i + 1, words, ids_ta

        _, _, ids_ta = tf.while_loop(
            lambda i, *_: i < num_words,
tensorflow.strings.split
10,605
import tensorflow as tf

    return tf.cond(
        pred=tf.greater(tf.size(input=masks), 0),
        true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
        false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))
tensorflow.map_fn
10,606
import tensorflow as tf

        # state, target and action
        self.state = tf.placeholder(tf.float32, [None, num_state], name="state")
        self.target = tf.placeholder(tf.float32, [None, 1], name="target")
        self.a_his = tf.placeholder(tf.float32, [None, num_action], name="action_hist")

        # layers
tensorflow.placeholder
10,607
import tensorflow as tf

    rnn_input = highway_input

    # Bidirectional RNN
    outputs, states = tf.nn.bidirectional_dynamic_rnn(
        GRUCell(half_depth),
        GRUCell(half_depth),
        rnn_input,
        sequence_length=input_lengths,
        dtype=tf.float32)
    return tf.concat(outputs, axis=2)  # Concat forward and backward


def highwaynet(inputs, scope, depth):
    with tf.variable_scope(scope):
        H = tf.layers.dense(
            inputs,
            units=depth,
            activation=tf.nn.relu,
tensorflow.concat
10,608
import tensorflow as tf

    images, labels = input_name.build_input(
        FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode)  # FLAGS.mode='attack', batch_size=200
    Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
    Res.build_graph()
    saver = tf.train.Saver()

    # Open session and restore checkpoint
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    tf.train.start_queue_runners(sess)
    ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)  # Choose dir according to rt
    tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
    num_sample = hps.batch_size * FLAGS.eval_batch_count

    # Initialize results to save
tensorflow.train.start_queue_runners
10,609
import tensorflow as tf

        # The final prediction is the average of the predictions for each word
        # weighted by the individual confidence/utility scores.
        wvs = tf.pack(self._inputs)
        wvs_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
                              tf.reshape(wvs, [-1, in_size]))
        wvs_weighted_reshaped = tf.reshape(wvs_weighted, wvs.get_shape())
        wvsum = tf.reduce_sum(wvs_weighted_reshaped, 0)

        pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
        pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

        # Make a prediction for each tweet.
        def GetWordPred(o_):
            logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
            return tf.nn.softmax(logits)

        preds = GetWordPred(wvsum)
        z = tf.tile(tf.reshape(tf.reduce_sum(preds, 1), [-1, 1]), [1, out_vocab_size])
        self.preds, self.z = preds, z
        self.probs = tf.div(preds, z)  # normalize
        self.unweighted_xent = _SafeXEnt(self.y, self.probs)
        self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights)
        self.cost = tf.reduce_mean(self.example_weights * self._xent)
tensorflow.nn.xw_plus_b
10,610
import tensorflow as tf

        inputs = tf.random.uniform(
            [100, 8], minval=-10, maxval=10.0, dtype=tf.float32)
        centers = tf.random.uniform(
            [5, 8], minval=-10, maxval=10.0, dtype=tf.float32)
        distances1 = isu.inputs_distances_to_centers(inputs, centers)
        num_centers = tf.shape(centers)[0]
        inputs_reshaped = tf.tile(tf.expand_dims(inputs, axis=1),
                                  tf.stack([1, num_centers, 1]))
        distances2 = tf.reduce_sum(tf.square(inputs_reshaped - centers), axis=2)
        self.assertAllClose(distances1.numpy(), distances2.numpy(), atol=0.001)

    def test_pairwise_iou_matrix(self):
        mask0 = tf.constant([[1, 0], [0, 1]], dtype=tf.float32)
        mask1 = tf.constant([[1, 1], [0, 1]], dtype=tf.float32)
tensorflow.square
10,611
import tensorflow as tf

        one_hot_labels = tf.one_hot(
            label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

        # The `positions` tensor might be zero-padded (if the sequence is too
        # short to have the maximum number of predictions). The `label_weights`
        # tensor has a value of 1.0 for every real prediction and 0.0 for the
        # padding predictions.
        per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
        numerator = tf.reduce_sum(label_weights * per_example_loss)
        denominator = tf.reduce_sum(label_weights) + 1e-5
        loss = numerator / denominator

    return (loss, per_example_loss, log_probs)


def get_next_sentence_output(bert_config, input_tensor, labels):
tensorflow.reduce_sum
10,612
import tensorflow as tf

        patches = tf.image.extract_patches(tf.expand_dims(x, 0), sizes=window,
                                           strides=window, rates=[1, 1, 1, 1], padding='VALID')
        patches = tf.reshape(patches, [n_patch, patch_size, patch_size, n_channel])
        patches = tf.random.shuffle(patches)
        rows = tf.split(patches, n_col // patch_size, axis=0)
tensorflow.random.shuffle
10,613
import tensorflow as tf

    brodcast_mean = tf.reshape(mean, broadcast_shape)
    std = tf.reduce_mean(tf.square(_x - brodcast_mean) + epsilon, axis=reduction_axes)
    std = tf.sqrt(std)
    brodcast_std = tf.reshape(std, broadcast_shape)
    x_normed = (_x - brodcast_mean) / (brodcast_std + epsilon)
    # x_normed = tf.layers.batch_normalization(_x, center=False, scale=False)
tensorflow.reshape
10,614
import tensorflow as tf

    if reuse:
        tf.get_variable_scope().reuse_variables()
tensorflow.get_variable_scope
10,615
import tensorflow as tf

    # Mask
    # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1])  # [B, T]
    key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
    paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
    if not forCnn:
        scores = tf.where(key_masks, scores, paddings)  # [B, 1, T]
tensorflow.ones_like
10,616
import tensorflow as tf

    '''Heteroscedastic loss.'''
    def het_loss(y_true, y_pred):
        y_mean = y_pred[:, :, :, :3]
        y_logvar = y_pred[:, :, :, 3:]
        y_logvar = K.clip(y_logvar, -10, 10)
        if mode == 'l2':
            euclidian_loss = K.square(y_true / 127.5 - y_mean / 127.5)
        elif mode == 'l1':
            euclidian_loss = K.abs(y_true / 127.5 - y_mean / 127.5)

        loss = tf.exp(-y_logvar) * euclidian_loss + y_logvar
        loss *= 127.5
        if mode == 'l2':
            loss *= 127.5

        if attention:
            attention_mask = K.sigmoid(y_logvar)
            if block_attention_gradient:
                attention_mask = K.stop_gradient(attention_mask)
tensorflow.exp
10,617
import tensorflow as tf

        Tout=tf.float32)
    gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])
tensorflow.reshape
10,618
import tensorflow as tf

        An implementation of the A3C algorithm that is reasonably well-tuned for the VNC
        environments. Below, we will have a modest amount of complexity due to the way
        TensorFlow handles data parallelism. But overall, we'll define the model, specify
        its inputs, and describe how the policy gradients step should be computed.
        """
        self.env = env
        self.task = task
        self.freeze = freeze
        worker_device = "/job:worker/task:{}/cpu:0".format(task)
        with tf.device(tf.train.replica_device_setter(1, worker_device=worker_device)):
            with tf.variable_scope("global"):
                self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n)
                self.global_step = tf.get_variable("global_step", [], tf.int32,
                                                   initializer=tf.constant_initializer(0, dtype=tf.int32),
                                                   trainable=False)

        with tf.device(worker_device):
            with tf.variable_scope("local"):
                self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n)
                pi.global_step = self.global_step

            self.ac = tf.placeholder(tf.float32, [None, env.action_space.n], name="ac")
            self.adv = tf.placeholder(tf.float32, [None], name="adv")
            self.r = tf.placeholder(tf.float32, [None], name="r")

            log_prob_tf = tf.nn.log_softmax(pi.logits)
            prob_tf = tf.nn.softmax(pi.logits)
tensorflow.constant_initializer
10,619
import tensorflow as tf

                res = sess.run(dec)
                self.assertEqual(3, len(res))
                self.assertEqual((2, 4), res[0].shape)

                res = sess.run([mem])
                self.assertEqual((2, 2), res[0].shape)

    def testAttentionDecoder2(self):
        with self.test_session() as sess:
            with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
                cell = tf.nn.rnn_cell.GRUCell(2)
                inp = [tf.constant(0.5, shape=[2, 2])] * 2
                enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
                attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
                                            for e in enc_outputs])
                dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
                dec, mem = tf.nn.seq2seq.attention_decoder(
                    dec_inp, enc_state, attn_states, cell, output_size=4, num_heads=2)
                sess.run([tf.global_variables_initializer()])
                res = sess.run(dec)
tensorflow.constant
10,620
import tensorflow as tf

    'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
    'end_learning_rate', 0.00005,
    'The minimal end learning rate used by a polynomial decay learning rate.')

# for learning rate exponential_decay
tf.app.flags.DEFINE_float(
    'learning_rate_decay_factor', 0.96, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
    'decay_steps', 1000,
    'Number of epochs after which learning rate decays.')

# for learning rate piecewise_constant decay
tensorflow.app.flags.DEFINE_float
10,621
import tensorflow as tf

            setattr(
                self,
                'kappa_%s' % layer,
                tf.constant(1.))
            setattr(
                self,
                'omega_%s' % layer,
                tf.constant(1.))
        if self.adapation:
            setattr(
                self,
                'eta_%s' % layer,
                tf.get_variable(
                    name='%s_eta' % self.layer_name,
tensorflow.constant
10,622
import tensorflow as tf

    def import_ops(self):
        """Imports ops from collections."""
        if self._is_training:
            self._train_op = tf.get_collection_ref("train_op")[0]
            self._lr = tf.get_collection_ref("lr")[0]
            self._new_lr = tf.get_collection_ref("new_lr")[0]
            self._lr_update = tf.get_collection_ref("lr_update")[0]
            rnn_params = tf.get_collection_ref("rnn_params")
            if self._cell and rnn_params:
                params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
                    self._cell,
                    self._cell.params_to_canonical,
                    self._cell.canonical_to_params,
                    rnn_params,
tensorflow.get_collection_ref
10,623
import tensorflow as tf

        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
                                device_count={'GPU': gpu})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5

        # Placeholders
        self.sess = tf.Session(config=config)
        self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
        self.a_bound = (env.action_space.high - env.action_space.low) / 2
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
        self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
        self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
        self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r')

        # Dataset with experience replay
        self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state,
                                                           'actions': self.actions,
                                                           'rewards': self.rewards,
                                                           'advantage': self.advantage})
        self.dataset = self.dataset.shuffle(buffer_size=10000)
        self.dataset = self.dataset.batch(self.MINIBATCH)
        self.dataset = self.dataset.cache()
        self.dataset = self.dataset.repeat(self.EPOCHS)
        self.data_iter = self.dataset.make_initializable_iterator()
tensorflow.placeholder
10,624
import tensorflow as tf

        q_vals, _ = self._critic(critic_input, training=False)
        q_vals_vec = tf.reshape(q_vals, (batch_size, num_tasks))

        rewards, dones = self._task_distribution.evaluate(states_tiled,
                                                          actions_tiled,
                                                          tasks_tiled)
        dones = tf.cast(dones, tf.float32)
        rewards_vec = tf.reshape(rewards, (batch_size, num_tasks))
        dones_vec = tf.reshape(dones, (batch_size, num_tasks))

        relabelled_obs = self._task_distribution.combine(states_tiled, tasks_tiled)
        action_distribution = self._actor(
tensorflow.cast
10,625
from tensorflow.python.ops import math_ops

        return moving_averages.assign_moving_average(
            moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
        n = math_ops.cast(global_step, dtypes.float32)
        decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean


def adaptive_clipping_fn(std_factor=2.,
                         decay=0.95,
                         static_max_norm=None,
                         global_step=None,
                         report_summary=False,
                         epsilon=1e-8,
                         name=None):
    """Adapt the clipping value using statistics on the norms.
tensorflow.python.ops.math_ops.maximum
10,626
import tensorflow as tf

    else:
        raise ValueError("Unrecognized initializer: %s" % params.initializer)


def get_learning_rate_decay(learning_rate, global_step, params):
    if params.learning_rate_decay == "noam":
        step = tf.to_float(global_step)
        warmup_steps = tf.to_float(params.warmup_steps)
        multiplier = params.hidden_size ** -0.5
        decay = multiplier * tf.minimum((step + 1) * (warmup_steps ** -1.5),
                                        (step + 1) ** -0.5)
tensorflow.to_float
10,627
import tensorflow as tf """ L= len(activation) #number of layers m = Y.shape[1] #number of training examples last = activation[L-1] labels= tf.transpose(Y) if last == 'sigmoid' or last == 'softmax': #use cross entropy loss function logits= tf.transpose(betan*zn[1]) cost = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(logits = logits, multi_class_labels=labels)) elif last == 'esp' or last == 'relu': #use minimum squared error (L2 loss) out = tf.transpose(zn[0]) cost = tf.reduce_mean(tf.squared_difference(out, labels))/2 return cost #------------Hessian------------------- def flatten(tensor):
tensorflow.losses.sigmoid_cross_entropy
10,628
import tensorflow as tf

            self.paddings = [[size, size], [size, size], [0, 0]]
        elif type == 'no_op':
            self.augment = self.no_op

    def gaussian_kernel(self, size, mean, std):
        """Makes 2D gaussian Kernel for convolution."""
        d = tfp.distributions.Normal(mean, std)
        vals = d.prob(tf.range(start=-size, limit=size + 1, dtype=tf.float32))
        gauss_kernel = tf.einsum('i,j->ij', vals, vals)
        return gauss_kernel / tf.reduce_sum(gauss_kernel)

    def get_random_patch_size(self):
        return np.random.choice([1, 2, 4, 8])

    def scramble(self, x):
        # assume square patch
        n_row, n_col, n_channel = x.shape
tensorflow.einsum
10,629
import tensorflow as tf

        self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
        output = output.write(i, self_attention_tmp)
        return batch, output, i + 1

    output_ta = tf.TensorArray(dtype=tf.float32,
                               size=0,
                               dynamic_size=True,
                               element_shape=(facts[:, 0, :].get_shape()))
    _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
    self_attention = output_op.stack()
    self_attention = tf.transpose(self_attention, perm=[1, 0, 2])
    return self_attention


def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)
tensorflow.while_loop
10,630
import tensorflow as tf

                x = np.arange(-100, 100, 2).astype(dtype)
                tf.initialize_all_variables().run()
                proba = qdist.log_prob(x)
                grads = tf.gradients(proba, [mu, sigma])
                self._assert_all_finite(proba.eval())
                self._assert_all_finite(grads[0].eval())
                self._assert_all_finite(grads[1].eval())

    def test_prob_and_grad_gives_finite_results_for_common_events(self):
        with self.test_session():
            mu = tf.Variable(0.0, name="mu")
            sigma = tf.Variable(1.0, name="sigma")
            qdist = distributions.QuantizedDistribution(
                base_dist_cls=distributions.Normal,
                mu=mu,
                sigma=sigma)
            x = tf.ceil(4 * self._rng.rand(100).astype(np.float32) - 2)
            tf.initialize_all_variables().run()
            proba = qdist.prob(x)
            self._assert_all_finite(proba.eval())
tensorflow.Variable
10,631
import tensorflow as tf

'''
num_channels = 3
# fov_size = 3
# loc_std = 0.8

# Used to initialize weights for policy and value output layers
# (Do we need to use that? Maybe not now)
def normalized_columns_initializer(std=1.0):
    def _initializer(shape, dtype=None, partition_info=None):
        out = np.random.randn(*shape).astype(np.float32)
        out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
        return tf.constant(out)
    return _initializer


'''
G(x) = 1/(sigma*(2*pi)^(1/2)) * e^{-(1/2)*[(x-u)/sigma]^2}
'''
def gaussian_pdf(mean, loc_std, sample):
    Z = 1.0 / (loc_std * tf.sqrt(2.0 * np.pi))
    a = - tf.square(sample - mean) / (2.0 * tf.square(loc_std))
tensorflow.constant
10,632
import tensorflow as tf

        if len(shape) != 4:
            raise ValueError("Input data of instancebn layer has to be 4D tensor")

        if data_format == 'NHWC':
            axis = [1, 2]
            ch = shape[3]
            new_shape = [1, 1, 1, ch]
        else:
            axis = [2, 3]
            ch = shape[1]
            new_shape = [1, ch, 1, 1]
        if ch is None:
            raise ValueError("Input of instancebn require known channel!")

        mean, var = tf.nn.moments(inputdata, axis, keep_dims=True)

        if not use_affine:
            return tf.divide(inputdata - mean, tf.sqrt(var + epsilon), name='output')

        beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
        beta = tf.reshape(beta, new_shape)
        gamma = tf.get_variable('gamma', [ch], initializer=tf.constant_initializer(1.0))
        gamma = tf.reshape(gamma, new_shape)
        return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)

    @staticmethod
    def dropout(inputdata, keep_prob, noise_shape=None, name=None):
        """
tensorflow.nn.moments
10,633
import tensorflow as tf

tf.app.flags.DEFINE_string('device', '/gpu:0', "device")
tf.app.flags.DEFINE_string('dataset', 'cifar10', "{cifar10, svhn}")
tf.app.flags.DEFINE_string('log_dir', "", "log_dir")
tf.app.flags.DEFINE_integer('seed', 1, "initial random seed")
tf.app.flags.DEFINE_bool('validation', False, "")
tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch")
tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch")
tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch")
tf.app.flags.DEFINE_integer('eval_freq', 5, "")
tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training")
tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay")
tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch")
tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial learning rate")
tf.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate")
tf.app.flags.DEFINE_float('mom2', 0.5, "momentum rate after epoch_decay_start")
tf.app.flags.DEFINE_string('method', 'vat', "{vat, vatent, baseline}")
tensorflow.app.flags.DEFINE_integer
10,634
import tensorflow as tf

    if FLAGS.do_serve:
        def serving_input_fn():
            with tf.variable_scope("foo"):
                feature_spec = {
                    "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
                    "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
                    "segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
                    "label_ids": tf.FixedLenFeature([], tf.int64),
                }
                serialized_tf_example = tf.placeholder(dtype=tf.string,
                                                       shape=[None],
                                                       name='input_example_tensor')
tensorflow.FixedLenFeature
10,635
import tensorflow as tf

        self.setup_args = SimpleNamespace(
            sampler_cls=sampler_cls, sampler_args=sampler_args)

    def initialize_tf_vars(self):
        """Initialize all uninitialized variables in session."""
        with tf.name_scope('initialize_tf_vars'):
            uninited_set = [
                e.decode()
                for e in self.sess.run(tf.report_uninitialized_variables())
            ]
            self.sess.run(
tensorflow.name_scope
10,636
import tensorflow as tf

        fvar = Knn - tf.reduce_sum(tf.square(A), 0)
        fvar = tf.tile(fvar[None, :], [num_func, 1])  # R x N

    # another backsubstitution in the unwhitened case
    if not white:
        A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False)

    # construct the conditional mean
    fmean = tf.matmul(A, f, transpose_a=True)

    if q_sqrt is not None:
        if q_sqrt.get_shape().ndims == 2:
            LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2)  # R x M x N
        elif q_sqrt.get_shape().ndims == 3:
            L = tf.matrix_band_part(q_sqrt, -1, 0)  # R x M x M
            A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1]))
tensorflow.matmul
10,637
import tensorflow as tf

        grads = tf.gradients(cost, trainables)
        grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.clip_norm)
tensorflow.clip_by_global_norm
10,638
import tensorflow as tf

        self.config = config
        self.demo = demo
        self.graph = graph if graph is not None else tf.Graph()
        with self.graph.as_default():
            self.global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32,
                                               initializer=tf.constant_initializer(0), trainable=False)
            self.dropout = tf.placeholder_with_default(0.0, (), name="dropout")
            if self.demo:
                self.c = tf.placeholder(tf.int32, [None, config.test_para_limit], "context")
                self.q = tf.placeholder(tf.int32, [None, config.test_ques_limit], "question")
                self.ch = tf.placeholder(tf.int32, [None, config.test_para_limit, config.char_limit], "context_char")
                self.qh = tf.placeholder(tf.int32, [None, config.test_ques_limit, config.char_limit], "question_char")
                self.y1 = tf.placeholder(tf.int32, [None, config.test_para_limit], "answer_index1")
                self.y2 = tf.placeholder(tf.int32, [None, config.test_para_limit], "answer_index2")
            else:
                self.c, self.q, self.ch, self.qh, self.y1, self.y2, self.qa_id = batch.get_next()
tensorflow.placeholder
10,639
import tensorflow as tf

        self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))
        self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))
        self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q))  # dummy variable for fwd_gradients
        self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q))  # dummy variable for fwd_gradients
tensorflow.placeholder
10,640
import tensorflow as tf

                res = sess.run([mem])
                self.assertEqual(1, len(res))
                self.assertEqual((2, 2), res[0].shape)

    def testEmbeddingRNNDecoder(self):
        with self.test_session() as sess:
            with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
                inp = [tf.constant(0.5, shape=[2, 2])] * 2
                cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
                _, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
                dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
                dec, mem = tf.nn.seq2seq.embedding_rnn_decoder(
                    dec_inp, enc_state, cell, num_symbols=4, embedding_size=2)
                sess.run([tf.global_variables_initializer()])
                res = sess.run(dec)
                self.assertEqual(3, len(res))
                self.assertEqual((2, 2), res[0].shape)

                res = sess.run([mem])
                self.assertEqual(1, len(res))
                self.assertEqual((2, 2), res[0].c.shape)
                self.assertEqual((2, 2), res[0].h.shape)

    def testEmbeddingRNNSeq2Seq(self):
        with self.test_session() as sess:
            with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
                enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
tensorflow.global_variables_initializer
10,641
import tensorflow as tf

    logits, feat = resnet_model_fn(x, training=training_flag)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits))
    Focal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5))
tensorflow.nn.softmax_cross_entropy_with_logits
10,642
import tensorflow as tf

        shape=[None], name='input_example_tensor')
    receiver_tensors = {'examples': serialized_tf_example}
    features = tf.parse_example(serialized_tf_example, feature_spec)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
tensorflow.parse_example
10,643
import tensorflow as tf

    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    label_id = label_map[example.label]
    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

    feature = InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        label_id=label_id,
        is_real_example=True)
    return feature
tensorflow.logging.info
10,644
import tensorflow as tf

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
        linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)

        self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)
        self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
        self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, "events.out.*")))


class EagerLinearRegressionBenchmark(tf.test.Benchmark):

    def benchmarkEagerLinearRegression(self):
        num_epochs = 10
        num_batches = 200
        batch_size = 64
        dataset = linear_regression.synthetic_dataset(
            w=tf.random_uniform([3, 1]),
            b=tf.random_uniform([1]),
            noise_level=0.01,
            batch_size=batch_size,
            num_batches=num_batches)
        burn_in_dataset = dataset.take(10)

        model = linear_regression.LinearModel()

        with tf.device(device()):
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

            # Perform burn-in.
            linear_regression.fit(model, burn_in_dataset, optimizer)
tensorflow.random_uniform
10,645
import tensorflow as tf

    def _decode_lstm(self, x, h, context, dropout=False, reuse=False):
        with tf.variable_scope('logits', reuse=reuse):
            w_h = tf.get_variable('w_h', [self.H, self.M], initializer=self.weight_initializer)
            b_h = tf.get_variable('b_h', [self.M], initializer=self.const_initializer)
            w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
            b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)

            if dropout:
                h = tf.nn.dropout(h, 0.5)
            h_logits = tf.matmul(h, w_h) + b_h

            if self.ctx2out:
                w_ctx2out = tf.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer)
                h_logits += tf.matmul(context, w_ctx2out)

            if self.prev2out:
tensorflow.nn.dropout
10,646
from tensorflow.python.ops import math_ops

    # Accumulate the prediction to current confusion matrix.
    current_cm = confusion_matrix_ops.confusion_matrix(
        predictions, labels, num_classes, weights=weights, dtype=cm_dtype)
    update_op = state_ops.assign_add(total_cm, current_cm)

    def compute_mean_iou(name):
        """Compute the mean intersection-over-union via the confusion matrix."""
        sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
        sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
        cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
        denominator = sum_over_row + sum_over_col - cm_diag

        # If the value of the denominator is 0, set it to 1 to avoid
        # zero division.
        denominator = math_ops.select(
            math_ops.greater(denominator, 0),
tensorflow.python.ops.math_ops.reduce_sum
10,647
from tensorflow.python.ops import array_ops

        """Returns the list of implicitly captured inputs."""
        return self._extra_inputs

    def _create_definition_if_needed(self):
        """Creates the function definition if it's not created yet."""
        if self._definition is not None:
            return

        # Create the func_def object.
        temp_graph = _FuncGraph()
        with temp_graph.as_default():
            # List of placeholders for the function_def.
            inputs = []
            for (argname, argtype) in self._args:
                argholder = array_ops.placeholder(argtype, name=argname)
                inputs.append(argholder)
            # Call func and gather the output tensors.
            with vs.variable_scope("", custom_getter=temp_graph.getvar):
                outputs = self._func(*inputs)
            # If func only returned one value, make it a tuple.
            if not isinstance(outputs, (list, tuple)):
                outputs = (outputs,)
            if any([_ is None for _ in outputs]):
                raise ValueError("Function can not return None.")
            # Ensures each output is a Tensor.
            outputs = [ops.convert_to_tensor(_) for _ in outputs]
            self._extra_inputs = temp_graph.extra_inputs
            inputs.extend(temp_graph.extra_args)
tensorflow.python.ops.array_ops.placeholder
10,648
import tensorflow as tf

        matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
        if len(args) == 1:
            res = tf.matmul(args[0], matrix)
        else:
            res = tf.matmul(tf.concat(values=args, axis=1), matrix)
        if not bias:
            return res
        bias_term = tf.get_variable("Bias", [output_size],
                                    initializer=tf.constant_initializer(bias_start))
tensorflow.concat
10,649
import tensorflow as tf

    return auc


def attention(query, facts, attention_size, mask, stag='null', mode='LIST',
              softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    mask = tf.equal(mask, tf.ones_like(mask))
    hidden_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    input_size = query.get_shape().as_list()[-1]

    # Trainable parameters
    w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
    b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    with tf.name_scope('v'):
tensorflow.ones_like
10,650
import tensorflow as tf

            G = tf.Print(
                self.end_points_G['softmax'],
                [tf.reduce_mean(G_means), tf.reduce_mean(G_vars)],
                "generator mean and average var", first_n=1)
            inputs_means = tf.reduce_mean(inputs, 0, keep_dims=True)
            inputs_vars = tf.reduce_mean(tf.square(inputs - inputs_means), 0, keep_dims=True)
            inputs = tf.Print(
                inputs,
                [tf.reduce_mean(inputs_means), tf.reduce_mean(inputs_vars)],
                "image mean and average var", first_n=1)

        joint = tf.concat([inputs, G], 0)
        log.info('Input size of unlabelled and generated %s' % (joint.get_shape()))

        self.end_points_D = self.model.discriminator(
            joint, True, None, num_classes=num_classes, batch_size=batch_size_train)
        self.end_points_D_val = self.model.discriminator(
tensorflow.reduce_mean
10,651
import tensorflow as tf

    elif mode == tf.estimator.ModeKeys.EVAL:
        def metric_fn(per_example_loss, logits, label_ids):
            """Computes the loss and accuracy of the model."""
            sentence_log_probs = tf.reshape(
                logits, [-1, logits.shape[-1]])
            sentence_predictions = tf.argmax(
                logits, axis=-1, output_type=tf.int32)
            sentence_labels = tf.reshape(label_ids, [-1])

            sentence_accuracy = tf.metrics.accuracy(
                labels=label_ids, predictions=sentence_predictions)
            sentence_mean_loss = tf.metrics.mean(
                values=per_example_loss)
            sentence_f = tf_metrics.f1(label_ids, sentence_predictions,
                                       num_labels, label_lst, average="macro")

            eval_metric_ops = {
                "f1": sentence_f,
tensorflow.metrics.accuracy
10,652
import tensorflow as tf

        'member/age': tf.io.FixedLenFeature([], tf.int64),
        'member/height': tf.io.VarLenFeature(tf.float32),
        'member/prefer_prods': tf.io.VarLenFeature(tf.int64)}
    features = tf.io.parse_single_example(example_proto, features)
    images = tf.image.decode_png(features['member/encoded'], channels=3)
    # Note: the PNG originally has 4 channels, but the processing below fails on
    # 4-channel input, so the line above first reduces it to 3 channels.
tensorflow.io.parse_single_example
10,653
import tensorflow as tf

    horizon_pred = horizon_sumV1(pred, horizon)
    horizon_tgt = horizon_sumV1(tgt, horizon)

    pred_flat = tf.reshape(horizon_pred, [-1])
    tgt_flat = tf.reshape(horizon_tgt, [-1])
    batch = tf.stack([pred_flat, tgt_flat], 1)
    sample_func = sample_pair(batch)

    def sample_compute(_):
        pairs = sample_func()
        loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio)
        pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32)
        p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
                    lambda: tf.print('csrt acc ', [pct]),
                    lambda: tf.no_op())
        with tf.control_dependencies([p]):
            return tf.reduce_mean(loss)

    loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample),
                     dtype=tf.float32, parallel_iterations=32)
    final_loss = tf.reduce_mean(loss)
tensorflow.size
10,654
import tensorflow as tf

        out_width = out_size[2]
        zero = tf.zeros([], dtype='int32')

        # 0 <= z < depth, 0 <= y < height & 0 <= x < width.
        max_z = tf.to_int32(tf.shape(im)[1] - 1)
        max_y = tf.to_int32(tf.shape(im)[2] - 1)
        max_x = tf.to_int32(tf.shape(im)[3] - 1)

        # Converts scale indices from [-1, 1] to [0, width/height/depth].
        x = (x + 1.0) * (width_f) / 2.0
        y = (y + 1.0) * (height_f) / 2.0
tensorflow.shape
10,655
import tensorflow as tf

            # Prediction network with feed_dict
            self.pred_in = {i: tf.placeholder(self.input_spec[i]['type'], shape=s, name=i)
                            for i, s in self.data_shape.items()}
            self._pred_graph(self.pred_in)

            # Start session
            sess_config = tf.ConfigProto(device_count={'GPU': self.n_gpus})
            sess_config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=sess_config)

            # Register tf dataset handles
            if self.datasets:
                self.dataset_handles = {}
                for n, i in self.dataset_iterators.items():
                    self.dataset_handles[n] = self.sess.run(i.string_handle())

            self.sess.run([tf.global_variables_initializer(),
tensorflow.Session
10,656
import tensorflow as tf

            X = self._add_global_avg_pool(X, w, h, ch)

            # Fully connected
            with tf.variable_scope('fully_connected'):
                aux_logits = self._add_fully_connected(X, (ch,), K, no_reg=True)

        return aux_logits

    def _compute_predictions(self, logits, classes):
        probs = tf.nn.softmax(logits)
        preds = tf.argmax(logits, axis=1, output_type=tf.int32)
        corrects = tf.equal(preds, classes)
        return (probs, corrects)

    def _compute_loss(self, logits, aux_logits_list, classes, **knobs):
        reg_decay = knobs['reg_decay']
        aux_loss_mul = knobs['aux_loss_mul']  # Multiplier for auxiliary loss

        # Compute sparse softmax cross entropy loss from logits & labels
        log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=classes)
tensorflow.argmax
10,657
import tensorflow as tf

        #     num_channels + 1
        # )
        appended_image = tf.concat(
            values=[X, stddev_feature_map],
tensorflow.concat
10,658
import tensorflow as tf

    def testEmbeddingRNNSeq2Seq(self):
        with self.test_session() as sess:
            with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
                enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
                dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
                cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
                dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
tensorflow.constant
10,659
import tensorflow as tf

        img_h, img_w, img_c = self.env.observation_space.shape
        input_shape = (img_h, img_w, frame_history_len * img_c)
        self.num_actions = self.env.action_space.n

        # set up placeholders
        # placeholder for current observation (or state)
        self.obs_t_ph = tf.placeholder(
            tf.float32 if lander else tf.uint8, [None] + list(input_shape))
        # placeholder for current action
        self.act_t_ph = tf.placeholder(tf.int32, [None])
        # placeholder for current reward
        self.rew_t_ph = tf.placeholder(tf.float32, [None])
        # placeholder for next observation (or state)
        self.obs_tp1_ph = tf.placeholder(
            tf.float32 if lander else tf.uint8, [None] + list(input_shape))
        # placeholder for end of episode mask
        # this value is 1 if the next state corresponds to the end of an episode,
        # in which case there is no Q-value at the next state; at the end of an
        # episode, only the current state reward contributes to the target, not the
        # next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
        self.done_mask_ph = tf.placeholder(tf.float32, [None])
tensorflow.placeholder
10,660
import tensorflow as tf

            rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])

        # RCNN, class loss
        cls_score = self._predictions["cls_score"]
        label = tf.reshape(self._proposal_targets["labels"], [-1])
        cross_entropy = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=tf.reshape(cls_score, [-1, self._num_classes]),
                labels=label))  # logits is still a vector; label holds only the correct class

        # RCNN, bbox loss
        bbox_pred = self._predictions['bbox_pred']
        bbox_targets = self._proposal_targets['bbox_targets']
        bbox_inside_weights = self._proposal_targets['bbox_inside_weights']
        bbox_outside_weights = self._proposal_targets['bbox_outside_weights']
tensorflow.reshape
10,661
import tensorflow as tf

        self.S = tf.placeholder(tf.float32, [None, self.num_global_s], name='S')    # input Global State
        self.s = tf.placeholder(tf.float32, [None, self.num_s], name='s1')          # input state for agent1
        self.S_ = tf.placeholder(tf.float32, [None, self.num_global_s], name='S_')  # input Next Global State
        self.s_ = tf.placeholder(tf.float32, [None, self.num_s], name='s1_')        # input next state for agent1
        self.R = tf.placeholder(tf.float32, [None, ], name='R')                     # input Reward
        self.a = tf.placeholder(tf.float32, [None, self.num_a], name='a')           # input Action onehot for agent1
        self.done = tf.placeholder(tf.float32, [None, ], name='done')               # input Done info ???
        self.q_m_ = tf.placeholder(tf.float32, [None, ], name='q_value_next_max')
        self.q_target = tf.placeholder(tf.float32, [None, ], name='q_tot_target')

        w_initializer, b_initializer = tf.random_normal_initializer(0., 0.1), tf.constant_initializer(0.0)

        # ------------------ build evaluate_net ------------------
        with tf.variable_scope('eval_net'):
            a_fc1 = tf.layers.dense(self.s, 128, tf.nn.relu, kernel_initializer=w_initializer,
                                    bias_initializer=b_initializer, name='agent_fc1_e')
            # a_fc2 = tf.layers.dense(a_fc1, 128, tf.nn.relu, kernel_initializer=w_initializer,
            #                         bias_initializer=b_initializer, name='agent_fc2_e')
            # a_fc3 = tf.layers.dense(a_fc2, 64, tf.nn.relu, kernel_initializer=w_initializer,
            #                         bias_initializer=b_initializer, name='agent_fc3_e')
            self.q_eval = tf.layers.dense(a_fc1, self.num_a, kernel_initializer=w_initializer,
tensorflow.random_normal_initializer
10,662
import tensorflow as tf

                explained_variance = q_explained_variance(tf.reshape(q_i, [self.n_envs, self.n_steps]),
                                                          tf.reshape(qret, [self.n_envs, self.n_steps]))
                loss_q = tf.reduce_mean(tf.square(tf.stop_gradient(qret) - q_i) * 0.5)

                # Net loss
                check_shape([loss_policy, loss_q, entropy], [[]] * 3)
                loss = loss_policy + self.q_coef * loss_q - self.ent_coef * entropy

                tf.summary.scalar('entropy_loss', entropy)
                tf.summary.scalar('policy_gradient_loss', loss_policy)
                tf.summary.scalar('value_function_loss', loss_q)
                tf.summary.scalar('loss', loss)

                norm_grads_q, norm_grads_policy, avg_norm_grads_f = None, None, None
                avg_norm_k, avg_norm_g, avg_norm_k_dot_g, avg_norm_adj = None, None, None, None

                if self.trust_region:
tensorflow.summary.scalar
10,663
import tensorflow as tf

    Returns:
        one_hot -- one hot matrix encoding
    """
    # Create a TensorFlow constant equal to the number of classes
    C = tf.constant(N_classes, name="C")
    one_hot_matrix = tf.one_hot(vect - 1, C, axis=0)  # axis=0 means it is mapping to column vectors
    if N_ch != 0:
        one_hot_matrix = tf.expand_dims(one_hot_matrix, 1)

    # Create TensorFlow session
    sess = tf.Session()
    vect_hot = sess.run(one_hot_matrix)
    sess.close()
    return vect_hot


# Placeholders for the input/output data
def create_placeholders(Nfeat, Nlab):
tensorflow.Session
10,664
import tensorflow as tf

    return tf.get_variable_scope().name


def absolute_scope_name(relative_scope_name):
    """Appends parent scope name to `relative_scope_name`"""
    return scope_name() + "/" + relative_scope_name


def default_param_noise_filter(var):
    if var not in tf.trainable_variables():
        # We never perturb non-trainable vars.
        return False
    if "fully_connected" in var.name:
        # We perturb fully-connected layers.
        return True

    # The remaining layers are likely conv or layer norm layers, which we do not wish to
    # perturb (in the former case because they only extract features, in the latter case because
tensorflow.trainable_variables
10,665
import tensorflow as tf

tf.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder')
tf.app.flags.DEFINE_string('input_name', '', 'input folder')
tf.app.flags.DEFINE_string('test_path', '', 'test set folder')
tf.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration')
tf.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae)'
                           'WhatWhereAe (ww) U-netAe (u)')
tf.app.flags.DEFINE_string('postfix', '', 'Postfix for the training folder')
tf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight')
tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')
tf.app.flags.DEFINE_float('epsilon', 0.000001,
                          'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5')
tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances')
tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance')
tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective')
tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')
tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set')
tf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs')
tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs')
tensorflow.app.flags.DEFINE_float
10,666
import tensorflow as tf

                        mask=None)))
        elif self.lesion_alpha:
            setattr(
                self,
                'alpha_%s' % layer,
                tf.constant(0.))
        else:
            setattr(
                self,
                'alpha_%s' % layer,
                tf.constant(1.))

        if self.mu and not self.lesion_mu:
            setattr(
                self,
                'mu_%s' % layer,
                tf.get_variable(
                    name='%s_mu' % self.layer_name,
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
tensorflow.constant
10,667
import tensorflow as tf

        self.setup_args = None
        self.train_args = None

    def __enter__(self):
        """Set self.sess as the default session.

        Returns:
            This local runner.
        """
        if tf.get_default_session() is not self.sess:
            self.sess.__enter__()
            self.sess_entered = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Leave session."""
        if tf.get_default_session() is self.sess and self.sess_entered:
            self.sess.__exit__(exc_type, exc_val, exc_tb)
            self.sess_entered = False
tensorflow.get_default_session
10,668
from tensorflow.python.framework import ops

_OverrideBinaryOperatorHelper(logical_and, "and")
_OverrideBinaryOperatorHelper(logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")

ops.Tensor._override_operator("__lt__", less)
ops.Tensor._override_operator("__le__", less_equal)
ops.Tensor._override_operator("__gt__", greater)
ops.Tensor._override_operator("__ge__", greater_equal)


def range(start, limit, delta=1, name="range"):
    """Creates a sequence of integers.

    This operation creates a sequence of integers that begins at `start` and
tensorflow.python.framework.ops.Tensor._override_operator
10,669
import tensorflow as tf

        def update_grad():
            update_op = self._opt.apply_gradients(slots_and_vars)
            with tf.control_dependencies([update_op]):
                clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]
            return tf.group(*clear_ops, name='update_grad')
tensorflow.control_dependencies
10,670
import tensorflow as tf

            with self.assertRaisesOpError("uninitialized value v1"):
                sess.run(v1)

            save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
            save.restore(sess, save_path)

            # Check that the parameter nodes have been restored.
            self.assertEqual(10.0, v0.eval())
            self.assertEqual(20.0, v1.eval())

        # Add a prefix to the node names in the current graph and Restore using
        # remapped names.
        with self.test_session() as sess:
            v0 = tf.Variable(-1.0, name="restore_prefix/v0")
            v1 = tf.Variable(-1.0, name="restore_prefix/v1")

            with self.assertRaisesOpError("uninitialized value restore_prefix/v0"):
                sess.run(v0)
            with self.assertRaisesOpError("uninitialized value restore_prefix/v1"):
                sess.run(v1)

            # Restore the saved values in the parameter nodes.
            save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
            save.restore(sess, save_path)

            # Check that the parameter nodes have been restored.
tensorflow.Variable
10,671
import tensorflow as tf

        p = tf.gather_nd(x_, blk_indices_)
        p_ = tf.reshape(p, [-1, ksize[0] * ksize[1] * ksize[2]])

        # Convolution on patches.
        w_ = tf.reshape(w, [ksize[0] * ksize[1] * ksize[2], -1])
        q = tf.matmul(p_, w_)

        # Center locations.
        blk_indices_crop = blk_indices[:, 0, 0, :]

        # Project back to an image.
        y = tf.scatter_nd(blk_indices_crop, q, out_shape)
        return y

    with tf.control_dependencies([assert_shape, assert_strides]):
        return tf.cond(
            tf.equal(tf.size(blk_indices_), 0),
            lambda: tf.zeros(out_shape, dtype=x.dtype),
            _conv_nonzero)


def mask_conv2d(x, w, mask, strides, padding):
    """Masked 2D convolution. Used to check 2D sparse convolution.
tensorflow.scatter_nd
10,672
from tensorflow.contrib.framework import tensor_util

    mean_squared_error: A tensor representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_squared_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  predictions, labels = tensor_util.remove_squeezable_dimensions(
      predictions, labels)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  squared_error = math_ops.square(labels - predictions)
  return streaming_mean(squared_error, weights, metrics_collections,
                        updates_collections, name or 'mean_squared_error')


def streaming_root_mean_squared_error(predictions, labels, weights=None,
                                      metrics_collections=None,
                                      updates_collections=None,
tensorflow.contrib.framework.tensor_util.remove_squeezable_dimensions
10,673
import tensorflow as tf """ var = tf.Variable(0.) loss = tf.nn.l2_loss(var) train_op = opt.get_train_op(loss)
tensorflow.nn.l2_loss
10,674
import tensorflow as tf

        tf.gfile.MkDir(FLAGS.checkpoint_path)

    input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
    input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps')
    if FLAGS.geometry == 'RBOX':
        input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 5], name='input_geo_maps')
    else:
        input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 8], name='input_geo_maps')
    input_training_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_training_masks')

    global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
    learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, decay_steps=10000,
                                               decay_rate=0.94, staircase=True)
    # add summary
    tf.summary.scalar('learning_rate', learning_rate)
    opt = tf.train.AdamOptimizer(learning_rate)
    opt = MixedPrecisionOptimizer(opt, scale=FLAGS.loss_scale)
    from npu_bridge.estimator.npu.npu_optimizer import NPUDistributedOptimizer
    opt = NPUDistributedOptimizer(opt)

    # split
    input_images_split = tf.split(input_images, len(gpus))
    input_score_maps_split = tf.split(input_score_maps, len(gpus))
    input_geo_maps_split = tf.split(input_geo_maps, len(gpus))
    input_training_masks_split = tf.split(input_training_masks, len(gpus))

    tower_grads = []
    reuse_variables = None
tensorflow.summary.scalar
10,675
from tensorflow.python.framework import ops

    def get_shape(self, x, name="get_shape"):
        """Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.

        Args:
          x: `Tensor`.
          name: `String`. The name to give this op.

        Returns:
          sample_shape: `Tensor` (1D, `int32`).
          batch_shape: `Tensor` (1D, `int32`).
          event_shape: `Tensor` (1D, `int32`).
        """
        with self._name_scope(name, values=[x]):
            x = ops.convert_to_tensor(x, name="x")

            def slice_shape(start_sum, size, name):
                """Closure to slice out shape."""
                start_sum = start_sum if start_sum else (
                    array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
                if (x.get_shape().ndims is not None and
                        self._is_all_constant_helper(size, *start_sum)):
                    start = sum(tensor_util.constant_value(s) for s in start_sum)
                    stop = start + tensor_util.constant_value(size)
                    slice_ = x.get_shape()[start:stop].as_list()
                    if all(s is not None for s in slice_):
                        return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
                    # Fall-through intended.
                return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,))
tensorflow.python.framework.ops.convert_to_tensor
10,676
import tensorflow as tf

            ]
        else:
            return self.create_accumulator()

    def extract_output(self, accumulator):
        # For each output, cast that output to the specified type. Note there
        # will be one output for each input tensor to the analyzer.
        return [
            sub_accumulator.astype(output_dtype)
            for sub_accumulator, output_dtype in zip(accumulator, self._output_dtypes)
        ]

    def output_tensor_infos(self):
        return [
            analyzer_nodes.TensorInfo(tf.as_dtype(dtype), shape, None)
            for dtype, shape in zip(self._output_dtypes, self._output_shapes)
        ]


def _get_output_shape_from_input(x):
    if isinstance(x, tf.SparseTensor):
        return x.get_shape().as_list()[1:]
    # When reducing over batch dimensions, with known shape, the result will be
    # the same shape as the input, but without the batch.
    if x.shape.rank is not None:
        return x.shape.as_list()[1:]
    return (None,)
tensorflow.as_dtype
10,677
from tensorflow.python.framework import ops

ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)


@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
@ops.RegisterShape("Sub")
tensorflow.python.framework.ops.RegisterShape
10,678
import tensorflow as tf

            with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
                weights = tf.get_variable('weights', [prev_node, output_node],
                                          initializer=tf.truncated_normal_initializer(stddev=0.1))
                self.nnweights.append(weights)
tensorflow.truncated_normal_initializer
10,679
import tensorflow as tf

sess = tf.Session()

my_tensor = tf.zeros([1, 20])
sess.run(my_tensor)

my_var = tf.Variable(tf.zeros([1, 20]))
sess.run(my_var.initializer)
sess.run(my_var)

row_dim = 2
tensorflow.zeros
10,680
import tensorflow as tf

    def benchmark_eager_train_defun(self):
        self._benchmark_eager_train(
            "eager_train", MockIterator, device_and_data_format(), defun=False)

    def benchmark_eager_train_datasets_with_defun(self):

        def make_iterator(tensors):
            with tf.device("/device:CPU:0"):
                ds = tf.data.Dataset.from_tensors(tensors).repeat()
            return tfe.Iterator(ds)

        self._benchmark_eager_train(
            "eager_train_dataset_with_defun", make_iterator, device_and_data_format(),
tensorflow.device
10,681
import tensorflow as tf

    order_float = tf.cast(order, dtype=var_type)
    tmp = sqrt_2 * _spherical_harmonics_normalization(
        degree, order, var_type) * evaluate_legendre_polynomial(
            degree, order, tf.cos(theta))
    positive = tmp * tf.cos(order_float * phi)
    negative = tmp * tf.sin(order_float * phi)
    return tf.where(tf.greater(sign_order, 0), positive, negative)


def evaluate_spherical_harmonics(
        degree_l: TensorLike,
        order_m: TensorLike,
tensorflow.greater
10,682
import tensorflow as tf

                layer2, weights2 = new_conv_layer(input=layer1, name="conv2", num_input_channels=64,
                                                  num_filters=64, filter_size=5, ac_fun=tf.nn.relu,
                                                  pool_ksize=[1, 3, 3, 1])
            with tf.name_scope('flatten'):
                layer3, num_features = flatten_layer(layer2)
            # fully connected layers
            with tf.variable_scope('fc1'):
                layer4, weights4 = new_fc_layer(input=layer3, name="fc1", num_inputs=num_features,
                                                num_outputs=fc_size1)
                # print(layer4)
            with tf.variable_scope('fc2'):
                logits, weights5 = new_fc_layer(input=layer4, name="fc2", num_inputs=fc_size1,
                                                num_outputs=fc_size2)
            # add histograms
            if not reuse:
                tf.histogram_summary(weights1.name, weights1)
                tf.histogram_summary(weights2.name, weights2)
        return logits

    def loss(self, channel_1, channel_2, label, margin):
        """
        Defines the contrastive loss. This loss ties the outputs of the branches to compute the following:

            L = Y * d^2 + (1-Y) * max(margin - d^2, 0)

        where d is the L2 distance between the given input pair s.t. d = ||x_1 - x_2||_2 and Y is
tensorflow.histogram_summary
10,683
import tensorflow as tf

        # if FLAGS.dev:
        #     plt.ion()
        #     plt.show()

    def _build_summaries(self):
        # losses
        with tf.name_scope('losses'):
            loss_names = ['loss_autoencoder', 'loss_predictive', 'loss_distance', 'loss_denoising']
            for i, loss in enumerate(self.losses):
                self._add_loss_summary(loss_names[i], loss)
            self._add_loss_summary('loss_total', self.loss_total)
        self.summs_train = tf.summary.merge_all('train')

        # reconstructions
        with tf.name_scope('decodings'):
            self.image_summaries = {
                'orig': self._add_decoding_summary('0_original_input', self.input),
                'reco': self._add_decoding_summary('1_reconstruction', self.eval_decode),
                'pred': self._add_decoding_summary('2_prediction', self.eval_decode),
                'midd': self._add_decoding_summary('3_averaged', self.eval_decode),
                'nois': self._add_decoding_summary('4_noisy', self.eval_decode)
            }

        # visualization
tensorflow.summary.merge_all
10,684
import tensorflow as tf

    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]   # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
    d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
    output = d_layer_2_all
tensorflow.shape
10,685
import tensorflow as tf

        finetune_one_hot_labels, global_step, loss_weights, inst_weights)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        src_train_op, _ = get_src_train_op(loss)
    with tf.control_dependencies([src_train_op]):
        target_avg_pool = get_logits(
            target_features, mode, FLAGS.target_dataset, reuse=True)
        target_logits = do_cls(
            target_avg_pool, target_num_classes, name='final_target_dense')
        is_prediction_correct = tf.equal(
            tf.argmax(tf.identity(target_logits), axis=1),
            tf.argmax(target_one_hot_labels, axis=1))
        acc = tf.reduce_mean(tf.cast(is_prediction_correct, tf.float32))

        entropy = loss_entropy + rl_entropy
        log_prob = loss_log_prob + log_prob
        train_op, _, _ = meta_train_op(acc, entropy, log_prob, rl_scope, params)

    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)


def rl_label_weights(name=None):
    """Returns the weight for importance."""
    with tf.variable_scope(name, 'rl_op_selection'):
        num_classes = FLAGS.src_num_classes
tensorflow.argmax
10,686
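A reduced sketch of the accuracy computation built around tf.argmax in the record above; constants stand in for the model outputs and labels.

import tensorflow as tf

logits = tf.constant([[2.0, 0.5], [0.1, 3.0]])
one_hot_labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])
# argmax over the class axis turns logits and one-hot labels into class ids.
is_correct = tf.equal(tf.argmax(logits, axis=1), tf.argmax(one_hot_labels, axis=1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))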
import tensorflow as tf
          tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)

      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())

    # Build another graph with 2 nodes, initialized
    # differently, and a Restore node for them.
    with self.test_session() as sess:
      v0_2 = tf.Variable(1000.0, name="v0")
      v1_2 = tf.Variable(2000.0, name="v1")
      save2 = tf.train.Saver({"v0": v0_2, "v1": v1_2})
      tf.initialize_all_variables().run()

      # Check that the parameter nodes have been initialized.
      self.assertEqual(1000.0, v0_2.eval())
      self.assertEqual(2000.0, v1_2.eval())
      # Restore the values saved earlier in the parameter nodes.
      save2.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0_2.eval())
      self.assertEqual(20.0, v1_2.eval())
tensorflow.Variable
10,687
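A compact sketch of the save/restore round trip the test above exercises, assuming TF 1.x; the checkpoint path is hypothetical.

import tensorflow as tf

v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
saver = tf.train.Saver({"v0": v0, "v1": v1})
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    path = saver.save(sess, "/tmp/demo.ckpt")   # hypothetical path
    saver.restore(sess, path)                   # reloads the saved values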
from tensorflow.python.framework import ops
    The operation that initializes v.
  """
  with ops.op_scope([v, init], None, v.op.name + "/"):
    with ops.name_scope(name) as scope:
      with ops.device(v.device or ops.get_default_graph().get_default_device()):
        if callable(init):
          assert v.get_shape().is_fully_defined(), "Variable shape unknown."
          # TODO(mrry): Convert to v.shape when the property and
tensorflow.python.framework.ops.get_default_graph
10,688
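For orientation only: the internal ops.get_default_graph used above returns the same graph object as the public API. A tiny sketch, assuming TF 1.x.

import tensorflow as tf
from tensorflow.python.framework import ops

a = tf.constant(1.0, name="a")
g = ops.get_default_graph()
# The internal helper and the public symbol resolve to the same graph object.
assert g is tf.get_default_graph()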
from tensorflow.contrib.learn.python.learn.estimators import run_config
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)

    classifier.fit(input_fn=_train_input_fn, steps=15)
    classifier.evaluate(input_fn=_eval_input_fn, steps=1)
    classifier.export(self._export_dir_base)

  def testFitAndEvaluateDontThrowExceptionWithCoreForRegressor(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    regressor = estimator.GradientBoostedDecisionTreeRegressor(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)

    regressor.fit(input_fn=_train_input_fn, steps=15)
    regressor.evaluate(input_fn=_eval_input_fn, steps=1)
    regressor.export(self._export_dir_base)
tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig
10,689
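A hedged sketch of constructing the contrib.learn RunConfig used in the record above; the keyword argument shown is commonly available on that class in TF 1.x, but should be checked against the installed version.

from tensorflow.contrib.learn.python.learn.estimators import run_config

# Plain construction, as in the test above; tf_random_seed makes runs repeatable.
config = run_config.RunConfig(tf_random_seed=42)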
import tensorflow as tf
        dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob,
                                            is_training=is_training, scope="dropout3_2")

        output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1,
                                                  activation_fn=None, scope="output_1")
        output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2,
                                                  activation_fn=None, scope="output_2")

    with tf.variable_scope("loss"):
        loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1))
        loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2))
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses)

    with tf.variable_scope("evaluation"):
        accuracy_1 = tf.reduce_mean(tf.cast(tf.equal(
            tf.argmax(output_1, axis=-1),
            tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1")
        accuracy_2 = tf.reduce_mean(tf.cast(tf.equal(
            tf.argmax(output_2, axis=-1),
            tf.argmax(y_2, axis=-1)), tf.float32), name="accuracy_2")
        accuracy = tf.divide(accuracy_1 + accuracy_2, 2.0, name="accuracy")
tensorflow.reduce_sum
10,690
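A minimal sketch of tf.reduce_sum, the call used above to fold the collected regularization terms into one scalar; the tensors here are illustrative stand-ins.

import tensorflow as tf

reg_losses = [tf.constant(0.1), tf.constant(0.25)]   # stand-ins for collected L2 terms
total_reg = tf.reduce_sum(reg_losses)                # scalar 0.35
per_row = tf.reduce_sum(tf.constant([[1.0, 2.0], [3.0, 4.0]]), axis=1)  # [3.0, 7.0]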
import tensorflow as tf
        filed_based_convert_examples_to_features(
            eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Num examples = %d", len(eval_examples))
        tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

        eval_steps = None
        if FLAGS.use_tpu:
            eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)

        eval_drop_remainder = True if FLAGS.use_tpu else False
tensorflow.logging.info
10,691
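A short sketch of tf.logging.info as used above, assuming TF 1.x, where the module wraps the standard Python logger.

import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info("***** Running evaluation *****")
tf.logging.info("  Num examples = %d", 128)   # printf-style formatting, like the stdlib logger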
from tensorflow.python.ops import math_ops
    # `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
    #     precisions at all k for which relevance indicator is true.
    relevant_per_k = _sparse_true_positive_at_k(
        predictions_idx_per_k, labels_per_k, name='relevant_per_k')
    tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
    retrieved_per_k = math_ops.cumsum(
        array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
    precision_per_k = math_ops.div(
        math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),
        name='precision_per_k')
    relevant_precision_per_k = math_ops.mul(
        precision_per_k, math_ops.to_double(relevant_per_k),
        name='relevant_precision_per_k')

    # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
    precision_sum = math_ops.reduce_sum(
        relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')

    # Divide by number of relevant items to get average precision. These are
    # the "num_relevant_items" and "AveP" terms from the formula above.
    num_relevant_items = math_ops.to_double(num_relevant(labels, k))
    return math_ops.div(precision_sum, num_relevant_items, name=scope)


def streaming_sparse_average_precision_at_k(predictions,
                                            labels,
                                            k,
                                            weights=None,
                                            metrics_collections=None,
                                            updates_collections=None,
tensorflow.python.ops.math_ops.reduce_sum
10,692
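For reference, a tiny sketch of the internal math_ops.reduce_sum call used above; it behaves like the public tf.reduce_sum, and the snippet's reduction_indices argument is the older spelling of axis.

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
row_sums = math_ops.reduce_sum(x, axis=-1)   # [3.0, 7.0], reducing the last dimension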
import tensorflow as tf
                shape=[num_filter],
                initializer=tf.constant_initializer(0.0))
            H = tf.nn.bias_add(
                name='H',
                value=conv,
                bias=b)

            # Apply nonlinearity
            H = tf.nn.relu(H, name="relu")

            # max pool
            pooled = tf.nn.max_pool(H,
                                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                                    strides=[1, 1, 1, 1],
                                    padding='VALID',
                                    name="pool")
tensorflow.nn.relu
10,693
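A minimal sketch of tf.nn.relu as applied after the bias-add above; the values are illustrative.

import tensorflow as tf

pre_activation = tf.constant([-1.5, 0.0, 2.5])
# Element-wise max(x, 0); negative pre-activations are clipped to zero.
activated = tf.nn.relu(pre_activation, name="relu")   # [0.0, 0.0, 2.5]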
import tensorflow as tf
    if tf_output1_dtype == tf.string:
        cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
tensorflow.dtypes.as_string
10,694
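A small sketch of tf.dtypes.as_string, an alias of tf.as_string in recent 1.x releases; the inputs are illustrative.

import tensorflow as tf

ints = tf.constant([1, 2, 3])
floats = tf.constant([3.14159])
s1 = tf.dtypes.as_string(ints, name="TOSTR1")    # ["1", "2", "3"]
s2 = tf.dtypes.as_string(floats, precision=2)    # ["3.14"]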
import tensorflow as tf
    def get_a_cell(state_size, input_prob, state_prob, num_input):
        if cell_type == 'LSTM':
            if activation == 'linear':
                lstm = tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation=tf.identity, state_is_tuple=True)
                cell_drop = tf.contrib.rnn.DropoutWrapper(lstm, variational_recurrent=True, dtype=tf.float32,
                                                          input_size=num_input, input_keep_prob=input_prob,
                                                          state_keep_prob=state_prob)
            elif activation == 'relu':
tensorflow.nn.rnn_cell.LSTMCell
10,695
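A reduced sketch of building the LSTM cell plus the variational DropoutWrapper from the snippet above, assuming TF 1.x; the sizes and keep probabilities are illustrative.

import tensorflow as tf

lstm = tf.nn.rnn_cell.LSTMCell(num_units=64, activation=tf.identity, state_is_tuple=True)
cell = tf.contrib.rnn.DropoutWrapper(lstm, variational_recurrent=True, dtype=tf.float32,
                                     input_size=8, input_keep_prob=0.9, state_keep_prob=0.9)
inputs = tf.placeholder(tf.float32, [None, 10, 8])            # [batch, time, features]
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)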
import tensorflow as tf
        return batch, output, i + 1

    output_ta = tf.TensorArray(dtype=tf.float32,
                               size=0,
                               dynamic_size=True,
                               element_shape=(facts[:, 0, :].get_shape()))
    _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
    self_attention = output_op.stack()
    self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
    return self_attention

def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)
tensorflow.transpose
10,696
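A minimal sketch of the tf.transpose call above, which swaps a time-major stack back to batch-major order; the shape is illustrative.

import tensorflow as tf

stacked = tf.placeholder(tf.float32, [None, None, 36])   # [time, batch, dim], e.g. from TensorArray.stack()
batch_major = tf.transpose(stacked, perm=[1, 0, 2])       # [batch, time, dim]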
import tensorflow as tf
        self.sess = tf.get_default_session()
        self.graph = tf.get_default_graph()
tensorflow.get_default_graph
10,697
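A short sketch of capturing the default session and graph as above, assuming TF 1.x; note that tf.get_default_session() is None unless a session has been installed as the default.

import tensorflow as tf

with tf.Session() as sess:   # entering the context makes it the default session
    assert tf.get_default_session() is sess
    assert tf.get_default_graph() is sess.graph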
import tensorflow as tf
    **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
    A tuple (total_loss, grads_and_vars).
      - total_loss: A Tensor containing the average of the clone losses
        including the regularization loss.
      - grads_and_vars: A List of tuples (gradient, variable) containing the
        sum of the gradients for each variable.
  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients accross clones.
tensorflow.get_collection
10,698
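An illustrative sketch of tf.get_collection for gathering regularization losses, assuming TF 1.x graph collections; the regularizer is a stand-in.

import tensorflow as tf

w = tf.get_variable("w", shape=[3, 3],
                    regularizer=tf.contrib.layers.l2_regularizer(1e-4))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_reg = tf.add_n(reg_losses, name="total_regularization")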
import tensorflow as tf
            genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]), genre)  # [emb]

        sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1), [1, max_sentence_length])  # [num_sentences, max_sentence_length]
        flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask)  # [num_words]
tensorflow.range
10,699
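A compact sketch of the tf.range / tile pattern above that builds per-word sentence indices; the sizes are illustrative.

import tensorflow as tf

num_sentences, max_sentence_length = 4, 7
# Row i of the result repeats sentence index i across max_sentence_length columns.
sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1),
                           [1, max_sentence_length])   # [num_sentences, max_sentence_length]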