Columns: seed (string, lengths 25–2.89k), seed_api (string, lengths 14–102), index (int64, range 0–14.8k)
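Each row below is a (seed, seed_api, index) triple: a code snippet, the fully qualified TensorFlow API it exercises, and the row number. As an orientation aid, here is a minimal sketch of iterating such rows; it assumes the rows have been exported to a local JSONL file named seed_rows.jsonl, which is an assumption for illustration and not part of this listing.

import json

# Hypothetical consumer of the rows shown below; "seed_rows.jsonl" is an assumed local export.
with open("seed_rows.jsonl") as f:
    for line in f:
        row = json.loads(line)
        # e.g. 12000 -> tensorflow.python.ops.math_ops.digamma
        print(row["index"], row["seed_api"])
        snippet = row["seed"]  # the code fragment that calls row["seed_api"]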
from tensorflow.python.ops import math_ops ``` entropy = alpha + log(beta) + log(Gamma(alpha)) - (1 + alpha) digamma(alpha) ``` where digamma(alpha) is the digamma function.""") def _entropy(self): return (self.alpha + math_ops.log(self.beta) + math_ops.lgamma(self.alpha) - (1. + self.alpha) * math_ops.digamma(self.alpha)) @distribution_util.AppendDocstring( """The mean of an inverse gamma distribution is `beta / (alpha - 1)`, when `alpha > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception will be raised rather than returning `NaN`.""") def _mean(self): mean = self.beta / (self.alpha - 1.) if self.allow_nan_stats: nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype()) return array_ops.where(
tensorflow.python.ops.math_ops.digamma
12,000
import tensorflow as tf def pad_and_gather(vecs, mask_inds, pad=None): """ pad a vector with a zero row and gather with input inds """ if pad is None: pad = tf.expand_dims(tf.zeros_like(vecs[0]), 0) else: pad = tf.expand_dims(pad, 0) vecs_padded = tf.concat(0, [vecs, pad]) # flatten mask and edges vecs_gathered = tf.gather(vecs_padded, mask_inds) return vecs_gathered def padded_segment_reduce(vecs, segment_inds, num_segments, reduction_mode): """ Reduce the vecs with segment_inds and reduction_mode Input: vecs: A Tensor of shape (batch_size, vec_dim) segment_inds: A Tensor containing the segment index of each vec row, should agree with vecs in shape[0]
tensorflow.gather
12,001
import tensorflow as tf tf.logging.info("removing {}".format(src_ckpt)) tf.gfile.Remove(src_ckpt)
tensorflow.gfile.Remove
12,002
import tensorflow as tf selected_rows = tf.nn.embedding_lookup(var_matrix, indices) row_norms = tf.sqrt(tf.reduce_sum(tf.square(selected_rows), 1)) scaling = maxnorm / tf.maximum(row_norms, maxnorm) scaled = selected_rows * tf.expand_dims(scaling, 1) return tf.scatter_update(var_matrix, indices, scaled)
tensorflow.expand_dims
12,003
import tensorflow as tf self.assertAllClose( np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32), transformed_inputs[fields.InputDataFields.groundtruth_classes]) def test_use_multiclass_scores_when_not_present(self): image = np.random.rand(4, 4, 3).astype(np.float32) tensor_dict = { fields.InputDataFields.image: tf.constant(image), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)), fields.InputDataFields.multiclass_scores: tf.placeholder(tf.float32), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1, 2], np.int32)) } input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=3, use_multiclass_scores=True) with self.test_session() as sess: transformed_inputs = sess.run( input_transformation_fn(tensor_dict=tensor_dict),
tensorflow.placeholder
12,004
import tensorflow as tf self.b0 = tf.Variable(tf.zeros([512])) self.w1 = tf.Variable(tf.random_normal([512, 10]))
tensorflow.random_normal
12,005
from tensorflow.python.ops import math_ops count = _create_local('count_tensor', shape=values.get_shape()) num_values = array_ops.ones_like(values) if weights is not None: weights = math_ops.to_float(weights) values = math_ops.mul(values, weights) num_values = math_ops.mul(num_values, weights) total_compute_op = state_ops.assign_add(total, values) count_compute_op = state_ops.assign_add(count, num_values) def compute_mean(total, count, name): non_zero_count = math_ops.maximum(count, array_ops.ones_like(count), name=name) return math_ops.truediv(total, non_zero_count, name=name) mean = compute_mean(total, count, 'value') with ops.control_dependencies([total_compute_op, count_compute_op]): update_op = compute_mean(total, count, 'update_op') if metrics_collections: ops.add_to_collections(metrics_collections, mean) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean, update_op
tensorflow.python.ops.math_ops.truediv
12,006
import tensorflow as tf def model(X, M, Y, train=False, reuse=False): with tf.variable_scope('model', reuse=reuse): we = tf.get_variable("we", [n_vocab+n_special+n_ctx, n_embd], initializer=tf.random_normal_initializer(stddev=0.02)) we = dropout(we, embd_pdrop, train) #X:[n_batch_train, 2, n_ctx, 2] -> [n_batch_train*2,n_ctx,2] X = tf.reshape(X, [-1, n_ctx, 2]) M = tf.reshape(M, [-1, n_ctx]) h = embed(X, we) #h=[-1,n_ctx,emb] for layer in range(n_layer): h = block(h, 'h%d'%layer, train=train, scale=True) #h=[-1,n_ctx,emb] lm_h [-1,emb] lm_h = tf.reshape(h[:, :-1], [-1, n_embd]) lm_logits = tf.matmul(lm_h, we, transpose_b=True) lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1])) lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1]-1]) lm_losses = tf.reduce_sum(lm_losses*M[:, 1:], 1)/tf.reduce_sum(M[:, 1:], 1) clf_h = tf.reshape(h, [-1, n_embd]) pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32) clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx) clf_h = tf.reshape(clf_h, [-1, 2, n_embd]) if train and clf_pdrop > 0: shape = shape_list(clf_h) shape[1] = 1 clf_h = tf.nn.dropout(clf_h, 1-clf_pdrop, shape)
tensorflow.reshape
12,007
import tensorflow as tf with tf.variable_scope('generator'):
tensorflow.variable_scope
12,008
import tensorflow as tf return sess.run(self.value_estimate, { self.state: [state] })[0][0] def update(self, state, target, sess=None): sess = sess or tf.get_default_session() for st_idx in range(len(state)): state[st_idx]=featurize_state(state[st_idx]);
tensorflow.get_default_session
12,009
import tensorflow as tf # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example
tensorflow.to_int32
12,010
import tensorflow as tf self.q_next = tf.layers.dense(a_fc1_, self.num_a, kernel_initializer=w_initializer, bias_initializer=b_initializer, name='q_t') # [batch*n_agents, 1] self.q_selected = tf.reduce_sum(tf.multiply(self.q_eval, self.a), axis=1) # ------------------ build mixing_net ------------------ with tf.variable_scope('mixing_net'):
tensorflow.multiply
12,011
import tensorflow as tf layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg) sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5)) sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params def build_cnet(self, state_in, name, reuse=False): reg = tf.contrib.layers.l2_regularizer(1e-3)
tensorflow.distributions.Normal
12,012
import tensorflow as tf new_w = epi_len - horizon + 1 weights = np.zeros([epi_len, new_w]) for i in range(new_w): weights[i:i + horizon, i] = 1.0 weights = tf.convert_to_tensor(weights, dtype=tf.float32) horizon_sum = tf.matmul(input, weights) return horizon_sum
tensorflow.convert_to_tensor
12,013
from tensorflow.examples.tutorials.mnist import input_data import TensorFI as ti # Import MNIST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # In this example, we limit mnist data Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)
tensorflow.examples.tutorials.mnist.input_data.read_data_sets
12,014
import tensorflow as tf self.batch_pos_cnt = batch_pos_cnt self.max_iter = max_iter self.model_type = model_type self.add_bias = add_bias if opt is None: opt = tf.train.AdagradOptimizer(1.0) self.opt = opt self.sess = None self.train_step = None self.post_step = None self.graph = tf.Graph() with self.graph.as_default(): self.head_input = tf.placeholder(tf.int32, shape=[None]) self.rel_input = tf.placeholder(tf.int32, shape=[None]) self.tail_input = tf.placeholder(tf.int32, shape=[None]) self.target = tf.placeholder(tf.float32, shape=[None]) def _create_model(self, train_triples): ''' Subclasses must build Graph and set self.train_step ''' raise Exception('subclass must implement') def _create_batch_provider(self, train_triples): ''' Default implementation ''' return ContrastiveTrainingProvider(train_triples, self.batch_pos_cnt)
tensorflow.placeholder
12,015
import tensorflow as tf gpu_compute_stage_ops.extend( [put_op for _, (put_op, _) in six.iteritems(staging_ops)]) enqueue_ops.append(tf.group(*gpu_compute_stage_ops)) if gpu_grad_stage_ops: staging_delta_ops += gpu_grad_stage_ops if staging_delta_ops: enqueue_ops.append(tf.group(*(staging_delta_ops))) if not phase_train: if FLAGS.forward_only: all_logits = tf.concat(all_logits, 0) fetches = [all_logits] + enqueue_ops else: all_top_1_ops = tf.reduce_sum(all_top_1_ops) all_top_5_ops = tf.reduce_sum(all_top_5_ops) fetches = [all_top_1_ops, all_top_5_ops] + enqueue_ops return (enqueue_ops, fetches) extra_nccl_ops = [] apply_gradient_devices, gradient_state = ( self.variable_mgr.preprocess_device_grads(device_grads))
tensorflow.concat
12,016
import tensorflow as tf tmp = x + tmp * scale tmp = tf.keras.layers.LeakyReLU(alpha=0.2)(tmp)
tensorflow.keras.layers.LeakyReLU
12,017
import tensorflow as tf Q_filter = tf.cast((qf1 > min_q)|(qf2 > min_q),tf.float32) #Q_filter_1 = tf.cast(qf1 > min_q,tf.float32) #Q_filter_2 = tf.cast(qf2 > min_q,tf.float32) im_loss1 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter*self.is_demo_ph #im_loss2 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter_2*self.is_demo_ph #actor_loss_di1 = tf.reduce_mean(im_loss1) #actor_loss_di2 = tf.reduce_mean(im_loss2)
tensorflow.square
12,018
import tensorflow as tf # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
tensorflow.variable_scope
12,019
import tensorflow as tf wvs_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]), tf.reshape(wvs, [-1, in_size]))
tensorflow.reshape
12,020
import tensorflow as tf # Monitor values (summary_op, monitored_values) = self._add_monitoring_of_values() # Add saver tf_vars = tf.global_variables() saver = tf.train.Saver(tf_vars) # Allow loading of model variables (var_phs, vars_assign_op) = self._add_vars_assign_op(tf_vars)
tensorflow.train.Saver
12,021
import tensorflow as tf [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1')) output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3), [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2')) output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3), [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3')) output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3), [self.batch_size, s_h, s_w, self.c_dim], name='d_h4') scope.reuse_variables() truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin')) truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8]) truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3), [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1')) truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3), [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2')) truthoutput_h3 = lrelu(deconv2d(tf.concat([truthoutput_h2, tgtctx_h1], 3), [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3')) truthoutput_h4 = deconv2d(tf.concat([truthoutput_h3, tgtctx_h0], 3), [self.batch_size, s_h, s_w, self.c_dim], name='d_h4') self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3 mean, var = tf.nn.moments(tgtimg_z, axes=[0]) print(var.get_shape())
tensorflow.concat
12,022
import tensorflow as tf # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = tf.data.TFRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU
tensorflow.contrib.data.parallel_interleave
12,023
import tensorflow as tf jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06 L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter) V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples] V = tf.random_normal(V_shape, dtype=L.dtype) samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V) return tf.transpose(samples) @autoflow((tf.float64, [None, None]), (tf.float64, [None, None]), (tf.float64, [None, None])) def compute_posterior_mean_var(self, X, Y, test_points):
tensorflow.transpose
12,024
import tensorflow as tf labels = tf.random_uniform([batch_size], minval=1, maxval=4, dtype=tf.int64) prem = tf.random_uniform(
tensorflow.random_uniform
12,025
import tensorflow as tf y = self._get_one_hot_labels(labels) sampled_y = self._get_one_hot_labels(features["sampled_labels"]) else: y = None sampled_y = None all_y = None # Batch size per core. bs = images.shape[0].value def augment(imgs): imgs = random_crop_and_resize(imgs) imgs = random_apply(color_distortion, imgs, self._aug_color_jitter_prob) imgs = random_apply(color_drop, imgs, self._aug_color_drop_prob) return tf.stop_gradient(imgs) aug_images, aug_generated = augment(images), augment(generated) # concat all images all_images = tf.concat([images, generated, aug_images, aug_generated], 0) if self.conditional: all_y = tf.concat([y, sampled_y, y, sampled_y], axis=0) # Compute discriminator output for real and fake images in one batch. d_all, d_all_logits, d_latents = self.discriminator( x=all_images, y=all_y, is_training=is_training)
tensorflow.stop_gradient
12,026
import tensorflow as tf def contra_traj_lossV2(pred, tgt, horizon=9): # Step-wise contrastive loss horizon_pred = horizon_sumV1(pred, horizon) horizon_tgt = horizon_sumV1(tgt, horizon) pred1, pred2 = tf.split(horizon_pred, 2, axis=0) tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1)
tensorflow.split
12,027
import tensorflow as tf biases = tf.get_variable("biases", [output_size], dtype=dtype, initializer=tf.constant_initializer( bias_start, dtype=dtype)) x = tf.nn.bias_add(x, biases) # Reshape res back to: (batch_size, num_node, state_dim) return tf.reshape(x, [batch_size, self._num_nodes, output_size])
tensorflow.nn.bias_add
12,028
import tensorflow as tf def INLReLU(x, name=None): x = InstanceNorm('inorm', x) return LeakyReLU(x, name=name) class Model(GANModelDesc): def _get_inputs(self): return [InputDesc(tf.float32, (None, SHAPE, SHAPE, 3), 'inputA'), InputDesc(tf.float32, (None, SHAPE, SHAPE, 3), 'inputB')] @staticmethod def build_res_block(x, name, chan, first=False): with tf.variable_scope(name): input = x return (LinearWrap(x) .tf.pad([[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC') .Conv2D('conv0', chan, padding='VALID') .tf.pad([[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC') .Conv2D('conv1', chan, padding='VALID', nl=tf.identity) .InstanceNorm('inorm')()) + input @auto_reuse_variable_scope def generator(self, img): assert img is not None with argscope([Conv2D, Deconv2D], nl=INReLU, kernel_shape=3):
tensorflow.variable_scope
12,029
import tensorflow as tf tf.flags.DEFINE_string('job_name', '', 'One of "ps", "worker", "". Empty for local training') tf.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_string('worker_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_integer('task_index', 0, 'Index of task within the job') tf.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers') tf.flags.DEFINE_boolean('cross_replica_sync', True, '') # Summary and Save & load checkpoints. tf.flags.DEFINE_integer('summary_verbosity', 0, """Verbosity level for summary ops. Pass 0 to disable both summaries and checkpoints.""") tf.flags.DEFINE_integer('save_summaries_steps', 0,
tensorflow.flags.DEFINE_boolean
12,030
import tensorflow as tf """ if pad is None: pad = tf.expand_dims(tf.zeros_like(vecs[0]), 0) else: pad = tf.expand_dims(pad, 0) vecs_padded = tf.concat(0, [vecs, pad]) # flatten mask and edges vecs_gathered = tf.gather(vecs_padded, mask_inds)
tensorflow.expand_dims
12,031
import tensorflow as tf # do logit = W*X+b logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores") predictions = tf.nn.softmax(logit, name="predictions") # calculate loss and optimizer with tf.variable_scope("FCoptimize", reuse=None): loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logit, labels=Y) + l2_reg_lambda * l2_loss) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss) # calculate accuracy correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
tensorflow.nn.softmax_cross_entropy_with_logits
12,032
import tensorflow as tf self.assertAllClose(masks.numpy(), expected_masks.numpy()) def test_inputs_Distances_to_centers(self): inputs = tf.random.uniform( [100, 8], minval=-10, maxval=10.0, dtype=tf.float32) centers = tf.random.uniform( [5, 8], minval=-10, maxval=10.0, dtype=tf.float32) distances1 = isu.inputs_distances_to_centers(inputs, centers) num_centers = tf.shape(centers)[0] inputs_reshaped = tf.tile(tf.expand_dims(inputs, axis=1), tf.stack([1, num_centers, 1])) distances2 = tf.reduce_sum(tf.square(inputs_reshaped - centers), axis=2) self.assertAllClose(distances1.numpy(), distances2.numpy(), atol=0.001) def test_pairwise_iou_matrix(self): mask0 = tf.constant([[1, 0], [0, 1]], dtype=tf.float32) mask1 = tf.constant([[1, 1],
tensorflow.expand_dims
12,033
import tensorflow as tf if data_shape is None: self.data_shape = {i: s['shape'] for i, s in self.input_spec.items()} with tf.variable_scope('', reuse=tf.AUTO_REUSE): self._build_graph() def _gpu_tower(self, data, mode): # Split the batch between the GPUs (data parallelism) with tf.device('/cpu:0'): with tf.name_scope('{}_data_sharding'.format(mode)): batch_size = self.config['batch_size'] if (mode == Mode.TRAIN) \ else self.config['eval_batch_size'] shards = {d: tf.unstack(v, num=batch_size*self.n_gpus, axis=0) for d, v in data.items()} shards = [{d: tf.stack(v[i::self.n_gpus]) for d, v in shards.items()} for i in range(self.n_gpus)]
tensorflow.device
12,034
import tensorflow as tf assert tf.get_variable_scope().reuse is False return tf.nn.relu(x) - alpha * tf.nn.relu(-x) def instance_norm(x,name='instance_norm'): with tf.variable_scope(name): if reuse: tf.get_variable_scope().reuse_variables() else: assert tf.get_variable_scope().reuse is False
tensorflow.variable_scope
12,035
import tensorflow as tf keep_prob=1., is_train=None, wd=0., activation='elu', hn=None): assert direction is not None def scaled_tanh(x, scale=5.): return scale * tf.nn.tanh(1. / scale * x) bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2] ivec = hn or rep_tensor.get_shape().as_list()[2] input_dim = rep_tensor.get_shape().as_list()[2] with tf.variable_scope(scope or 'block_simple'): # @1. split sequence with tf.variable_scope('split_seq'):
tensorflow.shape
12,036
import tensorflow as tf # Currently we do not support strides > 1 in this matrix multiplication mode. Could be supported # in the future. assert_strides = tf.assert_equal( tf.cast(tf.stack([strides[1], strides[2]]), tf.int64), tf.constant([1, 1], dtype=tf.int64), message='Strides > 1 not supported.') # Convolution when number of indices is larger than zero.
tensorflow.constant
12,037
import tensorflow as tf with tf.variable_scope("loss", reuse=reuse): # set up placeholders act_t_ph = tf.placeholder(tf.int32, [None], name="action") rew_t_ph = tf.placeholder(tf.float32, [None], name="reward") done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
tensorflow.placeholder
12,038
import tensorflow as tf def predict(self, state, sess=None): sess = sess or tf.get_default_session()
tensorflow.get_default_session
12,039
import tensorflow as tf crop_height = ( model_options.crop_size[0] if model_options.crop_size else tf.shape(images)[1]) crop_width = (
tensorflow.shape
12,040
import tensorflow as tf h, w, _ = image.shape grid = 8 image = image[:h // grid * grid, :w // grid * grid, :] mask = mask[:h // grid * grid, :w // grid * grid, :] image = np.expand_dims(image, 0) mask = np.expand_dims(mask, 0) input_image = np.concatenate([image, mask], axis=2) sess_config = tf.ConfigProto() sess_config.gpu_options.allow_growth = True with tf.Session(config=sess_config) as sess: input_image = tf.constant(input_image, dtype=tf.float32) output = model.build_server_graph(FLAGS, input_image) output = (output + 1.) * 127.5 output = tf.reverse(output, [-1]) output = tf.saturate_cast(output, tf.uint8) # load pretrained model vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) assign_ops = [] for var in vars_list: vname = var.name from_name = vname var_value = tf.contrib.framework.load_variable(checkpoint_dir, from_name) assign_ops.append(tf.assign(var, var_value)) sess.run(assign_ops) result = sess.run(output) tf.reset_default_graph() return result[0][:, :, ::-1]
tensorflow.reverse
12,041
import tensorflow as tf #################################### # Utils #################################### def _do_cutout(self, image, im_width, im_height, cutout_size): mask = tf.ones([cutout_size, cutout_size], dtype=tf.int32) start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32) start_y = tf.random.uniform(shape=(1,), minval=0, maxval=im_height, dtype=tf.int32) mask = tf.pad(mask, [[cutout_size + start_y[0], im_height - start_y[0]], [cutout_size + start_x[0], im_width - start_x[0]]]) mask = mask[cutout_size: cutout_size + im_height, cutout_size: cutout_size + im_width] mask = tf.tile(tf.reshape(mask, (im_height, im_width, 1)), (1, 1, 3)) image = tf.where(tf.equal(mask, 0), x=image, y=tf.zeros_like(image)) return image def _add_drop_path(self, X, keep_prob):
tensorflow.pad
12,042
import tensorflow as tf """ super(RNNColorbot, self).__init__(name="") self.label_dimension = label_dimension self.keep_prob = keep_prob self.cells = tf.contrib.checkpoint.List( [tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes]) self.relu = layers.Dense( label_dimension, activation=tf.nn.relu, name="relu") def call(self, inputs, training=False): """Implements the RNN logic and prediction generation.
tensorflow.nn.rnn_cell.BasicLSTMCell
12,043
import tensorflow as tf recall = recall_score(labels_all,predicted_score) apk_sc = rank_metrics.apk(actual, predicted, k=50) return roc_sc, auprc_score,accuracy,precision,recall,f ,apk_sc def construct_placeholders(edge_types): placeholders = { 'batch': tf.placeholder(tf.int32, name='batch'), 'batch_neg': tf.placeholder(tf.int32, name='batch_neg'), 'batch_node':tf.placeholder(tf.int32,name = 'batch_node'), 'adj_min_batch': tf.placeholder(tf.float32,name='adj_min_batch'), 'sim_min_batch': tf.placeholder(tf.float32,name='sim_min_batch'), 'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'), 'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'), 'batch_col_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_col_edge_type'), 'degrees': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), }
tensorflow.placeholder
12,044
import tensorflow as tf def decode(roi, deltas, variances=None): with tf.name_scope('BoundingBoxTransform/decode'): (roi_width, roi_height, roi_urx, roi_ury) = get_width_upright(roi) dx, dy, dw, dh = tf.split(deltas, 4, axis=1) if variances is None: variances = [1., 1.] pred_ur_x = dx * roi_width * variances[0] + roi_urx pred_ur_y = dy * roi_height * variances[0] + roi_ury pred_w = tf.exp(dw * variances[1]) * roi_width pred_h = tf.exp(dh * variances[1]) * roi_height bbox_x1 = pred_ur_x - 0.5 * pred_w bbox_y1 = pred_ur_y - 0.5 * pred_h # This -1. extra is different from reference implementation. bbox_x2 = pred_ur_x + 0.5 * pred_w - 1. bbox_y2 = pred_ur_y + 0.5 * pred_h - 1. bboxes = tf.concat( [bbox_x1, bbox_y1, bbox_x2, bbox_y2], axis=1) return bboxes
tensorflow.exp
12,045
import tensorflow as tf if is_training: return tf.nn.dropout(layer, keep_prob=keep_prob, name=name) else: return tf.add(layer, 0, name=name) def norm(layer, norm_type='batch_norm', decay=0.9, id=0, is_training=True, activation_fn=tf.nn.relu, prefix='conv_'): if norm_type != 'batch_norm' and norm_type != 'layer_norm': return tf.nn.relu(layer) with tf.variable_scope('norm_layer_%s%d' % (prefix, id)) as vs: if norm_type == 'batch_norm': if is_training: try: layer = tf.contrib.layers.batch_norm(layer, is_training=True, center=True, scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs) # updates_collections=None
tensorflow.nn.relu
12,046
import tensorflow as tf z_t = tf.gather(y, m) z_t_len = tf.strings.length(z_t) z_t = tf.string_split([z_t], delimiter='').values for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'): u = tf.string_join(x_t[i:i + self._p], '') vx_keys, r = tf.cond( tf.greater(vx.lookup(u), -1),
tensorflow.range
12,047
from tensorflow.contrib.opt.python.training import variable_clipping_optimizer def _setupSparse(self, is_distributed, dtype): with self._maybeWithDevice("/job:ps" if is_distributed else None): var0 = variables.Variable( [[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], dtype=dtype) var1 = variables.Variable( [[0.0, 1.0], [0.0, 3.0], [0.0, 5.0]], dtype=dtype) with self._maybeWithDevice("/job:worker" if is_distributed else None): grads = ops.IndexedSlices( constant_op.constant( [[0.1, 0.1], [0.1, 0.1]], dtype=dtype), [0, 2], [3, 2]) sgd = gradient_descent.GradientDescentOptimizer(3.0) clip_opt = variable_clipping_optimizer.VariableClippingOptimizer( sgd, {var0: [1], var1: [0]}, 2.0) update_op = clip_opt.apply_gradients( list(zip([grads, grads], [var0, var1]))) variables.global_variables_initializer().run() return var0, var1, update_op def _assertSparseCorrect(self, var0, var1, update_op): # Fetch params to validate initial values self.assertAllCloseAccordingToType([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], var0.eval())
tensorflow.contrib.opt.python.training.variable_clipping_optimizer.VariableClippingOptimizer
12,048
import tensorflow as tf # The with/name above is optional; you can add it or leave it out, but the line below must be written. # It creates a logs folder under the current directory and saves the graph information into it. # After this code runs, a logs folder will have been created. if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1: # tensorflow version < 0.12 writer = tf.train.SummaryWriter('logs/', sess.graph) else: # tensorflow version >= 0.12 writer = tf.summary.FileWriter("logs/", sess.graph) if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1: init = tf.initialize_all_variables() else: init = tf.global_variables_initializer() sess.run(init)
tensorflow.__version__.split
12,049
import tensorflow as tf rnn_output = feats_all rnn_output_size = nfeats_tot if do_rnn: with tf.variable_scope('rnn_proj'): rnn_proj_w = tf.get_variable('W', [nfeats_tot, rnn_size], initializer=tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype), dtype=dtype) rnn_proj_b = tf.get_variable('b', [rnn_size], initializer=tf.constant_initializer(0.0), dtype=dtype) rnn_inputs = tf.nn.bias_add(tf.matmul(feats_all, rnn_proj_w), rnn_proj_b) rnn_inputs = tf.reshape(rnn_inputs, [batch_size, rnn_nunroll, rnn_size]) rnn_inputs = tf.split(rnn_inputs, rnn_nunroll, axis=1) rnn_inputs = [tf.squeeze(input_, [1]) for input_ in rnn_inputs] if rnn_cell_type == 'rnn': cell_fn = tf.nn.rnn_cell.BasicRNNCell
tensorflow.matmul
12,050
import tensorflow as tf def compile_data(tmp_dir, datasets, filename, datatypes_to_clean=None): """Concatenates all `datasets` and saves to `filename`.""" datatypes_to_clean = datatypes_to_clean or [] filename = os.path.join(tmp_dir, filename) lang1_fname = filename + ".lang1" lang2_fname = filename + ".lang2" if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname): tf.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname, lang2_fname) return filename with tf.gfile.GFile(lang1_fname, mode="w") as lang1_resfile: with tf.gfile.GFile(lang2_fname, mode="w") as lang2_resfile: for dataset in datasets: url = dataset[0] compressed_filename = os.path.basename(url) compressed_filepath = os.path.join(tmp_dir, compressed_filename) if url.startswith("http"): generator_utils.maybe_download(tmp_dir, compressed_filename, url) if compressed_filename.endswith(".zip"): zipfile.ZipFile(os.path.join(compressed_filepath),
tensorflow.gfile.GFile
12,051
import tensorflow as tf self.epsilon = epsilon def __call__(self,input_var,name=None,**kwargs) : if( input_var.shape.ndims > 2 ) : dims = tf.reduce_prod(tf.shape(input_var)[1:]) input_var = tf.reshape(input_var,[-1,dims]) def _init(): v_norm = tf.nn.l2_normalize(self.v,axis=0) t = tf.matmul(input_var,v_norm) mu,var = tf.nn.moments(t,axes=[0]) std = tf.sqrt(var+self.epsilon) return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)] require_init = tf.reduce_any(tf.is_nan(self.g)) init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b]) with tf.control_dependencies(init_ops): w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0) return tf.matmul(input_var,w)+self.b def get_variables(self): #TODO: self.v should be l2-normalized or not? / currently not. return {'v':self.v,'b':self.b,'g':self.g}
tensorflow.assign
12,052
import tensorflow as tf if self.demo: self.c = tf.placeholder(tf.int32, [None, config.test_para_limit],"context") self.q = tf.placeholder(tf.int32, [None, config.test_ques_limit],"question") self.ch = tf.placeholder(tf.int32, [None, config.test_para_limit, config.char_limit],"context_char")
tensorflow.placeholder
12,053
import tensorflow as tf return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) return tf_example def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = {
tensorflow.train.Features
12,054
import tensorflow as tf X = self._calibrate(X, w, h, ch, w // stride, h // stride, ch, is_train=is_train) X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check return X def _add_max_pool_3x3_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train): filter_size = 3 stride = 2 if is_reduction else 1 with tf.variable_scope('max_pool_3x3_op'): X = tf.nn.max_pool(X, ksize=(1, filter_size, filter_size, 1), strides=[1, stride, stride, 1], padding='SAME') X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check return X def _add_separable_conv_3x3_op(self, *args, **kwargs): return self._add_separable_conv_op(*args, **kwargs, filter_size=3)
tensorflow.variable_scope
12,055
import tensorflow as tf Returns: A tensor with softmax-normalized values on the last dimension. """ with tf.name_scope(name, 'softmax_N', [tensor]): exp_tensor = tf.exp(tensor) reduction_indices = [tensor.get_shape().ndims - 1] return tf.div(exp_tensor, tf.reduce_sum( exp_tensor, axis=reduction_indices, keep_dims=True)) def optimizer(optimizer="adam", learning_rate=.001, momentum=.9): """Create model optimizer. Parameters
tensorflow.reduce_sum
12,056
import tensorflow as tf # Calculate the gradients for each model tower. tower_grads = [] with tf.variable_scope(tf.get_variable_scope()): for i in xrange(FLAGS.num_gpus): with tf.device('/gpu:%d' % i): with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope: # Calculate the loss for one tower of the CIFAR model. This function # constructs the entire CIFAR model but shares the variables across # all towers. loss = tower_loss(scope) # Reuse variables for the next tower. tf.get_variable_scope().reuse_variables() # Retain the summaries from the final tower. summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope) # Calculate the gradients for the batch of data on this CIFAR tower. grads = opt.compute_gradients(loss) # Keep track of the gradients across all towers. tower_grads.append(grads) # We must calculate the mean of each gradient. Note that this is the
tensorflow.get_variable_scope
12,057
import tensorflow as tf self.g_sum = tf.summary.merge([self.loss_GABA_sum,self.loss_GBAB_sum,self.g_total_loss_sum]) self.loss_db_sum = tf.summary.scalar("db_loss", self.D_B_loss) self.loss_da_sum = tf.summary.scalar("da_loss", self.D_A_loss) self.loss_d_sum = tf.summary.scalar("d_loss",self.discriminator_loss) self.db_loss_real_sum = tf.summary.scalar("db_loss_real", self.D_B_loss_real) self.db_loss_fake_sum = tf.summary.scalar("db_loss_fake", self.D_B_loss_fake) self.da_loss_real_sum = tf.summary.scalar("da_loss_real", self.D_A_loss_real) self.da_loss_fake_sum = tf.summary.scalar("da_loss_fake", self.D_A_loss_fake) self.d_sum = tf.summary.merge( [self.loss_da_sum, self.da_loss_real_sum, self.da_loss_fake_sum, self.loss_db_sum, self.db_loss_real_sum, self.db_loss_fake_sum, self.loss_d_sum] ) trainable_variables = tf.trainable_variables() self.d_variables = [var for var in trainable_variables if 'discriminator' in var.name]
tensorflow.summary.merge
12,058
import tensorflow as tf with tf.control_dependencies([baseline_update]): self.reward = tf.identity(self.reward) self.loss = self.sample_log_prob * (self.reward - self.baseline) self.train_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="train_step") tf_variables = [var for var in tf.trainable_variables() if var.name.startswith(self.name)] print("-" * 80) for var in tf_variables: print(var) self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops( self.loss,
tensorflow.trainable_variables
12,059
import tensorflow as tf else: raise NotImplementedError bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1] nin = x.get_shape()[channel_ax].value wshape = [rf, rf, nin, nf] with tf.variable_scope(scope): w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale)) b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0)) if not one_dim_bias and data_format == 'NHWC': b = tf.reshape(b, bshape) return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0): with tf.variable_scope(scope): nin = x.get_shape()[1].value w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale)) print("w is "+str(w)) b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
tensorflow.reshape
12,060
from tensorflow.python.framework import ops value matches `metric`. """ default_name = _at_k_name('average_precision', k) with ops.name_scope(name, default_name, (predictions, labels)) as scope: # Calculate per-example average precision, and apply weights. average_precision = sparse_average_precision_at_k( predictions=predictions, labels=labels, k=k) if weights is not None: weights = math_ops.to_double(weights) average_precision = math_ops.mul(average_precision, weights) # Create accumulation variables and update ops for max average precision and # total average precision. with ops.name_scope(None, 'max', (average_precision,)) as max_scope: # `max` is the max possible precision. Since max for any row is 1.0: # - For the unweighted case, this is just the number of rows. # - For the weighted case, it's the sum of the weights broadcast across # `average_precision` rows. max_var = contrib_variables.local_variable( array_ops.zeros([], dtype=dtypes.float64), name=max_scope) if weights is None: batch_max = math_ops.to_double( array_ops.size(average_precision, name='batch_max')) else: # TODO(ptucker): More efficient way to broadcast? broadcast_weights = math_ops.mul(
tensorflow.python.framework.ops.name_scope
12,061
import tensorflow as tf sync_lp_time = stop - start print(f"Got {len(sync_lp_observations)} observations in {sync_lp_time:.2f}s") # %% [markdown] # ## Comparison # To compare outcomes of sync and async runs, let's plot their respective regrets side by side, and print out the running time. For this toy problem we expect async scenario to run a little bit faster on machines with multiple CPU. # %% from util.plotting import plot_regret import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 2) sync_lp_min_idx = tf.squeeze(tf.argmin(sync_lp_observations, axis=0)) async_lp_min_idx = tf.squeeze(tf.argmin(async_lp_observations, axis=0)) plot_regret( sync_lp_observations.numpy(), ax[0], num_init=len(initial_data), idx_best=sync_lp_min_idx ) ax[0].set_yscale("log") ax[0].set_ylabel("Regret") ax[0].set_ylim(0.0000001, 100) ax[0].set_xlabel("# evaluations") ax[0].set_title(f"Sync LP, {len(sync_lp_observations)} points, time {sync_lp_time:.2f}") plot_regret( async_lp_observations.numpy(), ax[1], num_init=len(initial_data), idx_best=async_lp_min_idx ) ax[1].set_yscale("log")
tensorflow.argmin
12,062
import tensorflow as tf # landm loss (smooth L1) mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
tensorflow.shape
12,063
import tensorflow as tf rep_tensor_split = tf.reshape(rep_tensor_comp, [bs, block_num, block_len, input_dim]) # bs,bn,bl,d rep_mask_split = tf.reshape(rep_mask_comp, [bs, block_num, block_len]) # bs,bn,bl # non-linear rep_map = bn_dense_layer(rep_tensor_split, ivec, True, 0., 'bn_dense_map', activation, False, wd, keep_prob, is_train) # bs,bn,bl,vec rep_map_tile = tf.tile(tf.expand_dims(rep_map, 2), [1, 1, block_len, 1, 1]) # bs,bn,bl,bl,vec # rep_map_dp = dropout(rep_map, keep_prob, is_train) bn = block_num bl = block_len with tf.variable_scope('self_attention'): # @2.self-attention in block
tensorflow.expand_dims
12,064
import tensorflow as tf layers = [] input = tf.concat([inputs, targets], axis=3) with tf.variable_scope('layer_1'): convolved = discrim_conv(input, ndf, stride=2)
tensorflow.variable_scope
12,065
import tensorflow as tf pv_sum += len(input_arr) last_index = idx return gauc / pv_sum def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = tf.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1]
tensorflow.concat
12,066
import tensorflow as tf if encoder.final_state == 'concat_last': # concats last states of all backward layers (full LSTM states) encoder_state_ = tf.concat(encoder_states_, axis=1) elif encoder.final_state == 'average': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.final_state == 'average_inputs': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.bidir and encoder.final_state == 'last_both': encoder_state_ = tf.concat([last_forward, last_backward], axis=1) elif encoder.final_state == 'none': encoder_state_ = tf.zeros(shape=[batch_size, 0]) elif encoder.bidir and not encoder.final_state == 'last_forward': # last backward hidden state encoder_state_ = last_backward else: # last forward hidden state encoder_state_ = last_forward if encoder.bidir and encoder.bidir_projection: encoder_outputs_ = dense(encoder_outputs_, cell_output_size, use_bias=False, name='bidir_projection') if encoder.attend_inputs: encoder_outputs.append(encoder_inputs_) elif encoder.attend_both: encoder_outputs.append(tf.concat([encoder_inputs_, encoder_outputs_], axis=2)) else:
tensorflow.zeros
12,067
from tensorflow.python.framework import ops else: assert not self._exported_as_v1 # TODO(b/149997088): Raise an exception once we no longer support using # the Keras layer with estimator based Trainer. tf.compat.v1.logging.warning('Loading a TF2 SavedModel but eager mode ' 'seems disabled.') # If exported as TF2 SavedModel but not invoked in eager mode, # re-initialize the saved_model_loader_value as __init__ could have been # called in a different graph context. default_graph = ops.get_default_graph() if (self._loaded_saved_model_graph is None or self._loaded_saved_model_graph is not default_graph): self._saved_model_loader_value = saved_transform_io_v2.SavedModelLoader( self._tft_output.transform_savedmodel_dir) self._loaded_saved_model_graph = default_graph return self._saved_model_loader_value def _init_batch_counters(self, *args, **kwargs): # pylint: disable=g-doc-args
tensorflow.python.framework.ops.get_default_graph
12,068
import tensorflow as tf import TensorFI as ti # Import MNIST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # In this example, we limit mnist data Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates) Xte, Yte = mnist.test.next_batch(200) #200 for testing # tf Graph Input xtr = tf.placeholder("float", [None, 784]) xte = tf.placeholder("float", [784]) # Nearest Neighbor calculation using L1 Distance # Calculate L1 Distance distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1) # Prediction: Get min distance index (Nearest neighbor) pred = tf.arg_min(distance, 0) accuracy = 0. # Initialize the variables (i.e. assign their default value)
tensorflow.placeholder
12,069
import tensorflow as tf rec = tf.cast(kw * kh, tf.float32) n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.))
tensorflow.math.log
12,070
import tensorflow as tf ''' total_loss = self.loss # add regularizers in case there are any if len(self.regularizers)>0: total_loss += tf.add_n(self.regularizers, name="regularization") # 1st part of minimize: compute_gradient self.grads_and_vars = self._optimizer.compute_gradients(total_loss) # clip gradients clipped_grads_and_vars = self._clip_gradients(self.grads_and_vars, self._grad_clipping_tuple) # compute norms in case they need to be logged self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars] self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars] # check that gradients are finite grads = [tf.check_numerics(g, "grads is not finite") for (g, v) in clipped_grads_and_vars] variables = [tf.check_numerics(v, "grads is not finite") for (g, v) in clipped_grads_and_vars] self.gradient_weight_global_norms = [tf.global_norm(grads), tf.global_norm(variables)] # 2nd part of minimize: apply_gradient optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step) update_ops = tf.group(*self.update_ops) self.training_op = tf.group(update_ops, optimizer_step)
tensorflow.norm
12,071
import tensorflow as tf identity_mat = tf.diag(tf.ones(shape=[embedding_size])) # Create variables for logistic regression A = tf.Variable(tf.random_normal(shape=[embedding_size,1])) b = tf.Variable(tf.random_normal(shape=[1,1])) # Initialize placeholders x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
tensorflow.random_normal
12,072
import tensorflow as tf outputs_to_predictions[output].append( tf.expand_dims(tf.nn.softmax(logits_reversed), 4))
tensorflow.nn.softmax
12,073
import tensorflow as tf return roc_sc, auprc_score,accuracy,precision,recall,f ,apk_sc def construct_placeholders(edge_types): placeholders = { 'batch': tf.placeholder(tf.int32, name='batch'), 'batch_neg': tf.placeholder(tf.int32, name='batch_neg'), 'batch_node':tf.placeholder(tf.int32,name = 'batch_node'), 'adj_min_batch': tf.placeholder(tf.float32,name='adj_min_batch'), 'sim_min_batch': tf.placeholder(tf.float32,name='sim_min_batch'), 'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'), 'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'), 'batch_col_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_col_edge_type'), 'degrees': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), } placeholders.update({ 'adj_mats_%d,%d,%d' % (i, j, k): tf.sparse_placeholder(tf.float32) for i, j in edge_types for k in range(edge_types[i,j])}) placeholders.update({ 'feat_%d' % i: tf.sparse_placeholder(tf.float32) for i, _ in edge_types}) return placeholders ########################################################### test_size = 0.20
tensorflow.placeholder
12,074
import tensorflow as tf def main(_): tf.reset_default_graph() # Import data cifar = cf.cifar10(batchSize=FLAGS.batch_size, downloadDir=FLAGS.data_dir) with tf.variable_scope('inputs'): # Create the model x = tf.placeholder(tf.float32, [None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels]) # Define loss and optimizer y_ = tf.placeholder(tf.float32, [None, FLAGS.num_classes]) # Whether model is training train = tf.placeholder(tf.bool, []) # Build the graph for the deep net y_conv, img_summary = deepnn(x, train)
tensorflow.placeholder
12,075
import tensorflow as tf start_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[2]],axis = -1),1, bias = False, name = "start_pointer"),-1) end_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[3]],axis = -1),1, bias = False, name = "end_pointer"), -1) self.logits = [mask_logits(start_logits, mask = self.c_mask), mask_logits(end_logits, mask = self.c_mask)] logits1, logits2 = [l for l in self.logits] outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2), tf.expand_dims(tf.nn.softmax(logits2), axis=1)) outer = tf.matrix_band_part(outer, 0, config.ans_limit) self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1) self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1) losses = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits1, labels=self.y1)
tensorflow.nn.softmax
12,076
import tensorflow as tf prev_c, prev_h = next_c, next_h anchors = anchors.write(layer_id, tf.zeros_like(next_h[-1])) anchors_w_1 = anchors_w_1.write( layer_id, tf.matmul(next_h[-1], self.w_attn_1)) def _condition(layer_id, *args): return tf.less(layer_id, self.num_cells + 2) def _body(layer_id, inputs, prev_c, prev_h, anchors, anchors_w_1, arc_seq, entropy, log_prob): indices = tf.range(0, layer_id, dtype=tf.int32) start_id = 4 * (layer_id - 2)
tensorflow.less
12,077
import tensorflow as tf extended_vsize = vsize if self.max_phrase_size is not None: extended_vsize += self.max_phrase_size extra_zeros = tf.zeros((batch_size, self.max_phrase_size)) vocab_dist = tf.concat(values=[vocab_dist, extra_zeros], axis=1) # [batch_size, extended_vsize] if self.options.add_first_word_prob_for_phrase: # add prob of the first word to each phrase
tensorflow.zeros
12,078
import tensorflow as tf pred_max = tf.reduce_max(predictions, axis=-1) pred_indices = tf.argmax(predictions, axis=-1) pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32) width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32) pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), pred_y * height / tf.cast(heatmap_size, tf.float32) if clip_at_zero: pred_x, pred_y = pred_x * tf.cast(pred_max>0, tf.float32), pred_y * tf.cast(pred_max>0, tf.float32) pred_x = pred_x * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (width / 2.) pred_y = pred_y * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (height / 2.) if config.PRED_DEBUG: pred_indices_ = tf.squeeze(pred_indices) image_ = tf.squeeze(image) * 255. pred_heatmap = tf.one_hot(pred_indices_, heatmap_size*heatmap_size, on_value=1., off_value=0., axis=-1, dtype=tf.float32)
tensorflow.cast
12,079
import tensorflow as tf self.eval_op_names = ['loss', 'pr_trn', 'pr_msk'] + list(metrics.keys()) self.saver_eval = tf.train.Saver(self.vars)
tensorflow.train.Saver
12,080
import tensorflow as tf self.ch = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label1") self.end_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label2") self.position_emb = position_embedding(self.c, 2 * self.config.hidden_size) self.c_mask = tf.cast(self.c, tf.bool) # index 0 is padding symbol N x self.max_p_num, max_p_len self.q_mask = tf.cast(self.q, tf.bool) self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1) self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1) self.dropout = tf.placeholder(tf.float32, name="dropout") self.global_step = tf.Variable(0, name="global_step", trainable=False) """ :descrition: The embedding layer, question and passage share embeddings """
tensorflow.cast
12,081
import tensorflow as tf def main(_): tf.reset_default_graph()
tensorflow.reset_default_graph
12,082
import tensorflow as tf self.q = tf.slice(self.q, [0, 0], [N, self.q_maxlen]) self.c_mask = tf.slice(self.c_mask, [0, 0], [N, self.c_maxlen]) self.q_mask = tf.slice(self.q_mask, [0, 0], [N, self.q_maxlen]) self.ch = tf.slice(self.ch, [0, 0, 0], [N, self.c_maxlen, CL]) self.qh = tf.slice(self.qh, [0, 0, 0], [N, self.q_maxlen, CL]) self.y1 = tf.argmax(tf.slice(self.y1, [0, 0], [N, self.c_maxlen]),axis=-1)
tensorflow.slice
12,083
from tensorflow.python.framework import ops denominator, array_ops.ones_like(denominator)) iou = math_ops.div(cm_diag, denominator) return math_ops.reduce_mean(iou, name=name) mean_iou = compute_mean_iou('mean_iou') if metrics_collections: ops.add_to_collections(metrics_collections, mean_iou) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_iou, update_op def _next_array_size(required_size, growth_factor=1.5): """Calculate the next size for reallocating a dynamic array. Args: required_size: number or tf.Tensor specifying required array capacity. growth_factor: optional number or tf.Tensor specifying the growth factor
tensorflow.python.framework.ops.add_to_collections
12,084
import tensorflow as tf predictions = tf.nn.softmax(logits) y_as_list = tf.unstack(y, num=num_steps, axis=1) losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,logits=logits) total_loss = tf.reduce_mean(losses) train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss) '''Train the network''' def train_rnn(num_epochs, num_steps, state_size=4, verbose=True): with tf.Session() as sess: sess.run(tf.global_variables_initializer()) #sess = tf_debug.LocalCLIDebugWrapperSession(sess) training_losses = [] for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)): training_loss = 0 training_state = np.zeros((batch_size, state_size)) # ->(200, 4) if verbose: print('\nepoch', idx) for step, (X, Y) in enumerate(epoch): tr_losses, training_loss_, training_state, _ = \ sess.run([losses, total_loss, final_state, train_step], feed_dict={x:X, y:Y, init_state:training_state})
tensorflow.global_variables_initializer
12,085
import tensorflow as tf return params, params def encode(self, x, encode_params): """See base class.""" addend = dummy_rng_source(encode_params[self.SEED_PARAM_KEY], x.shape.num_elements()) addend = tf.reshape(addend, x.shape) return {self.ENCODED_VALUES_KEY: x + addend} def decode(self, encoded_tensors, decode_params,
tensorflow.reshape
12,086
import tensorflow as tf # name='last_pred_mse') # filter all invisible keypoint maybe better for this task # all_visible = tf.logical_and(key_v>0, isvalid>0) # targets_list = [tf.boolean_mask(targets_list[ind], all_visible) for ind in list(range(len(targets_list)))] # pred_outputs = [tf.boolean_mask(pred_outputs[ind], all_visible, name='boolean_mask_{}'.format(ind)) for ind in list(range(len(pred_outputs)))] all_visible = tf.expand_dims(tf.expand_dims(tf.cast(tf.logical_and(key_v>0, isvalid>0), tf.float32), axis=-1), axis=-1) targets_list = [targets_list[ind] * all_visible for ind in list(range(len(targets_list)))] pred_outputs = [pred_outputs[ind] * all_visible for ind in list(range(len(pred_outputs)))] sq_diff = tf.reduce_sum(tf.squared_difference(targets, pred_outputs[-1]), axis=-1) last_pred_mse = tf.metrics.mean_absolute_error(sq_diff, tf.zeros_like(sq_diff), name='last_pred_mse') metrics = {'normalized_error': ne_mertric, 'last_pred_mse':last_pred_mse} predictions = {'normalized_error': ne_mertric[1]} ne_mertric = tf.identity(ne_mertric[1], name='ne_mertric') base_learning_rate = params['learning_rate'] mse_loss_list = []
tensorflow.squared_difference
12,087
import tensorflow as tf self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a))
tensorflow.reduce_max
12,088
from tensorflow.python.ops import math_ops # To prevent broadcasting inside "-". if len(target.get_shape()) == 1: target = array_ops.expand_dims(target, dim=[1]) logits.get_shape().assert_is_compatible_with(target.get_shape()) return math_ops.square(logits - math_ops.to_float(target)) def _log_loss_with_two_classes(logits, target): # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
tensorflow.python.ops.math_ops.to_float
12,089
from tensorflow.python.ops import parsing_ops return self._target_column.logits_to_predictions(logits, proba=True) def _get_feature_ops_from_example(self, examples_batch): column_types = layers.create_feature_spec_for_parsing(( self._get_linear_feature_columns() or []) + ( self._get_dnn_feature_columns() or [])) features = parsing_ops.parse_example(examples_batch, column_types) return features def _get_linear_feature_columns(self): if not self._linear_feature_columns: return None
tensorflow.python.ops.parsing_ops.parse_example
12,090
import tensorflow as tf if pt[1] > box_limits_z[1]: box_limits_z[1] = pt[1] mean_x = tf.reduce_mean(box_limits_x) mean_z = tf.reduce_mean(box_limits_z) else: mean_x = tf.reduce_mean(labeled_translations[:, 0])
tensorflow.reduce_mean
12,091
import tensorflow as tf output_ = generate(output, input_, context) argmax = lambda: tf.argmax(output_, 1) target = lambda: inputs.read(time + 1) softmax = lambda: tf.squeeze(tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1), axis=1) use_target = tf.logical_and(time < time_steps - 1, tf.random_uniform([]) >= feed_previous) predicted_symbol = tf.case([
tensorflow.nn.softmax
12,092
import tensorflow as tf sims_logits = sims_logits - tf.reshape(logits_max, [-1, 1]) sims_probs = tf.nn.softmax(sims_logits) sim_labels = tf.constant(np.arange(bs * 2, dtype=np.int32)) sims_onehot = tf.one_hot(sim_labels, bs * 2) c_real_loss = - tf.reduce_mean( tf.reduce_sum(sims_onehot * tf.log(sims_probs + 1e-10), 1)) self.d_loss += c_real_loss * self._weight_contrastive_loss_d self._tpu_summary.scalar("loss/c_real_loss", c_real_loss) self._tpu_summary.scalar("loss/penalty", penalty_loss)
tensorflow.log
12,093
import tensorflow as tf op_id = tf.to_int32(op_id) op_id = tf.reshape(op_id, [1]) arc_seq = arc_seq.write(start_id + 2 * i + 1, op_id) curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=op_id) log_prob += curr_log_prob curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=tf.nn.softmax(logits))) entropy += curr_ent inputs = tf.nn.embedding_lookup(self.w_emb, op_id) next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm) anchors = anchors.write(layer_id, next_h[-1]) anchors_w_1 = anchors_w_1.write(layer_id, tf.matmul(next_h[-1], self.w_attn_1)) inputs = self.g_emb return (layer_id + 1, inputs, next_c, next_h, anchors, anchors_w_1, arc_seq, entropy, log_prob) loop_vars = [ tf.constant(2, dtype=tf.int32, name="layer_id"), inputs, prev_c, prev_h, anchors, anchors_w_1,
tensorflow.matmul
12,094
import tensorflow as tf # in the future. assert_strides = tf.assert_equal( tf.cast(tf.stack([strides[1], strides[2]]), tf.int64), tf.constant([1, 1], dtype=tf.int64),
tensorflow.stack
12,095
import tensorflow as tf grads = tf.gradients(self.loss, self.var_list) grads, _ = tf.clip_by_global_norm(grads, 40) with tf.name_scope('sync'): # synchronization between the worker net and the global net with tf.name_scope('pull'): # fetch the global parameters and copy them into the local net self.pull_params_op = tf.group(*[v1.assign(v2) for v1, v2 in zip(self.var_list, globalAC.var_list)]) with tf.name_scope('push'): # push the local gradients to the global net self.update_params_op = OPT.apply_gradients(zip(grads, globalAC.var_list)) # what is pushed are the gradients (grads) of the local net's actor and critic parameters, computed above # apply_gradients is a built-in tf.train.Optimizer method that applies the computed gradients to the global net self.inc_step = self.global_step.assign_add(tf.shape(self.obs)[0]) self.train_op = tf.group(self.update_params_op, self.inc_step) # GLOBAL_STEP += tf.shape(self.obs)[0] def build_model(self): """ Builds the manager and worker models. """ with tf.variable_scope('FeUdal'): self.build_placeholders() self.build_perception()
tensorflow.shape
12,096
import tensorflow as tf epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2 # Compute A_{h+1} A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1]) # Compute z_{h}A_{h+1} Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5) Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5) # Compute u_{h+1} and v_{h+1} U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2) Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.) KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
tensorflow.sqrt
12,097
import tensorflow as tf q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True) q_tp1_best_using_online_net = tf.arg_max(q_tp1_using_online_net, 1) q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1) else:
tensorflow.one_hot
12,098
import tensorflow as tf tf.expand_dims(tf.nn.softmax(self.logits2), axis=1)) outer = tf.matrix_band_part(outer, 0, self.max_a_len) self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1) self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1) def _compute_loss(self): def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2): logits = tf.nn.sigmoid(logits) zeros = array_ops.zeros_like(logits, dtype=logits.dtype) pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros) neg_p_sub = array_ops.where(labels > zeros, zeros, logits) cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(logits, 1e-8, 1.0)) \ - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - logits, 1e-8, 1.0)) return tf.reduce_sum(cross_ent, 1) start_label = tf.one_hot(self.start_label, tf.shape(self.logits1)[1], axis=1) end_label = tf.one_hot(self.end_label, tf.shape(self.logits2)[1], axis=1) if self.config.loss_type == 'cross_entropy': start_loss = tf.nn.softmax_cross_entropy_with_logits( logits=self.logits1, labels=start_label) end_loss = tf.nn.softmax_cross_entropy_with_logits( logits=self.logits2, labels=end_label)
tensorflow.clip_by_value
12,099