seed (string, 25–2.89k chars) | seed_api (string, 14–102 chars) | index (int64, 0–14.8k)
---|---|---
import tensorflow as tf
cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=classes,
num_decoder_symbols=classes, embedding_size=24)
targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
return tf.nn.seq2seq.model_with_buckets(
enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
per_example_loss=per_example_loss)
# Now we construct the copy model.
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
|
tensorflow.nn.seq2seq.model_with_buckets
| 9,400 |
import tensorflow as tf
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
|
tensorflow.FixedLenFeature
| 9,401 |
import tensorflow as tf
if self.hparams.l2_loss > 0:
for p in denoise_params:
# self.weights_propen = p
# p=tf.Print(p,[p],message="show the weights")
self.exam_loss += self.hparams.l1_loss * tf.reduce_sum(tf.abs(p))
for p in ranking_model_params:
self.rank_loss += self.hparams.l2_loss * tf.nn.l2_loss(p)
self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss
denoise_gradients = tf.gradients(self.exam_loss, denoise_params)
ranking_model_gradients = tf.gradients(self.rank_loss, ranking_model_params)
if self.hparams.max_gradient_norm > 0:
denoise_gradients, denoise_norm = tf.clip_by_global_norm(denoise_gradients,
self.hparams.max_gradient_norm)
ranking_model_gradients, ranking_model_norm = tf.clip_by_global_norm(ranking_model_gradients,
self.hparams.max_gradient_norm * self.hparams.ranker_loss_weight)
self.norm = tf.global_norm(denoise_gradients + ranking_model_gradients)
opt_denoise = self.optimizer_func(self.hparams.learning_rate)
opt_ranker = self.optimizer_func(self.ranker_learning_rate)
|
tensorflow.gradients
| 9,402 |
import tensorflow as tf
dec = tf.layers.max_pooling1d(dec, pool_size=2, strides=1, padding="same")
dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-1", padding="SAME")
dec = tf.nn.relu(tf.layers.batch_normalization(dec, training=self.training))
dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-2", padding="SAME")
dec = tf.layers.batch_normalization(dec, training=self.training)
dec = tf.layers.dense(dec, embed_size // 2)
for i in range(4):
dec = highwaynet(
dec, num_units=embed_size // 2, scope="decoder-highwaynet-{}".format(i)
)
with tf.variable_scope("decoder-gru", reuse=False):
cell = tf.contrib.rnn.GRUCell(embed_size // 2)
cell_bw = tf.contrib.rnn.GRUCell(embed_size // 2)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)
outputs = tf.concat(outputs, 2)
self.Z_hat = tf.layers.dense(outputs, 1 + fourier_window_size // 2)
self.loss1 = tf.reduce_mean(tf.abs(self.Y_hat - self.Y))
self.loss2 = tf.reduce_mean(tf.abs(self.Z_hat - self.Z))
self.loss = self.loss1 + self.loss2
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)
|
tensorflow.variable_scope
| 9,403 |
import tensorflow as tf
Args:
inputs: 5-D tensor BxDxHxWxC
kernel_size: a list of 3 ints
stride: a list of 3 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.avg_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
""" Batch normalization on convolutional maps and beyond...
Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
|
tensorflow.nn.avg_pool3d
| 9,404 |
import tensorflow as tf
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
|
tensorflow.where
| 9,405 |
import tensorflow as tf
mask_ = tf.ones([FLAGS.batch_size,64,64,3])
mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]])
mask2__ = tf.ones([FLAGS.batch_size,78,78,3])
mask2_ = tf.pad(mask2__, [[0,0],[25,25],[25,25],[0,0]])
mask2 = mask2_ - mask
pred_annotation, logits = inference((1-mask)*image + mask*255, keep_probability,z)
tf.summary.image("input_image", image, max_outputs=2)
tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss0 = tf.reduce_mean(tf.abs(z))
loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)),[1,2,3])))
# loss2 = tf.reduce_mean(tf.square((image - logits)*mask2))
# loss = loss1 + loss2 + loss0
# loss = tf.reduce_mean(tf.squared_difference(logits ,annotation ))
loss_summary = tf.summary.scalar("entropy", loss)
grads = train_z(loss,z)
trainable_var = tf.trainable_variables()
if FLAGS.debug:
|
tensorflow.cast
| 9,406 |
import tensorflow as tf
return tf.reduce_sum(kl)
@pytest.mark.parametrize('white', [True, False])
def test_oned(session_tf, white, mu, sqrt, K_batch):
"""
Check that the KL divergence matches a 1D by-hand calculation.
"""
m = 0
mu1d = mu[m,:][None,:] # 1 x N
s1d = sqrt[:,m,m][:,None,None] # N x 1 x 1
K1d = K_batch[:,m,m][:,None,None] # N x 1 x 1
kl = gauss_kl(mu1d,s1d,K1d if not white else None)
kl_tf = tf_kl_1d(tf.reshape(mu1d,(-1,)), # N
tf.reshape(s1d,(-1,)), # N
None if white else tf.reshape(K1d,(-1,))) # N
np.testing.assert_allclose(kl.eval(), kl_tf.eval())
if __name__ == "__main__":
tf.test.main()
|
tensorflow.reshape
| 9,407 |
import tensorflow as tf
phrase_starts: [batch_size, phrase_length]
vocab_dist: [batch_size, vsize]
attn_dist: [batch_size, phrase_length]
return: [batch_size, phrase_length]
'''
def single_instance(x):
cur_passage_words = x[0] # [passage_length]
cur_phrase_starts = x[1] # [phrase_length]
cur_vocab_dist = x[2] # [vsize]
cur_attn_dist = x[3] # [passage_length]
# first: get the first word for each phrase
first_words = tf.gather(cur_passage_words, cur_phrase_starts) # [phrase_length]
# second: get the probs for each word
first_word_probs = tf.gather(cur_vocab_dist, first_words) # [phrase_length]
return cur_attn_dist + first_word_probs
elems = (in_passage_words, phrase_starts, vocab_dist, attn_dist)
return tf.map_fn(single_instance, elems, dtype=tf.float32) # [batch_size, phrase_length]
class CovCopyAttenGen:
def __init__(self, placeholders, options, vocab):
self.options = options
|
tensorflow.gather
| 9,408 |
import tensorflow as tf
|
tensorflow.log
| 9,409 |
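The seed snippet for tensorflow.log above is empty; a minimal hedged sketch (assuming a TF 1.x session environment, with illustrative values):
import tensorflow as tf
# Element-wise natural logarithm.
x = tf.constant([1.0, 2.718282, 10.0])
log_x = tf.log(x)  # approximately [0.0, 1.0, 2.3026]
with tf.Session() as sess:
    print(sess.run(log_x))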
import tensorflow as tf
:param bias_start:
:param scope:
:return:
"""
# Reshape input to (batch_size, num_nodes, input_dim)
output_size = self._num_units
batch_size = inputs.get_shape()[0].value
inputs = tf.reshape(inputs, [batch_size, self._num_nodes, -1])
input_size = inputs.get_shape()[2].value
dtype = inputs.dtype
x = inputs
x0 = tf.transpose(x, perm=[1, 2,0]) # (num_nodes, total_arg_size, batch_size)
x0 = tf.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
|
tensorflow.reshape
| 9,410 |
import tensorflow as tf
def _bn(self, name, x):
with tf.variable_scope(name):
moving_average_decay = 0.9
decay = moving_average_decay
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
mu = tf.get_variable('mu', batch_mean.shape, dtype=tf.float32,
initializer=tf.zeros_initializer(), trainable=False)
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, mu)
tf.add_to_collection('mu_sigma_bn', mu)
sigma = tf.get_variable('sigma', batch_var.shape, dtype=tf.float32,
initializer=tf.ones_initializer(), trainable=False)
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, sigma)
tf.add_to_collection('mu_sigma_bn', sigma)
beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32,
initializer=tf.zeros_initializer())
gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32,
initializer=tf.ones_initializer())
# BN when training
update = 1.0 - decay
update_mu = mu.assign_sub(update * (mu - batch_mean))
update_sigma = sigma.assign_sub(update * (sigma - batch_var))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu)
|
tensorflow.ones_initializer
| 9,411 |
import tensorflow as tf
def prenet(inputs, is_training, layer_sizes, scope=None):
x = inputs
drop_rate = 0.5 if is_training else 0.0
with tf.variable_scope(scope or 'prenet'):
for i, size in enumerate(layer_sizes):
dense = tf.layers.dense(x, units=size, activation=tf.nn.relu, name='dense_%d' % (i + 1))
x = tf.layers.dropout(dense, rate=drop_rate, training=is_training, name='dropout_%d' % (i + 1))
return x
def encoder_cbhg(inputs, input_lengths, is_training, depth):
input_channels = inputs.get_shape()[2]
|
tensorflow.layers.dense
| 9,412 |
import tensorflow as tf
|
tensorflow.constant_initializer
| 9,413 |
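The seed snippet for tensorflow.constant_initializer above is empty; a minimal hedged sketch (assumed TF 1.x; the variable name and shape are illustrative):
import tensorflow as tf
# Initialize a variable so every element starts at the same constant value.
bias = tf.get_variable("example_bias", shape=[4],
                       initializer=tf.constant_initializer(0.1))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(bias))  # [0.1, 0.1, 0.1, 0.1]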
import tensorflow as tf
# lin1_ = tf.matmul(tf.reshape(self.q_concat_, shape=[-1, 1, self.n_agents]), self.w1_) + tf.reshape(self.b1_, shape=[-1, 1, 32])
# a1_ = tf.nn.elu(lin1_, name='a1_')
# self.Q_tot_ = tf.reshape(tf.matmul(a1_, self.w2_), shape=[-1, 1]) + self.b2_
# todo: add q_target, loss, train_op
# with tf.variable_scope('q_target'):
with tf.variable_scope('loss'):
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, tf.squeeze(self.Q_tot), name='TD_error'))
# self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.Q_tot, name='TD_error'))
with tf.variable_scope('train'):
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
|
tensorflow.variable_scope
| 9,414 |
import tensorflow as tf
dones_vec = tf.reshape(dones, (batch_size, num_tasks))
relabelled_obs = self._task_distribution.combine(states_tiled, tasks_tiled)
action_distribution = self._actor(
relabelled_obs, step_type=(), network_state=())[0]
log_pi = common.log_probability(action_distribution, actions_tiled,
action_spec)
log_pi_vec = tf.reshape(log_pi, (batch_size, num_tasks))
logits_vec = (
rewards_vec - log_pi_vec + self._gamma * (1.0 - dones_vec) * q_vals_vec)
if self._relabel_type == "random":
logits_vec = tf.ones_like(logits_vec) # Hack to make sampling random
## End new version
if self._normalize_cols:
logits_vec = logits_vec - tf.math.reduce_logsumexp(
logits_vec, axis=0)[None]
relabel_indices = tf.random.categorical(logits=logits_vec, num_samples=1)
### Metrics
global_step = tf.compat.v1.train.get_or_create_global_step()
orig_indices = tf.range(
self._sample_batch_size, dtype=relabel_indices.dtype)
|
tensorflow.ones_like
| 9,415 |
import tensorflow as tf
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses = SampleGRUSeq2Seq(inp, out, weights)
updates = []
params = tf.global_variables()
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
for i in range(len(buckets)):
full_grads = tf.gradients(losses[i], params)
grads, _ = tf.clip_by_global_norm(full_grads, 30.0)
update = optimizer.apply_gradients(zip(grads, params))
updates.append(update)
sess.run([tf.global_variables_initializer()])
steps = 6
for _ in range(steps):
bucket = random.choice(np.arange(len(buckets)))
length = buckets[bucket][0]
i = [np.array([np.random.randint(9) + 1 for _ in range(batch_size)],
dtype=np.int32) for _ in range(length)]
|
tensorflow.clip_by_global_norm
| 9,416 |
import tensorflow as tf
* masks: list of masks for weight sparsification
* prune_op: pruning operation
"""
masks, prune_ops = [], []
with tf.variable_scope(self.mask_scope):
for var, var_name_n_prune_ratio in zip(self.maskable_vars, self.var_names_n_prune_ratios):
# obtain the dynamic pruning ratio
assert var.name == var_name_n_prune_ratio[0], \
'unmatched variable names: %s vs. %s' % (var.name, var_name_n_prune_ratio[0])
|
tensorflow.variable_scope
| 9,417 |
import tensorflow as tf
def int_to_bit(self, x_int, num_bits, base=2):
"""Turn x_int representing numbers into a bitwise (lower-endian) tensor.
Args:
x_int: Tensor containing integer to be converted into base
notation.
num_bits: Number of bits in the representation.
base: Base of the representation.
Returns:
Corresponding number expressed in base.
"""
x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))
x_labels = []
for i in range(num_bits):
x_labels.append(
tf.floormod(
tf.floordiv(tf.to_int32(x_l),
tf.to_int32(base)**i), tf.to_int32(base)))
res = tf.concat(x_labels, axis=-1)
return tf.to_float(res)
def embed(self, x):
"""Embedding function that takes discrete latent and returns embedding.
|
tensorflow.expand_dims
| 9,418 |
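An illustrative check of the int_to_bit logic above (values assumed, not from the source): with x_int = 6, num_bits = 4, and base = 2, the floordiv/floormod loop yields the lower-endian digits [0, 1, 1, 0], i.e. 6 = 0·1 + 1·2 + 1·4 + 0·8.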
import tensorflow as tf
# Global pooling
X = self._add_global_avg_pool(X, w, h, ch)
# Fully connected
with tf.variable_scope('fully_connected'):
aux_logits = self._add_fully_connected(X, (ch,), K, no_reg=True)
return aux_logits
|
tensorflow.variable_scope
| 9,419 |
from tensorflow.python.ops import math_ops
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
x = self._assert_valid_sample(x, check_integer=False)
return math_ops.igammac(math_ops.floor(x + 1), self.rate)
def _log_normalization(self):
return self.rate
def _log_unnormalized_prob(self, x):
x = self._assert_valid_sample(x, check_integer=True)
return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)
def _mean(self):
return array_ops.identity(self.rate)
def _variance(self):
return array_ops.identity(self.rate)
@distribution_util.AppendDocstring(
"""Note: when `rate` is an integer, there are actually two modes: `rate`
and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
return math_ops.floor(self.rate)
|
tensorflow.python.ops.math_ops.lgamma
| 9,420 |
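Taken together, the methods above give the Poisson log-pmf: _log_unnormalized_prob minus _log_normalization is log p(x) = x·log(rate) − lgamma(x + 1) − rate, i.e. x·log(λ) − log(x!) − λ, with math_ops.lgamma supplying the log-factorial term.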
import tensorflow as tf
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
|
tensorflow.control_dependencies
| 9,421 |
import tensorflow as tf
|
tensorflow.math.reduce_sum
| 9,422 |
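The seed snippet for tensorflow.math.reduce_sum above is empty; a minimal hedged sketch (values are illustrative):
import tensorflow as tf
# Sum all elements, or sum along a chosen axis.
m = tf.constant([[1.0, 2.0], [3.0, 4.0]])
total = tf.math.reduce_sum(m)             # 10.0
row_sums = tf.math.reduce_sum(m, axis=1)  # [3.0, 7.0]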
import tensorflow as tf
)
estimator = tf.estimator.Estimator(
|
tensorflow.estimator.Estimator
| 9,423 |
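The seed snippet for tensorflow.estimator.Estimator above is truncated; a minimal hedged, training-only sketch (assumed TF 1.x; my_model_fn and the model_dir path are hypothetical):
import tensorflow as tf

def my_model_fn(features, labels, mode):
    # Hypothetical model_fn: one dense layer trained with a softmax cross-entropy loss.
    logits = tf.layers.dense(features["x"], 2)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

estimator = tf.estimator.Estimator(model_fn=my_model_fn, model_dir="/tmp/example_model")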
import tensorflow as tf
with tf.device("/device:CPU:0"):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
"eager_train_dataset_with_defun",
make_iterator,
device_and_data_format(),
defun=True)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
tensorflow.enable_eager_execution
| 9,424 |
import tensorflow as tf
tf.app.flags.DEFINE_string('pm', '66661', 'pooling scheme across scales. Each number specifies the number of scales remaining at each layer. The first number has to be the same as used in --num_scales.')
tf.app.flags.DEFINE_integer('conv_kernel', 5, 'Size of convolutional kernel')
tf.app.flags.DEFINE_integer('pool_kernel', 3, 'Size of spatial pooling kernel')
tf.app.flags.DEFINE_integer('feats_per_layer', 32, 'Number of feature channels at each layer')
tf.app.flags.DEFINE_boolean('total_pool', True, 'If true, pool all feature maps to 1x1 size in final layer')
tf.app.flags.DEFINE_integer('pool_stride', '1', 'If 2, we get progressive pooling - with overlap pooling, AlexNet style')
|
tensorflow.app.flags.DEFINE_boolean
| 9,425 |
import tensorflow as tf
self.config.update(config)
required = getattr(self, 'required_config_keys', [])
if self.datasets:
required += self.required_baseconfig
for r in required:
assert r in self.config, 'Required configuration entry: \'{}\''.format(r)
assert set(self.datasets) <= self.dataset_names, \
'Unknown dataset name: {}'.format(set(self.datasets)-self.dataset_names)
assert n_gpus > 0, 'TODO: CPU-only training is currently not supported.'
if data_shape is None:
self.data_shape = {i: s['shape'] for i, s in self.input_spec.items()}
with tf.variable_scope('', reuse=tf.AUTO_REUSE):
self._build_graph()
def _gpu_tower(self, data, mode):
# Split the batch between the GPUs (data parallelism)
with tf.device('/cpu:0'):
with tf.name_scope('{}_data_sharding'.format(mode)):
batch_size = self.config['batch_size'] if (mode == Mode.TRAIN) \
else self.config['eval_batch_size']
shards = {d: tf.unstack(v, num=batch_size*self.n_gpus, axis=0)
for d, v in data.items()}
shards = [{d: tf.stack(v[i::self.n_gpus]) for d, v in shards.items()}
for i in range(self.n_gpus)]
|
tensorflow.variable_scope
| 9,426 |
import tensorflow as tf
def _add_image_summary(self, image, boxes):
# add back mean
'''
tf.stack() stacks tensors together into one tensor, while tf.unstack() splits a tensor back into a list of tensors
'''
image += cfg.FLAGS2["pixel_means"]
# bgr to rgb (opencv uses bgr)
channels = tf.unstack(image, axis=-1)
image = tf.stack([channels[2], channels[1], channels[0]], axis=-1)
# dims for normalization
width = tf.to_float(tf.shape(image)[2])
height = tf.to_float(tf.shape(image)[1])
# from [x1, y1, x2, y2, cls] to normalized [y1, x1, y1, x1]
cols = tf.unstack(boxes, axis=1)
boxes = tf.stack([cols[1] / height,
cols[0] / width,
|
tensorflow.stack
| 9,427 |
from tensorflow.python.framework import ops
# gradients either.
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("DepthwiseConv2dNativeBackpropFilter")
def _DepthwiseConv2dNativeBackpropFilterShape(op):
"""Shape function for the DepthwiseConv2dNativeBackpropFilter op."""
filter_shape = tensor_util.constant_value(op.inputs[1])
|
tensorflow.python.framework.ops.RegisterShape
| 9,428 |
import tensorflow as tf
:param stochastic_ph: (TensorFlow Tensor) the stochastic placeholder
:param update_eps_ph: (TensorFlow Tensor) the update_eps placeholder
:param sess: (TensorFlow session) The current TensorFlow session
:param param_noise_filter_func: (function (TensorFlow Tensor): bool) function that decides whether or not a
variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter
is used by default.
:return: (function (TensorFlow Tensor, bool, float): TensorFlow Tensor, (TensorFlow Tensor, TensorFlow Tensor)
act function to select an action given an observation (See the top of the file for details),
A tuple containing the observation placeholder and the processed observation placeholder respectively.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01),
trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05),
trainable=False)
# Unmodified Q.
policy = q_func(sess, ob_space, ac_space, 1, 1, None)
obs_phs = (policy.obs_ph, policy.processed_obs)
# Perturbable Q used for the actual rollout.
|
tensorflow.placeholder
| 9,429 |
import tensorflow as tf
return deconv_layer
def variable(self, name, shape, initializer,regularizer=None):
with tf.device('/cpu:0'):
return tf.get_variable(name, shape, initializer=initializer, regularizer=regularizer, trainable=True)
def fc_layer(self, bottom, in_size, out_size, name):
with tf.variable_scope(name):
weights, biases = self.get_fc_var(in_size, out_size, name)
x = tf.reshape(bottom, [-1, in_size])
fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
tf.summary.histogram('weight', weights)
tf.summary.histogram('bias', biases)
return fc
def get_conv_var(self, filter_size, in_channels, out_channels, name):
initial_value = tf.truncated_normal([filter_size, filter_size, in_channels, out_channels], 0.0, stddev = 1 / math.sqrt(float(filter_size * filter_size)))
filters = self.get_var(initial_value = initial_value, name = name, idx = 'weights', var_name = "_filters")
initial_value = tf.truncated_normal([out_channels], 0.0, 1.0)
biases = self.get_var(initial_value = initial_value, name = name, idx = 'biases', var_name = "_biases")
|
tensorflow.matmul
| 9,430 |
import tensorflow as tf
return context, alpha
def _selector(self, context, h, reuse=False):
with tf.variable_scope('selector', reuse=reuse):
w = tf.get_variable('w', [self.H, 1], initializer=self.weight_initializer)
b = tf.get_variable('b', [1], initializer=self.const_initializer)
beta = tf.nn.sigmoid(tf.matmul(h, w) + b, 'beta') # (N, 1)
context = tf.multiply(beta, context, name='selected_context')
return context, beta
def _decode_lstm(self, x, h, context, dropout=False, reuse=False):
with tf.variable_scope('logits', reuse=reuse):
w_h = tf.get_variable('w_h', [self.H, self.M], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [self.M], initializer=self.const_initializer)
w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)
if dropout:
h = tf.nn.dropout(h, 0.5)
h_logits = tf.matmul(h, w_h) + b_h
if self.ctx2out:
w_ctx2out = tf.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer)
h_logits += tf.matmul(context, w_ctx2out)
|
tensorflow.get_variable
| 9,431 |
import tensorflow as tf
:type shape: tuple
:type name: str
:rtype: dictionary
"""
Winit = tf.truncated_normal(shape, mean=0, stddev=0.1)
binit = tf.zeros(shape[-1])
layer = {}
layer["weights"] = tf.get_variable(name + "/weights",
dtype=tf.float32,
|
tensorflow.truncated_normal
| 9,432 |
from tensorflow.python.platform import gfile
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertEqual(2, len(gfile.Glob(s1)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertEqual(2, len(gfile.Glob(s1)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
self.assertEqual(2, len(gfile.Glob(s2)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1)))
|
tensorflow.python.platform.gfile.Glob
| 9,433 |
import tensorflow as tf
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature([1], tf.int64),
'image/width': tf.FixedLenFeature([1], tf.int64),
'image/channels': tf.FixedLenFeature([1], tf.int64),
'image/shape': tf.FixedLenFeature([3], tf.int64),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'shape': slim.tfexample_decoder.Tensor('image/shape'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
['xmin', 'ymin', 'xmax', 'ymax'], 'image/object/bbox/'),
|
tensorflow.VarLenFeature
| 9,434 |
import tensorflow as tf
correct = tf.equal(
tf.cast(tf.ones_like(label_ids, dtype=tf.int32), tf.int32),
tf.cast(pred_label, tf.int32)
)
st_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
pred_label = tf.argmax(distillation_loss["te_logits"], axis=-1, output_type=tf.int32)
correct = tf.equal(
tf.cast(tf.zeros_like(label_ids, dtype=tf.int32), tf.int32),
tf.cast(pred_label, tf.int32)
)
te_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
except:
te_accuracy = tf.constant(0.0)
st_accuracy = tf.constant(0.0)
try:
st_accuracy = tf.reduce_mean(distillation_loss["src_f1_prob"])
te_accuracy = tf.reduce_mean(distillation_loss["tgt_f1_prob"])
except:
te_accuracy = tf.constant(0.0)
st_accuracy = tf.constant(0.0)
|
tensorflow.cast
| 9,435 |
import tensorflow as tf
features['inputs'] = targets
return (features, targets)
def spc_tokenize(tokenizer, features, targets):
del targets
tokenized_text = tokenizer.tokenize(features['text'])
features['targets'] = tf.cast(tokenized_text, tf.int64)
features['inputs'] = features['targets']
return features, features['targets']
if tokenization == 'spc':
spm_path = spm_path or t5_data().DEFAULT_SPM_PATH
with tf.compat.v1.gfile.GFile(spm_path, 'rb') as f:
spc_model = f.read()
tokenizer = tf_text.SentencepieceTokenizer(model=spc_model)
dataset = dataset.map(functools.partial(spc_tokenize, tokenizer))
else:
dataset = dataset.map(unicode_decode_chars)
def target_right_length(_, target):
return tf.less(tf.shape(target)[0], max_target_length + 1)
if max_target_length > 0:
dataset = dataset.filter(target_right_length)
|
tensorflow.compat.v1.gfile.GFile
| 9,436 |
from tensorflow.contrib.framework import deprecated
for i, _ in enumerate(self._hidden_units)
]
logits_weights = [self.get_variable_value("dnn/logits/weights")]
return hiddenlayer_weights + logits_weights
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def bias_(self):
hiddenlayer_bias = [
|
tensorflow.contrib.framework.deprecated
| 9,437 |
import tensorflow as tf
def clf(x, ny, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), train=False):
with tf.variable_scope('clf'):
nx = shape_list(x)[-1]
w = tf.get_variable("w", [nx, ny], initializer=w_init)
b = tf.get_variable("b", [ny], initializer=b_init)
return tf.matmul(x, w)+b
def model(X, M, Y, train=False, reuse=False):
with tf.variable_scope('model', reuse=reuse):
we = tf.get_variable("we", [n_vocab+n_special+n_ctx, n_embd], initializer=tf.random_normal_initializer(stddev=0.02))
we = dropout(we, embd_pdrop, train)
#X:[n_batch_train, 2, n_ctx, 2] -> [n_batch_train*2,n_ctx,2]
X = tf.reshape(X, [-1, n_ctx, 2])
M = tf.reshape(M, [-1, n_ctx])
h = embed(X, we)
#h=[-1,n_ctx,emb]
for layer in range(n_layer):
h = block(h, 'h%d'%layer, train=train, scale=True)
#h=[-1,n_ctx,emb] lm_h [-1,emb]
lm_h = tf.reshape(h[:, :-1], [-1, n_embd])
lm_logits = tf.matmul(lm_h, we, transpose_b=True)
lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1]))
lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1]-1])
lm_losses = tf.reduce_sum(lm_losses*M[:, 1:], 1)/tf.reduce_sum(M[:, 1:], 1)
|
tensorflow.reshape
| 9,438 |
import tensorflow as tf
output = tf.add_n([
w_z0_y0_x0 * i_z0_y0_x0, w_z0_y0_x1 * i_z0_y0_x1,
w_z0_y1_x0 * i_z0_y1_x0, w_z0_y1_x1 * i_z0_y1_x1,
w_z1_y0_x0 * i_z1_y0_x0, w_z1_y0_x1 * i_z1_y0_x1,
w_z1_y1_x0 * i_z1_y1_x0, w_z1_y1_x1 * i_z1_y1_x1
])
return output
def _meshgrid(depth, height, width, z_near, z_far):
with tf.variable_scope('_meshgrid'):
x_t = tf.reshape(
tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]),
[depth, height, width])
y_t = tf.reshape(
tf.tile(tf.linspace(-1.0, 1.0, height), [width * depth]),
[depth, width, height])
y_t = tf.transpose(y_t, [0, 2, 1])
sample_grid = tf.tile(
tf.linspace(float(z_near), float(z_far), depth), [width * height])
z_t = tf.reshape(sample_grid, [height, width, depth])
z_t = tf.transpose(z_t, [2, 0, 1])
z_t = 1 / z_t
|
tensorflow.linspace
| 9,439 |
import tensorflow as tf
name = "core%d" % i
tt_cores[i] = tf.convert_to_tensor(tt_cores[i], name=name)
if not _are_tt_cores_valid(tt_cores, shape, tt_ranks):
raise ValueError('The tt_cores provided to TensorTrain constructor are '
'not valid, have different dtypes, or are inconsistent '
'with the provided shape or TT-ranks.')
self._tt_cores = tuple(tt_cores)
self._raw_shape = shapes.clean_raw_shape(shape)
if self._raw_shape is None:
self._raw_shape = _infer_raw_shape(self._tt_cores)
self._tt_ranks = None if tt_ranks is None else tf.TensorShape(tt_ranks)
if self._tt_ranks is None:
self._tt_ranks = _infer_tt_ranks(self._tt_cores)
@property
def tt_cores(self):
"""A tuple of TT-cores.
Returns:
A tuple of 3d or 4d tensors shape
`[r_k-1, n_k, r_k]`
or
|
tensorflow.TensorShape
| 9,440 |
import tensorflow as tf
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
# tf.logging.info("tf_update_ops: {}".format(tf_update_ops))
train_op = tf.group(tf_update_ops)
with mtf.utils.outside_all_rewrites():
# Copy master variables to slices. Must be called first.
restore_hook = mtf.MtfRestoreHook(lowering)
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
hparams.model_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
# EVAL mode
if mode == tf.estimator.ModeKeys.EVAL:
tf_logits = lowering.export_to_tf_tensor(logits)
return model.estimator_spec_eval(features, tf_logits, labels, tf_loss,
restore_hook, use_tpu)
if use_tpu:
|
tensorflow.add_to_collection
| 9,441 |
import tensorflow as tf
def test_dtype_and_shape_inherited_from_base_dist(self):
batch_shape = (2, 3)
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
lower_cutoff=1.0,
upper_cutoff=10.0,
mu=tf.zeros(batch_shape),
sigma=tf.ones(batch_shape))
self.assertEqual(batch_shape, qdist.get_batch_shape())
self.assertAllEqual(batch_shape, qdist.batch_shape().eval())
self.assertEqual((), qdist.get_event_shape())
self.assertAllEqual((), qdist.event_shape().eval())
|
tensorflow.zeros
| 9,442 |
import tensorflow as tf
self.kernel = self.gaussian_kernel(size,mean,std)
self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
self.paddings = tf.convert_to_tensor([[size,size],[size,size],[0,0]])
x_aug = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')
x_aug = tf.squeeze(x_aug)
return tf.concat([x, x_aug],axis=2)
def high_low_pass(self,x):
x_low = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')
x_low = tf.squeeze(x_low)
x_high = x - x_low
return tf.concat([x, x_high, x_low],axis=2)
def no_op(self,x):
return x
|
tensorflow.concat
| 9,443 |
import tensorflow as tf
def _expand_independent_outputs(fvar, full_cov, full_output_cov):
"""
Reshapes fvar to the correct shape, specified by `full_cov` and `full_output_cov`.
:param fvar: has shape N x P (full_cov = False) or P x N x N (full_cov = True).
:return:
1. full_cov: True and full_output_cov: True
fvar N x P x N x P
2. full_cov: True and full_output_cov: False
fvar P x N x N
3. full_cov: False and full_output_cov: True
fvar N x P x P
4. full_cov: False and full_output_cov: False
fvar N x P
"""
if full_cov and full_output_cov:
fvar = tf.matrix_diag(tf.transpose(fvar)) # N x N x P x P
fvar = tf.transpose(fvar, [0, 2, 1, 3]) # N x P x N x P
if not full_cov and full_output_cov:
fvar = tf.matrix_diag(fvar) # N x P x P
if full_cov and not full_output_cov:
pass # P x N x N
if not full_cov and not full_output_cov:
pass # N x P
return fvar
|
tensorflow.transpose
| 9,444 |
import tensorflow as tf
autoencoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(autoencoder_loss)
discriminator_g_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(dc_g_loss, var_list=dc_g_var)
discriminator_c_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(dc_c_loss, var_list=dc_c_var)
generator_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(generator_loss, var_list=en_var)
supervised_encoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1).minimize(supervised_encoder_loss,
var_list=en_var)
init = tf.global_variables_initializer()
# Reshape immages to display them
input_images = tf.reshape(x_input, [-1, 28, 28, 1])
generated_images = tf.reshape(decoder_output, [-1, 28, 28, 1])
# Tensorboard visualization
tf.summary.scalar(name='Autoencoder Loss', tensor=autoencoder_loss)
tf.summary.scalar(name='Discriminator gauss Loss', tensor=dc_g_loss)
tf.summary.scalar(name='Discriminator categorical Loss', tensor=dc_c_loss)
tf.summary.scalar(name='Generator Loss', tensor=generator_loss)
tf.summary.scalar(name='Supervised Encoder Loss', tensor=supervised_encoder_loss)
tf.summary.histogram(name='Encoder Gauss Distribution', values=encoder_output_latent)
tf.summary.histogram(name='Real Gauss Distribution', values=real_distribution)
tf.summary.histogram(name='Encoder Categorical Distribution', values=encoder_output_label)
tf.summary.histogram(name='Real Categorical Distribution', values=categorial_distribution)
tf.summary.image(name='Input Images', tensor=input_images, max_outputs=10)
tf.summary.image(name='Generated Images', tensor=generated_images, max_outputs=10)
|
tensorflow.reshape
| 9,445 |
import tensorflow as tf
with tf.variable_scope('target_q'):
self.target_q = R + self.gamma * self.q_
with tf.variable_scope('abs_TD'):
self.abs_td = tf.abs(self.target_q - self.q)
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('TD_error'):
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))
|
tensorflow.abs
| 9,446 |
import tensorflow as tf
tgtimg_h0, tgtimg_h1, tgtimg_h2, tgtimg_h3, tgtimg_h4, tgtimg_z = encode(tgtimg)
tgtctx_h0, tgtctx_h1, tgtctx_h2, tgtctx_h3, tgtctx_h4, tgtctx_z = encode(tgtctx)
with tf.variable_scope("translate") as scope:
trans_h0 = lrelu(linear(tf.nn.dropout(tf.concat([srcimg_z, tgtctx_z], 1), keep_prob), featsize, 'trans_h0'))
trans_z = linear(tf.nn.dropout(trans_h0, keep_prob), featsize, 'trans_z')
|
tensorflow.variable_scope
| 9,447 |
from tensorflow.python.ops import variable_scope as vs
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
|
tensorflow.python.ops.variable_scope.variable_scope
| 9,448 |
import tensorflow as tf
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
output = new_state
return output, new_state
def update_pos(pos, symbol, max_pos=None):
if not decoder.pred_edits:
return pos
is_keep = tf.equal(symbol, utils.KEEP_ID)
is_del = tf.equal(symbol, utils.DEL_ID)
is_not_ins = tf.logical_or(is_keep, is_del)
pos = beam_search.resize_like(pos, symbol)
max_pos = beam_search.resize_like(max_pos, symbol)
pos += tf.to_float(is_not_ins)
if max_pos is not None:
pos = tf.minimum(pos, tf.to_float(max_pos))
|
tensorflow.equal
| 9,449 |
import tensorflow as tf
argmax = lambda: tf.argmax(output_, 1)
target = lambda: inputs.read(time + 1)
softmax = lambda: tf.squeeze(tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1),
axis=1)
use_target = tf.logical_and(time < time_steps - 1, tf.random_uniform([]) >= feed_previous)
predicted_symbol = tf.case([
(use_target, target),
(tf.logical_not(feed_argmax), softmax)],
default=argmax) # default case is useful for beam-search
predicted_symbol.set_shape([None])
predicted_symbol = tf.stop_gradient(predicted_symbol)
input_ = embed(predicted_symbol)
pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id])
|
tensorflow.logical_not
| 9,450 |
import tensorflow as tf
decay = bn_decay if bn_decay is not None else 0.9
ema = tf.train.ExponentialMovingAverage(decay=decay)
# Operator that maintains moving averages of variables.
ema_apply_op = tf.cond(is_training,
lambda: ema.apply([batch_mean, batch_var]),
lambda: tf.no_op())
# Update moving average and return current batch's avg and var.
def mean_var_with_update():
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
# ema.average returns the Variable holding the average of var.
mean, var = tf.cond(is_training,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
return normed
|
tensorflow.control_dependencies
| 9,451 |
import tensorflow as tf
model.latest_saver.restore(sess, tf.train.latest_checkpoint(latest_dir))
else:
if tf.train.get_checkpoint_state(best_dir) and args.restore == "best":
print('Reading model parameters from %s' % best_dir)
model.best_saver.restore(sess, tf.train.latest_checkpoint(best_dir))
else:
print("Created model with fresh parameters.")
global_variable = [gv for gv in tf.global_variables() if args.name in gv.name]
sess.run(tf.variables_initializer(global_variable))
return model
def main(args):
if args.debug:
debug()
|
tensorflow.variables_initializer
| 9,452 |
from tensorflow.python.framework import ops
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(
keep_prob, dtype=x.dtype, name="keep_prob")
|
tensorflow.python.framework.ops.convert_to_tensor
| 9,453 |
import tensorflow as tf
initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=activation_fn,
scope='initial_state_layer_norm')
else:
initial_state = dense(initial_state, cell_state_size, use_bias=True, name='initial_state_projection',
activation=activation_fn)
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
initial_output = initial_state
else:
# Last layer's state is the right-most part. Output is the left-most part of an LSTM's state.
initial_output = initial_state[:, -cell_output_size:]
time = tf.constant(0, dtype=tf.int32, name='time')
outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(tf.to_int64(tf.transpose(decoder_inputs)))
states = tf.TensorArray(dtype=tf.float32, size=time_steps)
weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
attns = tf.TensorArray(dtype=tf.float32, size=time_steps)
initial_symbol = inputs.read(0) # first symbol is BOS
initial_input = embed(initial_symbol)
initial_pos = tf.zeros([batch_size], tf.float32)
initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
zero_context = tf.zeros(shape=tf.shape(attention_states[align_encoder_id][:,0])) # FIXME
with tf.variable_scope('decoder_{}'.format(decoder.name)):
initial_context, _ = look(0, initial_output, initial_input, pos=initial_pos, prev_weights=initial_weights,
context=zero_context)
|
tensorflow.transpose
| 9,454 |
import tensorflow as tf
class AssignOpTest(tf.test.TestCase):
# NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
with self.test_session() as sess:
ones_t = tf.fill([1024, 1024], 1.0)
p = tf.Variable(tf.zeros([1024, 1024]))
adds = [tf.assign_add(p, ones_t, use_locking=False)
for _ in range(20)]
tf.initialize_all_variables().run()
def run_add(add_op):
sess.run(add_op)
threads = [self.checkedThread(target=run_add, args=(add_op,))
for add_op in adds]
for t in threads:
t.start()
|
tensorflow.assign_add
| 9,455 |
import tensorflow as tf
shifted_X = tf.pad(X, ((0, 0), (0, 1), (0, 1), (0, 0)))[:, 1:, 1:, :]
half_2 = tf.nn.avg_pool(shifted_X, ksize=(1, 1, 1, 1), strides=(1, 2, 2, 1), padding='VALID')
# Apply 1 x 1 convolution to each half separately
W_half_1 = self._make_var('W_half_1', (1, 1, in_ch, out_ch >> 1))
X_half_1 = tf.nn.conv2d(half_1, W_half_1, (1, 1, 1, 1), padding='VALID')
W_half_2 = self._make_var('W_half_2', (1, 1, in_ch, out_ch >> 1))
X_half_2 = tf.nn.conv2d(half_2, W_half_2, (1, 1, 1, 1), padding='VALID')
# Concat both halves across channels
X = tf.concat([X_half_1, X_half_2], axis=3)
# Apply batch normalization
X = self._add_batch_norm(X, out_ch, is_train=is_train)
|
tensorflow.nn.conv2d
| 9,456 |
import tensorflow as tf
)
return base_conv_tensors
def build_discriminator_growth_layer_block(self, params, block_idx):
"""Creates discriminator growth block.
Args:
params: dict, user passed parameters.
block_idx: int, the current growth block's index.
Returns:
List of tensors from growth block `Conv2D` layers.
"""
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# Get conv block layer properties.
conv_block = params["discriminator_growth_conv_blocks"][block_idx]
# Create new inner convolutional layers.
conv_tensors = [
self.conv_layer_blocks[1 + block_idx][i](
inputs=tf.zeros(
shape=[1] + conv_block[i][0:3], dtype=tf.float32
)
)
for i in range(len(conv_block))
]
print_obj(
|
tensorflow.variable_scope
| 9,457 |
import tensorflow as tf
except:
te_accuracy = tf.constant(0.0)
|
tensorflow.constant
| 9,458 |
import tensorflow as tf
# RooArgusBG argus("argus","Argus PDF",mes,m0,argpar) ;
def argus_pdf(m, m0, c, p=0.5):
t = m / m0
u = 1 - t * t
argus_t_ge_1 = m * tf.pow(u, p) * tf.exp(c * u)
return tf.maximum(tf.zeros_like(m), argus_t_ge_1, name="argus_pdf")
# // --- Construct signal+background PDF ---
# RooRealVar nsig("nsig","#signal events",200,0.,10000) ;
|
tensorflow.exp
| 9,459 |
import tensorflow as tf
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.name_scope("Train"):
train_input = PTBInput(config=config, data=train_data, name="TrainInput")
|
tensorflow.random_uniform_initializer
| 9,460 |
import tensorflow as tf
mixed_idx = tf.range(0, self.mixed_label.get_shape().as_list()[0], 1)
mixed_idx = tf.random_shuffle(mixed_idx)[0:self.batch_size]
self.mixed_pc = tf.gather(self.mixed_pc, mixed_idx)
self.mixed_label = tf.gather(self.mixed_label, mixed_idx)
self.mixed_pred, mixed_end_points = self.get_pred(self.mixed_pc)
self.mixed_loss = self.get_loss(self.mixed_pred, self.mixed_label, mixed_end_points)
with tf.variable_scope('discriminator') as scope:
self.real_prob, self.real_logit = self.discriminator(self.real_pc_rotated, scope=scope, **disc_kwargs)
self.synthetic_prob, self.synthetic_logit = self.discriminator(self.gen_out_rotated, reuse=True, scope=scope, **disc_kwargs)
# Compute WGAN losses
self.loss_d = tf.reduce_mean(self.synthetic_logit) - tf.reduce_mean(self.real_logit) # comparing rotated fake and real images
self.loss_g = -tf.reduce_mean(self.synthetic_logit)
|
tensorflow.variable_scope
| 9,461 |
import tensorflow as tf
'''
self.value, self.next_loc_mean, self.loc_std, self.next_loc, self.state_out, self.state_in, self.state_init = self._build_net(self.inputs, self.prev_loc, RNN_SIZE, TRAINING, a_size) # self.goal_pos
if TRAINING:
self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.sampled_next_locs = tf.placeholder(tf.float32, [None,2]) # sampled action is stored here
self.policy = gaussian_pdf(self.next_loc_mean, self.loc_std, self.sampled_next_locs) # Distribution == Policy
|
tensorflow.placeholder
| 9,462 |
import tensorflow as tf
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
|
tensorflow.reduce_mean
| 9,463 |
import tensorflow as tf
self.is_training = True
def set_is_training(self, isTrain):
self.is_training = isTrain
def build(self, rgb, label_num, train_mode=None, last_layer_type = "softmax"):
"""
Load variables from npy to build the ResNet, or generate a new one.
:param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
:param train_mode: a bool tensor, usually a placeholder: if True, dropout will be turned on
"""
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb)
assert red.get_shape().as_list()[1:] == [224, 224, 1]
assert green.get_shape().as_list()[1:] == [224, 224, 1]
assert blue.get_shape().as_list()[1:] == [224, 224, 1]
bgr = tf.concat(axis=3, values=[
blue - configs['VGG_MEAN'][0],
green - configs['VGG_MEAN'][1],
red - configs['VGG_MEAN'][2],
])
print(bgr.get_shape().as_list())
assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
|
tensorflow.split
| 9,464 |
import tensorflow as tf
if v.get_shape().ndims == 2:
variables.append(v)
with tf.name_scope('weight_decay'):
if penalty_type == 'l1':
cost = tf.add_n([tf.reduce_sum(tf.abs(v)) for v in variables])
|
tensorflow.name_scope
| 9,465 |
import tensorflow as tf
i, FLAGS.eval_batch_count, time.time() - time_start, is_succ))
else:
print('The %d batch in total %d, the eps = %f (%f sec)' % (
i, FLAGS.eval_batch_count, 0.05 * k, time.time() - time_start))
#Local logits
(predict_ADV,logits_part_adv) = sess.run(
[predict_adv, tsne_logit_adv],feed_dict={adv_image:adv_img}
)
#Local entropy and confidence for nor_img
(entropy_test_nor_help,labels_nor_help,confidence_test_nor_help) = sess.run(
[entropy,tf.argmax(predict,axis=1),tf.reduce_max(predict, axis=1)],feed_dict={predict:predict_NOR}
)
# Local entropy and confidence for adv_img
(entropy_test_adv_help, labels_adv_help, confidence_test_adv_help) = sess.run(
[entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)], feed_dict={predict: predict_ADV}
)
if FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden':
print('Log-density-ratio in attacking function of nor/adv is %f'%np.sum(log_density_ratio))
m_tsne_logits_adv = (copy.copy(logits_part_adv)).reshape((1, 64))
m_tsne_logits_adv = np.repeat(m_tsne_logits_adv,100,axis=0)
|
tensorflow.reduce_max
| 9,466 |
import tensorflow as tf
self.v = DenseLayer(1, False, tf.nn.relu, initializers=self._initializers,
regularizers=self._regularizers, name='OutputVector')
self.score = tf.squeeze(self.v(self._cur_user * self._cur_item))
negative_output = tf.squeeze(self.v(self._cur_user * self._cur_item_negative))
tf.add_to_collection(GraphKeys.PREDICTION, self.score)
self.loss = LossLayer()(self.score, negative_output)
self._optimizer = OptimizerLayer(self.config.optimizer, clip=5.0,
params={})
|
tensorflow.add_to_collection
| 9,467 |
import tensorflow as tf
# hardware related configuration
tf.app.flags.DEFINE_integer(
'num_readers', 16,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 48,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
'data_dir', '../PASCAL/VOC_TF/VOC0712TF/',
'The directory where the dataset input data is stored.')
|
tensorflow.app.flags.DEFINE_integer
| 9,468 |
import tensorflow as tf
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid)
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
test_perplexity = run_epoch(session, mtest)
print("Test Perplexity: %.3f" % test_perplexity)
if FLAGS.save_path:
print("Saving model to %s." % FLAGS.save_path)
sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
if __name__ == "__main__":
tf.app.run()
|
tensorflow.app.run
| 9,469 |
import tensorflow as tf
if label.shape.ndims == 1:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
else:
loss = tf.losses.softmax_cross_entropy(
label, logits, label_smoothing=label_smoothing,
reduction=tf.losses.Reduction.NONE)
loss = tf.reduce_mean(loss, name='xentropy-loss')
def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
with tf.name_scope('prediction_incorrect'):
x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
return tf.cast(x, tf.float32, name=name)
wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))
wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
|
tensorflow.name_scope
| 9,470 |
import tensorflow as tf
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
|
tensorflow.metrics.mean
| 9,471 |
import tensorflow as tf
def _get_features_dict(input_dict):
"""Extracts features dict from input dict."""
source_id = _replace_empty_string_with_random_number(
input_dict[fields.InputDataFields.source_id])
hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS)
features = {
fields.InputDataFields.image:
input_dict[fields.InputDataFields.image],
HASH_KEY: tf.cast(hash_from_source_id, tf.int32),
fields.InputDataFields.true_image_shape:
|
tensorflow.string_to_hash_bucket_fast
| 9,472 |
import tensorflow as tf
A = 1/(N*N*tf.sqrt(y))
B = 2.0/(N*tf.sqrt(y+0.5))
|
tensorflow.sqrt
| 9,473 |
import tensorflow as tf
],
]
self.assertAllEqual(
expected,
relative_pos_gen.make_local_relative_att_ids(
seq_len=3, local_radius=4, batch_size=2))
def test_make_local_relative_att_ids_batch_size_2_tensor(self):
dummy_batch = tf.ones([2, 5])
relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)
expected = [
[
[6, 6, 5, 4, 0, 1, 2, 3, 3], #
[6, 6, 5, 4, 0, 1, 2, 3, 3], #
|
tensorflow.ones
| 9,474 |
import tensorflow as tf
endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'MobilenetV1/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points.keys())
def testBuildCustomNetworkUsingConvDefs(self):
batch_size = 5
height, width = 224, 224
conv_defs = [
mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),
|
tensorflow.Graph
| 9,475 |
import tensorflow as tf
"""
if args is None:
args = self._args
else:
args = deep_merge_dict(self._args, args, local_overwrite=False)
eos = tf.constant(self._multilingual_dp.meta["eos_id"], dtype=tf.int64)
int_zero = tf.zeros([], dtype=tf.int64)
dataset = ds.build(map_func=self.get_data_preprocess_fn(mode, ds.status, args),
map_output_dtypes=self.inputs_signature(mode)[0],
|
tensorflow.constant
| 9,476 |
import tensorflow as tf
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
|
tensorflow.logging.info
| 9,477 |
import tensorflow as tf
temp_val_data = {'X': np.zeros((sh[0] * 2, sh[1], sh[2] // 2, sh[3]), self.val_data['X'].dtype),
'Y': np.zeros((sh[0] * 2, sh[1], sh[2] // 2), self.val_data['Y'].dtype)}
for i in range(sh[0]):
temp_val_data['X'][i * 2, :, :, :] = self.val_data['X'][i, :, :sh[2] // 2, :]
temp_val_data['X'][i * 2 + 1, :, :, :] = self.val_data['X'][i, :, sh[2] // 2:, :]
temp_val_data['Y'][i * 2, :, :] = self.val_data['Y'][i, :, :sh[2] // 2]
temp_val_data['Y'][i * 2 + 1, :, :] = self.val_data['Y'][i, :, sh[2] // 2:]
self.val_data = temp_val_data
def init_tfdata(self, batch_size, main_dir, resize_shape, mode='train'):
self.data_session = tf.Session()
print("Creating the iterator for training data")
with tf.device('/cpu:0'):
segdl = SegDataLoader(main_dir, batch_size, (resize_shape[0], resize_shape[1]), resize_shape,
# * 2), resize_shape,
'data/cityscapes_tfdata/train.txt')
iterator = Iterator.from_structure(segdl.data_tr.output_types, segdl.data_tr.output_shapes)
next_batch = iterator.get_next()
self.init_op = iterator.make_initializer(segdl.data_tr)
self.data_session.run(self.init_op)
|
tensorflow.Session
| 9,478 |
import tensorflow as tf
def run(self):
if FLAGS.job_name == 'ps':
log_fn('Running parameter server %s' % self.task_index)
self.server.join()
return
with tf.Graph().as_default():
if FLAGS.eval:
self._eval_cnn()
else:
self._benchmark_cnn()
def _eval_cnn(self):
"""Evaluate the model from a checkpoint using validation dataset."""
(enqueue_ops, fetches) = self._build_model()
saver = tf.train.Saver(tf.global_variables())
summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
tf.get_default_graph())
target = ''
with tf.Session(target=target, config=create_config_proto()) as sess:
for i in xrange(len(enqueue_ops)):
sess.run(enqueue_ops[:(i+1)])
if FLAGS.train_dir is None:
raise ValueError('Trained model directory not specified')
global_step = load_checkpoint(saver, sess, FLAGS.train_dir)
start_time = time.time()
count_top_1 = 0.0
count_top_5 = 0.0
total_eval_count = self.num_batches * self.batch_size
|
tensorflow.global_variables
| 9,479 |
import tensorflow as tf
op_tanh = self.tanh_constant / self.op_tanh_reduce
logits = op_tanh * tf.tanh(logits)
|
tensorflow.tanh
| 9,480 |
import tensorflow as tf
from tensorflow.tools.docs import doc_controls
# pylint: enable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
def _get_tensor_value(tensor_or_eager_tensor: tf.Tensor) -> Any:
if ops.executing_eagerly_outside_functions():
return np.asarray(tensor_or_eager_tensor)
else:
with tf.compat.v1.Session():
return tensor_or_eager_tensor.eval()
class _TransformedFeaturesDict(dict):
"""A wrapper around dict.
Overrides pop to return None instead of throwing a KeyError when invoked with
a key that is not found in the dictionary.
|
tensorflow.compat.v1.Session
| 9,481 |
import tensorflow as tf
decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0])
decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32)
|
tensorflow.sparse.to_dense
| 9,482 |
import tensorflow as tf
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
|
tensorflow.nn.sigmoid
| 9,483 |
import tensorflow as tf
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
|
tensorflow.gfile.GFile
| 9,484 |
import tensorflow as tf
dec_inp_dict2["1"] = [
tf.constant(0, tf.int32, shape=[2]) for _ in range(4)]
with tf.variable_scope("other"):
outputs_dict3, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=tf.constant(True))
sess.run([tf.global_variables_initializer()])
tf.get_variable_scope().reuse_variables()
outputs_dict1, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
outputs_dict2, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
res1 = sess.run(outputs_dict1["0"])
res2 = sess.run(outputs_dict2["0"])
|
tensorflow.nn.seq2seq.one2many_rnn_seq2seq
| 9,485 |
import tensorflow as tf
("rbf4", RBFSampler(gamma=0.5, n_components=100))
])
featurizer.fit(scaler.transform(observation_examples))
def featurize_state(state):
scaled = scaler.transform([state])
featurized = featurizer.transform(scaled)
return featurized[0]
def build_policy_net_MountainCarContinuous(input_tf):
mu = tf.layers.dense(input_tf, num_action, tf.nn.tanh, kernel_initializer=w_init, name='mu') # estimated action value
sigma = tf.layers.dense(input_tf, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma') # estimated variance
return mu,sigma;
class PolicyEstimator_MountainCarContinuous():
def __init__(self, entropy_beta=0.1, learning_rate=0.001, par_idx=0,scope="policy_estimator"):
w_init = tf.random_normal_initializer(0.,.1);
with tf.variable_scope(scope+"_"+str(par_idx)):
# state, target and action
self.state = tf.placeholder(tf.float32, [None,400], name="state")
self.target = tf.placeholder(tf.float32,[None,1], name="target")
|
tensorflow.layers.dense
| 9,486 |
import tensorflow as tf
w * ratio,
h / ratio
])
priors.append([
x_center,
y_center,
w / ratio,
h * ratio
])
priors = np.array(priors, dtype=np.float32)
if clamp:
np.clip(priors, 0.0, 1.0, out=priors)
return tf.convert_to_tensor(priors)
@tf.function
def assign_priors(gt_boxes, gt_labels, corner_form_priors,
iou_threshold=0.45):
"""Assign ground truth boxes and targets to priors.
Args:
gt_boxes (num_targets, 4): ground truth boxes.
gt_labels (num_targets): labels of targets.
priors (num_priors, 4): corner form priors
Returns:
boxes (num_priors, 4): real values for priors.
labels (num_priors): labels for priors.
|
tensorflow.convert_to_tensor
| 9,487 |
import tensorflow as tf
import math as m
from rec_errors import euclidean_norm_squared
def silverman_rule_of_thumb(N: int):
return tf.pow(4/(3*N), 0.4)
def cw_1d(X, y=None):
|
tensorflow.pow
| 9,488 |
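A hedged sketch of the bandwidth rule shown above, using `tf.pow` on a scalar; the sample size is an arbitrary example value.

import tensorflow as tf

def silverman_rule_of_thumb(N):
    # (4 / (3N)) ** 0.4, as in the snippet
    return tf.pow(4.0 / (3.0 * N), 0.4)

sigma = silverman_rule_of_thumb(100.0)  # scalar float32 tensor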
import tensorflow as tf
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
|
tensorflow.cond
| 9,489 |
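A minimal sketch of the `tf.cond` update pattern used above: keep the current value when the proposed update is negative, otherwise take the update. The variable name and values are assumptions for illustration.

import tensorflow as tf

eps = tf.Variable(0.1)
update_eps = tf.constant(-1.0)
new_eps = tf.cond(update_eps >= 0,
                  lambda: update_eps,          # use the proposed value
                  lambda: eps.read_value())    # otherwise keep the old one
eps.assign(new_eps)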
import tensorflow as tf
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
|
tensorflow.flags.DEFINE_string
| 9,490 |
import tensorflow as tf
results, imgs = sess.run(next_element)
print('names: {}'.format(results['member/name']))
print('ages: {}'.format(results['member/age']))
print('heights: {}'.format(results['member/height']))
print('prefer_prods: {}'.format(results['member/prefer_prods']))
for img in imgs:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imshow('img', img)
cv2.waitKey(-1)
def parse_function(example_proto):
features = {'member/name': tf.io.FixedLenFeature([], tf.string),
'member/encoded': tf.io.FixedLenFeature([], tf.string),
'member/age': tf.io.FixedLenFeature([], tf.int64),
'member/height': tf.io.VarLenFeature(tf.float32),
'member/prefer_prods': tf.io.VarLenFeature(tf.int64)}
features = tf.io.parse_single_example(example_proto, features)
images = tf.image.decode_png(features['member/encoded'], channels=3)
    # Note: the PNG originally has 4 channels, but the processing below fails on 4-channel input, so the previous line drops it to 3 channels.
images = tf.image.random_brightness(images, 0.1)
images = tf.image.random_saturation(images, 0.7, 1.3)
images = tf.image.random_contrast(images, 0.6, 1.5)
images = tf.image.random_flip_left_right(images)
return features, images
|
tensorflow.io.FixedLenFeature
| 9,491 |
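A compact round-trip sketch for `tf.io.FixedLenFeature`: serialize one toy `tf.train.Example` and parse it back with `tf.io.parse_single_example`. The feature names and values here are invented for the example.

import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'member/name': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'amy'])),
    'member/age': tf.train.Feature(int64_list=tf.train.Int64List(value=[30])),
}))
serialized = example.SerializeToString()

feature_spec = {
    'member/name': tf.io.FixedLenFeature([], tf.string),  # fixed-length scalar string
    'member/age': tf.io.FixedLenFeature([], tf.int64),    # fixed-length scalar int64
}
parsed = tf.io.parse_single_example(serialized, feature_spec)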
import tensorflow as tf
# TPUEstimator
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
config=config,
params=params,
train_batch_size=args.train_batch_size,
eval_batch_size=32,
export_to_tpu=False)
else:
config = tf.estimator.RunConfig(
model_dir=args.model_dir,
save_checkpoints_steps=10,
save_summary_steps=10)
estimator = tf.estimator.Estimator(
model_fn,
config=config,
params=params)
|
tensorflow.estimator.RunConfig
| 9,492 |
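A minimal sketch of building a `tf.estimator.RunConfig`, matching the non-TPU branch above; the model directory is a placeholder path.

import tensorflow as tf

config = tf.estimator.RunConfig(
    model_dir='/tmp/toy_model',      # placeholder path, for illustration only
    save_checkpoints_steps=10,
    save_summary_steps=10)
# The config is then handed to an Estimator, e.g.
# estimator = tf.estimator.Estimator(model_fn, config=config, params=params)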
import tensorflow as tf
initializer=tf.random_normal_initializer())
alpha_logstd = tf.get_variable('alpha_logstd_layer'+str(h),
shape=[1, 1, n_basis, n_out],
initializer=tf.random_normal_initializer())
alpha_std = tf.exp(alpha_logstd)
# Compute epsilon from {n_samples} standard Gaussian
# epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out])
epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
hyp_params = tf.get_variable('hyp_params_layer'+str(h),
shape=[2],
initializer=tf.random_normal_initializer())
l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
# Compute A_{h+1}
A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
# Compute z_{h}A_{h+1}
Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
# Compute u_{h+1} and v_{h+1}
U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2)
Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
# Output layer
else:
|
tensorflow.sinh
| 9,493 |
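An isolated sketch of the `tf.sinh`/`tf.cosh` warping used above; l1 and l2 are assumed constants standing in for the learned hyper-parameters.

import tensorflow as tf

epsilon = tf.linspace(-2.0, 2.0, 5)
l1, l2 = 0.5, 1.5  # assumed values; in the snippet they come from hyp_params
warped = tf.sinh(epsilon * l2) / tf.cosh(epsilon * l2) ** l1 / l2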
import tensorflow as tf
im = axs[fig_obj_count, 1].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 1])
values = sdf_values
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 2].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 2])
fig_obj_count += 1
intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
union = tf.reduce_sum(tf.math.sign(sdf_values))
iou = intersection / union
self.collisions.append(num_collisions)
self.intersections.append(intersection)
self.ious.append(iou)
return num_collisions, intersection, iou
def evaluate(self):
"""Evaluate."""
if self.slave:
data = {'collisions': self.collisions,
'intersections': self.intersections,
'ious': self.ious}
with gfile.Open(self.path, 'wb') as file:
|
tensorflow.math.sign
| 9,494 |
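A small numeric sketch of the `tf.math.sign` counting pattern above, applied to made-up SDF values.

import tensorflow as tf

sdf_values = tf.constant([0.0, 0.5, 1.2, 2.0])
union = tf.reduce_sum(tf.math.sign(sdf_values))                          # 3.0 entries > 0
intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))   # 2.0 entries > 1
iou = intersection / union                                               # 2/3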
import tensorflow as tf
@registry.register_model
class DenseBitwiseCategoricalPolicy(PolicyBase):
"""Dense network with bitwise input and categorical output."""
def body(self, features):
observations = features["inputs"]
flat_x = tf.layers.flatten(observations)
with tf.variable_scope("dense_bitwise"):
flat_x = discretization.int_to_bit_embed(flat_x, 8, 32)
x = tf.layers.dense(flat_x, 256, activation=tf.nn.relu)
x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu)
logits = tf.layers.dense(x, self.hparams.problem.num_actions)
value = tf.layers.dense(x, 1)[..., 0]
return {"target_policy": logits, "target_value": value}
@registry.register_model
class RandomPolicy(PolicyBase):
|
tensorflow.layers.dense
| 9,495 |
import tensorflow as tf
for v in tf.trainable_variables():
if v.get_shape().ndims == 2:
variables.append(v)
with tf.name_scope('weight_decay'):
if penalty_type == 'l1':
cost = tf.add_n([tf.reduce_sum(tf.abs(v)) for v in variables])
elif penalty_type == 'l2':
cost = tf.add_n([tf.nn.l2_loss(v) for v in variables])
else:
raise NotImplementedError('Unsupported penalty_type %s' % penalty_type)
cost *= penalty
|
tensorflow.abs
| 9,496 |
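A self-contained sketch of the L1/L2 weight-decay computation above, applied to one made-up weight matrix.

import tensorflow as tf

variables = [tf.constant([[1.0, -2.0], [0.5, 3.0]])]
penalty = 1e-4
l1_cost = penalty * tf.add_n([tf.reduce_sum(tf.abs(v)) for v in variables])  # penalty * 6.5
l2_cost = penalty * tf.add_n([tf.nn.l2_loss(v) for v in variables])          # penalty * sum(v**2) / 2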
import tensorflow as tf
Args:
nodes: A `Tensor` of `int64`.
edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing
edges.
Return:
    A tuple of `SparseTensor` (neighbors, weights, types).
neighbors: A `SparseTensor` of `int64`.
weights: A `SparseTensor` of `float`.
types: A `SparseTensor` of `int32`
"""
sp_returns = base._LIB_OP.get_sorted_full_neighbor(nodes, edge_types)
return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
tf.SparseTensor(*sp_returns[6:])
def sample_fanout(nodes, edge_types, counts, default_node=-1):
"""
Sample multi-hop neighbors of nodes according to weight in graph.
Args:
nodes: A 1-D `Tensor` of `int64`.
edge_types: A list of 1-D `Tensor` of int32. Specify edge types to filter
outgoing edges in each hop.
|
tensorflow.SparseTensor
| 9,497 |
import tensorflow as tf
if tf_var.name in params else values[i]
for (i, tf_var) in enumerate(tf_vars)
}
self._sess.run(m.vars_assign_op, feed_dict=var_feeddict)
def _make_placeholders(self):
w = self._train_params['image_size']
h = self._train_params['image_size']
in_ch = 3 # Num channels of input images
train_images_ph = tf.placeholder(tf.int32, name='train_images_ph', shape=(None, w, h, in_ch)) # Train images
pred_images_ph = tf.placeholder(tf.int32, name='pred_images_ph', shape=(None, w, h, in_ch)) # Predict images
train_classes_ph = tf.placeholder(tf.int32, name='train_classes_ph', shape=(None,)) # Train classes
pred_classes_ph = tf.placeholder(tf.int32, name='pred_classes_ph', shape=(None,)) # Predict classes
normal_arch_ph = tf.placeholder(tf.int32, name='normal_arch_ph', shape=(CELL_NUM_BLOCKS, 4))
reduction_arch_ph = tf.placeholder(tf.int32, name='reduction_arch_ph', shape=(CELL_NUM_BLOCKS, 4))
return _ModelPlaceholder(train_images_ph, train_classes_ph, pred_images_ph, pred_classes_ph,
normal_arch_ph, reduction_arch_ph)
def _forward(self, X, step, normal_arch, reduction_arch, is_train=False, **knobs):
K = self._train_params['K'] # No. of classes
in_ch = 3 # Num channels of input images
w = self._train_params['image_size'] # Initial input width
h = self._train_params['image_size'] # Initial input height
|
tensorflow.placeholder
| 9,498 |
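A minimal TF 1.x-style sketch of `tf.placeholder`, mirroring the placeholders above with made-up shapes; in TF 2.x the same call lives under `tf.compat.v1`.

import tensorflow as tf

images_ph = tf.placeholder(tf.int32, name='images_ph', shape=(None, 32, 32, 3))
classes_ph = tf.placeholder(tf.int32, name='classes_ph', shape=(None,))
# Values are supplied later through a feed_dict, e.g.
# sess.run(train_op, feed_dict={images_ph: batch_images, classes_ph: batch_classes})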
from tensorflow.python.framework import ops
return cost
@ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsShape(op):
"""Shape function for SparseSoftmaxCrossEntropyWithLogits op."""
logits_shape = op.inputs[0].get_shape()
input_shape = logits_shape.with_rank(2)
batch_size = input_shape[0]
# labels_shape
op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))
return [tensor_shape.vector(batch_size.value), input_shape]
@ops.RegisterShape("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsShape(op):
"""Shape function for SoftmaxCrossEntropyWithLogits op."""
logits_shape = op.inputs[0].get_shape()
labels_shape = op.inputs[1].get_shape()
input_shape = logits_shape.merge_with(labels_shape).with_rank(2)
batch_size = input_shape[0]
return [tensor_shape.vector(batch_size.value), input_shape]
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
|
tensorflow.python.framework.ops.RegisterShape
| 9,499 |