| seed (string, 25–2.89k chars) | seed_api (string, 14–102 chars) | index (int64, 0–14.8k) |
|---|---|---|
import tensorflow as tf
self.global_step = tf.train.get_or_create_global_step()
self.saver = tf.train.Saver()
# Loss functions and training
epsilon_decay = tf.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=0)
ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6)
ratio = tf.clip_by_value(ratio, 0, 10)
surr1 = batch['advantage'] * ratio
surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay)
loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2))
loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf))
loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy())
loss = loss_pg + loss_vf + loss_entropy
opt = tf.train.AdamOptimizer(self.LR)
self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params)
self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)]
self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)]
self.sess.run(tf.global_variables_initializer())
# Tensorboard
if summary_dir is not None:
self.writer = tf.summary.FileWriter(summary_dir)
tf.summary.scalar('Loss/Policy', loss_pg)
tf.summary.scalar('Loss/Value', loss_vf)
| tensorflow.train.AdamOptimizer | 13,700 |
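The loss in the row above combines PPO's clipped surrogate objective with a value loss and an entropy bonus. As a minimal NumPy sketch of just the clipped surrogate term, with made-up batch values (the 2.0 weight and clip handling mirror the snippet; all numbers are illustrative):

```python
import numpy as np

# Hypothetical per-sample quantities (not taken from the original model).
advantage = np.array([1.0, -0.5, 2.0])
pi_prob = np.array([0.30, 0.10, 0.60])      # new-policy action probabilities
pi_old_prob = np.array([0.25, 0.20, 0.55])  # old-policy action probabilities
epsilon = 0.2                               # clip range (decayed over steps in the snippet)

ratio = np.clip(np.maximum(pi_prob, 1e-6) / np.maximum(pi_old_prob, 1e-6), 0, 10)
surr1 = advantage * ratio
surr2 = advantage * np.clip(ratio, 1 - epsilon, 1 + epsilon)
loss_pg = -2.0 * np.mean(np.minimum(surr1, surr2))  # pessimistic (clipped) surrogate
print(loss_pg)
```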
from tensorflow.python.framework import ops
def _apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
class RegularizeGradientDescentOptimizer(optimizer.Optimizer):
def __init__(self, learning_rate=0.001, lambd=0.5, use_locking=False, name="RGD"):
super(RegularizeGradientDescentOptimizer, self).__init__(use_locking,
name)
self._lr = learning_rate
self._lambda = lambd
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._lambda_t = None
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
self._lambda_t = ops.convert_to_tensor(self._lambda, name="lambda")
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
lambda_t = math_ops.cast(self._lambda_t, var.dtype.base_dtype)
g_t = grad
var_update = state_ops.assign_sub(var,
lr_t * (g_t - lambda_t * var) )
return control_flow_ops.group(*[var_update])
def _apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
| tensorflow.python.framework.ops.convert_to_tensor | 13,701 |
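Stripped of the TensorFlow plumbing, `_apply_dense` above performs the update `var <- var - lr * (grad - lambda * var)`. A plain NumPy sketch of one step, with illustrative values:

```python
import numpy as np

lr, lambd = 0.001, 0.5
var = np.array([1.0, -2.0, 3.0])
grad = np.array([0.1, 0.2, -0.3])

# Same arithmetic as the state_ops.assign_sub call in _apply_dense above.
var -= lr * (grad - lambd * var)
print(var)
```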
import tensorflow as tf
is_class_agnostic=False)
nms_masks_expected2 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)
nms_scores_expected2 = tf.constant([], dtype=tf.float32)
nms_classes_expected2 = tf.constant([], dtype=tf.int32)
| tensorflow.constant | 13,702 |
import tensorflow as tf
if __name__ == '__main__':
tf.test.main()
| tensorflow.test.main | 13,703 |
import tensorflow as tf
init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
filters = tf.get_variable('zero_conv_weights' + id, initializer=init, shape=[size[0], size[1], in_ch, channels])
filters = filters - tf.reduce_mean(filters, axis=[0, 1, 2], keepdims=True)
if padding == "PARTIAL":
with tf.variable_scope('mask'):
_, h, w, _ = input.get_shape().as_list()
slide_window = size[0] * size[1]
mask = tf.ones(shape=[1, h, w, 1])
update_mask = tf.layers.conv2d(mask, filters=1, name='mask' + id,
kernel_size=size, kernel_initializer=tf.constant_initializer(1.0),
strides=stride, padding="SAME", use_bias=False, trainable=False,
dilation_rate=(dilation, dilation))
mask_ratio = slide_window / (update_mask + 1e-8)
update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)
mask_ratio = mask_ratio * update_mask
| tensorflow.ones | 13,704 |
import tensorflow as tf
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.conv4_4 = self.conv_layer(self.conv4_3, "conv4_4")
self.pool4 = self.max_pool(self.conv4_4, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.conv5_4 = self.conv_layer(self.conv5_3, "conv5_4")
self.pool5 = self.max_pool(self.conv5_4, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
assert self.fc6.get_shape().as_list()[1:] == [4096]
self.relu6 = tf.nn.relu(self.fc6)
self.fc7 = self.fc_layer(self.relu6, "fc7")
self.relu7 = tf.nn.relu(self.fc7)
self.fc8 = self.fc_layer(self.relu7, "fc8")
log("finished building VGG19 in %ds" % (time.time() - start_time))
return self.fc8
def avg_pool(self, bottom, name):
| tensorflow.nn.relu | 13,705 |
import tensorflow as tf
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(
ref=self.episode_indices[-1],
value=tf.where(self.memory_index + num_instances > self.capacity,
self.episode_indices[self.episode_count - 1], self.capacity - 1)
)
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(ref=self.memory_index, value=((self.memory_index + num_instances) % self.capacity))
with tf.control_dependencies(control_inputs=(assignment,)):
return tf.no_op()
def tf_retrieve_indices(self, indices):
"""
Fetches experiences for given indices.
Args:
indices: Index tensor
Returns: Batch of experiences
"""
| tensorflow.no_op | 13,706 |
import tensorflow as tf
with tf.variable_scope('a_grad'):
self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)
| tensorflow.gradients | 13,707 |
import tensorflow as tf
Returns:
a `float` `scalar`, KL divergence.
"""
if num_classes == 2:
q = tf.nn.sigmoid(q_logits)
p = tf.nn.sigmoid(p_logits)
kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +
tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))
else:
q = tf.nn.softmax(q_logits)
p = tf.nn.softmax(p_logits)
kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)
num_labels = tf.reduce_sum(weights)
num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)
kl.get_shape().assert_has_rank(2)
weights.get_shape().assert_has_rank(1)
loss = tf.identity(tf.reduce_sum(tf.expand_dims(weights, -1) * kl) / num_labels, name='kl')
return loss
def cross_entropy_sequence_loss(logits, targets, sequence_length):
"""Calculates the per-example cross-entropy loss for a sequence of logits and
masks out all losses past the sequence length.
Args:
logits: Logits of shape `[T, B, vocab_size]`
| tensorflow.reduce_sum | 13,708 |
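The masking the docstring describes — zeroing out per-step losses beyond each example's length — is typically done with a sequence mask. A minimal NumPy sketch, assuming time-major `[T, B]` losses to match the `[T, B, vocab_size]` logits above:

```python
import numpy as np

T, B = 5, 3
losses = np.ones((T, B))               # per-step cross-entropy, time-major
sequence_length = np.array([5, 3, 2])  # number of valid steps per example

mask = (np.arange(T)[:, None] < sequence_length[None, :]).astype(losses.dtype)
masked = losses * mask                 # steps past the length contribute 0
per_example = masked.sum(axis=0) / sequence_length
print(per_example)
```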
import tensorflow as tf
# second pass: [1, 2, none, none]
to_caffe = tf.transpose(bottom, [0, 3, 1, 2])
# then force it to have channel 2
# [1, 2, none, none]: split the foreground and background scores of the 9 anchors
# second pass: [1, 18, none, none]
reshaped = tf.reshape(to_caffe, tf.concat(axis=0, values=[[self._batch_size], [num_dim, -1], [input_shape[2]]]))
# then swap the channel back
# [1, none, none, 2]; the first none should be (rows * 9)
# second pass: [1, none, none, 18]
to_tf = tf.transpose(reshaped, [0, 2, 3, 1])
return to_tf
def _softmax_layer(self, bottom, name):
if name == 'rpn_cls_prob_reshape':
input_shape = tf.shape(bottom)
# Using -1 in tf.reshape(): -1 means the size is unknown and is inferred by Python from the original array and the other dimensions
# Each row holds one anchor's foreground and background scores: first the first anchor type for all locations, then the second anchor type for all locations, ...
bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]])
reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)
return tf.reshape(reshaped_score, input_shape) # [1,none,none,2]
return tf.nn.softmax(bottom, name=name)
def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
with tf.variable_scope(name):
rois, rpn_scores = tf.py_func(proposal_top_layer,
[rpn_cls_prob, rpn_bbox_pred, self._im_info,
self._feat_stride, self._anchors, self._num_anchors],
| tensorflow.shape | 13,709 |
import tensorflow as tf
features = tf.io.parse_single_example(example_proto, features)
images = tf.image.decode_png(features['member/encoded'], channels=3)
# Note: the PNG originally has 4 channels, but the processing below would fail with 4, so the previous line reduces it to 3 channels.
images = tf.image.random_brightness(images, 0.1)
images = tf.image.random_saturation(images, 0.7, 1.3)
images = tf.image.random_contrast(images, 0.6, 1.5)
| tensorflow.image.random_brightness | 13,710 |
import tensorflow as tf
def build_batch_stats():
"""Builds the batch statistics calculation ops."""
# We use the moving mean as an estimate of the mean in order to perform
# a more numerically stable calculation of the batch mean.
# Copy for better stability.
shift = tf.add(self._moving_mean, 0)
counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(
input_batch,
reduction_indices,
keep_dims=True,
shift=shift,
name="batch_norm_ss")
mean, variance = tf.nn.normalize_moments(counts,
shifted_sum_x,
shifted_sum_x2,
shift,
name="normalize_moments")
return mean, variance
def build_moving_stats():
return (
tf.identity(self._moving_mean),
tf.identity(self._moving_variance),
)
mean, variance = utils.smart_cond(
| tensorflow.nn.normalize_moments | 13,711 |
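The comment above refers to the shifted-sufficient-statistics trick: subtracting a value close to the mean (here the moving mean) before accumulating sums avoids the catastrophic cancellation of a naive E[x^2] - E[x]^2 variance. A NumPy sketch of the identity the snippet relies on:

```python
import numpy as np

x = np.random.randn(1000) * 1e-3 + 1e6   # large offset, tiny spread
shift = x.mean() + 1e-4                   # stand-in for the moving mean

n = x.size
shifted_sum_x = np.sum(x - shift)
shifted_sum_x2 = np.sum((x - shift) ** 2)
mean = shift + shifted_sum_x / n
variance = shifted_sum_x2 / n - (shifted_sum_x / n) ** 2

# Agrees with np.var(x); a naive E[x^2] - E[x]^2 on the raw values would lose
# essentially all precision at this offset.
print(mean, variance, np.var(x))
```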
import tensorflow as tf
self.b_out_train = params.get('b_out_train', True)
self.init_state_train = params.get('init_state_train', True)
# Tensorflow initializations
self.x = tf.placeholder("float", [N_batch, N_steps, N_in])
self.y = tf.placeholder("float", [N_batch, N_steps, N_out])
self.output_mask = tf.placeholder("float", [N_batch, N_steps, N_out])
# trainable variables
with tf.variable_scope("model"):
| tensorflow.placeholder | 13,712 |
import tensorflow as tf
rate = tf.train.exponential_decay(starter_learning, global_step, 500, 0.9) #exponential learning rate decay
#rate = starter_learning
tvars = tf.trainable_variables() #list of trainable variables
Npar = flatten(tvars).get_shape()[1] #total number of parameters in the network
print('there are:', Npar,'parameters in the network')
optimizer = tf.train.AdamOptimizer(learning_rate = rate) #Initialize Adam optimizer
grads_var = optimizer.compute_gradients(cost, tvars ) #Get gradients layer by layer. Note that this function returns the pair (grads, var)
grads = [grads_var[i][0] for i in range(len(grads_var))] #extract the gradients
min = optimizer.apply_gradients(grads_and_vars= grads_var, global_step= global_step) #Apply the gradients to look for critical points
gradients_and_par = [] #store gradients and training parameters for different epochs
| tensorflow.train.AdamOptimizer | 13,713 |
import tensorflow as tf
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, nf]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, nf, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
nin = x.get_shape()[channel_ax].value
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
print("w is "+str(w))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
| tensorflow.constant_initializer | 13,714 |
import tensorflow as tf
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, nf]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, nf, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
nin = x.get_shape()[channel_ax].value
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
| tensorflow.reshape | 13,715 |
import tensorflow as tf
# Get the paths for the corresponding images
paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
| tensorflow.placeholder | 13,716 |
import tensorflow as tf
xs_ops = _to_ops(xs)
fwd_ops = [op for op in fwd_ops if not op in xs_ops]
fwd_ops = [op for op in fwd_ops if not '/assign' in op.name]
fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name]
fwd_ops = [op for op in fwd_ops if not '/read' in op.name]
ts_all = ge.filter_ts(fwd_ops, True) # get the tensors
ts_all = [t for t in ts_all if '/read' not in t.name]
ts_all = set(ts_all) - set(xs) - set(ys)
# construct list of tensors to checkpoint during forward pass, if not
# given as input
if type(checkpoints) is not list:
if checkpoints == 'collection':
checkpoints = tf.get_collection('checkpoints')
elif checkpoints == 'speed':
# checkpoint all expensive ops to maximize running speed
checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')
elif checkpoints == 'memory':
# remove very small tensors and some weird ops
def fixdims(t): # tf.Dimension values are not compatible with int, convert manually
try:
return [int(e if e.value is not None else 64) for e in t]
except:
| tensorflow.get_collection | 13,717 |
import tensorflow as tf
# https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
tp, tp_op = tf.metrics.true_positives(
predictions, label_ids, weights=is_real_example)
tn, tn_op = tf.metrics.true_negatives(
predictions, label_ids, weights=is_real_example)
fp, fp_op = tf.metrics.false_positives(
predictions, label_ids, weights=is_real_example)
fn, fn_op = tf.metrics.false_negatives(
predictions, label_ids, weights=is_real_example)
| tensorflow.metrics.false_positives | 13,718 |
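The linked Matthews correlation coefficient is then computed from these four streaming counts. A small NumPy sketch of the formula, using hypothetical count values:

```python
import numpy as np

tp, tn, fp, fn = 40.0, 45.0, 5.0, 10.0  # hypothetical confusion-matrix counts

numerator = tp * tn - fp * fn
denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
mcc = numerator / denominator if denominator > 0 else 0.0
print(mcc)
```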
import tensorflow as tf
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
| tensorflow.reduce_sum | 13,719 |
import tensorflow as tf
log_scale: a float, used to multiply the clipped log loss, e.g: 0.5
log_cutoff: a float, minimum log loss value, e.g. 0.50
name: Optional scope/name for op_scope.
Returns:
A tensor with the clipped kappa log loss.
"""
with tf.name_scope(name):
num_classes = labels.get_shape()[-1].value
labels = tf.cast(labels, predictions.dtype)
if label_smoothing > 0:
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
labels = labels * smooth_positives + smooth_negatives
log_loss_res = log_loss_tf(predictions, labels)
kappa_loss_res = kappa_loss(
predictions, labels, y_pow=y_pow, num_ratings=num_classes, batch_size=batch_size)
return kappa_loss_res + log_scale * tf.clip_by_value(log_loss_res, log_cutoff, 10**3)
| tensorflow.cast | 13,720 |
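The label-smoothing step above pulls one-hot targets toward a uniform distribution before the loss is computed. A minimal NumPy sketch of that transformation (values illustrative):

```python
import numpy as np

labels = np.array([[0., 1., 0., 0.],
                   [1., 0., 0., 0.]])  # one-hot, num_classes = 4
label_smoothing = 0.1
num_classes = labels.shape[-1]

smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
smoothed = labels * smooth_positives + smooth_negatives
print(smoothed)  # each row still sums to 1.0
```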
import tensorflow as tf
"""Check that we're using a compatible TF version.
Raises a warning if either Tensorflow version is less than 2.0 or TF 2.x is
not enabled.
If TF 2.x is enabled, but version is < TF 2.3, raises a warning to indicate
that resources may not be initialized.
"""
major, minor, _ = tf.version.VERSION.split('.')
if not (int(major) >= 2 and tf2.enabled()):
tf.compat.v1.logging.warning(
'Tensorflow version (%s) found. TransformFeaturesLayer is supported '
'only for TF 2.x with TF 2.x behaviors enabled and may not work as '
'intended.', tf.version.VERSION)
elif int(major) == 2 and int(minor) < 3:
# TODO(varshaan): Log a more specific warning.
tf.compat.v1.logging.warning(
'Tensorflow version (%s) found. TransformFeaturesLayer may not work '
'as intended if the SavedModel contains an initialization op.',
tf.version.VERSION)
| tensorflow.compat.v1.logging.warning | 13,721 |
import tensorflow as tf
def discriminator_fn(data, generator_inputs):
outputs = tf.layers.dense(data, 1)
return outputs
def model_fn(features, labels, mode, params):
# build model
global_step = tf.train.get_global_step()
generator_inputs = features
real_data = labels
gan_model = tf.contrib.gan.gan_model(generator_fn, discriminator_fn, real_data, generator_inputs)
predictions = gan_model.generated_data
| tensorflow.train.get_global_step | 13,722 |
import tensorflow as tf
return logits, prediction
def general_conv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm=True, relu_factor = 0, name="conv2d"):
with tf.variable_scope(name):
conv = tf.layers.conv2d(input_data, filters, kernel_size, stride, padding, activation=None)
if do_norm:
conv = tf.layers.batch_normalization(conv, momentum=0.9)
if activation_function == "relu":
conv = tf.nn.relu(conv, name = 'relu')
if activation_function == "leakyrelu":
conv = tf.nn.leaky_relu(conv, alpha=relu_factor)
if activation_function == "elu":
conv = tf.nn.elu(conv, name = 'elu')
return conv
def general_deconv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm = True, relu_factor = 0, name="deconv2d"):
with tf.variable_scope(name):
deconv = tf.layers.conv2d_transpose(input_data, filters, kernel_size, (stride, stride), padding, activation = None)
if do_norm:
deconv = tf.layers.batch_normalization(deconv, momentum = 0.9)
| tensorflow.nn.leaky_relu | 13,723 |
import tensorflow as tf
def update(state, input_, context=None, symbol=None):
if context is not None and decoder.rnn_feed_attn:
input_ = tf.concat([input_, context], axis=1)
input_size = input_.get_shape()[1].value
initializer = CellInitializer(decoder.cell_size) if decoder.orthogonal_init else None
with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
try:
output, new_state = get_cell(input_size)(input_, state)
except ValueError: # auto_reuse doesn't work with LSTM cells
output, new_state = get_cell(input_size, reuse=True)(input_, state)
if decoder.skip_update and decoder.pred_edits and symbol is not None:
| tensorflow.get_variable_scope | 13,724 |
import tensorflow as tf
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
shape = tf.shape(image)
height = shape[0]
| tensorflow.convert_to_tensor | 13,725 |
import tensorflow as tf
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
if i >= num_actual_predict_examples:
break
probabilities = prediction["probabilities"]
texta=predict_examples[i].text_a
texta=tokenizer.tokenize(texta)
phrase=[texta[j] if probabilities[j]>=0.5 else ' ' for j in range(min(len(texta),128))]
phrase=''.join(phrase).strip()
# output_line = "\t".join(
# str(class_probability)
# for class_probability in probabilities) + "\n"
writer.write(phrase+'\n')
num_written_lines += 1
| tensorflow.logging.info | 13,726 |
import tensorflow as tf
batch_size = 128
sequence_length = 15
d_embed = 200
d_out = 4
embed = tf.random_normal((vocab_size, d_embed))
config = _test_spinn_config(d_embed, d_out)
model = spinn.SNLIClassifier(config, embed)
trainer = spinn.SNLIClassifierTrainer(model, config.lr)
| tensorflow.random_normal | 13,727 |
import tensorflow as tf
# Trainable parameters
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
| tensorflow.concat | 13,728 |
import tensorflow as tf
epsilon = tf.constant(value=1e-7)
labels = tf.to_float(labels)
# labels = tf.to_float(tf.reshape(labels, (-1, num_classes)))
softmax = tf.nn.softmax(logits) + epsilon
if head is not None:
cross_entropy = -tf.reduce_sum(tf.mul(labels * tf.log(softmax), head), axis=[1])
| tensorflow.nn.softmax | 13,729 |
import tensorflow as tf
return {self.ENCODED_VALUES_KEY: x + addend}
def decode(self,
encoded_tensors,
decode_params,
num_summands=None,
shape=None):
"""See base class."""
del num_summands # Unused.
del shape # Unused.
x = encoded_tensors[self.ENCODED_VALUES_KEY]
addend = dummy_rng_source(decode_params[self.SEED_PARAM_KEY],
x.shape.num_elements())
addend = tf.reshape(addend, x.shape)
return x - addend
@encoding_stage.tf_style_adaptive_encoding_stage
class PlusOneOverNEncodingStage(encoding_stage.AdaptiveEncodingStageInterface):
"""[Example] adaptive encoding stage, adding 1/N in N-th iteration.
This is an example implementation of an `AdaptiveEncodingStageInterface` that
modifies state, which controls the creation of params. This is also a simple
example of how an `EncodingStageInterface` can be wrapped as an
`AdaptiveEncodingStageInterface`, without modifying the wrapped encode and
decode methods.
| tensorflow.reshape | 13,730 |
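The docstring describes an adaptive stage whose state (an iteration counter) drives the encode parameters, while encode/decode stay a simple, lossless add/subtract. A toy, framework-free Python sketch of that idea, independent of the `encoding_stage` interfaces used above:

```python
# Illustrates "add 1/N in the N-th iteration": state feeds params,
# params feed encode/decode, and decode exactly undoes encode.
class PlusOneOverNSketch:
    def __init__(self):
        self.iteration = 0                # the adaptive state

    def get_params(self):
        self.iteration += 1               # state update, once per round
        return 1.0 / self.iteration       # addend for this round

    def encode(self, x, addend):
        return x + addend

    def decode(self, encoded, addend):
        return encoded - addend           # lossless: recovers x exactly


stage = PlusOneOverNSketch()
for _ in range(3):
    addend = stage.get_params()
    print(stage.decode(stage.encode(2.0, addend), addend))  # always 2.0
```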
from tensorflow.python.ops import gen_math_ops
name: A name for the operation (optional).
Returns:
A `Tensor` the same size and type as `x` with absolute values.
"""
with ops.op_scope([x], name, "Abs") as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype == types.complex64:
return gen_math_ops.complex_abs(x, name=name)
return gen_math_ops._abs(x, name=name)
def pow(x, y, name=None):
"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
corresponding elements in `x` and `y`. For example:
| tensorflow.python.ops.gen_math_ops._abs | 13,731 |
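The docstring above is cut off right after "For example:". The elementwise behavior it describes is simply `x ** y` applied per element; a NumPy stand-in for the missing example (not necessarily the original docstring's values):

```python
import numpy as np

x = np.array([[2, 2], [3, 3]])
y = np.array([[8, 16], [2, 3]])
print(np.power(x, y))   # [[   256  65536]
                        #  [     9     27]]
```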
import tensorflow as tf
log_sigmas = self.parameterizer(x1)
x2, ildj = half_gaussianize(z2, log_sigmas, inverse=tf.constant(True))
return x2, ildj
def exponentiate(x, log_lambdas, inverse=tf.constant(False)):
if not inverse:
z = tf.math.exp(log_lambdas)*x
ldj = tf.math.reduce_sum(log_lambdas, axis=[1,2,3])
else:
z = x*tf.math.exp(-log_lambdas)
ldj = -tf.math.reduce_sum(log_lambdas, axis=[1,2,3])
return z, ldj
class Exponentiate(Parameterize):
"""
Implementation of parameterize for an exponential prior.
"""
def __init__(self, input_shape=None, name='gaussianize', *args, **kwargs):
| tensorflow.math.exp | 13,732 |
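`exponentiate` above is an elementwise invertible map whose log-determinant-Jacobian is just the sum of `log_lambdas`; running it forward and then inverse recovers the input, and the two ldj terms cancel. A NumPy sketch of that round trip (summing over all elements rather than per-sample axes, for brevity):

```python
import numpy as np

def exponentiate_np(x, log_lambdas, inverse=False):
    if not inverse:
        z = np.exp(log_lambdas) * x
        ldj = np.sum(log_lambdas)
    else:
        z = x * np.exp(-log_lambdas)
        ldj = -np.sum(log_lambdas)
    return z, ldj

x = np.array([0.5, 1.0, 2.0])
log_lambdas = np.array([0.1, -0.3, 0.7])

z, ldj_fwd = exponentiate_np(x, log_lambdas)
x_back, ldj_inv = exponentiate_np(z, log_lambdas, inverse=True)
print(np.allclose(x, x_back), ldj_fwd + ldj_inv)   # True 0.0
```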
import tensorflow as tf
def testModelWithBuckets(self):
"""Larger tests that does full sequence-to-sequence model training."""
# We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
classes = 10
buckets = [(4, 4), (8, 8)]
perplexities = [[], []] # Results for each bucket.
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
with self.test_session() as sess:
# We use sampled softmax so we keep output projection separate.
w = tf.get_variable("proj_w", [24, classes])
| tensorflow.set_random_seed | 13,733 |
import tensorflow as tf
input_partition_dims = None
num_cores_per_replica = None
if params.use_tpu:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
params.platform.tpu,
zone=params.platform.tpu_zone,
project=params.platform.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
# If the input image is transposed (from NHWC to HWCN), the partition
# dimensions also need to be transposed the same way.
def _maybe_transpose(input_partition_dims):
if input_partition_dims and params.train.transpose_input:
return [input_partition_dims[i] for i in [1, 2, 3, 0]]
else:
return input_partition_dims
| tensorflow.Session.reset | 13,734 |
import tensorflow as tf
self.model_W = tf.get_variable("{}_W".format(name), initializer=kernel_initializer([n_in, n_out])) # variational parameters
self.model_b = tf.get_variable("{}_b".format(name), initializer=tf.zeros([n_out]))
self.model_DMW = tf.einsum('pij,jk->pik', self.DM, self.model_W) # Masked weight: p_s * i_s * o_s
self.model_tiled_b = tf.tile(tf.reshape(self.model_b, [1, n_out]), [self.p_s, 1])
if activation is None:
self.activation = tf.identity
| tensorflow.reshape | 13,735 |
import tensorflow as tf
def nature_cnn(unscaled_images, **conv_kwargs):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
| tensorflow.cast | 13,736 |
import tensorflow as tf
tf.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing
# ratio decay when mode = "scheduled"
gradient_norms = [tf.norm(grad) for grad in model.gradients]
tf.summary.histogram("gradient_norm", gradient_norms)
tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms)) # visualize
# gradients (in case of explosion)
| tensorflow.summary.histogram | 13,737 |
from tensorflow.python.framework import constant_op
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_dense(self):
"""Tests mixed dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
'333_X_batch1-FC2-F2'
], [
'55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
| tensorflow.python.framework.constant_op.constant | 13,738 |
import tensorflow as tf
dtype = op.inputs[0].dtype
return grad * tf.cast(grad > 0., dtype) * \
tf.cast(op.inputs[0] > 0., dtype)
| tensorflow.cast | 13,739 |
import tensorflow as tf
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('rewards', tf.reduce_mean(rew_t_ph))
tf.summary.scalar('importance_weights', tf.reduce_mean(importance_weights_ph))
if full_tensorboard_log:
tf.summary.histogram('rewards', rew_t_ph)
tf.summary.histogram('importance_weights', importance_weights_ph)
if tf_util.is_image(obs_phs[0]):
tf.summary.image('observation', obs_phs[0])
elif len(obs_phs[0].shape) == 1:
tf.summary.histogram('observation', obs_phs[0])
| tensorflow.summary.histogram | 13,740 |
import tensorflow as tf
candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]
candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]
| tensorflow.squeeze | 13,741 |
import tensorflow as tf
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
use_tpu, optimizer)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
if task_name not in ["sts-b", "cola"]:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions,
weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
elif task_name == "sts-b":
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Pearson correlations for STS-B."""
# Display labels and predictions
| tensorflow.metrics.accuracy | 13,742 |
import tensorflow as tf
ldj = tf.where(z2 > self.epsilon, ldj, tf.zeros_like(ldj))
return x2, tf.math.reduce_sum(ldj, axis=[1,2,3])
def half_gaussianize(x, log_sigmas, inverse=tf.constant(False)):
if inverse:
z = tf.math.exp(log_sigmas)*x
ldj = tf.math.reduce_sum(log_sigmas, axis=[1,2,3])
else:
z = x*tf.math.exp(-log_sigmas)
ldj = -tf.math.reduce_sum(log_sigmas, axis=[1,2,3])
return z, ldj
class HalfGaussianize(Parameterize):
"""
Implementation of parameterize for a half-Gaussian prior.
"""
| tensorflow.math.exp | 13,743 |
import tensorflow as tf
import tensorflow_probability as tfp
from normalizing_flows.flows import Transform
from . import Parameterize
def gaussianize(x, mus, log_sigmas, inverse=tf.constant(False)):
if inverse:
z = tf.math.exp(log_sigmas)*x + mus
ldj = tf.math.reduce_sum(log_sigmas, axis=[1,2,3])
else:
z = (x - mus)*tf.math.exp(-log_sigmas)
ldj = -tf.math.reduce_sum(log_sigmas, axis=[1,2,3])
return z, ldj
| tensorflow.math.exp | 13,744 |
import tensorflow as tf
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with tf.Session("", graph=tf.Graph()) as sess:
one = tf.Variable(1.0)
| tensorflow.Graph | 13,745 |
import tensorflow as tf
# partial derivatives to the checkpointed nodes
for r, dr in zip(checkpoints_other, dv[:len(checkpoints_other)]):
if dr is not None:
if d_checkpoints[r] is None:
d_checkpoints[r] = dr
else:
d_checkpoints[r] += dr
def _unsparsify(x):
if not isinstance(x, tf.IndexedSlices):
return x
assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape"
indices = x.indices
while indices.shape.ndims < x.values.shape.ndims:
indices = tf.expand_dims(indices, -1)
return tf.scatter_nd(indices, x.values, x.dense_shape)
# partial derivatives to xs (usually the params of the neural net)
d_xs_new = dv[len(checkpoints_other):]
for j in range(len(xs)):
if d_xs_new[j] is not None:
if d_xs[j] is None:
d_xs[j] = _unsparsify(d_xs_new[j])
else:
d_xs[j] += _unsparsify(d_xs_new[j])
| tensorflow.expand_dims | 13,746 |
from tensorflow.python.ops import array_ops
init_shape = [init_size] + fixed_shape
array = _create_local(
'array', shape=init_shape, validate_shape=False, dtype=values.dtype)
size = _create_local('size', shape=[], dtype=dtypes.int32)
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
| tensorflow.python.ops.array_ops.shape | 13,747 |
import tensorflow as tf
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])
else:
raise ValueError("Unsupported rank: {}".format(emb_rank))
return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
def lstm_contextualize(self, text_emb, text_len, text_len_mask):
| tensorflow.reshape | 13,748 |
import tensorflow as tf
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints))
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
tensor_dict[fields.InputDataFields.image], axis=0)
return tensor_dict
| tensorflow.squeeze | 13,749 |
import tensorflow as tf
if self.demo:
self.c = tf.placeholder(tf.int32, [None, self.config.max_p_len], "context")
self.q = tf.placeholder(tf.int32, [None, self.config.max_q_len], "question")
self.ch = tf.placeholder(tf.int32, [None, self.config.max_p_len, self.config.max_ch_len], "context_char")
self.qh = tf.placeholder(tf.int32, [None, self.config.max_q_len, self.config.max_ch_len], "question_char")
self.start_label = tf.placeholder(tf.int32, [None], "answer_label1")
self.end_label = tf.placeholder(tf.int32, [None], "answer_label2")
else:
self.c = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len],
"context")
self.q = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len],
"question")
self.ch = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len,
self.config.max_ch_len], "context_char")
self.qh = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len,
self.config.max_ch_len], "question_char")
self.start_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label1")
self.end_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label2")
self.position_emb = position_embedding(self.c, 2 * self.config.hidden_size)
self.c_mask = tf.cast(self.c, tf.bool) # index 0 is padding symbol N x self.max_p_num, max_p_len
self.q_mask = tf.cast(self.q, tf.bool)
self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)
self.dropout = tf.placeholder(tf.float32, name="dropout")
self.global_step = tf.Variable(0, name="global_step", trainable=False)
| tensorflow.placeholder | 13,750 |
from tensorflow.contrib.metrics.python.ops import confusion_matrix_ops
labels = array_ops.reshape(labels, [-1])
weights = _mask_weights(ignore_mask, weights)
if weights is not None:
weights_rank = weights.get_shape().ndims
if weights_rank > 1:
weights = array_ops.reshape(weights, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix_ops.confusion_matrix(
predictions, labels, num_classes, weights=weights, dtype=cm_dtype)
update_op = state_ops.assign_add(total_cm, current_cm)
def compute_mean_iou(name):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
| tensorflow.contrib.metrics.python.ops.confusion_matrix_ops.confusion_matrix | 13,751 |
import tensorflow as tf
def _init_session(self, sess, model):
w = self._train_params['image_size']
h = self._train_params['image_size']
in_ch = 3
m = model
# Do initialization of all variables
sess.run(tf.global_variables_initializer())
# Load datasets with defaults
sess.run([m.train_dataset_init_op, m.pred_dataset_init_op], feed_dict={
m.ph.train_images: np.zeros((1, w, h, in_ch)),
m.ph.train_classes: np.zeros((1,)),
m.ph.pred_images: np.zeros((1, w, h, in_ch)),
| tensorflow.global_variables_initializer | 13,752 |
import tensorflow as tf
"""
shape = inputdata.get_shape().as_list()[1:]
if None not in shape:
inputdata = tf.reshape(inputdata, [-1, int(np.prod(shape))])
else:
inputdata = tf.reshape(inputdata, tf.stack([tf.shape(inputdata)[0], -1]))
if w_init is None:
w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
ret = tf.layers.dense(inputs=inputdata, activation=lambda x: tf.identity(x, name='output'),
use_bias=use_bias, name=name,
kernel_initializer=w_init, bias_initializer=b_init,
trainable=True, units=out_dim)
return ret
@staticmethod
def layerbn(inputdata, is_training, name, scale=True):
"""
:param inputdata:
:param is_training:
| tensorflow.identity | 13,753 |
import tensorflow as tf
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
| tensorflow.flags.DEFINE_string | 13,754 |
import tensorflow as tf
self.assertEqual(np.int64(15), v.eval())
def testSomeErrors(self):
with tf.Graph().as_default():
v0 = tf.Variable([10.0], name="v0")
v1 = tf.Variable([20.0], name="v1")
v2 = tf.Variable([20.0], name="v2")
v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# By default the name used for "v2" will be "v1" and raise an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
tf.train.Saver([v0, v1, v2])
# The names are different and will work.
tf.train.Saver({"vee1": v1, "other": [v2]})
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.test_session(graph=tf.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
save = tf.train.Saver([v0, v1])
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
| tensorflow.train.Saver | 13,755 |
import tensorflow as tf
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
| tensorflow.logging.info | 13,756 |
import tensorflow as tf
head_emb = tf.nn.dropout(head_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb]
text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length]
context_outputs = self.lstm_contextualize(context_emb, text_len, text_len_mask) # [num_words, emb]
num_words = util.shape(context_outputs, 0)
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]), genre) # [emb]
sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1), [1, max_sentence_length]) # [num_sentences, max_sentence_length]
flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]
flattened_head_emb = self.flatten_emb_by_sentence(head_emb, text_len_mask) # [num_words]
candidate_starts = tf.tile(tf.expand_dims(tf.range(num_words), 1), [1, self.max_span_width]) # [num_words, max_span_width]
candidate_ends = candidate_starts + tf.expand_dims(tf.range(self.max_span_width), 0) # [num_words, max_span_width]
candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts) # [num_words, max_span_width]
candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1)) # [num_words, max_span_width]
candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices)) # [num_words, max_span_width]
flattened_candidate_mask = tf.reshape(candidate_mask, [-1]) # [num_words * max_span_width]
candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids) # [num_candidates]
candidate_span_emb = self.get_span_emb(flattened_head_emb, context_outputs, candidate_starts, candidate_ends) # [num_candidates, emb]
candidate_mention_scores = self.get_mention_scores(candidate_span_emb) # [k, 1]
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]
k = tf.to_int32(tf.floor(tf.to_float(tf.shape(context_outputs)[0]) * self.config["top_span_ratio"]))
| tensorflow.gather | 13,757 |
import tensorflow as tf
else:
self.scope_reuse = None
self.param_initializer = {
'moving_mean': tf.constant_initializer(0., dtype=self.dtype),
'moving_variance': tf.constant_initializer(1., dtype=self.dtype),
'gamma': tf.constant_initializer(0.1, dtype=self.dtype)
}
self.param_trainable = {
| tensorflow.constant_initializer | 13,758 |
import tensorflow as tf
tf.flags.DEFINE_string('trace_file', None,
"""Enable TensorFlow tracing and write trace to
this file.""")
tf.flags.DEFINE_string('graph_file', None,
"""Write the model's graph definition to this
file. Defaults to binary format unless filename ends
in 'txt'.""")
tf.flags.DEFINE_string('optimizer', 'sgd',
'Optimizer to use: momentum or sgd or rmsprop')
tf.flags.DEFINE_float('learning_rate', None,
"""Initial learning rate for training.""")
tf.flags.DEFINE_float('num_epochs_per_decay', 0,
"""Steps after which learning rate decays.""")
tf.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
"""Learning rate decay factor.""")
tf.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""")
tf.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""")
tf.flags.DEFINE_float('rmsprop_momentum', 0.9, """Momentum in RMSProp.""")
tf.flags.DEFINE_float('rmsprop_epsilon', 1.0, """Epsilon term for RMSProp.""")
tf.flags.DEFINE_float('gradient_clip', None, """Gradient clipping magnitude.
Disabled by default.""")
tf.flags.DEFINE_float('weight_decay', 0.00004,
| tensorflow.flags.DEFINE_float | 13,759 |
import tensorflow as tf
embeddings: The embeddings to be orthogonalized for varied faces.
Shape [batch_size, embeddings_dim]
Return: pull away term loss
"""
with tf.name_scope(name):
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)
batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)
pt_loss = (tf.reduce_sum(similarity) - batch_size) / \
(batch_size * (batch_size - 1))
| tensorflow.square | 13,760 |
import tensorflow as tf
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
max_offset_height = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_height - crop_height + 1, []))
max_offset_width = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_width - crop_width + 1, []))
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
| tensorflow.reshape | 13,761 |
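The comment above is about when the random number is drawn: a TF op samples at every graph execution, while a NumPy call made during graph construction bakes a single value into the graph. A minimal sketch of the difference, assuming a TF 2.x install with the `tf.compat.v1` graph/session API available:

```python
import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

graph_time = tf.random.uniform([], maxval=100, dtype=tf.int32)   # sampled on every sess.run
definition_time = tf.constant(int(np.random.randint(100)))       # sampled once, at graph definition

with tf.compat.v1.Session() as sess:
    print([sess.run(graph_time) for _ in range(3)])        # typically three different values
    print([sess.run(definition_time) for _ in range(3)])   # the same value three times
```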
import tensorflow as tf
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
get_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return get_session().run(self.op)
def get_monte_carlo(reward, y, trace_length, batch_size):
| tensorflow.group | 13,762 |
import tensorflow as tf
data_format='channels_last', padding= "same",
strides=(2, 1),
activation=tf.nn.relu)
pool5 = conv5
pool5 = tf.transpose(pool5, [0, 3, 1, 2])
size = pool5.shape[-1] * pool5.shape[-2] * pool5.shape[-3]
logits = tf.layers.dense(tf.reshape(pool5,(-1, size)), units=256*amp_factor)
| tensorflow.transpose | 13,763 |
import tensorflow as tf
tgtimg_h0 = lrelu(conv2d(tgtimg, self.df_dim, name='h0_conv'))
tgtimg_h1 = lrelu(conv2d(tgtimg_h0, self.df_dim*2, name='h1_conv'))
tgtimg_h2 = lrelu(conv2d(tgtimg_h1, self.df_dim*4, name='h2_conv'))
tgtimg_h3 = lrelu(conv2d(tgtimg_h2, self.df_dim*8, name='h3_conv'))
tgtimg_h4 = lrelu(linear(tf.reshape(tgtimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
tgtimg_z = lrelu(linear(tgtimg_h4, featsize, 'hz_lin'))
with tf.variable_scope("translate") as scope:
trans_h0 = lrelu(linear(tf.concat([srcimg_z, tgtctx_z], 1), featsize, 'trans_h0'))
| tensorflow.reshape | 13,764 |
import tensorflow as tf
self.c_mask = tf.cast(self.c, tf.bool)
self.q_mask = tf.cast(self.q, tf.bool)
self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)
| tensorflow.cast | 13,765 |
import tensorflow as tf
regularizers=self._embedding_regularizers,
name="ItemMemory")
# [batch, embedding size]
self._cur_user = self.user_memory(self.input_users)
# Item memories a query
self._cur_item = self.item_memory(self.input_items)
self._cur_item_negative = self.item_memory(self.input_items_negative)
def _construct_placeholders(self):
self.input_users = tf.placeholder(tf.int32, [None], 'UserID')
self.input_items = tf.placeholder(tf.int32, [None], 'ItemID')
self.input_items_negative = tf.placeholder(tf.int32, [None], 'NegativeItemID')
# Add our placeholders
add_to_collection(GraphKeys.PLACEHOLDER, [self.input_users,
self.input_items,
self.input_items_negative])
| tensorflow.placeholder | 13,766 |
import tensorflow as tf
self.a = self._build_net(S, scope='eval_net', trainable=True)
# input s_, output a, get a_ for critic
self.a_ = self._build_net(S_, scope='target_net', trainable=False)
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
def _build_net(self, s, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
net = tf.layers.dense(s, 500, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l1', trainable=trainable)
net = tf.layers.dense(net, 200, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('a'):
actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
bias_initializer=init_b, name='a', trainable=trainable)
scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a') # Scale output to -action_bound to action_bound
return scaled_a
| tensorflow.constant_initializer | 13,767 |
import tensorflow as tf
def test_instance_non_maximum_suppression_1d_scores_empty_inputs(self):
masks = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)
scores = tf.constant([], dtype=tf.float32)
classes = tf.constant([], dtype=tf.int32)
(nms_masks1,
nms_scores1,
nms_classes1,
_) = isu.instance_non_maximum_suppression_1d_scores(
masks,
scores,
classes,
min_score_thresh=0.65,
min_iou_thresh=0.5,
is_class_agnostic=True)
nms_masks_expected1 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)
nms_scores_expected1 = tf.constant([], dtype=tf.float32)
nms_classes_expected1 = tf.constant([], dtype=tf.int32)
(nms_masks2,
nms_scores2,
nms_classes2,
_) = isu.instance_non_maximum_suppression_1d_scores(
masks,
scores,
classes,
min_score_thresh=0.65,
min_iou_thresh=0.5,
is_class_agnostic=False)
nms_masks_expected2 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)
| tensorflow.constant | 13,768 |
import tensorflow as tf
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class AssignOpTest(tf.test.TestCase):
# NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
with self.test_session() as sess:
ones_t = tf.fill([1024, 1024], 1.0)
p = tf.Variable(tf.zeros([1024, 1024]))
adds = [tf.assign_add(p, ones_t, use_locking=False)
for _ in range(20)]
tf.initialize_all_variables().run()
def run_add(add_op):
sess.run(add_op)
threads = [self.checkedThread(target=run_add, args=(add_op,))
for add_op in adds]
for t in threads:
t.start()
for t in threads:
t.join()
| tensorflow.fill | 13,769 |
import tensorflow as tf
# Intentionally using tf.Session() instead of self.test_session() to have
# control over closing the session. test_session() is a cached session.
with tf.Session():
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
# Sleep to make sure the queue runner has started the first run call.
time.sleep(_SLEEP_TIME)
| tensorflow.train.Coordinator | 13,770 |
import tensorflow as tf
else:
raise ValueError("ERROR: invalid type passed into Simulator class (only accepts 'D', 'P', or 'T')")
self.rgb2lms = tf.convert_to_tensor([[17.8824, 43.5161, 4.11935], [3.45565, 27.1554, 3.86714], [0.0299566, 0.184309, 1.46709]])
def simulate_image(self, image):
# passes an image through the color-blindness simulator
inverted_rgb2lms = tf.linalg.inv(self.rgb2lms)
product1 = tf.matmul(inverted_rgb2lms, self.color_matrix)
product2 = tf.matmul(product1, self.rgb2lms)
original_image_shape = image.shape
simulated_image = tf.transpose(tf.matmul(product2, tf.reshape(tf.transpose(image, perm=[2, 0, 1]), (image.shape[2], image.shape[0] * image.shape[1]))), perm=[1, 0])
| tensorflow.linalg.inv | 13,771 |
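The simulator above works in LMS space: convert RGB pixels to LMS, apply the deficiency-specific `color_matrix`, and convert back, i.e. a single per-pixel transform `inv(rgb2lms) @ color_matrix @ rgb2lms`. A NumPy sketch with a placeholder (identity) color matrix, so the output simply reproduces the input:

```python
import numpy as np

rgb2lms = np.array([[17.8824, 43.5161, 4.11935],
                    [3.45565, 27.1554, 3.86714],
                    [0.0299566, 0.184309, 1.46709]])
color_matrix = np.eye(3)   # placeholder; the real matrix depends on the deficiency type

transform = np.linalg.inv(rgb2lms) @ color_matrix @ rgb2lms   # same sandwich product as above

image = np.random.rand(4, 5, 3)       # H x W x 3, illustrative
pixels = image.reshape(-1, 3).T       # 3 x (H*W), pixels as columns
simulated = (transform @ pixels).T.reshape(image.shape)
print(simulated.shape)                # (4, 5, 3)
```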
import tensorflow as tf
if FLAGS.use_tpu:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
| tensorflow.contrib.tpu.TPUEstimatorSpec | 13,772 |
import tensorflow as tf
head_selection, head_org_idx, sl_head, rep_head_mask,
dep_selection, dep_org_idx, sl_dep, rep_dep_mask,
rep_map, rep_dep_tensor, keep_prob, is_train, direction, ivec
):
# data for self-attention
rep_map_dp = dropout(rep_map, keep_prob, is_train)
rep_dep_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, dep_selection)
rep_head_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, head_selection)
# mask generation
dep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_head, 1])
head_idxs = tf.tile(tf.expand_dims(head_org_idx, 2), [1, 1, sl_dep])
if direction is None:
direct_mask = tf.not_equal(head_idxs, dep_idxs) # [bs, slh, sld]
else:
if direction == 'forward':
direct_mask = tf.greater(head_idxs, dep_idxs) # [bs, slh, sld]
else:
direct_mask = tf.less(head_idxs, dep_idxs) # [bs, slh, sld]
# [bs, slh, slh]
rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2))
| tensorflow.expand_dims | 13,773 |
import tensorflow as tf
epoch_size = batch_patition_length // num_steps # -> 5000/5 = 1000, the size of each epoch
for i in range(epoch_size): # draw epoch_size batches of data
x = data_x[:, i * num_steps:(i + 1) * num_steps] # ->(200, 5)
y = data_y[:, i * num_steps:(i + 1) * num_steps]
yield (x, y) # yield makes this a generator; a generator function suspends its execution and state after producing a value (after the for loop finishes there are 1000 (x, y) pairs in total)
def gen_epochs(n, num_steps):
for i in range(n):
yield gen_batch(gen_data(), batch_size, num_steps)
'''Define placeholders'''
x = tf.placeholder(tf.int32, [batch_size, num_steps], name="x")
y = tf.placeholder(tf.int32, [batch_size, num_steps], name='y')
init_state = tf.zeros([batch_size, state_size])
'''RNN input'''
rnn_inputs = tf.one_hot(x, num_classes)
#rnn_inputs = tf.unstack(x_one_hot, axis=1)
'''Not needed anymore; just use the cell already defined in TensorFlow'''
#'''Define the RNN cell'''
#with tf.variable_scope('rnn_cell'):
| tensorflow.placeholder | 13,774 |
import tensorflow as tf
def inputs(self):
return [tf.TensorSpec([None, self.image_shape, self.image_shape, 3], self.image_dtype, 'input'),
| tensorflow.TensorSpec | 13,775 |
import tensorflow as tf
if is_training and config.keep_prob < 1:
cell = tf.contrib.rnn.DropoutWrapper(
cell, output_keep_prob=config.keep_prob)
return cell
cell = tf.contrib.rnn.MultiRNNCell(
[make_cell() for _ in range(config.num_layers)], state_is_tuple=True)
self._initial_state = cell.zero_state(config.batch_size, tf.float32)
state = self._initial_state
outputs = []
with tf.variable_scope('RNN'):
for time_step in range(self.num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])
return output, state
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
def with_prefix(self, prefix, name):
return '/'.join((prefix, name))
| tensorflow.get_variable_scope | 13,776 |
import tensorflow as tf
self.runner = RunnerThread(env, pi, 20)
grads = tf.gradients(self.loss, pi.var_list)
tf.summary.scalar("model/policy_loss", pi_loss / bs)
tf.summary.scalar("model/value_loss", vf_loss / bs)
tf.summary.scalar("model/entropy", entropy / bs)
tf.summary.image("model/state", pi.x)
tf.summary.scalar("model/grad_global_norm", tf.global_norm(grads))
tf.summary.scalar("model/var_global_norm", tf.global_norm(pi.var_list))
self.summary_op = tf.summary.merge_all()
grads, _ = tf.clip_by_global_norm(grads, 40.0)
# copy weights from the parameter server to the local model
self.sync = tf.group(*[v1.assign(v2) for v1, v2 in zip(pi.var_list, self.network.var_list)])
grads_and_vars = list(zip(grads, self.network.var_list))
| tensorflow.global_norm | 13,777 |
import tensorflow as tf
def _initialize_weights(self):
with tf.name_scope('parameters'):
self.w0 = tf.Variable(tf.random_normal([28 * 28, 512]))
self.b0 = tf.Variable(tf.zeros([512]))
self.w1 = tf.Variable(tf.random_normal([512, 10]))
self.b1 = tf.Variable(tf.zeros([10]))
def _build_model(self, x, y):
w0 = self.w0.read_value()
b0 = self.b0.read_value()
| tensorflow.zeros | 13,778 |
import tensorflow as tf
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
| tensorflow.while_loop | 13,779 |
import tensorflow as tf
with tf.variable_scope(scope, reuse=reuse):
if param_noise:
act_f, obs_phs = build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess,
param_noise_filter_func=param_noise_filter_func)
else:
act_f, obs_phs = build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, layers=layers)
# q network evaluation
with tf.variable_scope("step_model", reuse=True, custom_getter=tf_util.outer_scope_getter("step_model")):
step_model = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, obs_phs=obs_phs, layers=layers)
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/model")
# target q network evaluation
with tf.variable_scope("target_q_func", reuse=False):
target_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=False, layers=layers)
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_variable_scope().name + "/target_q_func")
# compute estimate of best possible value starting from state at t + 1
double_q_values = None
double_obs_ph = target_policy.obs_ph
if double_q:
with tf.variable_scope("double_q", reuse=True, custom_getter=tf_util.outer_scope_getter("double_q")):
double_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, layers=layers)
double_q_values = double_policy.q_values
double_obs_ph = double_policy.obs_ph
| tensorflow.variable_scope | 13,780 |
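As a hedged illustration of the tf.variable_scope reuse pattern used throughout the seed above (TensorFlow 1.x assumed; the layer name and shapes here are made up for the example):

import tensorflow as tf

def dense(x):
    # tf.get_variable either creates or, under reuse=True, looks up "w" in the current scope.
    w = tf.get_variable("w", shape=[3, 2])
    return tf.matmul(x, w)

x = tf.placeholder(tf.float32, [None, 3])
with tf.variable_scope("layer"):
    y1 = dense(x)                       # creates layer/w
with tf.variable_scope("layer", reuse=True):
    y2 = dense(x)                       # reuses the same layer/w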
import tensorflow as tf
with tf.variable_scope(name):
moving_mean = get_variable("mean", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(0.0), trainable=False)
moving_variance = get_variable("var", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(1.0), trainable=False)
| tensorflow.constant_initializer | 13,781 |
from tensorflow.python.ops import math_ops
weights=weights)
metric = math_ops.div(tp, math_ops.add(tp, fp), name=name)
update = math_ops.div(
| tensorflow.python.ops.math_ops.add | 13,782 |
import tensorflow as tf
q_t_selected = tf.reduce_sum(step_model.q_values * tf.one_hot(act_t_ph, n_actions), axis=1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_best_using_online_net = tf.argmax(double_q_values, axis=1)
q_tp1_best = tf.reduce_sum(target_policy.q_values * tf.one_hot(q_tp1_best_using_online_net, n_actions), axis=1)
else:
q_tp1_best = tf.reduce_max(target_policy.q_values, axis=1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
| tensorflow.one_hot | 13,783 |
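For context, a minimal sketch of tf.one_hot on its own (illustrative only; works in both TF 1.x and 2.x), since the seed uses it to select Q-values per action:

import tensorflow as tf

indices = tf.constant([0, 2, 1])
onehot = tf.one_hot(indices, depth=3)   # shape [3, 3], float32 by default
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]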
import tensorflow as tf
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
| tensorflow.flags.DEFINE_string | 13,784 |
import tensorflow as tf
mask0 = tf.constant([[1, 0],
[0, 1]], dtype=tf.float32)
mask1 = tf.constant([[1, 1],
[0, 1]], dtype=tf.float32)
mask2 = tf.constant([[1, 0],
[1, 1]], dtype=tf.float32)
mask3 = tf.constant([[1, 1],
[1, 1]], dtype=tf.float32)
| tensorflow.constant | 13,785 |
import tensorflow as tf
with tf.variable_scope("network_parameters"):
with tf.variable_scope("policy"):
x = flat_observations
for size in config.policy_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
mean = tf.layers.dense(
x, action_space.shape[0], activation=tf.tanh,
kernel_initializer=mean_weights_initializer)
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
logstd = tf.tile(
logstd[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope("value"):
x = flat_observations
for size in config.value_layers:
| tensorflow.get_variable | 13,786 |
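A short hedged sketch of the tf.get_variable call pattern from the seed (TensorFlow 1.x assumed; the name, shape, and initial value below are placeholders, not taken from the dataset):

import tensorflow as tf

logstd = tf.get_variable(
    "logstd", shape=[4], dtype=tf.float32,
    initializer=tf.constant_initializer(-1.0))  # trainable by default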
import tensorflow as tf
tokens = self._StringToToken(chars)
tokens = tf.where(
tf.equal(tokens, NO_TOKEN),
# Unseen character.
tf.broadcast_to(self.unk_id, tf.shape(tokens)),
tokens)
# Create initial candidate list.
candidates = tf.map_fn(
| tensorflow.shape | 13,787 |
import tensorflow as tf
string, out_string will contain a random integer casted to a string.
Otherwise string_tensor is returned unchanged.
"""
empty_string = tf.constant('', dtype=tf.string, name='EmptyString')
random_source_id = tf.as_string(
tf.random_uniform(shape=[], maxval=2 ** 63 - 1, dtype=tf.int64))
out_string = tf.cond(
tf.equal(string_tensor, empty_string),
true_fn=lambda: random_source_id,
false_fn=lambda: string_tensor)
return out_string
| tensorflow.random_uniform | 13,788 |
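For reference, a minimal sketch of the tf.random_uniform usage the seed builds on (TensorFlow 1.x assumed; maxval is required for integer dtypes):

import tensorflow as tf

# Scalar integer draw in [0, 10), then formatted as a string tensor.
rand_id = tf.random_uniform(shape=[], maxval=10, dtype=tf.int64)
rand_str = tf.as_string(rand_id)
with tf.Session() as sess:
    print(sess.run(rand_str))  # e.g. b'7'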
from tensorflow.python.framework import ops
| tensorflow.python.framework.ops.reset_default_graph | 13,789 |
import tensorflow as tf
# either the Tower was originally created with reuse,
# or a training tower without vs has to use reuse.
reuse = (self.is_training and self._index > 0 and not
self.has_own_variables) or self._initial_vs_reuse
if len(self._vs_name):
ret.append(tf.variable_scope(self._vs_name, reuse=reuse))
else:
if reuse:
ret.append(tf.variable_scope(
tf.get_variable_scope(), reuse=True))
else:
| tensorflow.variable_scope | 13,790 |
import tensorflow as tf
self.epsilon = epsilon
self.data_format = data_format
self.name = name
def __call__(self,input_var,**kwargs) :
mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
ret = (input_var - mean) / tf.sqrt(var+self.epsilon)
if self.gamma is None :
return ret
else:
return tf.nn.bias_add(ret*self.gamma,
| tensorflow.sqrt | 13,791 |
import tensorflow as tf
# non-linear
rep_map = bn_dense_layer(rep_tensor_split, ivec, True, 0., 'bn_dense_map', activation,
False, wd, keep_prob, is_train) # bs,bn,bl,vec
rep_map_tile = tf.tile(tf.expand_dims(rep_map, 2), [1, 1, block_len, 1, 1]) # bs,bn,bl,bl,vec
# rep_map_dp = dropout(rep_map, keep_prob, is_train)
bn = block_num
bl = block_len
with tf.variable_scope('self_attention'):
# @2.self-attention in block
# mask generation
sl_indices = tf.range(block_len, dtype=tf.int32)
sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices)
if direction == 'forward':
direct_mask = tf.greater(sl_row, sl_col) # bl,bl
else:
direct_mask = tf.greater(sl_col, sl_row) # bl,bl
direct_mask_tile = tf.tile(
tf.expand_dims(tf.expand_dims(direct_mask, 0), 0), [bs, bn, 1, 1]) # bs,bn,bl,bl
rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1]) # bs,bn,bl,bl
rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl]) # bs,bn,bl,bl
rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2)
attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask') # bs,bn,bl,bl
# attention
f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
dependent_head = linear(
rep_map, 2 * ivec, False, 0., 'linear_dependent_head', False, wd, keep_prob, is_train) # bs,bn,bl,2vec
| tensorflow.greater | 13,792 |
import tensorflow as tf
filter_h, filter_w = filter_dims
stride_h, stride_w = stride_dims
with tf.variable_scope(scope):
pool = tf.nn.avg_pool(input, ksize=[1, filter_h, filter_w, 1], strides=[1, stride_h, stride_w, 1],
padding=padding)
return pool
| tensorflow.nn.avg_pool | 13,793 |
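For context, a minimal standalone sketch of tf.nn.avg_pool with the same NHWC ksize/strides convention the seed uses (TensorFlow 1.x assumed; the input shape is made up):

import tensorflow as tf

# 2x2 average pooling with stride 2 over an NHWC tensor.
x = tf.random_uniform([1, 4, 4, 3])
pooled = tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
print(pooled.shape)  # (1, 2, 2, 3)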
import tensorflow as tf
def contra_traj_lossV4(pred, tgt, horizon=12, resample=1, hard_ratio=1.0):
horizon_pred = horizon_sumV1(pred, horizon)
horizon_tgt = horizon_sumV1(tgt, horizon)
pred_flat = tf.reshape(horizon_pred, [-1])
tgt_flat = tf.reshape(horizon_tgt, [-1])
batch = tf.stack([pred_flat, tgt_flat], 1)
sample_func = sample_pair(batch)
def sample_compute(_):
| tensorflow.reshape | 13,794 |
import tensorflow as tf
tf.app.flags.DEFINE_integer('save-model', 1000,
'Number of steps between model saves (default: %(default)d)')
# Optimisation hyperparameters
tf.app.flags.DEFINE_integer('batch-size', 256, 'Number of examples per mini-batch (default: %(default)d)')
tf.app.flags.DEFINE_float('learning-rate', 1e-4, 'Learning rate (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-width', 32, 'Image width (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-height', 32, 'Image height (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-channels', 3, 'Image channels (default: %(default)d)')
tf.app.flags.DEFINE_integer('num-classes', 10, 'Number of classes (default: %(default)d)')
tf.app.flags.DEFINE_string('log-dir', '{cwd}/logs/'.format(cwd=os.getcwd()),
'Directory where to write event logs and checkpoint. (default: %(default)s)')
run_log_dir = os.path.join(FLAGS.log_dir,
'exp_BN_bs_{bs}_lr_{lr}_aug_flip_brightness'.format(bs=FLAGS.batch_size,
lr=FLAGS.learning_rate))
| tensorflow.app.flags.DEFINE_integer | 13,795 |
import tensorflow as tf
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
| tensorflow.ones_like | 13,796 |
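A hedged sketch of the masking idiom in the seed, using tf.ones_like to build the large-negative paddings before a softmax (TensorFlow 1.x assumed; scores and mask below are toy values):

import tensorflow as tf

scores = tf.constant([[0.2, 1.5], [0.7, -0.3]])
mask = tf.constant([[True, False], [False, True]])
# Very negative padding so masked positions get ~0 probability after softmax.
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
masked = tf.where(mask, scores, paddings)
probs = tf.nn.softmax(masked)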
import tensorflow as tf
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
TestModel(model)
| tensorflow.nn.rnn_cell.BasicLSTMCell | 13,797 |
import tensorflow as tf
random.seed(i)
def get_session():
tf.reset_default_graph()
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
session = tf.Session(config=tf_config)
print("AVAILABLE GPUS: ", get_available_gpus())
| tensorflow.ConfigProto | 13,798 |
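For reference, a minimal sketch of the tf.ConfigProto session setup shown in the seed (TensorFlow 1.x assumed; the allow_growth line is an optional extra, not from the dataset row):

import tensorflow as tf

config = tf.ConfigProto(
    inter_op_parallelism_threads=1,
    intra_op_parallelism_threads=1)
config.gpu_options.allow_growth = True  # allocate GPU memory lazily
sess = tf.Session(config=config)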
import tensorflow as tf
if not elems_shape or not elems_shape[0]:
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
outputs = [fn(arg) for arg in tf.unstack(elems)]
# Stack `outputs`, which is a list of Tensors or list of lists of Tensors
if all([isinstance(output, tf.Tensor) for output in outputs]):
return tf.stack(outputs)
else:
if all([isinstance(output, list) for output in outputs]):
if all([all(
[isinstance(entry, tf.Tensor) for entry in output_list])
| tensorflow.stack | 13,799 |
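For context, a minimal sketch of tf.stack on its own (illustrative only; works in both TF 1.x and 2.x), which is what the seed uses to combine the per-element outputs:

import tensorflow as tf

a = tf.constant([1, 2])
b = tf.constant([3, 4])
c = tf.constant([5, 6])
stacked = tf.stack([a, b, c])               # shape [3, 2], new axis 0
stacked_cols = tf.stack([a, b, c], axis=1)  # shape [2, 3]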