| seed (string, length 25 to 2.89k) | seed_api (string, length 14 to 102) | index (int64, 0 to 14.8k) |
---|---|---|
import tensorflow as tf
sess.run(logits)
# Creates a saver.
saver0 = tf.train.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self):
test_dir = os.path.join(self.get_temp_dir(), "graph_extension")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = tf.train.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train.
labels = tf.constant(0, tf.int32, shape=[100], name="labels")
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, 10]), 1.0, 0.0)
| tensorflow.train.import_meta_graph | 13,100 |
import tensorflow as tf
(entropy_test_adv_help, labels_adv_help, confidence_test_adv_help) = sess.run(
[entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)], feed_dict={predict: predict_ADV}
| tensorflow.reduce_max | 13,101 |
import tensorflow as tf
# The output is (mean, var).
if self._compute_variance and not self._compute_weighted:
return [
analyzer_nodes.TensorInfo(
tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
] * 2
else:
return [
analyzer_nodes.TensorInfo(
tf.as_dtype(np.int64), self._output_shape, None),
analyzer_nodes.TensorInfo(
tf.as_dtype(self._output_numpy_dtype), self._output_shape, None),
analyzer_nodes.TensorInfo(
tf.as_dtype(self._output_numpy_dtype), self._output_shape, None),
analyzer_nodes.TensorInfo(
tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
]
def _combine_mean_and_var_accumulators(
self, a: _WeightedMeanAndVarAccumulator,
b: _WeightedMeanAndVarAccumulator) -> _WeightedMeanAndVarAccumulator:
"""Combines two mean and var accumulators.
Args:
a: A _WeightedMeanAndVarAccumulator.
b: A _WeightedMeanAndVarAccumulator.
Returns:
A _WeightedMeanAndVarAccumulator computed as the combination of a and b.
"""
| tensorflow.as_dtype | 13,102 |
import tensorflow as tf
# size: num_priors
best_target_per_prior = tf.math.reduce_max(ious, axis=1)
best_target_per_prior_index = tf.math.argmax(ious, axis=1)
# size: num_targets
| tensorflow.math.argmax | 13,103 |
import tensorflow as tf
phase = conv3d(phase,3,channel_nr, 'SYMMETRIC', 'relu')
concat_layer = tf.keras.layers.concatenate([phase, pc])
concat_layer = conv3d(concat_layer, 1, channel_nr, 'SYMMETRIC', 'relu')
| tensorflow.keras.layers.concatenate | 13,104 |
import tensorflow as tf
net_2 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name+'_2', phase_train, use_batch_norm, weight_decay)
out = tf.maximum(net_1, net_2)
return out
def affine(inpOp, nIn, nOut, name, weight_decay=0.0):
with tf.variable_scope(name):
l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)
weights = tf.get_variable("weights", [nIn, nOut],
initializer=tf.truncated_normal_initializer(stddev=1e-1),
regularizer=l2_regularizer, dtype=inpOp.dtype)
biases = tf.get_variable("biases", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype)
affine1 = tf.nn.relu_layer(inpOp, weights, biases)
return affine1
def l2_loss(tensor, weight=1.0, scope=None):
"""Define a L2Loss, useful for regularize, i.e. weight decay.
Args:
tensor: tensor to regularize.
weight: an optional weight to modulate the loss.
scope: Optional scope for op_scope.
Returns:
| tensorflow.constant_initializer | 13,105 |
import tensorflow as tf
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
| tensorflow.stack | 13,106 |
import tensorflow as tf
variable=self._moving_second_moment,
value=second_moment,
decay=self._decay_rate,
name="update_moving_second_moment").op
return update_mean_op, update_second_moment_op
def build_no_ops():
return (tf.no_op(), tf.no_op())
# Only make the ops if we know that `is_training=True`, or the value of
# `is_training` is unknown.
is_training_const = utils.constant_value(is_training)
if is_training_const is None or is_training_const:
update_mean_op, update_second_moment_op = utils.smart_cond(
is_training,
| tensorflow.no_op | 13,107 |
from tensorflow.python.platform import gfile
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s2))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s3))
# Create a second helper, identical to the first.
save2 = tf.train.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
| tensorflow.python.platform.gfile.Exists | 13,108 |
import tensorflow as tf
idx = tf.reshape(idx, [-1, attn_length])
low = pos - encoder.attn_window_size
high = pos + encoder.attn_window_size
mlow = tf.to_float(idx < low)
mhigh = tf.to_float(idx > high)
m = mlow + mhigh
m += tf.to_float(idx >= encoder_input_length)
mask = tf.to_float(tf.equal(m, 0.0))
e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs)
weights = softmax(e, mask=mask)
if encoder.attn_window_size > 0:
sigma = encoder.attn_window_size / 2
numerator = -tf.pow((idx - pos), tf.convert_to_tensor(2, dtype=tf.float32))
div = tf.truediv(numerator, 2 * sigma ** 2)
| tensorflow.equal | 13,109 |
import tensorflow as tf
sok_saver = sok.Saver()
restore_op = list()
for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):
control_inputs = [restore_op[-1]] if restore_op else None
with tf.control_dependencies(control_inputs):
if args.restore_params:
filepath = r"./embedding_variables"
op = sok_saver.restore_from_file(embedding_layer.embedding_variable, filepath)
else:
op = sok_saver.load_embedding_values(embedding_layer.embedding_variable, init_tensors[i])
restore_op.append(op)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction="none")
def _replica_loss(labels, logits):
loss = loss_fn(labels, logits)
return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)
def _train_step(inputs, labels, training):
def _step_fn(inputs, labels):
logit, embedding_vector = sok_sparse_demo(inputs, training=training)
loss = _replica_loss(labels, logit)
emb_var, other_var = sok.split_embedding_variable_from_others(sok_sparse_demo.trainable_variables)
grads = tf.gradients(loss, emb_var + other_var, colocate_gradients_with_ops=True,
unconnected_gradients=tf.UnconnectedGradients.NONE)
emb_grads, other_grads = grads[:len(emb_var)], grads[len(emb_var):]
if "plugin" in args.optimizer:
emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))
else:
with sok.OptimizerScope(emb_var):
emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))
| tensorflow.nn.compute_average_loss | 13,110 |
import tensorflow as tf
tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) +
tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) -
| tensorflow.einsum | 13,111 |
import tensorflow as tf
def output_size(self):
output_size = self._num_nodes * self._num_units
if self._num_proj is not None:
output_size = self._num_nodes * self._num_proj
return output_size
@staticmethod
def _concat(x, x_):
x_ = tf.expand_dims(x_, 0)
return tf.concat([x, x_], axis=0)
def __call__(self, inputs, bias_start=0.0):
"""Graph convolution between input and the graph matrix.
:param args: a 2D Tensor or a list of 2D, batch x n, Tensors.
:param output_size:
:param bias:
| tensorflow.expand_dims | 13,112 |
import tensorflow as tf
dataset = dataset.filter(eval_right_length)
return dataset
@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def wmt_concat_preprocess(dataset, training, max_length=-1, max_eval_length=-1):
"""Preprocessing for WMT: filter exceeding maximum length and concatenate."""
dataset = wmt_preprocess(dataset, training, max_length, max_eval_length)
def concat_and_add_mask(features, targets):
inp = features['inputs']
pad = tf.expand_dims(tf.zeros_like(inp[0]), axis=0)
concat = tf.concat([inp, pad, targets], axis=0)
mask = tf.concat([tf.zeros_like(inp), pad, tf.ones_like(targets)], axis=0)
features['inputs'] = concat
features['mask'] = mask
return features, concat
dataset = dataset.map(concat_and_add_mask)
return dataset
@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def lm_token_preprocessing(dataset, training):
"""Concatenates inputs, 0, targets, with masking only for targets."""
del training
| tensorflow.ones_like | 13,113 |
import tensorflow as tf
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
| tensorflow.concat | 13,114 |
import tensorflow as tf
class ValueEstimator_Pendulum():
def __init__(self, learning_rate=0.1, par_idx=0,scope="value_estimator"):
w_init = tf.random_normal_initializer(0.,.1);
with tf.variable_scope(scope+"_"+str(par_idx)):
# state and target
self.state = tf.placeholder(tf.float32, [None,num_state], "state")
self.target = tf.placeholder(tf.float32, [None,1], name="target")
# layers
l_c = tf.layers.dense(self.state, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
self.value_estimate = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # estimated value for state
# loss and optimizer
self.loss = tf.reduce_mean(tf.square(tf.subtract(self.value_estimate, self.target)))
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.contrib.framework.get_global_step())
def predict(self, state, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, { self.state: [state] })[0][0]
def update(self, state, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = { self.state: state, self.target: target }
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
| tensorflow.train.AdamOptimizer | 13,115 |
import tensorflow as tf
# embed is the output of the hidden layer (embedding layer); it is a
# row vector with 'embedding_size' values.
with tf.variable_scope(name):
embeddings = tf.get_variable(
name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args)
embed = tf.nn.embedding_lookup(embeddings, self.inputs)
# Construct the variables for the NCE loss (i.e. negative sampling)
nce_weights = tf.get_variable(
name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, dtype=LayersConfig.tf_dtype, **nce_W_init_args)
nce_biases = tf.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype, **nce_b_init_args)
| tensorflow.nn.embedding_lookup | 13,116 |
import tensorflow as tf
tf.app.flags.DEFINE_float('nb_epochs_rat', 1.0, '# of training epochs\'s ratio')
tf.app.flags.DEFINE_float('lrn_rate_init', 1e-1, 'initial learning rate')
tf.app.flags.DEFINE_float('batch_size_norm', 128, 'normalization factor of batch size')
tf.app.flags.DEFINE_float('momentum', 0.9, 'momentum coefficient')
tf.app.flags.DEFINE_float('loss_w_dcy', 2e-4, 'weight decaying loss\'s coefficient')
| tensorflow.app.flags.DEFINE_float | 13,117 |
import tensorflow as tf
num_layers = len(weights) + 1
H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
for l in range(0,num_layers-2):
W1, W2 = weights[l]
b = biases[l]
H1 = tf.add(tf.matmul(H, W1), b)
H2 = tf.matmul(H, W2)
H = tf.tanh(tf.add(H1 * H2, H1))
W1, W2 = weights[-1]
b = biases[-1]
H1 = tf.add(tf.matmul(H, W1), b)
H2 = tf.matmul(H, W2)
Y = tf.add(H1 * H2, H1)
| tensorflow.matmul | 13,118 |
from tensorflow.python.platform import gfile
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
def testSharded(self):
save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded")
try:
gfile.DeleteRecursively(save_dir)
except OSError:
pass # Ignore
gfile.MakeDirs(save_dir)
| tensorflow.python.platform.gfile.DeleteRecursively | 13,119 |
import tensorflow as tf
predictions[key[5::]] = val
if key[0:3] == 'gt_':
groundtruths[key[3::]] = val
if key[0:5] == 'loss_':
losses[key[5::]] += (np.mean(val) / eval_steps)
self._evaluator.update(predictions, groundtruths)
metrics = self._evaluator.evaluate()
# Summary writer writes out eval metrics.
output_dir = os.path.join(self._model_dir, 'eval')
tf.gfile.MakeDirs(output_dir)
summary_writer = tf.summary.FileWriter(output_dir)
write_summary(metrics, summary_writer, current_step)
write_summary(losses, summary_writer, current_step)
summary_writer.close()
return metrics
def predict(self, input_fn):
return self._estimator.predict(input_fn=input_fn)
| tensorflow.gfile.MakeDirs | 13,120 |
import tensorflow as tf
def _create_params(self):
initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
| tensorflow.random_uniform_initializer | 13,121 |
import tensorflow as tf
tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
self.end_points_D_val['logits'], targets, 1)))
self.error_rate = 1. - \
tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
self.end_points_D['class_logits'], targets, 1)))
if gpu_idx == 0:
update = tf.assign(num_error_rate, num_error_rate + 1.)
with tf.control_dependencies([update]):
tc = tf.maximum(.01, 1. / num_error_rate)
update = tf.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate)
with tf.control_dependencies([update]):
self.d_loss_class = tf.identity(self.d_loss_class)
self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.end_points_D['D_on_G_logits'],
labels=tf.zeros_like(self.end_points_D['D_on_G_logits']))
self.d_loss_class = tf.reduce_mean(self.d_loss_class)
self.d_loss_real = tf.reduce_mean(self.d_loss_real)
self.d_loss_fake = tf.reduce_mean(self.d_loss_fake)
if is_fm_loss:
global_pool_head = self.end_points_D['global_pool']
real_data_features = tf.slice(global_pool_head, [0, 0], [batch_size_train, num_classes])
fake_data_features = tf.slice(global_pool_head, [batch_size_train, 0],
[batch_size_train, num_classes])
self.g_loss = self._feature_matching_loss(real_data_features, fake_data_features)
else:
generator_target_prob = self.cnf['generator_target_prob'] # 0.75 / 2.0
self.g_loss = self._sigmoid_kl_with_logits(self.end_points_D['D_on_G_logits'],
| tensorflow.zeros_like | 13,122 |
import tensorflow as tf
# embedding
dists = l2(self.embedding_test[:-1] - self.embedding_test[1:])
self.dist = dists
metrics = []
metrics.append(tf.summary.histogram('point_distance', dists))
metrics.append(tf.summary.scalar('training/trajectory_length', tf.reduce_sum(dists)))
self.blur_ph = tf.placeholder(dtype=tf.float32)
metrics.append(tf.summary.scalar('training/blur_sigma', self.blur_ph))
pred = self.embedding_test[1:-1]*2 - self.embedding_test[0:-2]
pred_error = l2(pred - self.embedding_test[2:])
| tensorflow.reduce_sum | 13,123 |
import tensorflow as tf
(old_action_op.prob(self.tfa) + 1e-5)
# surrogate loss
surr = ratio * self.tfadv
# minimize the clipped surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum(
surr,
tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
# log
self.train_writer = tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.tableAction = self.createActionTable()
def createActionTable(self):
tableAction = []
for a in range(0, 3):
for b in range(0, 3):
for c in range(0, 2):
| tensorflow.summary.FileWriter | 13,124 |
import tensorflow as tf
if b_init is None:
b_init = tf.constant_initializer()
| tensorflow.constant_initializer | 13,125 |
import tensorflow as tf
name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args)
embed = tf.nn.embedding_lookup(embeddings, self.inputs)
# Construct the variables for the NCE loss (i.e. negative sampling)
nce_weights = tf.get_variable(
name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, dtype=LayersConfig.tf_dtype, **nce_W_init_args)
nce_biases = tf.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype, **nce_b_init_args)
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels
# each time we evaluate the loss.
self.nce_cost = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
inputs=embed,
labels=train_labels,
num_sampled=num_sampled,
num_classes=vocabulary_size,
**nce_loss_args))
self.outputs = embed
self.normalized_embeddings = tf.nn.l2_normalize(embeddings, 1)
| tensorflow.nn.nce_loss | 13,126 |
from tensorflow.python.platform import gfile
# Created by the first helper.
self.assertTrue(gfile.Exists(s1))
| tensorflow.python.platform.gfile.Exists | 13,127 |
import tensorflow as tf
clf_h = tf.reshape(h, [-1, n_embd])
pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32)
clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx)
clf_h = tf.reshape(clf_h, [-1, 2, n_embd])
if train and clf_pdrop > 0:
shape = shape_list(clf_h)
shape[1] = 1
clf_h = tf.nn.dropout(clf_h, 1-clf_pdrop, shape)
clf_h = tf.reshape(clf_h, [-1, n_embd])
clf_logits = clf(clf_h, 1, train=train)
clf_logits = tf.reshape(clf_logits, [-1, 2])
clf_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=clf_logits, labels=Y)
return clf_logits, clf_losses, lm_losses
def mgpu_train(*xs):
gpu_ops = []
gpu_grads = []
xs = (tf.split(x, n_gpu, 0) for x in xs)
for i, xs in enumerate(zip(*xs)):
do_reuse = True if i > 0 else None
| tensorflow.reshape | 13,128 |
import tensorflow as tf
# max pool
pooled = tf.nn.max_pool(H,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
with tf.name_scope("preFc"):
# combine all pooled outputs
total_filters = num_filter * len(filter_list)
# concat all the pooled weights
H_pool = tf.concat(pooled_outputs, 3)
#flatten it for fully connected layer
H_pool_flat = tf.reshape(H_pool, [-1, total_filters])
with tf.name_scope("dropout"):
H_drop = tf.nn.dropout(H_pool_flat, keep_prob = keep_prob)
# Final (unnormalized) layer
with tf.name_scope("output"):
W = tf.get_variable("W",
shape=[total_filters, nb_classes],
initializer=tf.contrib.layers.xavier_initializer())
# add final layer bias
b = tf.Variable(tf.constant(0.1, shape=[nb_classes]), name="b")
# calc l2 losses
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
| tensorflow.reshape | 13,129 |
import tensorflow as tf
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,
| tensorflow.transpose | 13,130 |
import tensorflow as tf
def mix_scramble(self,x):
# assume square patch
# sizes = tf.convert_to_tensor([1,2,4,8])
# idx = tf.random.categorical([tf.ones_like(sizes)], 1)
# print(idx)
# patch_size = int(sizes[idx[0][0]])
patch_size = self.get_random_patch_size()
print('Patch size:',patch_size)
window = [1,patch_size,patch_size,1]
print('Window:',window)
n_row,n_col,n_channel = x.shape
n_patch = n_row*n_col // (patch_size**2)
patches = tf.image.extract_patches(tf.expand_dims(x,0),sizes=window,strides=window,rates=[1, 1, 1, 1],padding='VALID')
patches = tf.reshape(patches,[n_patch,patch_size,patch_size,n_channel])
patches = tf.random.shuffle(patches)
rows = tf.split(patches,n_col//patch_size,axis=0)
rows = [tf.concat(tf.unstack(x),axis=1) for x in rows]
x_aug = tf.concat(rows,axis=0)
x_aug = tf.convert_to_tensor(x_aug)
return tf.concat([x, x_aug],axis=2)
def gaussian_blur(self,x):
#create random gaussian blur filter
mean = 0
| tensorflow.expand_dims | 13,131 |
import tensorflow as tf
self.a_dim = action_dim
self.action_bound = action_bound
self.lr = learning_rate
self.t_replace_iter = t_replace_iter
self.t_replace_counter = 0
with tf.variable_scope('Actor'):
# input s, output a
self.a = self._build_net(S, scope='eval_net', trainable=True)
# input s_, output a, get a_ for critic
self.a_ = self._build_net(S_, scope='target_net', trainable=False)
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
def _build_net(self, s, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
net = tf.layers.dense(s, 500, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l1', trainable=trainable)
net = tf.layers.dense(net, 200, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('a'):
| tensorflow.get_collection | 13,132 |
import tensorflow as tf
tf.app.flags.DEFINE_integer('ws_nb_rlouts', 200, 'WS: # of roll-outs for the RL agent')
tf.app.flags.DEFINE_integer('ws_nb_rlouts_min', 50,
'WS: minimal # of roll-outs for the RL agent to start training')
tf.app.flags.DEFINE_string('ws_reward_type', 'single-obj',
'WS: reward type (\'single-obj\' OR \'multi-obj\')')
tf.app.flags.DEFINE_float('ws_lrn_rate_rg', 3e-2, 'WS: learning rate for layerwise regression')
tf.app.flags.DEFINE_integer('ws_nb_iters_rg', 20, 'WS: # of iterations for layerwise regression')
tf.app.flags.DEFINE_float('ws_lrn_rate_ft', 3e-4, 'WS: learning rate for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_ft', 400, 'WS: # of iterations for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_feval', 25, 'WS: # of iterations for fast evaluation')
tf.app.flags.DEFINE_float('ws_prune_ratio_exp', 3.0, 'WS: pruning ratio\'s exponent term')
tf.app.flags.DEFINE_float('ws_iter_ratio_beg', 0.1, 'WS: iteration ratio (at starting time)')
| tensorflow.app.flags.DEFINE_integer | 13,133 |
import tensorflow as tf
mean_dist, mean_pred_error = tf.reduce_mean(dists), tf.reduce_mean(pred_error)
improvement = (mean_dist-mean_pred_error)/mean_dist
pairwise_improvement = tf.nn.relu(dists[1:] - pred_error)
pairwise_improvement_bool = tf.cast(pairwise_improvement > 0, pairwise_improvement.dtype)
self.pairwise_improvement_bool = pairwise_improvement_bool
metrics.append(tf.summary.scalar('training/avg_dist', mean_dist))
metrics.append(tf.summary.scalar('training/pred_dist', mean_pred_error))
metrics.append(tf.summary.scalar('training/improvement', improvement))
metrics.append(tf.summary.scalar('training/improvement_abs', tf.nn.relu(improvement)))
metrics.append(tf.summary.histogram('training/improvement_abs_hist', nut.nan_to_zero(improvement)))
metrics.append(tf.summary.scalar('training/improvement_pairwise', tf.reduce_mean(pairwise_improvement_bool)))
metrics.append(tf.summary.histogram('training/improvement_pairwise_hist', pairwise_improvement_bool))
self.eval_summs = tf.summary.merge(metrics)
def _build_embedding_saver(self, sess):
"""To use embedding visualizer data has to be stored in variable
| tensorflow.summary.scalar | 13,134 |
import tensorflow as tf
value_dtype=tf.int64,
default_value=-1)
vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
value_dtype=tf.int64,
default_value=-1)
vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
x_t = tf.gather(x, l)
x_t_len = tf.strings.length(x_t)
x_t = tf.string_split([x_t], delimiter='').values
| tensorflow.Variable | 13,135 |
import tensorflow as tf
x = tf.layers.dense(x, size, activation=tf.nn.relu)
mean = tf.layers.dense(
x, action_space.shape[0], activation=tf.tanh,
kernel_initializer=mean_weights_initializer)
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
logstd = tf.tile(
logstd[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope("value"):
x = flat_observations
for size in config.value_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
value = tf.layers.dense(x, 1)[..., 0]
mean = tf.check_numerics(mean, "mean")
logstd = tf.check_numerics(logstd, "logstd")
value = tf.check_numerics(value, "value")
policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))
def clip_logits(logits, config):
| tensorflow.layers.dense | 13,136 |
import tensorflow as tf
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
| tensorflow.tensordot | 13,137 |
import tensorflow as tf
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
| tensorflow.zeros_like | 13,138 |
import tensorflow as tf
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
| tensorflow.flags.DEFINE_string | 13,139 |
import tensorflow as tf
tower_preds = []
tower_metrics = []
for i in range(self.n_gpus):
worker = '/gpu:{}'.format(i)
device_setter = tf.train.replica_device_setter(
worker_device=worker, ps_device='/cpu:0', ps_tasks=1)
with tf.name_scope('{}_{}'.format(mode, i)) as scope:
with tf.device(device_setter):
net_outputs = self._model(shards[i], mode, **self.config)
if mode == Mode.TRAIN:
loss = self._loss(net_outputs, shards[i], **self.config)
loss += tf.reduce_sum(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
scope))
model_params = tf.trainable_variables()
grad = tf.gradients(loss, model_params)
tower_losses.append(loss)
tower_gradvars.append(zip(grad, model_params))
if i == 0:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
scope)
elif mode == Mode.EVAL:
tower_metrics.append(self._metrics(
net_outputs, shards[i], **self.config))
else:
tower_preds.append(net_outputs)
if mode == Mode.TRAIN:
| tensorflow.trainable_variables | 13,140 |
from tensorflow.contrib.learn.python.learn.datasets import base
train_path = os.path.join(data_dir, 'dbpedia_csv', 'train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv', 'test.csv')
if size == 'small':
# Reduce the size of original data by a factor of 1000.
base.shrink_csv(train_path, 1000)
base.shrink_csv(test_path, 1000)
train_path = train_path.replace('train.csv', 'train_small.csv')
test_path = test_path.replace('test.csv', 'test_small.csv')
else:
| tensorflow.contrib.learn.python.learn.datasets.base.shrink_csv | 13,141 |
import tensorflow as tf
net = DenseLayer(net, n_units=4, act=tf.identity,
W_init=tf.random_uniform_initializer(0, 0.01), b_init=None, name='q_a_s')
y = net.outputs # action-value / rewards of 4 actions
predict = tf.argmax(y, 1) # choose action greedily by reward. In Q-Learning the policy is greedy, so we use "max" to select the next action.
## Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
| tensorflow.argmax | 13,142 |
import tensorflow as tf
x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in each input
vector.
dtype: Tensorflow dtype of entries in the returned matrix.
name: (Optional) A name for this operation.
Raises:
ValueError: if input is not a rank-2 Tensor.
Returns:
A rank-2 (matrix) covariance `Tensor`
"""
if not isinstance(x, tf.Tensor):
raise TypeError('Expected a Tensor, but got %r' % x)
with tf.compat.v1.name_scope(name, 'covariance'):
x.shape.assert_has_rank(2)
input_dim = x.shape.as_list()[1]
shape = (input_dim, input_dim)
(result,) = _apply_cacheable_combiner(
CovarianceCombiner(shape, dtype.as_numpy_dtype), x)
return result
class PCACombiner(CovarianceCombiner):
"""Compute PCA of accumulated data using the biased covariance matrix."""
def __init__(self, output_shape, output_dim=None, numpy_dtype=np.float64):
| tensorflow.compat.v1.name_scope | 13,143 |
import tensorflow as tf
tf.expand_dims(candidate_starts, 0),
tf.expand_dims(candidate_ends, 0),
tf.expand_dims(k, 0),
util.shape(context_outputs, 0),
True) # [1, k]
top_span_indices.set_shape([1, None])
top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]
top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]
top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]
top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]
top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]
top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices) # [k]
top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]
c = tf.minimum(self.config["max_top_antecedents"], k)
if self.config["coarse_to_fine"]:
top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)
| tensorflow.gather | 13,144 |
import tensorflow as tf
# Train
with tf.Session(config=config) as sess:
try:
summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
sess.run(tf.global_variables_initializer())
| tensorflow.summary.FileWriter | 13,145 |
import tensorflow as tf
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
| tensorflow.zeros_initializer | 13,146 |
import tensorflow as tf
result, batch_size = session.run(output)
self.assertAllEqual([[3, 5]], result)
self.assertAllEqual([1], batch_size)
def test_two(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a, b):
batch_size = tf.shape(a)[0]
return a + b, tf.tile([batch_size], [batch_size])
output0 = f(tf.constant([1]), tf.constant([2]))
output1 = f(tf.constant([2]), tf.constant([3]))
tp = pool.ThreadPool(2)
f0 = tp.apply_async(session.run, [output0])
f1 = tp.apply_async(session.run, [output1])
# Make sure both inputs are in the batcher before starting it.
time.sleep(_SLEEP_TIME)
| tensorflow.tile | 13,147 |
import tensorflow as tf
net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
| tensorflow.random_uniform | 13,148 |
import tensorflow as tf
except EOFError:
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model((chars, length), training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
| tensorflow.identity | 13,149 |
import tensorflow as tf
W_d = W_d[input_idx]
W_p = self._make_var('W_p', (ni, 1, 1, ch, ch))
W_p = W_p[input_idx]
batch_norm_offset = self._make_var('batch_norm_offset', (ni, ch), init_constant=0)
batch_norm_offset = batch_norm_offset[input_idx]
batch_norm_scale = self._make_var('batch_norm_scale', (ni, ch), init_constant=1)
batch_norm_scale = batch_norm_scale[input_idx]
X = self._do_separable_conv(X, w, h, ch, filter_size=filter_size, stride=stack_stride,
W_d=W_d, W_p=W_p, no_batch_norm=True)
X = self._add_batch_norm(X, ch, offset=batch_norm_offset, scale=batch_norm_scale,
no_moving_average=is_dynamic, is_train=is_train)
X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check
return X
####################################
# Utils
####################################
def _do_cutout(self, image, im_width, im_height, cutout_size):
mask = tf.ones([cutout_size, cutout_size], dtype=tf.int32)
start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32)
start_y = tf.random.uniform(shape=(1,), minval=0, maxval=im_height, dtype=tf.int32)
mask = tf.pad(mask, [[cutout_size + start_y[0], im_height - start_y[0]],
[cutout_size + start_x[0], im_width - start_x[0]]])
| tensorflow.reshape | 13,150 |
import tensorflow as tf
else:
ac_regularizers = [tf.zeros([1])]
| tensorflow.zeros | 13,151 |
from tensorflow.python.framework import tensor_util
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([mu], [sigma],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([[mu]], [[sigma]],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = tf.placeholder(dtype=tf.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None))
self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None),
| tensorflow.python.framework.tensor_util.constant_value | 13,152 |
import tensorflow as tf
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
| tensorflow.gfile.GFile | 13,153 |
import tensorflow as tf
with tf.variable_scope('Actor'):
# input s, output a
self.a = self._build_net(S, scope='eval_net', trainable=True)
# input s_, output a, get a_ for critic
self.a_ = self._build_net(S_, scope='target_net', trainable=False)
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
def _build_net(self, s, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
net = tf.layers.dense(s, 500, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l1', trainable=trainable)
net = tf.layers.dense(net, 200, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('a'):
actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
bias_initializer=init_b, name='a', trainable=trainable)
scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a') # Scale output to -action_bound to action_bound
| tensorflow.variable_scope | 13,154 |
import tensorflow as tf
os.mkdir(weights_dir + '/best_models')
# Create a saver.
saver = tf.train.Saver(max_to_keep=None)
if self.is_summary:
training_batch_summary_op = tf.merge_all_summaries(key=TRAINING_BATCH_SUMMARIES)
| tensorflow.train.Saver | 13,155 |
import tensorflow as tf
cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
state_is_tuple=True)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
| tensorflow.nn.rnn | 13,156 |
import tensorflow as tf
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp_dict = {}
dec_inp_dict["0"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec_inp_dict["1"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(4)]
dec_symbols_dict = {"0": 5, "1": 6}
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
| tensorflow.constant_initializer | 13,157 |
import tensorflow as tf
print(cm)
print('------valid_confusion_matrix-----')
coord.request_stop()
coord.join(threads)
def predict_time(loop=100):
feed_dict={
testnum:1
}
with tf.Session(config=config) as sess:
sess.run(init)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
tf.train.Saver().restore(sess,path)
total=0.0
for i in range(loop):
a = datetime.now()
accuracy_np = sess.run([accuracy],feed_dict=feed_dict)
b = datetime.now()
c = (b - a).microseconds
total+=c
print('predict_time(ms): ',total/(loop*1000))
coord.request_stop()
| tensorflow.train.Coordinator | 13,158 |
import tensorflow as tf
ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY)
variables_to_restore = ema.variables_to_restore()
self.load_ema = tf.contrib.framework.assign_from_checkpoint_fn(
tf.train.latest_checkpoint(self.model_dir), variables_to_restore
)
def after_create_session(self, sess, coord):
tf.logging.info('Loading EMA weights...')
self.load_ema(sess)
| tensorflow.logging.info | 13,159 |
import tensorflow as tf
self._lr = lr
self._lr_summary = tf.summary.scalar('learning_rate', self._lr)
tvars = tf.trainable_variables()
grads = tf.gradients(avg_neg_log_lhood, tvars)
if grad_clip > 0.0:
grads, _ = tf.clip_by_global_norm(grads, grad_clip)
if opt == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(lr)
else:
raise NotImplementedError()
train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=tf.contrib.framework.get_or_create_global_step())
# Tensor exports
self.feats_audio = feats_audio_nunroll
self.feats_other = feats_other_nunroll
if export_feat_name:
self.feats_export = export_feat_tensors[export_feat_name]
self.prediction = prediction_inspect
self.prediction_final = prediction_final
if mode != 'gen':
self.neg_log_lhoods = neg_log_lhoods_inspect
self.avg_neg_log_lhood = avg_neg_log_lhood
self.targets = targets_nunroll
| tensorflow.contrib.framework.get_or_create_global_step | 13,160 |
import tensorflow as tf
# For each timestamp, its vector of size A from `tmp` is reduced with the `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
#output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
output = facts * tf.expand_dims(alphas, -1)
output = tf.reshape(output, tf.shape(facts))
# output = output / (facts.get_shape().as_list()[-1] ** 0.5)
if not return_alphas:
return output
else:
return output, alphas
def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
| tensorflow.expand_dims | 13,161 |
import tensorflow as tf
graph_results = _train_step(inputs, labels, training=True)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
restore_op = list()
for i, embedding_weight in enumerate(tf_sparse_demo.embedding_weights):
restore_op.append(embedding_weight.assign(tf.concat(init_tensors[i], axis=0)))
emb_values = list()
for embedding_weight in tf_sparse_demo.embedding_weights:
if args.save_params:
filepath = r"./embedding_variables/"
| tensorflow.concat | 13,162 |
import tensorflow as tf
for layer in layers:
layer_wo_bos_eos = layer[:, 1:, :]
layer_wo_bos_eos = tf.reverse_sequence(
layer_wo_bos_eos,
| tensorflow.reverse_sequence | 13,163 |
import tensorflow as tf
"""
Train RNN graph
"""
def train_rnn(raw_data_x, raw_data_y, val_data_x, val_data_y,g, num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob, epoch_before_val = 50, max_checks_without_progress=50,epoch_overlap=None, verbose=True, save=False):
with tf.Session() as sess:
"initialize the variables"
sess.run(tf.global_variables_initializer())
raw_data_yp = np.insert(raw_data_y,0,0,axis=0)[:-1]
val_data_yp = np.insert(val_data_y,0,0,axis=0)[:-1]
"see the trainable variables"
# print("The trainable variables are:")
variable_names = [v.name for v in tf.trainable_variables()]
variable_shapes = [v.get_shape() for v in tf.trainable_variables()]
parameter_num = 0
| tensorflow.global_variables_initializer | 13,164 |
import tensorflow as tf
# def sample_compute(i):
# batch1 = tf.gather(batch, tf.random.shuffle(index))
# batch2 = tf.gather(batch, tf.random.shuffle(index))
# pred1 = tf.slice(batch1, [0, 0], [num_sam, 1])
# pred2 = tf.slice(batch2, [0, 0], [num_sam, 1])
# tgt1 = tf.slice(batch1, [0, 1], [num_sam, 1])
# tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1])
# loss = compute_contra_loss(pred1, pred2, tgt1, tgt2)
# print(loss)
# return loss
i = tf.constant(0)
loss = tf.constant(0.)
final_loss = tf.while_loop(lambda l, i: i < resample, sample_compute, [loss, i])[0]
# final_loss = tf.scan(sample_compute, tf.range(resample), loss)[-1]
# final_loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems= tf.range(resample), dtype=tf.float32, parallel_iterations=1)
# print('final', final_loss)
# final_loss = loss
avg_loss = tf.reduce_mean(final_loss) / divider
# p = tf.print('cur_loss', [final_loss, avg_loss])
# with tf.control_dependencies([p]):
# avg_loss = tf.identity(avg_loss)
| tensorflow.constant | 13,165 |
import tensorflow as tf
(
assignment_map,
initialized_variable_names,
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
| tensorflow.train.init_from_checkpoint | 13,166 |
import tensorflow as tf
def gradsafe_sqrt(x, clip_low=1e-18, name=None):
with tf.name_scope(name, "gradsafe_sqrt"):
return tf.sqrt(tf.clip_by_value(x, clip_low, x))
def argus_integral_phalf(m_low, m_high, m0, c):
"""
Only valid for argus_pdf with p=0.5! Otherwise need to do numerical
integral.
"""
def F(m_bound, name=None):
with tf.name_scope(name, "argus_integral_phalf_primitive"):
a = tf.minimum(m_bound, m0)
x = 1 - tf.pow(a / m0, 2)
primitive = -0.5 * m0 * m0 * (tf.exp(c * x) * tf.sqrt(x) / c + 0.5 / tf.pow(-c, 1.5) * tf.sqrt(pi) * tf.erf(gradsafe_sqrt(-c * x)))
# We have to safeguard the sqrt, because otherwise the analytic
# derivative blows up for x = 0
return primitive
area = tf.sub(F(m_high, name="F2"), F(m_low, name="F1"), name="argus_integral_phalf")
return area
def argus_pdf_phalf_WN(m, m0, c, m_low, m_high):
| tensorflow.name_scope | 13,167 |
import tensorflow as tf
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss",
simple_value=stop_token_loss),
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss", simple_value=loss),
]
if linear_loss is not None:
values.append(tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss",
simple_value=linear_loss))
test_summary = tf.Summary(value=values)
summary_writer.add_summary(test_summary, step)
def time_string():
return datetime.now().strftime("%Y-%m-%d %H:%M")
| tensorflow.Summary | 13,168 |
import tensorflow as tf
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
| tensorflow.layers.dense | 13,169 |
import tensorflow as tf
return_logits=True)
total_loss = tf.reduce_mean(per_example_loss)
return total_loss, per_example_loss, logits
def get_regression_loss(
FLAGS, features, is_training):
"""Loss for downstream regression tasks."""
bsz_per_core = tf.shape(features["input_ids"])[0]
inp = tf.transpose(features["input_ids"], [1, 0])
seg_id = tf.transpose(features["segment_ids"], [1, 0])
inp_mask = tf.transpose(features["input_mask"], [1, 0])
label = tf.reshape(features["label_ids"], [bsz_per_core])
xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
run_config = xlnet.create_run_config(is_training, True, FLAGS)
xlnet_model = xlnet.XLNetModel(
xlnet_config=xlnet_config,
run_config=run_config,
input_ids=inp,
seg_ids=seg_id,
| tensorflow.transpose | 13,170 |
from tensorflow.python.framework import ops
with ops.op_scope([x], name, "Tanh") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._tanh(x, name=name)
ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
| tensorflow.python.framework.ops.RegisterShape | 13,171 |
import tensorflow as tf
hyper_decoder=hyper_decoder,
estimator=entropy_bottleneck)
status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))
x = tf.convert_to_tensor(x_color, "float32")
x_coori = tf.convert_to_tensor(x_coori, "float32")
def loop_analysis(element):
x = tf.expand_dims(element[0], 0)
x_coori = tf.expand_dims(element[1], 0)
y = analysis_transform(x_coori,x)
return tf.squeeze(y,axis=0)
element = [x,x_coori]
ys = tf.map_fn(loop_analysis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False)
print("Analysis Transform")
def loop_hyper_encoder(y):
| tensorflow.expand_dims | 13,172 |
import tensorflow as tf
self.value, self.next_loc_mean, self.loc_std, self.next_loc, self.state_out, self.state_in, self.state_init = self._build_net(self.inputs, self.prev_loc, RNN_SIZE, TRAINING, a_size) # self.goal_pos
if TRAINING:
self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.sampled_next_locs = tf.placeholder(tf.float32, [None,2]) # sampled action is stored here
self.policy = gaussian_pdf(self.next_loc_mean, self.loc_std, self.sampled_next_locs) # Distribution == Policy
# Loss Functions
self.value_loss = 0.5*tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, shape=[-1])))
# H(x) = Sum[p(x)*log(p(x))]
self.entropy = - 0.01 * tf.reduce_sum(self.policy * tf.log(tf.clip_by_value(self.policy,1e-10,1.0)))
self.policy_loss = - 0.2 * tf.reduce_sum( tf.log(tf.clip_by_value(self.policy[:,0],1e-15,1.0)) * self.advantages + tf.log(tf.clip_by_value(self.policy[:,1],1e-15,1.0)) * self.advantages)
#For Normal RL Part
self.loss = self.value_loss + self.policy_loss - self.entropy # removed self.blocking_loss, valid_loss, discrete_policy _loss #+ 0.5*self.mypos_loss + 0.5*self.goalpos_loss
#For Imitation Learning Part
# self.bc_loss = 0.5 * tf.reduce_mean(tf.contrib.keras.backend.categorical_crossentropy(self.optimal_actions_onehot,self.policy))
# self.next_loc_loss_il = 0.2 * tf.reduce_sum(tf.sqrt(tf.square(self.next_loc_mean[:-1,:] - self.il_nextloc)))
# self.imitation_loss = self.bc_loss #+ self.next_loc_loss_il
# Get gradients from local network using local losses and
# normalize the gradients using clipping
| tensorflow.clip_by_value | 13,173 |
import tensorflow as tf
cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
else:
cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
| tensorflow.cast | 13,174 |
import tensorflow as tf
elif validate_args:
assertions += [tf.compat.v1.assert_rank(rightmost_transposed_ndims, 0)]
rightmost_transposed_ndims_ = tf.get_static_value(
rightmost_transposed_ndims)
msg = '`rightmost_transposed_ndims` must be non-negative.'
| tensorflow.get_static_value | 13,175 |
import tensorflow as tf
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
| tensorflow.train.init_from_checkpoint | 13,176 |
import tensorflow as tf
tf.unique(self.head_input)[0],
self.maxnorm)
rel_constraint = self._norm_constraint_op(self.rel_embedding_vars,
tf.unique(self.rel_input)[0],
self.maxnorm)
tail_constraint = self._norm_constraint_op(self.tail_embedding_vars,
tf.unique(self.tail_input)[0],
self.maxnorm)
self.post_step = [head_constraint, rel_constraint, tail_constraint]
def _create_batch_provider(self, train):
# CP treats head and tail entities separately
| tensorflow.unique | 13,177 |
import tensorflow as tf
@staticmethod
def seq_length(data):
used = tf.sign(tf.reduce_max(tf.abs(data), axis=2))
length = tf.reduce_sum(used, axis=1)
| tensorflow.abs | 13,178 |
import tensorflow as tf
self.U0_pred = self.net_U0(self.x0_tf) # N0 x q
self.U1_pred = self.net_U1(self.x1_tf) # N1 x q
self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \
tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred))
self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol' : 1.0 * np.finfo(float).eps})
self.optimizer_Adam = tf.train.AdamOptimizer()
self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)
init = tf.global_variables_initializer()
self.sess.run(init)
def initialize_NN(self, layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0,num_layers-1):
W = self.xavier_init(size=[layers[l], layers[l+1]])
b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
weights.append(W)
| tensorflow.train.AdamOptimizer | 13,179 |
import tensorflow as tf
class Seq2SeqTest(tf.test.TestCase):
def testRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
_, enc_state = tf.nn.rnn(
tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
| tensorflow.constant | 13,180 |
import tensorflow as tf
# scalar
with self.test_session():
x = tf.constant(1.0, tf.float32)
y = tf.constant(2.0, tf.float32)
z = tf.py_func(my_func, [x, y], [tf.float32])
self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))
# array
with self.test_session():
x = tf.constant([1.0, 2.0], tf.float64)
y = tf.constant([2.0, 3.0], tf.float64)
z = tf.py_func(my_func, [x, y], [tf.float64])
self.assertAllEqual(
z[0].eval(),
my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
# a bit exotic type (complex64)
with self.test_session():
x = tf.constant(1+2j, tf.complex64)
y = tf.constant(3+4j, tf.complex64)
z, = tf.py_func(my_func, [x, y], [tf.complex64])
self.assertAllClose(z.eval(), my_func(1+2j, 3+4j))
| tensorflow.py_func | 13,181 |
import tensorflow as tf
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_integer("hooking_frequence", 100, "Hooking frequence.")
| tensorflow.flags.DEFINE_string | 13,182 |
import tensorflow as tf
name='linear_projection_1'
)
projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train,
name='linear_projection_1_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
| tensorflow.nn.relu | 13,183 |
import tensorflow as tf
            # Gain bias
            bias_shape = [1, 1, 1, 1, self.hgru_k[idx]]
            if self.gate_bias_init == 'chronos':
                bias_init = -tf.log(
                    tf.random_uniform(
                        bias_shape,
                        minval=1,
                        maxval=self.timesteps - 1,
                        dtype=self.dtype))
            else:
                bias_init = tf.ones(bias_shape, dtype=self.dtype)
            setattr(
                self,
                'gain_bias_%s' % layer,
                tf.get_variable(
                    name='%s_gain_bias' % self.layer_name,
                    dtype=self.dtype,
                    trainable=True,
                    initializer=bias_init))
            if self.gate_bias_init == 'chronos':
                bias_init = -bias_init
| tensorflow.ones | 13,184 |
import tensorflow as tf
            if do_norm:
                conv = tf.layers.batch_normalization(conv, momentum=0.9)

            if activation_function == "relu":
                conv = tf.nn.relu(conv, name = 'relu')

            if activation_function == "leakyrelu":
                conv = tf.nn.leaky_relu(conv, alpha=relu_factor)

            if activation_function == "elu":
                conv = tf.nn.elu(conv, name = 'elu')

            return conv

    def general_deconv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm = True, relu_factor = 0, name="deconv2d"):
        with tf.variable_scope(name):
            deconv = tf.layers.conv2d_transpose(input_data, filters, kernel_size, (stride, stride), padding, activation = None)

            if do_norm:
                deconv = tf.layers.batch_normalization(deconv, momentum = 0.9)
| tensorflow.nn.elu | 13,185 |
import tensorflow as tf
def add_train_stats(model, hparams):
    with tf.variable_scope("stats") as scope:
        for i in range(hparams.tacotron_num_gpus):
            tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
            tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])

        tf.summary.scalar("before_loss", model.before_loss)
        tf.summary.scalar("after_loss", model.after_loss)

        if hparams.predict_linear:
            tf.summary.scalar("linear_loss", model.linear_loss)

        for i in range(hparams.tacotron_num_gpus):
| tensorflow.summary.scalar | 13,186 |
from tensorflow.python.ops import math_ops
  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    # For SparseTensor, calculate separate count for each row.
    if isinstance(labels, (ops.SparseTensor, ops.SparseTensorValue)):
      labels_sizes = set_ops.set_size(labels)
      return math_ops.minimum(labels_sizes, k, name=scope)

    # For dense Tensor, calculate scalar count based on last dimension, and
    # tile across labels shape.
    labels_shape = array_ops.shape(labels)
    labels_size = labels_shape[-1]
    num_relevant_scalar = math_ops.minimum(labels_size, k)
    return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)


def expand_and_tile(tensor, multiple, dim=0, name=None):
  """Slice `tensor` shape in 2, then tile along the sliced dimension.
| tensorflow.python.ops.math_ops.minimum | 13,187 |
import tensorflow as tf
    # added.
    self.assertGreaterEqual(.5, duration.total_seconds())
    self.assertEqual(2, batch_size)

  def test_maximum_batch_size(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn_with_options(maximum_batch_size=2)
      def f(a, b):
        batch_size = tf.shape(a)[0]
        return a + b, tf.tile([batch_size], [batch_size])

      outputs = [
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
      ]

      tf.train.start_queue_runners()
      results = session.run(outputs)

      for value, batch_size in results:
| tensorflow.constant | 13,188 |
import tensorflow as tf
    x = tf.image.random_saturation(x, lower=lower, upper=upper)
    x = tf.image.random_hue(x, max_delta=0.2*s)
    x = tf.clip_by_value(x, 0, 1)
    return x


def color_drop(image):
  image = tf.image.rgb_to_grayscale(image)
  image = tf.tile(image, [1, 1, 1, 3])
  return image


# pylint: disable=not-callable
@gin.configurable(blacklist=["kwargs"])
class CLGAN(modular_gan.ModularGAN):
  """Self-Supervised GAN with Contrastive Loss"""
| tensorflow.tile | 13,189 |
import tensorflow as tf
observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
| tensorflow.constant_initializer | 13,190 |
import tensorflow as tf
tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every')
tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization')
tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ')
tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information')
| tensorflow.app.flags.DEFINE_boolean | 13,191 |
import tensorflow as tf
        in0 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
                             "INPUT0")
        in1 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
                             "INPUT1")
    else:
        in0 = tf.placeholder(tf_input_dtype, [
            None,
        ] + tu.shape_to_tf_shape(input_shape), "INPUT0")
        in1 = tf.placeholder(tf_input_dtype, [
            None,
        ] + tu.shape_to_tf_shape(input_shape), "INPUT1")

    # If the input is a string, then convert each string to the
    # equivalent int32 value.
    if tf_input_dtype == tf.string:
        in0 = tf.strings.to_number(in0, tf.int32)
        in1 = tf.strings.to_number(in1, tf.int32)

    add = tf.add(in0, in1, "ADD")
    sub = tf.subtract(in0, in1, "SUB")

    # Cast or convert result to the output dtype.
    if tf_output0_dtype == tf.string:
        cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
    else:
        cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
    if tf_output1_dtype == tf.string:
        cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
    else:
| tensorflow.strings.to_number | 13,192 |
import tensorflow as tf
tf.app.flags.DEFINE_float('ws_iter_ratio_beg', 0.1, 'WS: iteration ratio (at starting time)')
tf.app.flags.DEFINE_float('ws_iter_ratio_end', 0.5, 'WS: iteration ratio (at ending time)')
tf.app.flags.DEFINE_float('ws_mask_update_step', 500, 'WS: step size for updating the pruning mask')
def calc_prune_ratio(vars_list):
  """Calculate the overall pruning ratio for the given list of variables.

  Args:
  * vars_list: list of variables

  Returns:
  * prune_ratio: overall pruning ratio of the given list of variables
  """

  nb_params_nnz = tf.add_n([tf.count_nonzero(var) for var in vars_list])
  nb_params_all = tf.add_n([tf.size(var) for var in vars_list])
  prune_ratio = 1.0 - tf.cast(nb_params_nnz, tf.float32) / tf.cast(nb_params_all, tf.float32)

  return prune_ratio


class WeightSparseLearner(AbstractLearner):  # pylint: disable=too-many-instance-attributes
  """Weight sparsification learner."""

  def __init__(self, sm_writer, model_helper):
    """Constructor function.

    Args:
    * sm_writer: TensorFlow's summary writer
    * model_helper: model helper with definitions of model & dataset
    """
| tensorflow.cast | 13,193 |
import tensorflow as tf
  features = []
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)
    features.append(feature)
  return features


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
      "tag": CommentsTagsProcessor,
      "segtag": SegmentedCommentsTagsProcessor
  }

  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)
| tensorflow.logging.set_verbosity | 13,194 |
import tensorflow as tf
        return tags

    def id2tag(self, pred_ids, name=None):
        mapping_strings = self.load_tag_data()
        reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(
            mapping_strings, name=name
        )
        pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
| tensorflow.contrib.lookup.index_to_string_table_from_tensor | 13,195 |
import tensorflow as tf
tf.GraphKeys.GLOBAL_VARIABLES],
initializer=tf.ones_initializer(),
| tensorflow.ones_initializer | 13,196 |
import tensorflow as tf
  shortcut = inputs
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)

  # The projection shortcut should come after the first batch norm and ReLU
  # since it performs a 1x1 convolution.
  if projection_shortcut is not None:
    shortcut = projection_shortcut(inputs)

  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=1,
      data_format=data_format)

  return inputs + shortcut


def _bottleneck_block_v1(inputs, filters, training, projection_shortcut,
                         strides, data_format):
  """A single block for ResNet v1, with a bottleneck.

  Similar to _building_block_v1(), except using the "bottleneck" blocks
  described in:
| tensorflow.nn.relu | 13,197 |
import tensorflow as tf
# initialize uninitialized vars (only initialize vars that were not loaded)
uninit_vars = [var for var in tf.global_variables() if not sess.run(tf.is_variable_initialized(var))]
| tensorflow.global_variables | 13,198 |
import tensorflow as tf
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())

      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)

    # Start a second session. In that session the parameter nodes
    # have not been initialized either.
    with self.test_session() as sess:
      v0 = tf.Variable(-1.0, name="v0")
      v1 = tf.Variable(-1.0, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1})

      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v0" in e.message):
        sess.run(v0)
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)

      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)

      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
| tensorflow.Variable | 13,199 |