seed | seed_api | index
---|---|---
stringlengths 25–2.89k | stringlengths 14–102 | int64 0–14.8k
import tensorflow as tf
# the character embeddings
with tf.device("/cpu:0"):
self.embedding_weights = tf.get_variable(
"char_embed", [n_chars, char_embed_dim],
dtype=DTYPE,
initializer=tf.random_uniform_initializer(-1.0, 1.0)
)
# shape (batch_size, unroll_steps, max_chars, embed_dim)
self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights,
self.ids_placeholder)
# the convolutions
def make_convolutions(inp):
with tf.variable_scope('CNN') as scope:
convolutions = []
for i, (width, num) in enumerate(filters):
if cnn_options['activation'] == 'relu':
| tensorflow.nn.embedding_lookup | 11,800 |
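A minimal, runnable sketch of the seed API `tf.nn.embedding_lookup` (TF 1.x graph mode; the table size and ids below are made up for illustration):

```python
import tensorflow as tf

# A toy embedding table: 10 tokens, 4-dimensional embeddings.
embedding_weights = tf.get_variable(
    "toy_embed", [10, 4],
    initializer=tf.random_uniform_initializer(-1.0, 1.0))
ids = tf.constant([[1, 2], [3, 4]])                        # (batch_size, seq_len)
embedded = tf.nn.embedding_lookup(embedding_weights, ids)  # (2, 2, 4)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(embedded).shape)  # (2, 2, 4)
```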
import tensorflow as tf
dtype=tf.int64,
initializer=tf.initializers.zeros(),
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES])
return global_step
def get_src_train_op(loss): # pylint: disable=unused-argument
"""Returns the source training op."""
global_step = tf.train.get_global_step()
src_learning_rate = FLAGS.learning_rate
src_learning_rate = tf.train.piecewise_constant(
global_step, [800,],
[FLAGS.learning_rate, FLAGS.learning_rate * 0.1])
optimizer = tf.train.MomentumOptimizer(
learning_rate=src_learning_rate,
momentum=0.9,
use_nesterov=True
)
with tf.variable_scope('src'):
return optimizer.minimize(loss, global_step), src_learning_rate
| tensorflow.train.piecewise_constant | 11,801 |
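A small sketch of how `tf.train.piecewise_constant` maps the global step to a stepped learning rate, as in the snippet above (boundary and values are illustrative):

```python
import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
# 0.1 until step 800, then 0.01 afterwards.
learning_rate = tf.train.piecewise_constant(
    global_step, boundaries=[800], values=[0.1, 0.01])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(learning_rate))            # 0.1 at step 0
    sess.run(tf.assign(global_step, 1000))
    print(sess.run(learning_rate))            # 0.01 past the boundary
```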
import tensorflow as tf
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
| tensorflow.reduce_sum | 11,802 |
import tensorflow as tf
self._final_state_name = self.with_prefix(self._name, 'final')
for state_tuple in self._initial_state:
tf.add_to_collection(self._initial_state_name, state_tuple.c)
tf.add_to_collection(self._initial_state_name, state_tuple.h)
for state_tuple in self._final_state:
tf.add_to_collection(self._final_state_name, state_tuple.c)
tf.add_to_collection(self._final_state_name, state_tuple.h)
def import_state_tuples(self, state_tuples, name, num_replicas):
| tensorflow.add_to_collection | 11,803 |
import tensorflow as tf
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
#output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
output = facts * tf.expand_dims(alphas, -1)
output = tf.reshape(output, tf.shape(facts))
# output = output / (facts.get_shape().as_list()[-1] ** 0.5)
| tensorflow.where | 11,804 |
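The masking trick above — replace padded positions with a huge negative number via `tf.where` so the softmax assigns them ~0 probability — in a minimal, self-contained form (shapes are illustrative):

```python
import tensorflow as tf

scores = tf.constant([[2.0, 1.0, 3.0]])        # (B, T) attention logits
mask = tf.constant([[True, True, False]])      # False marks padding
paddings = tf.ones_like(scores) * (-2 ** 32 + 1.0)
masked = tf.where(mask, scores, paddings)
alphas = tf.nn.softmax(masked)                 # padded position -> ~0

with tf.Session() as sess:
    print(sess.run(alphas))
```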
import tensorflow as tf
embedding = config.embeddings.add()
# Specifiy the embedding variable and the metadata
embedding.tensor_name = embedding_name
embedding.metadata_path = path_to_meta
# Project the embeddings to space dimensions for visualization
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)
def add_train_stats(model, hparams):
with tf.variable_scope("stats") as scope:
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tf.summary.scalar("before_loss", model.before_loss)
tf.summary.scalar("after_loss", model.after_loss)
if hparams.predict_linear:
tf.summary.scalar("linear_loss", model.linear_loss)
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])
tf.summary.scalar("regularization_loss", model.regularization_loss)
tf.summary.scalar("stop_token_loss", model.stop_token_loss)
tf.summary.scalar("loss", model.loss)
| tensorflow.summary.histogram | 11,805 |
import tensorflow as tf
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
| tensorflow.contrib.cluster_resolver.TPUClusterResolver | 11,806 |
import tensorflow as tf
with tf.name_scope('BoundingBoxTransform/clip_bboxes'):
bboxes = tf.cast(bboxes, dtype=tf.float32)
imshape = tf.cast(imshape, dtype=tf.float32)
x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
width = imshape[1]
height = imshape[0]
x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)
x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)
y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)
y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)
bboxes = tf.concat([x1, y1, x2, y2], axis=1)
| tensorflow.minimum | 11,807 |
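Clamping each coordinate with nested `tf.minimum`/`tf.maximum`, as above, pins boxes inside the image; a tiny sketch with a made-up box and image size:

```python
import tensorflow as tf

bboxes = tf.constant([[-5.0, 10.0, 120.0, 90.0]])   # x1, y1, x2, y2
width, height = 100.0, 80.0
x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)   # clamp into [0, W-1]
x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)
y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)  # clamp into [0, H-1]
y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)
clipped = tf.concat([x1, y1, x2, y2], axis=1)

with tf.Session() as sess:
    print(sess.run(clipped))   # [[ 0. 10. 99. 79.]]
```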
import tensorflow as tf
"""
Build the model, including the independent variables, the dependent variable and the loss function.
Args
----
dimension : int, the number of independent variables
Returns
----
model : dict, containing the model parameters, loss function, independent variables and dependent variable
"""
np.random.seed(1024)
# Define the independent and dependent variables
x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')
## Writing the target values in matrix form greatly speeds up computation
y = tf.placeholder(tf.float64, shape=[None, 1], name="y")
# Define the parameter estimates and the predictions
betaPred = tf.Variable(np.random.random([dimension, 1]))
yPred = tf.matmul(x, betaPred, name="y_pred")
# Define the loss function
loss = tf.reduce_mean(tf.square(yPred - y))
model = {"loss_function": loss, "independent_variable": x,
"dependent_variable": y, "prediction": yPred, "model_params": betaPred}
return model
| tensorflow.placeholder | 11,808 |
import tensorflow as tf
Args:
boxlist1: Nx4 floatbox
boxlist2: Mx4
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1)
x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
| tensorflow.split | 11,809 |
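Completing the computation above: pairwise intersection areas come from multiplying the clamped height and width overlaps. A minimal sketch with two tiny box lists:

```python
import tensorflow as tf

boxlist1 = tf.constant([[0.0, 0.0, 2.0, 2.0]])        # N x 4 (x1, y1, x2, y2)
boxlist2 = tf.constant([[1.0, 1.0, 3.0, 3.0],
                        [5.0, 5.0, 6.0, 6.0]])        # M x 4
x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1)
x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1)
# (N, 1) against (1, M) broadcasts to an (N, M) grid of overlaps.
intersect_heights = tf.maximum(
    0.0, tf.minimum(y_max1, tf.transpose(y_max2)) -
    tf.maximum(y_min1, tf.transpose(y_min2)))
intersect_widths = tf.maximum(
    0.0, tf.minimum(x_max1, tf.transpose(x_max2)) -
    tf.maximum(x_min1, tf.transpose(x_min2)))
intersections = intersect_heights * intersect_widths  # [N, M]

with tf.Session() as sess:
    print(sess.run(intersections))   # [[1. 0.]]
```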
import tensorflow as tf
l1=tf.nn.relu(l1)
l2 = tf.matmul(l1, self.w2)+self.b2
| tensorflow.matmul | 11,810 |
import tensorflow as tf
name="divisbility_condition"
)
less_than_condition = tf.less(
x=cur_batch_size, y=group_size, name="less_than_condition"
)
| tensorflow.less | 11,811 |
import tensorflow as tf
global_step=tf.train.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name='new_learning_rate')
| tensorflow.placeholder | 11,812 |
import tensorflow as tf
self.n_envs * (self.n_steps + 1), reuse=True,
**self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
self.done_ph = tf.placeholder(tf.float32, [self.n_batch]) # dones
self.reward_ph = tf.placeholder(tf.float32, [self.n_batch]) # rewards, not returns
self.mu_ph = tf.placeholder(tf.float32, [self.n_batch, self.n_act]) # mu's
self.action_ph = train_model.pdtype.sample_placeholder([self.n_batch])
self.learning_rate_ph = tf.placeholder(tf.float32, [])
eps = 1e-6
| tensorflow.placeholder | 11,813 |
import tensorflow as tf
# Model output
raw_output = tf.reduce_sum(tf.multiply(tf.multiply(head_embed, rel_embed), tail_embed), 1)
self.output, self.loss = self._create_output_and_loss(raw_output)
# Optimization
self.train_step = self.opt.minimize(self.loss)
if self.maxnorm is not None:
# Post-processing to limit embedding vars to L2 ball
head_constraint = self._norm_constraint_op(self.head_embedding_vars,
tf.unique(self.head_input)[0],
self.maxnorm)
rel_constraint = self._norm_constraint_op(self.rel_embedding_vars,
tf.unique(self.rel_input)[0],
self.maxnorm)
tail_constraint = self._norm_constraint_op(self.tail_embedding_vars,
tf.unique(self.tail_input)[0],
self.maxnorm)
self.post_step = [head_constraint, rel_constraint, tail_constraint]
def _create_batch_provider(self, train):
# CP treats head and tail entities separately
return ContrastiveTrainingProvider(train,
self.batch_pos_cnt,
| tensorflow.unique | 11,814 |
import tensorflow as tf
if submit:
teX, teM = transform_roc(teX1, teX2, teX3)
n_train = len(trY)
n_valid = len(vaY)
n_batch_train = n_batch*n_gpu
n_updates_total = (n_train//n_batch_train)*n_iter
X_train = tf.placeholder(tf.int32, [n_batch_train, 2, n_ctx, 2])
M_train = tf.placeholder(tf.float32, [n_batch_train, 2, n_ctx])
X = tf.placeholder(tf.int32, [None, 2, n_ctx, 2])
M = tf.placeholder(tf.float32, [None, 2, n_ctx])
Y_train = tf.placeholder(tf.int32, [n_batch_train])
Y = tf.placeholder(tf.int32, [None])
train, logits, clf_losses, lm_losses = mgpu_train(X_train, M_train, Y_train)
clf_loss = tf.reduce_mean(clf_losses)
| tensorflow.placeholder | 11,815 |
import tensorflow as tf
return tf.transpose(a=x, perm=perm)
def _maybe_validate_rightmost_transposed_ndims(
rightmost_transposed_ndims, validate_args, name=None):
"""Checks that `rightmost_transposed_ndims` is valid."""
with tf.name_scope(name, 'maybe_validate_rightmost_transposed_ndims',
[rightmost_transposed_ndims]):
assertions = []
if not rightmost_transposed_ndims.dtype.is_integer:
raise TypeError('`rightmost_transposed_ndims` must be integer type.')
| tensorflow.name_scope | 11,816 |
import tensorflow as tf
config = tf.ConfigProto()
config.allow_soft_placement = True
sess_soft = tf.Session(config=config)
| tensorflow.Session | 11,817 |
import tensorflow as tf
embed_inputs = tf.nn.embedding_lookup(self.embedding_init, self.x) ## (batch_size, seq_len, 100)
with tf.variable_scope('hidden', reuse=forward_only):
with tf.variable_scope('lstm_cell'):
| tensorflow.variable_scope | 11,818 |
import tensorflow as tf
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
tf.logging.info("***** Predict results *****")
for prediction in result:
output_line = "\t".join(
str(class_probability) for class_probability in prediction) + "\n"
| tensorflow.gfile.GFile | 11,819 |
from tensorflow.python.ops import math_ops
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.op_scope([x, weights, biases], name, "xw_plus_b_v1") as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add_v1(mm, biases, name=name)
# pylint: disable=invalid-name
def dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
"""Computes dropout.
| tensorflow.python.ops.math_ops.matmul | 11,820 |
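The internal `xw_plus_b_v1` helper above is just a matmul followed by a bias add; a minimal public-API sketch:

```python
import tensorflow as tf

x = tf.ones([2, 3])        # batch x in_units
weights = tf.ones([3, 4])  # in_units x out_units
biases = tf.zeros([4])
# Dimensions typically: batch, out_units.
y = tf.nn.bias_add(tf.matmul(x, weights), biases)
```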
from tensorflow.python.ops import math_ops
random_tensor += random_ops.random_uniform(
noise_shape, seed=seed, dtype=x.dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = x * math_ops.inv(keep_prob) * binary_tensor
ret.set_shape(x.get_shape())
| tensorflow.python.ops.math_ops.floor | 11,821 |
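The floor trick above builds the 0/1 dropout mask: uniform noise shifted into [keep_prob, 1 + keep_prob) floors to 1 with probability keep_prob and to 0 otherwise, and survivors are scaled by 1/keep_prob (inverted dropout). A sketch of the same idea using only public TF 1.x ops:

```python
import tensorflow as tf

def simple_dropout(x, keep_prob, seed=None):
    # Uniform in [keep_prob, 1 + keep_prob); floor gives 1 w.p. keep_prob.
    random_tensor = keep_prob + tf.random_uniform(
        tf.shape(x), seed=seed, dtype=x.dtype)
    binary_tensor = tf.floor(random_tensor)
    # Scale survivors so the expected activation is unchanged.
    return x * (1.0 / keep_prob) * binary_tensor

x = tf.ones([2, 4])
with tf.Session() as sess:
    print(sess.run(simple_dropout(x, keep_prob=0.5)))
```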
from tensorflow.python.platform import gfile
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
| tensorflow.python.platform.gfile.Exists | 11,822 |
from tensorflow.python.eager import context
_already_logged = set()
def _eager_log(level, *args):
if context.in_eager_mode() and args in _already_logged:
return
_already_logged.add(args)
getattr(tf.logging, level)(*args)
| tensorflow.python.eager.context.in_eager_mode | 11,823 |
import tensorflow as tf
flattened_candidate_mask = tf.reshape(candidate_mask, [-1]) # [num_words * max_span_width]
candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids) # [num_candidates]
candidate_span_emb = self.get_span_emb(flattened_head_emb, context_outputs, candidate_starts, candidate_ends) # [num_candidates, emb]
candidate_mention_scores = self.get_mention_scores(candidate_span_emb) # [k, 1]
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]
k = tf.to_int32(tf.floor(tf.to_float(tf.shape(context_outputs)[0]) * self.config["top_span_ratio"]))
top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
tf.expand_dims(candidate_starts, 0),
tf.expand_dims(candidate_ends, 0),
tf.expand_dims(k, 0),
util.shape(context_outputs, 0),
True) # [1, k]
top_span_indices.set_shape([1, None])
top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]
top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]
top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]
top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]
top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]
| tensorflow.expand_dims | 11,824 |
import tensorflow as tf
total_step += 1
if Done:
break
# Print out which step we're on, useful for debugging. (average recent 10 episode scores)
if i_episode >= 10:
print("Episode {}/{} ({})".format(i_episode + 1, num_episodes, np.max(np.mean(stats["episode_rewards"][:,i_episode-10:i_episode - 1],axis=1))))
return stats
tf.reset_default_graph()
global_step = tf.Variable(0, name="global_step", trainable=False)
policy_estimator = np.zeros(n_particles, dtype=object)
value_estimator = np.zeros(n_particles, dtype=object)
# call proper policy and value estimators for each envs
for i in range(n_particles):
if ENV_NAME == "Pendulum-v0":
policy_estimator[i] = PolicyEstimator_Pendulum(entropy_beta=ENTROPY_BETA,learning_rate=POLICY_LR,par_idx=i)
| tensorflow.reset_default_graph | 11,825 |
import tensorflow as tf
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
| tensorflow.variable_scope | 11,826 |
import tensorflow as tf
batch_size = tf.shape(labels[0])[0]
return tf.reduce_sum(loss) / tf.cast(batch_size, dtypes.float32) #/ (tf.reduce_sum(propensity_weights)+1)
| tensorflow.reduce_sum | 11,827 |
import tensorflow as tf
num_pos_per_batch = tf.reduce_sum(
tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
tf.cast(num_prior, tf.float32) - 1)
mask_hard_neg = tf.reshape(
tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
[num_batch * num_prior, 1])
# 3. classification loss including positive and negative examples
loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
loss_class_mask_b = tf.broadcast_to(loss_class_mask,
tf.shape(class_pred))
filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
loss_class_mask)
filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
loss_class = tf.keras.losses.sparse_categorical_crossentropy(
y_true=filter_class_true, y_pred=filter_class_pred)
loss_class = tf.reduce_mean(loss_class)
return loss_loc, loss_landm, loss_class
return multi_box_loss
| tensorflow.shape | 11,828 |
import tensorflow as tf
# Usually we'd write np.float(X) here, but a recent Eager bug would
# erroneously coerce the value to float32 anyway. We therefore use constants
# here, until the bug is resolved in TensorFlow 1.12.
normal = tfd.Normal(loc=tf.constant(0, tf.float16),
scale=tf.constant(1, tf.float16))
self.assertEqual(
| tensorflow.constant | 11,829 |
import tensorflow as tf
# squeeze and 2d resize
squeeze_b_z = tf.reshape(reoriented, [-1, y_size_new, x_size, c_size], name='reshape_bz')
resize_b_z = tf.compat.v1.image.resize_bilinear(squeeze_b_z, [y_size_new, x_size_new], align_corners=align)
resume_b_z = tf.reshape(resize_b_z, [-1, z_size_new, y_size_new, x_size_new, c_size], name='resume_bz')
output_tensor = tf.transpose(resume_b_z, [0, 3, 2, 1, 4])
return output_tensor
def conv3d(x, kernel_size, filters, padding='SYMMETRIC', activation=None, initialization=None, use_bias=True):
| tensorflow.transpose | 11,830 |
import tensorflow as tf
(fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
inputs=current_inputs,
sequence_length=text_len,
initial_state_fw=state_fw,
initial_state_bw=state_bw)
text_outputs = tf.concat([fw_outputs, bw_outputs], 2) # [num_sentences, max_sentence_length, emb]
text_outputs = tf.nn.dropout(text_outputs, self.lstm_dropout)
if layer > 0:
highway_gates = tf.sigmoid(util.projection(text_outputs, util.shape(text_outputs, 2))) # [num_sentences, max_sentence_length, emb]
text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs
current_inputs = text_outputs
return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
| tensorflow.concat | 11,831 |
import tensorflow as tf
return z, ldj
else:
scales = tf.math.exp(-log_sigmas)
log_x = tf.math.log(x)
ldj = -log_x
log_y = (log_x - mus)*scales
| tensorflow.math.log | 11,832 |
import tensorflow as tf
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
| tensorflow.constant_initializer | 11,833 |
import tensorflow as tf
inst_weights = gather_init_weights()
bs = FLAGS.train_batch_size
hw = FLAGS.src_hw
inst_weights, indices = tf.nn.top_k(
inst_weights,
k=bs,
sorted=True,
)
src_features = tf.reshape(src_features, [
bs * FLAGS.source_train_batch_multiplier,
hw,
hw,
1,
])
src_features = tf.gather(src_features, indices, axis=0)
src_features = tf.stop_gradient(src_features)
src_labels = tf.gather(src_labels, indices)
inst_weights = bs * inst_weights / tf.reduce_sum(inst_weights)
src_one_hot_labels = tf.one_hot(tf.cast(src_labels, tf.int64), num_classes)
src_logits, dst_logits = get_model_logits(src_features, finetune_features,
mode, num_classes,
target_num_classes)
loss, _, _ = get_final_loss(src_logits, src_one_hot_labels, dst_logits,
| tensorflow.gather | 11,834 |
import tensorflow as tf
mask_q = tf.expand_dims(self.q_mask, 1)
S_ = tf.nn.softmax(mask_logits(S, mask=mask_q))
mask_c = tf.expand_dims(self.c_mask, 2)
S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask=mask_c), dim=1), (0, 2, 1))
self.c2q = tf.matmul(S_, self.q_embed_encoding)
self.q2c = tf.matmul(tf.matmul(S_, S_T), self.c_embed_encoding)
self.attention_outputs = [self.c_embed_encoding, self.c2q, self.c_embed_encoding * self.c2q,
self.c_embed_encoding * self.q2c]
| tensorflow.matmul | 11,835 |
import tensorflow as tf
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
| tensorflow.nn.bias_add | 11,836 |
from tensorflow.contrib.slim.python.slim.data import dataset
items_to_handlers = {
'image': tfexample_decoder.Image(),
'label': tfexample_decoder.Tensor('image/class/label'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
return dataset.Dataset(
data_sources=data_sources,
reader=io_ops.TFRecordReader,
decoder=decoder,
num_samples=100,
items_to_descriptions=None)
class DatasetDataProviderTest(test.TestCase):
| tensorflow.contrib.slim.python.slim.data.dataset.Dataset | 11,837 |
from tensorflow.python.ops import gen_math_ops
A `Tensor` of the same type as `a`.
"""
with ops.op_scope([a, b], name, "MatMul") as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
if a.dtype == types.float32 and (a_is_sparse or b_is_sparse):
return sparse_matmul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
else:
return gen_math_ops._mat_mul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
name=name)
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)
def _as_indexed_slices(x):
| tensorflow.python.ops.gen_math_ops._mat_mul | 11,838 |
import tensorflow as tf
with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse):
clf_logits, clf_losses, lm_losses = model(*xs, train=True, reuse=do_reuse)
if lm_coef > 0:
train_loss = tf.reduce_mean(clf_losses) + lm_coef*tf.reduce_mean(lm_losses)
else:
train_loss = tf.reduce_mean(clf_losses)
| tensorflow.reduce_mean | 11,839 |
import tensorflow.contrib.graph_editor as ge
copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for origin_op, op in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print("Copied %s to %s", ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops)
debug_print("Rewired %s in place of %s restricted to %s",
checkpoints_disconnected_other, checkpoints_other, copied_ops)
# gradient flowing through the checkpointed node
| tensorflow.contrib.graph_editor.reroute_ts | 11,840 |
import tensorflow as tf
# Restore from some checkpoint
if self._restore_chkptfile is not None:
raw_sess = self.get_raw_session()
if raw_sess.run(self.global_step) == 0:
self._network.restore(raw_sess, self._restore_chkptfile)
else:
self.sess = tf.Session(config=sess_config)
#all_variables = tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES)
#self.sess.run(tf.variables_initializer(all_variables))
if self._save_model:
self._save_graph()
| tensorflow.Session | 11,841 |
import tensorflow as tf
num_layers = [16,16,10,6]
else:
raise ValueError('depth=%g is a not a valid setting!' % depth)
# input tensors
self.input_x = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_x")
self.input_tags = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_tags")
self.input_deps = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_dependency")
self.input_head = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_head")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
| tensorflow.placeholder | 11,842 |
from tensorflow.python.ops import array_ops
event_start = array_ops.where(
self._batch_ndims_is_0, 2, 1 + self.batch_ndims)
event_shape = array_ops.slice(s, (event_start,), (self.event_ndims,))
new_shape = array_ops.concat(0, (sample_shape, batch_shape, event_shape))
x = array_ops.reshape(x, shape=new_shape)
return x
| tensorflow.python.ops.array_ops.concat | 11,843 |
import tensorflow as tf
def batch_norm_layer(self, name, input_tensor,training):
with tf.variable_scope(name) as scope:
| tensorflow.variable_scope | 11,844 |
import tensorflow as tf
param_noise_filter_func = default_param_noise_filter
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
| tensorflow.variable_scope | 11,845 |
import tensorflow as tf
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
| tensorflow.logging.info | 11,846 |
import tensorflow as tf
with self.test_session(graph=tf.Graph()) as sess:
# Creates a graph.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = tf.train.Saver({"v0": v0}, name="saver0")
| tensorflow.Variable | 11,847 |
import tensorflow.contrib as contrib
fc2_2 = contrib.layers.fully_connected(stitch1_2, 32, scope="fc2_2")
if cross_stitch_enabled:
with tf.variable_scope("cross_stitch_2"):
stitch2_1, stitch2_2 = apply_cross_stitch(fc2_1, fc2_2)
else:
stitch2_1, stitch2_2 = fc2_1, fc2_2
dropout2_1 = contrib.layers.dropout(stitch2_1, keep_prob=keep_prob, is_training=is_training,
scope="dropout2_1")
dropout2_2 = contrib.layers.dropout(stitch2_2, keep_prob=keep_prob, is_training=is_training,
scope="dropout2_2")
fc3_1 = contrib.layers.fully_connected(dropout2_1, 32, scope="fc3_1")
fc3_2 = contrib.layers.fully_connected(dropout2_2, 32, scope="fc3_2")
if cross_stitch_enabled:
with tf.variable_scope("cross_stitch_3"):
stitch3_1, stitch3_2 = apply_cross_stitch(fc3_1, fc3_2)
else:
| tensorflow.contrib.layers.dropout | 11,848 |
from tensorflow.examples.tutorials.mnist import input_data
# MNIST Handwriting Data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(len(mnist.train.images))
print(len(mnist.test.images))
print(len(mnist.validation.images))
print(mnist.train.labels[1,:])
| tensorflow.examples.tutorials.mnist.input_data.read_data_sets | 11,849 |
import tensorflow as tf
targets_dx = (gt_boxes_urx - bboxes_urx)/(bboxes_width * variances[0])
targets_dy = (gt_boxes_ury - bboxes_ury)/(bboxes_height * variances[0])
targets_dw = tf.log(gt_boxes_width / bboxes_width) / variances[1]
targets_dh = tf.log(gt_boxes_height / bboxes_height) / variances[1]
targets = tf.concat(
[targets_dx, targets_dy, targets_dw, targets_dh], axis=1)
return targets
def decode(roi, deltas, variances=None):
with tf.name_scope('BoundingBoxTransform/decode'):
(roi_width, roi_height,
roi_urx, roi_ury) = get_width_upright(roi)
dx, dy, dw, dh = tf.split(deltas, 4, axis=1)
if variances is None:
variances = [1., 1.]
pred_ur_x = dx * roi_width * variances[0] + roi_urx
pred_ur_y = dy * roi_height * variances[0] + roi_ury
pred_w = tf.exp(dw * variances[1]) * roi_width
pred_h = tf.exp(dh * variances[1]) * roi_height
| tensorflow.name_scope | 11,850 |
import tensorflow as tf
return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
def get_test_batch(image,label,batch_size):
images,labels=tf.train.batch([image,label],batch_size=batch_size)
return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
def get_valid_batch(image,label,batch_size):
images,labels=tf.train.batch([image,label],batch_size=batch_size)
| tensorflow.reshape | 11,851 |
import tensorflow as tf
with tf.variable_scope("conv") as scope:
srcimg_h0 = lrelu(conv2d(srcimg, self.df_dim, name='h0_conv'))
srcimg_h1 = lrelu(conv2d(srcimg_h0, self.df_dim*2, name='h1_conv'))
srcimg_h2 = lrelu(conv2d(srcimg_h1, self.df_dim*4, name='h2_conv'))
srcimg_h3 = lrelu(conv2d(srcimg_h2, self.df_dim*8, name='h3_conv'))
print(srcimg_h3.get_shape())
srcimg_h4 = lrelu(linear(tf.reshape(srcimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
srcimg_z = lrelu(linear(srcimg_h4, featsize, 'hz_lin'))
scope.reuse_variables()
tgtimg_h0 = lrelu(conv2d(tgtimg, self.df_dim, name='h0_conv'))
tgtimg_h1 = lrelu(conv2d(tgtimg_h0, self.df_dim*2, name='h1_conv'))
| tensorflow.reshape | 11,852 |
from tensorflow.python.framework import ops
ret.set_shape(shape)
return ret
# NOTE(mrry): Shapes are conditionally set in the Python wrapper.
ops.RegisterShape("Variable")(common_shapes.unknown_shape)
@ops.RegisterShape("TemporaryVariable")
def _TemporaryVariableShape(op):
"""Shape function for the TemporaryVariable op."""
shape = tensor_util.TensorShapeProtoToList(op.get_attr("shape"))
return [tensor_shape.TensorShape(shape)]
@ops.RegisterShape("DestroyTemporaryVariable")
def _DestroyTemporaryVariableShape(op):
"""Shape function for the DestroyTemporaryVariable op."""
return [op.inputs[0].get_shape()]
def init_variable(v, init, name="init"):
"""Initializes variable with "init".
This op does the following:
if init is a Tensor, v = init
if callable(init): v = init(VariableShape(v), v.dtype)
Args:
v: Variable to initialize
| tensorflow.python.framework.ops.RegisterShape | 11,853 |
from tensorflow.python.ops import array_ops
nn_activations = layers.fully_connected(data, 1)
# There is always one activation per instance by definition, so squeeze
# away the extra dimension.
return array_ops.squeeze(nn_activations, squeeze_dims=[1])
class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer):
| tensorflow.python.ops.array_ops.squeeze | 11,854 |
import tensorflow as tf
x = tf.placeholder_with_default(input=1, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.shape, lambda: None))
self.assertTrue(
normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x)))
x = tf.placeholder_with_default(input=[1], shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.shape, lambda: None))
self.assertFalse(
normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x)))
# There's no notion of partially known shapes in eager mode, so exit
# early.
if tf.executing_eagerly():
return
# Test case 3.
x = tf.placeholder_with_default(input=1, shape=None)
is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x))
self.assertTrue(self.evaluate(is_scalar))
x = tf.placeholder_with_default(input=[1], shape=None)
is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x))
self.assertFalse(self.evaluate(is_scalar))
def _GetFakeDistribution(self):
class FakeDistribution(tfd.Distribution):
| tensorflow.executing_eagerly | 11,855 |
import tensorflow as tf
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
| tensorflow.logging.info | 11,856 |
import tensorflow as tf
edge_types: A list of 1-D `tf.Tensor` of `int32`. Specify edge types to
filter outgoing edges in each hop.
Return:
A tuple of lists: (nodes, adjacents)
nodes: A list of N + 1 `tf.Tensor` of `int64`, N is the number of
hops. Specify node set of each hop, including the root.
adjacents: A list of N `tf.SparseTensor` of `int64`. Specify adjacent
matrix between hops.
"""
nodes = tf.reshape(nodes, [-1])
nodes_list = [nodes]
adj_list = []
for hop_edge_types in edge_types:
neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
next_values = weight.values
next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
next_shape = tf.cast(next_shape, tf.int64)
| tensorflow.reshape | 11,857 |
import tensorflow as tf
return output
def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
| tensorflow.ones_like | 11,858 |
import tensorflow as tf
if w_init is None:
w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
ret = tf.layers.dense(inputs=inputdata, activation=lambda x: tf.identity(x, name='output'),
| tensorflow.constant_initializer | 11,859 |
from tensorflow.python.ops import array_ops
parameters.pop("self")
with ops.name_scope(name, values=[alpha, beta]) as ns:
with ops.control_dependencies([
check_ops.assert_positive(alpha),
check_ops.assert_positive(beta),
] if validate_args else []):
self._alpha = array_ops.identity(alpha, name="alpha")
self._beta = array_ops.identity(beta, name="beta")
super(InverseGamma, self).__init__(
dtype=self._alpha.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
is_continuous=True,
is_reparameterized=False,
| tensorflow.python.ops.array_ops.identity | 11,860 |
import tensorflow as tf
[self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3),
[self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))
output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3),
[self.batch_size, s_h, s_w, self.c_dim], name='d_h4')
scope.reuse_variables()
truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3),
[self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3),
[self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
truthoutput_h3 = lrelu(deconv2d(tf.concat([truthoutput_h2, tgtctx_h1], 3),
[self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))
truthoutput_h4 = deconv2d(tf.concat([truthoutput_h3, tgtctx_h0], 3),
[self.batch_size, s_h, s_w, self.c_dim], name='d_h4')
self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3
mean, var = tf.nn.moments(tgtimg_z, axes=[0])
print(var.get_shape())
# self.simloss /= tf.reduce_mean(var)
print(tgtimg_z.get_shape())
| tensorflow.concat | 11,861 |
from tensorflow.contrib.learn.python.learn.estimators import run_config
regressor.evaluate(input_fn=_eval_input_fn, steps=1)
regressor.export(self._export_dir_base)
def testRankingDontThrowExceptionForForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
model = estimator.GradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
| tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig | 11,862 |
import tensorflow as tf
memory_limit = int(fraction*total_memory)
print(memory_info)
if tf.version.VERSION[0]=="2":
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
tf.config.experimental.set_virtual_device_configuration(gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)])
else:
gpu_options = tf.GPUOptions(allow_growth=allow_growth,
per_process_gpu_memory_fraction=fraction)
config = tf.ConfigProto(gpu_options=gpu_options)
session = tf.Session(config=config)
| tensorflow.config.experimental.VirtualDeviceConfiguration | 11,863 |
import tensorflow as tf
def _preprocess_train(image, clazz):
# Do random crop + horizontal flip for each train image
image = tf.pad(image, [[4, 4], [4, 4], [0, 0]])
image = tf.image.random_crop(image, (w, h, in_ch))
image = tf.image.random_flip_left_right(image)
if cutout_size > 0:
image = self._do_cutout(image, w, h, cutout_size)
return (image, clazz)
(images, classes) = _prepare(images, classes)
dataset = tf.data.Dataset.from_tensor_slices((images, classes)).repeat()
if is_train:
dataset = dataset.apply(tf.data.experimental.map_and_batch(map_func=_preprocess_train, batch_size=batch_size))
else:
dataset = dataset.batch(batch_size)
dataset_itr = dataset.make_initializable_iterator()
(images_batch, classes_batch) = dataset_itr.get_next()
dataset_init_op = dataset_itr.initializer
return (images_batch, classes_batch, dataset_init_op)
def _get_drop_path_keep_prob(self, layers_ratio, step, is_train=False, **knobs):
| tensorflow.data.Dataset.from_tensor_slices | 11,864 |
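A minimal `tf.data` input pipeline in the same TF 1.x style as above; `map_and_batch` fuses per-example preprocessing with batching (toy arrays for illustration):

```python
import numpy as np
import tensorflow as tf

images = np.random.rand(8, 32, 32, 3).astype(np.float32)
labels = np.arange(8, dtype=np.int64)

def _preprocess(image, label):
    image = tf.image.random_flip_left_right(image)
    return image, label

dataset = tf.data.Dataset.from_tensor_slices((images, labels)).repeat()
dataset = dataset.apply(tf.data.experimental.map_and_batch(
    map_func=_preprocess, batch_size=4))
itr = dataset.make_initializable_iterator()
images_batch, labels_batch = itr.get_next()

with tf.Session() as sess:
    sess.run(itr.initializer)
    print(sess.run(images_batch).shape)   # (4, 32, 32, 3)
```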
import tensorflow as tf
def id2tag(self, pred_ids, name=None):
mapping_strings = self.load_tag_data()
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_strings, name=name
)
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
return pred_strings
def id2word(self, word_ids, name=None):
mapping_strings = self.load_word_data()
| tensorflow.to_int64 | 11,865 |
import tensorflow as tf
)
)
if keep_unselected:
input_idx = tf.tile(tf.expand_dims(tf.range(sl), 0), [bs, 1])
pooling_result = tf.cond(
tf.equal(sl_unhead, 0),
lambda: tf.zeros([bs, 0, hn], tf.float32),
lambda: mean_pooling_for_unselected_head(
unhead_org_idx, sl_unhead, rep_unhead_mask,
input_idx, sl, rep_mask, rep_map, None) # todo: point !
)
with tf.variable_scope('output'):
| tensorflow.zeros | 11,866 |
import tensorflow as tf
img_w = img_w_batch[i]
inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w])
tower_grads = []
biases_regularizer = tf.no_regularizer
weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)
with tf.variable_scope(tf.get_variable_scope()):
for i in range(num_gpu):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i):
with slim.arg_scope(
[slim.model_variable, slim.variable],
device='/device:CPU:0'):
with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,
| tensorflow.get_variable_scope | 11,867 |
import tensorflow as tf
if label_smoothing != 0.:
nclass = logits.shape[-1]
label = tf.one_hot(label, nclass) if label.shape.ndims == 1 else label
if label.shape.ndims == 1:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
else:
loss = tf.losses.softmax_cross_entropy(
label, logits, label_smoothing=label_smoothing,
reduction=tf.losses.Reduction.NONE)
loss = tf.reduce_mean(loss, name='xentropy-loss')
def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
with tf.name_scope('prediction_incorrect'):
x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
return tf.cast(x, tf.float32, name=name)
wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))
| tensorflow.reduce_mean | 11,868 |
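`tf.nn.in_top_k`, used by `prediction_incorrect` above, returns per example whether the true label is among the top-k logits; negating it gives the error indicator:

```python
import tensorflow as tf

logits = tf.constant([[0.1, 0.9], [0.8, 0.2]])
labels = tf.constant([1, 1])
correct = tf.nn.in_top_k(logits, labels, k=1)   # [True, False]
error = tf.reduce_mean(tf.cast(tf.logical_not(correct), tf.float32))  # 0.5
```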
import tensorflow as tf
import numpy as np
import tensorflow as tf
from typing import Tuple
from .tensor import TensorLike
def _double_factorial_loop_body(n, result, two):
result = tf.where(tf.greater_equal(n, two), result * n, result)
return n - two, result, two
def _double_factorial_loop_condition(n, result, two):
return tf.cast(tf.math.count_nonzero(tf.greater_equal(n, two)), tf.bool)
def double_factorial(n: TensorLike) -> TensorLike:
n = tf.convert_to_tensor(value=n)
two = tf.ones_like(n) * 2
result = tf.ones_like(n)
_, result, _ = tf.while_loop(
cond=_double_factorial_loop_condition,
body=_double_factorial_loop_body,
loop_vars=[n, result, two])
| tensorflow.greater_equal | 11,869 |
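Usage of the `double_factorial` helper above (the snippet's length cap cuts off its final `return result`): each loop step multiplies by n and subtracts 2, so 7!! = 7·5·3·1 = 105.

```python
import tensorflow as tf

# Assumes double_factorial from the snippet above, completed with `return result`.
# Runs eagerly in TF 2, or under a Session in TF 1.x graph mode.
print(double_factorial(tf.constant([4, 5, 7])))  # [  8  15 105]
```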
import tensorflow as tf
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)
scalar_prod = tf.matmul(
tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))
scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])
dist = x_norm_sq + tf.transpose(
means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod
if self.hparams.soft_em:
nearest_idx = tf.stack(
[
tf.multinomial(
-dist[:, i, :], num_samples=self.hparams.num_samples)
for i in range(self.hparams.num_blocks)
],
axis=1)
nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size)
nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)
else:
if self.hparams.random_top_k > 1:
_, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)
nearest_idx = tf.gather(
| tensorflow.multinomial | 11,870 |
import tensorflow as tf
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.test_session() as sess:
var = tf.Variable(1.0, name="var0")
save = tf.train.Saver({var.op.name: var})
var.initializer.run()
| tensorflow.Variable | 11,871 |
import tensorflow as tf
e_mean_Kuf = tf.reshape(e_mean_Kuf, [num_data, num_func, num_ind])
e_fmean_mean = tf.einsum("nqm,mz->nqz", e_mean_Kuf, Lit_q_mu) # N x D x D
| tensorflow.einsum | 11,872 |
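A small sketch of the `tf.einsum` contraction pattern used above — "nqm,mz->nqz" is a batched matmul that sums over the shared index m:

```python
import tensorflow as tf

a = tf.ones([2, 3, 4])               # n=2, q=3, m=4
b = tf.ones([4, 5])                  # m=4, z=5
c = tf.einsum("nqm,mz->nqz", a, b)   # sums over m
print(c.shape)                       # (2, 3, 5)
```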
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
def get_default_hparams():
return tf.contrib.training.HParams(
x_dim=2,
n_samples=200,
n_steps=10,
eps=.1,
| tensorflow.contrib.training.HParams | 11,873 |
import tensorflow as tf
mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
# sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
sigma = tf.clip_by_value(sigma, 0.0, 1.0)
norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def build_cnet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
vf = tf.layers.dense(layer_c2, 1, kernel_regularizer=reg)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return vf, params
# Update the network
def train(self, s, a, r, adv):
start = time()
self.sess.run([self.pi_new_params, self.vf_new_params, self.data_iter.initializer],
feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv})
while True:
try:
| tensorflow.layers.dense | 11,874 |
import tensorflow as tf
for i in range(num_gpu):
img = tf.expand_dims(img_batch[i], axis=0)
pretrain_zoo = PretrainModelZoo()
if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
img = img / tf.constant([cfgs.PIXEL_STD])
gtboxes_and_label_r = tf.py_func(backward_convert,
inp=[gtboxes_and_label_batch[i]],
| tensorflow.constant | 11,875 |
import tensorflow as tf
| tensorflow.constant_initializer | 11,876 |
from tensorflow.python.framework import tensor_shape
@ops.RegisterShape("Range")
def _RangeShape(op):
start_value = tensor_util.ConstantValue(op.inputs[0])
limit_value = tensor_util.ConstantValue(op.inputs[1])
delta_value = tensor_util.ConstantValue(op.inputs[2])
if start_value is None or limit_value is None or delta_value is None:
return [tensor_shape.vector(None)]
else:
return [tensor_shape.vector((limit_value - start_value + delta_value - 1) //
delta_value)]
# Reduction operations
def _ReductionDims(x, reduction_indices):
| tensorflow.python.framework.tensor_shape.vector | 11,877 |
import tensorflow as tf
self._is_training = is_training
self._input = input_
self._rnn_params = None
self._cell = None
self.batch_size = input_.batch_size
self.num_steps = input_.num_steps
hidden_size = config.hidden_size
vocab_size = config.vocab_size
self.graph = graph
with self.graph.as_default():
with tf.device('/cpu:0'):
embedding = tf.get_variable(
'embedding', [vocab_size, hidden_size], dtype=tf.float32)
inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
output, state = self._build_rnn_graph(inputs, config, is_training)
softmax_w = tf.get_variable(
| tensorflow.device | 11,878 |
import tensorflow as tf
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
| tensorflow.logging.set_verbosity | 11,879 |
import tensorflow as tf
self.qnode = qnode
dtype = tf.float32 if tf.keras.backend.floatx() == tf.float32 else tf.float64
| tensorflow.keras.backend.floatx | 11,880 |
import tensorflow as tf
if use_bias:
beta = tf.get_variable('beta', [channnel], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
else:
beta = tf.zeros([1] * ndims, name='beta')
if use_scale:
gamma = tf.get_variable('gamma', [channnel], initializer=tf.constant_initializer(1.0))
gamma = tf.reshape(gamma, new_shape)
else:
| tensorflow.zeros | 11,881 |
import tensorflow as tf
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
| tensorflow.logging.info | 11,882 |
from tensorflow.contrib.layers.python.layers import feature_column
'language', hash_bucket_size=2e7)
embedding_feature = feature_column.embedding_column(
| tensorflow.contrib.layers.python.layers.feature_column.embedding_column | 11,883 |
from tensorflow.python.platform import gfile
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
# Adding s1 (s3 should not be deleted because helper is unaware of it)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
def testSharded(self):
save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded")
try:
gfile.DeleteRecursively(save_dir)
except OSError:
pass # Ignore
gfile.MakeDirs(save_dir)
| tensorflow.python.platform.gfile.Exists | 11,884 |
from tensorflow.python.framework import ops as _ops
def resource_using_op(resource, name=None):
r"""TODO: add doc.
Args:
resource: A `Tensor` of type `resource`.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
result = _op_def_lib.apply_op("ResourceUsingOp", resource=resource,
name=name)
return result
_ops.RegisterShape("ResourceUsingOp")(None)
_stub_resource_handle_op_outputs = ["resource"]
def stub_resource_handle_op(container=None, shared_name=None, name=None):
r"""Creates a handle to a StubResource
Args:
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
| tensorflow.python.framework.ops.RegisterShape | 11,885 |
from tensorflow.python.ops import math_ops
def _predictions_streaming_mean(predictions, unused_labels, weights=None):
return metric_ops.streaming_mean(predictions, weights=weights)
def _streaming_auc(predictions, labels, weights=None):
return metric_ops.streaming_auc(
predictions, labels, weights=_float_weights_or_none(weights))
def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, labels, weights=None):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metric_ops.streaming_accuracy(
predictions=threshold_predictions, labels=labels, weights=weights)
return _accuracy_metric
def _streaming_at_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, labels, weights=None):
precision_tensor, update_op = streaming_metrics_fn(
predictions,
labels=labels,
thresholds=[threshold],
weights=_float_weights_or_none(weights))
| tensorflow.python.ops.math_ops.greater_equal | 11,886 |
import tensorflow as tf
init = tf.global_variables_initializer()
config = tf.ConfigProto(log_device_placement=False)
if self.on_gpu:
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.sess.run(init)
checkpoint_file = self.model_dir
meta_graph_location = self.model_dir + '.meta'
saver = tf.train.import_meta_graph(meta_graph_location, clear_devices=True)
saver.restore(self.sess, checkpoint_file)
def encode(self, data: List['np.ndarray'], *args, **kwargs) -> np.ndarray:
def _padding(data):
_data = np.array(
[np.concatenate((d, np.zeros((self.num_frame_per_clib - d.shape[0],
self.frame_size_x,
self.frame_size_y,
self.rgb_channels), dtype=np.float32)), axis=0)
if d.shape[0] < self.num_frame_per_clib else d[:self.num_frame_per_clib] for d in data])
| tensorflow.train.import_meta_graph | 11,887 |
import tensorflow as tf
dataset = dataset.map(squeeze)
return dataset
@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def lm1b_preprocess(dataset,
training,
max_target_length=-1,
max_eval_target_length=-1):
"""Preprocessing for LM1B: filter out targets exceeding maximum length."""
def target_right_length(_, target):
return tf.less(tf.shape(target)[0], max_target_length + 1)
def eval_target_right_length(_, target):
return tf.less(tf.shape(target)[0], max_eval_target_length + 1)
if max_target_length > 0 and training:
dataset = dataset.filter(target_right_length)
if max_eval_target_length > 0 and not training:
dataset = dataset.filter(eval_target_right_length)
return dataset
| tensorflow.shape | 11,888 |
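The length filter above keeps only examples whose target has at most `max_target_length` tokens; a minimal sketch on a toy variable-length dataset (names are illustrative):

```python
import tensorflow as tf

max_target_length = 3

def gen():
    yield [1, 2]
    yield [1, 2, 3, 4, 5]   # too long, will be dropped
    yield [7]

dataset = tf.data.Dataset.from_generator(
    gen, output_types=tf.int64, output_shapes=[None])

# Keep targets with at most max_target_length tokens.
dataset = dataset.filter(
    lambda t: tf.less(tf.shape(t)[0], max_target_length + 1))
```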
import tensorflow as tf
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
| tensorflow.constant | 11,889 |
import tensorflow as tf
if is_training:
pool_height = scale_dimension(model_options.crop_size[0],
1. / model_options.output_stride)
pool_width = scale_dimension(model_options.crop_size[1],
1. / model_options.output_stride)
image_feature = slim.avg_pool2d(
features, [pool_height, pool_width], [pool_height, pool_width],
padding='VALID')
else:
pool_height = tf.shape(features)[1]
pool_width = tf.shape(features)[2]
image_feature = tf.reduce_mean(features, axis=[1,2])[:, tf.newaxis, tf.newaxis, :]
image_feature = slim.conv2d(
image_feature, depth, 1, scope=_IMAGE_POOLING_SCOPE)
image_feature = tf.image.resize_bilinear(
image_feature, [pool_height, pool_width], align_corners=True)
if is_training:
image_feature.set_shape([None, pool_height, pool_width, depth])
branch_logits.append(image_feature)
# Employ a 1x1 convolution.
branch_logits.append(slim.conv2d(features, depth, 1,
scope=_ASPP_SCOPE + str(0)))
if model_options.atrous_rates:
# Employ 3x3 convolutions with different atrous rates.
for i, rate in enumerate(model_options.atrous_rates, 1):
scope = _ASPP_SCOPE + str(i)
| tensorflow.image.resize_bilinear | 11,890 |
import tensorflow as tf
def test_points_mask_iou(self):
masks1 = tf.constant([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0]], dtype=tf.int32)
masks2 = tf.constant([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 0, 1, 0, 1]], dtype=tf.int32)
iou = isu.points_mask_iou(masks1=masks1, masks2=masks2)
expected_iou = tf.constant([[0, 0, 0],
| tensorflow.constant | 11,891 |
from tensorflow.python.framework import tensor_shape
else:
dim0 = None
return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]
| tensorflow.python.framework.tensor_shape.TensorShape | 11,892 |
import tensorflow as tf
def click_weighted_softmax_cross_entropy_loss(self, output, labels, propensity_weights, name=None):
"""Computes listwise softmax loss with propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
name: A string used as the name for this variable scope.
Returns:
(tf.Tensor) A single value tensor containing the loss.
"""
loss = None
with tf.name_scope(name, "click_softmax_cross_entropy",[output]):
label_dis = labels*propensity_weights / tf.reduce_sum(labels*propensity_weights, 1, keep_dims=True)
loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels*propensity_weights, 1)
return tf.reduce_sum(loss) / tf.reduce_sum(labels*propensity_weights)
def click_loglikelihood(self, labels, propensity,train_output, name=None):
"""Computes listwise softmax loss with propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
name: A string used as the name for this variable scope.
| tensorflow.name_scope | 11,893 |
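A minimal numeric sketch of the propensity-weighted softmax loss defined above (toy batch; in TF 1.x, `softmax_cross_entropy_with_logits` accepts a full label distribution):

```python
import tensorflow as tf

output = tf.constant([[2.0, 1.0, 0.0]])              # ranking scores
labels = tf.constant([[1.0, 0.0, 1.0]])              # >= 1 marks relevance
propensity_weights = tf.constant([[1.0, 1.0, 2.0]])
weighted = labels * propensity_weights
# Normalize weighted relevance into a target distribution per list.
label_dis = weighted / tf.reduce_sum(weighted, 1, keep_dims=True)
loss = tf.nn.softmax_cross_entropy_with_logits(
    logits=output, labels=label_dis) * tf.reduce_sum(weighted, 1)
loss = tf.reduce_sum(loss) / tf.reduce_sum(weighted)

with tf.Session() as sess:
    print(sess.run(loss))
```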
import tensorflow as tf
logits = tf.convert_to_tensor(cls_preds)
onehot_labels = tf.convert_to_tensor(onehot_labels)
precise_logits = tf.cast(logits, tf.float32) if (
logits.dtype == tf.float16) else logits
onehot_labels = tf.cast(onehot_labels, precise_logits.dtype)
predictions = tf.nn.sigmoid(logits)
predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1.-predictions)
# add small value to avoid 0
epsilon = 1e-8
alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32))
alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1-alpha_t)
losses = tf.reduce_sum(-alpha_t * tf.pow(1. - predictions_pt, gamma) * tf.log(predictions_pt+epsilon),
name=name, axis=1)
return losses
def Evaluate(sess):
test_acc = 0.0
test_loss = 0.0
for it in range(test_iteration):
batch_data = next(scene_data_val)
| tensorflow.equal | 11,894 |
import tensorflow as tf
# define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
# landm_valid = 1 (w landm), 0 (w/o landm)
mask_pos = tf.equal(class_true, 1)
mask_neg = tf.equal(class_true, 0)
mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
# landm loss (smooth L1)
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
| tensorflow.equal | 11,895 |
import tensorflow as tf
scales_to_logits = outputs_to_scales_to_logits[output]
logits = _resize_bilinear(
scales_to_logits[MERGED_LOGITS_SCOPE],
tf.shape(images)[1:3],
scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
outputs_to_predictions[output].append(
tf.expand_dims(tf.nn.softmax(logits), 4))
if add_flipped_images:
scales_to_logits_reversed = (
outputs_to_scales_to_logits_reversed[output])
logits_reversed = _resize_bilinear(
| tensorflow.nn.softmax | 11,896 |
import tensorflow as tf
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def _build_rnn_graph(self, inputs, config, is_training):
if config.rnn_mode == CUDNN:
return self._build_rnn_graph_cudnn(inputs, config, is_training)
else:
return self._build_rnn_graph_lstm(inputs, config, is_training)
def _build_rnn_graph_cudnn(self, inputs, config, is_training):
"""Build the inference graph using CUDNN cell."""
inputs = tf.transpose(inputs, [1, 0, 2])
self._cell = tf.contrib.cudnn_rnn.CudnnLSTM(
num_layers=config.num_layers,
num_units=config.hidden_size,
input_size=config.hidden_size,
dropout=1 - config.keep_prob if is_training else 0)
params_size_t = self._cell.params_size()
self._rnn_params = tf.get_variable(
"lstm_params",
initializer=tf.random_uniform(
[params_size_t], -config.init_scale, config.init_scale),
| tensorflow.transpose | 11,897 |
import tensorflow as tf
outputs = tf.unstack(outputs, axis=0)
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
| tensorflow.nn.softmax | 11,898 |
import tensorflow as tf
if reuse:
tf.get_variable_scope().reuse_variables()
| tensorflow.get_variable_scope | 11,899 |