seed (string, lengths 25–2.89k) | seed_api (string, lengths 14–102) | index (int64, 0–14.8k)
---|---|---|
import tensorflow as tf
t = tf.transpose(words, perm=[1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
outputs_fw, (hidden_fw, output_fw) = lstm_cell_fw(t, dtype=tf.float32, sequence_length=nwords)
if bidirectional:
    lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
    lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
    outputs_bw, (hidden_bw, output_bw) = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)
    outputs = tf.concat([outputs_fw, outputs_bw], axis=-1)
    hidden = tf.concat([hidden_fw, hidden_bw], axis=-1)
| tensorflow.contrib.rnn.LSTMBlockFusedCell | 10,700 |
import tensorflow as tf
"""Warmup optimization to reduce overhead."""
samples = tf.random_normal(
shape=[n_samples, dynamics.x_dim], dtype=tf.float32)
| tensorflow.random_normal | 10,701 |
import tensorflow as tf
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
| tensorflow.reshape | 10,702 |
import tensorflow as tf
self.feed1 = self.conv4
self.feed2 = self.conv3
self.encoder_1 = self.conv2
self.encoder_2 = self.conv3
self.encoder_3 = self.conv4
self.encoder_4 = self.conv5
print("\nEncoder RESNET is built successfully\n\n")
@timeit
def load_pretrained_weights(self, sess):
    print("Loading pretrained weights of resnet18")
    all_vars = tf.trainable_variables()
    all_vars += tf.get_collection('mu_sigma_bn')
    for v in all_vars:
        if v.op.name in self.pretrained_weights.keys():
            assign_op = v.assign(self.pretrained_weights[v.op.name])
            sess.run(assign_op)
            print(v.op.name + " - loaded successfully")
    print("All pretrained weights of resnet18 are loaded")

def _residual_block(self, name, x, filters, pool_first=False, strides=1, dilation=1):
    print('Building residual unit: %s' % name)
    with tf.variable_scope(name):
        # get input channels
        in_channel = x.shape.as_list()[-1]
| tensorflow.get_collection | 10,703 |
import tensorflow as tf
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
if task_name != "sts-b":
probabilities = tf.nn.softmax(logits, axis=-1)
predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
else:
probabilities = logits
| tensorflow.argmax | 10,704 |
import tensorflow as tf
strides = [1] + strides + [1]
filter_ = get_variable('filter_{}'.format(k),
[filter_height, filter_width, in_channels, out_channels])
encoder_inputs_ = tf.nn.conv2d(encoder_inputs_, filter_, strides, padding='SAME')
if encoder.batch_norm:
    encoder_inputs_ = tf.layers.batch_normalization(encoder_inputs_, training=training,
                                                    name='conv_batch_norm_{}'.format(k))
if encoder.conv_activation is not None and encoder.conv_activation.lower() == 'relu':
    encoder_inputs_ = tf.nn.relu(encoder_inputs_)
encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / strides[1]))
feature_size = encoder_inputs_.shape[2].value
channels = encoder_inputs_.shape[3].value
time_steps = tf.shape(encoder_inputs_)[1]
encoder_inputs_ = tf.reshape(encoder_inputs_, [batch_size, time_steps, feature_size * channels])
conv_outputs_ = encoder_inputs_
| tensorflow.nn.relu | 10,705 |
import tensorflow as tf
# independent: each GPU has its own copy of the variables, and gradients are
# not shared between towers. This can be used to check performance when no
# data is moved between GPUs.
# distributed_replicated: Distributed training only. Each GPU has a copy of
# the variables, and updates its copy after the parameter servers are all
# updated with the gradients from all servers. Only works with
# cross_replica_sync=true. Unlike 'replicated', currently never uses
# nccl all-reduce for replicating within a server.
tf.flags.DEFINE_string(
    'variable_update', 'parameter_server',
    ('The method for managing variables: '
     'parameter_server, replicated, distributed_replicated, independent'))
tf.flags.DEFINE_boolean(
    'use_nccl', True,
    'Whether to use nccl all-reduce primitives where possible')
| tensorflow.flags.DEFINE_string | 10,706 |
import tensorflow as tf
with tf.name_scope('LossA'):
    # reconstruction loss
    recon_loss_A = tf.reduce_mean(tf.abs(A - ABA), name='recon_loss')
    # gan loss
    G_loss_A, D_loss_A = LSGAN_losses(A_dis_real, A_dis_fake)

with tf.name_scope('LossB'):
    recon_loss_B = tf.reduce_mean(tf.abs(B - BAB), name='recon_loss')
    G_loss_B, D_loss_B = LSGAN_losses(B_dis_real, B_dis_fake)

LAMBDA = 10.0
self.g_loss = tf.add((G_loss_A + G_loss_B),
                     (recon_loss_A + recon_loss_B) * LAMBDA, name='G_loss_total')
self.d_loss = tf.add(D_loss_A, D_loss_B, name='D_loss_total')
self.collect_variables('gen', 'discrim')
add_moving_summary(recon_loss_A, recon_loss_B, self.g_loss, self.d_loss)

def _get_optimizer(self):
    lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
    return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
| tensorflow.add | 10,707 |
import tensorflow as tf
# instead of transfering a word id to one-hot-format vector and then
# multiply by the embedding matrix.
# embed is the outputs of the hidden layer (embedding layer), it is a
# row vector with 'embedding_size' values.
with tf.variable_scope(name):
    embeddings = tf.get_variable(
        name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args)
    embed = tf.nn.embedding_lookup(embeddings, self.inputs)
| tensorflow.variable_scope | 10,708 |
import tensorflow as tf
model_adv = model_name.ResNet(hps, adv_images, FLAGS.mode, Reuse=True)
model_adv.build_graph()
# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
logits_nor = model_nor.t_SNE_logits
logits_adv = model_adv.t_SNE_logits
dim_logits = logits_nor.shape[1]
| tensorflow.train.get_checkpoint_state | 10,709 |
import tensorflow as tf
self.loss = loss_action + loss_arguments
tf.scalar_summary('loss', self.loss)
with tf.name_scope('accuracy'):
    correct_prediction_action = tf.equal(
        tf.argmax(one_hot_labels_action, 1),
        tf.argmax(self.predictions_action, 1)
    )
    self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float'))
    tf.scalar_summary('accuracy_action', self.accuracy_action)
    correct_prediction_arguments = tf.equal(tf.argmax(one_hot_labels_arguments, 2),
                                            tf.argmax(self.predictions_arguments, 2))
    self.accuracy_arguments = tf.reduce_mean(tf.cast(correct_prediction_arguments, 'float'))
    tf.scalar_summary('accuracy_arguments', self.accuracy_arguments)
| tensorflow.argmax | 10,710 |
import tensorflow as tf
"""
maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
| tensorflow.log | 10,711 |
import tensorflow as tf
pred1, pred2 = tf.split(pred, 2, axis=0)
tgt1, tgt2 = tf.split(tgt, 2, axis=0)
| tensorflow.split | 10,712 |
from tensorflow.python.framework import ops
class InverseGammaWithSoftplusAlphaBeta(InverseGamma):
  """Inverse Gamma with softplus applied to `alpha` and `beta`."""

  def __init__(self,
               alpha,
               beta,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGammaWithSoftplusAlphaBeta"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      super(InverseGammaWithSoftplusAlphaBeta, self).__init__(
          alpha=nn.softplus(alpha, name="softplus_alpha"),
          beta=nn.softplus(beta, name="softplus_gamma"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
| tensorflow.python.framework.ops.name_scope | 10,713 |
import tensorflow as tf
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=False,
average_across_batch=False)
| tensorflow.nn.seq2seq.sequence_loss | 10,714 |
import tensorflow as tf
log_score_t = tf.log(output_t) # [batch_size, vsize]
wordidx_t = tf.multinomial(log_score_t, 1) # [batch_size, 1]
wordidx_t = tf.reshape(wordidx_t, [-1]) # [batch_size]
elif mode_gen in ('ce_train', 'loss',):
| tensorflow.reshape | 10,715 |
import tensorflow as tf
return outputs
# This is the difference you see when visualizing in TensorBoard:
# using with tf.name_scope('inputs') groups xs and ys into one
# larger layer, whose name is the argument passed to tf.name_scope().
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')  # the name attribute is also added for TensorBoard
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')  # same as above
# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu,nameScope="layerTest1")
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None,nameScope="layerTest2")
sess = tf.Session()
| tensorflow.placeholder | 10,716 |
import tensorflow as tf
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
    input_image = tf.constant(input_image, dtype=tf.float32)
| tensorflow.Session | 10,717 |
import tensorflow as tf
b1 = tf.matmul(state, hyper_b_1)
w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer]) # reshape into batch of matrices
b1_reshaped = tf.reshape(b1, [-1, 1, n_h_mixer])
# [batch, 1, n_h_mixer]
hidden = tf.nn.elu(tf.matmul(agent_qs_reshaped, w1_reshaped) + b1_reshaped)
# Second layer
w_final = tf.abs(tf.matmul(state, hyper_w_final))
w_final_reshaped = tf.reshape(w_final, [-1, n_h_mixer, 1]) # reshape into batch of matrices
b_final_reshaped = tf.reshape(hyper_b_final, [-1, 1, 1])
# [batch, 1, 1]
y = tf.matmul(hidden, w_final_reshaped) + b_final_reshaped
q_tot = tf.reshape(y, [-1, 1])
return q_tot
| tensorflow.reshape | 10,718 |
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
def dnn_bias_(self):
  """Returns bias of deep neural network part."""
  return (self._dnn_model.get_bias(model_dir=self._model_dir) +
          [self.get_variable_value("centered_bias_weight")])

def _get_feature_dict(self, features):
  if isinstance(features, dict):
    return features
  return {"": features}

def _get_train_ops(self, features, targets):
  """See base class."""
  global_step = contrib_variables.get_global_step()
  assert global_step
  features = self._get_feature_dict(features)
  logits = self._logits(features, is_training=True)
  if self._enable_centered_bias:
    centered_bias_step = [self._centered_bias_step(targets, features)]
  else:
    centered_bias_step = []
  with ops.control_dependencies(centered_bias_step):
    loss = self._target_column.loss(logits, targets, features)
  logging_ops.scalar_summary("loss", loss)
| tensorflow.contrib.framework.python.ops.variables.get_global_step | 10,719 |
import tensorflow as tf
out, num_outputs=num_filters, kernel_size=8, stride=4,
activation_fn=tf.nn.relu, weights_initializer=gauss_initializer,
trainable=False)
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out
| tensorflow.variable_scope | 10,720 |
from tensorflow.python.ops import math_ops
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = (
(predictions - batch_mean_prediction) * (labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
  batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
  batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals *
                                       weights)
# View delta_comoment as = C_AB - C_A in the update equation above.
# Since C_A is stored in a var, by how much do we need to increment that var
# to make the var = C_AB?
delta_comoment = (batch_comoment +
(prev_mean_prediction - batch_mean_prediction) *
(prev_mean_label - batch_mean_label) *
(prev_count * batch_count / update_count))
| tensorflow.python.ops.math_ops.reduce_sum | 10,721 |
import tensorflow as tf
with tf.variable_scope("layer_{}".format(layer_id)):
w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size])
self.w_lstm.append(w)
self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
with tf.variable_scope("emb"):
self.w_emb = tf.get_variable("w", [self.num_branches, self.lstm_size])
with tf.variable_scope("softmax"):
self.w_soft = tf.get_variable("w", [self.lstm_size, self.num_branches])
b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2),
| tensorflow.variable_scope | 10,722 |
import tensorflow as tf
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder1(self):
  with self.test_session() as sess:
    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      cell = tf.nn.rnn_cell.GRUCell(2)
      inp = tf.constant(0.5, shape=[2, 2, 2])
      enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32)
      attn_states = enc_outputs
      dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
      dec, mem = tf.nn.seq2seq.attention_decoder(
          dec_inp, enc_state,
| tensorflow.nn.rnn_cell.GRUCell | 10,723 |
import tensorflow as tf
for l in losses + [total_loss]:
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
    tf.summary.scalar(loss_name, l)
return total_loss
| tensorflow.summary.scalar | 10,724 |
import tensorflow as tf
tf.app.flags.DEFINE_string('data-dir', os.getcwd() + '/dataset/',
'Directory where the dataset will be stored and checkpoint. (default: %(default)s)')
tf.app.flags.DEFINE_integer('max-steps', 10000,
'Number of mini-batches to train on. (default: %(default)d)')
tf.app.flags.DEFINE_integer('log-frequency', 10,
'Number of steps between logging results to the console and saving summaries (default: %(default)d)')
tf.app.flags.DEFINE_integer('save-model', 1000,
'Number of steps between model saves (default: %(default)d)')
# Optimisation hyperparameters
tf.app.flags.DEFINE_integer('batch-size', 256, 'Number of examples per mini-batch (default: %(default)d)')
tf.app.flags.DEFINE_float('learning-rate', 1e-4, 'Learning rate (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-width', 32, 'Image width (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-height', 32, 'Image height (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-channels', 3, 'Image channels (default: %(default)d)')
tf.app.flags.DEFINE_integer('num-classes', 10, 'Number of classes (default: %(default)d)')
tf.app.flags.DEFINE_string('log-dir', '{cwd}/logs/'.format(cwd=os.getcwd()),
'Directory where to write event logs and checkpoint. (default: %(default)s)')
run_log_dir = os.path.join(FLAGS.log_dir,
| tensorflow.app.flags.DEFINE_integer | 10,725 |
import tensorflow as tf
trainable=trainable
)
sigma = tf.layers.dense(inputs=l1,
units=action_dim, # output units
activation=tf.nn.softplus, # get action probabilities
name='sigma',
trainable=trainable
)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def choose_action(self, s):
    # decide what to do next
    # s = s[np.newaxis, :]
    s = s.reshape(-1, 84, 84, 3)
    a = self.sess.run(self.sample_op, {self.tfs: s})[0]
    return np.clip(a, ACTION_BOUND[0], ACTION_BOUND[1])
| tensorflow.get_collection | 10,726 |
import tensorflow as tf
input_files = []
for input_pattern in FLAGS.input_file.split(","):
  input_files.extend(tf.gfile.Glob(input_pattern))

tf.logging.info("*** Input Files ***")
for input_file in input_files:
  tf.logging.info("  %s" % input_file)

tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project
| tensorflow.logging.info | 10,727 |
from tensorflow.python.framework import ops as _ops
_ops.RegisterShape("TestStringOutput")(None)
def _InitOpDefLibrary():
| tensorflow.python.framework.ops.RegisterShape | 10,728 |
import tensorflow as tf
def input_fn(params):
  """The actual input function."""
  batch_size = params["batch_size"]

  name_to_features = {
      "input_ids":
          tf.FixedLenFeature([max_seq_length], tf.int64),
      "input_mask":
          tf.FixedLenFeature([max_seq_length], tf.int64),
      "segment_ids":
          tf.FixedLenFeature([max_seq_length], tf.int64),
      "masked_lm_positions":
          tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
      "masked_lm_ids":
          tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
      "masked_lm_weights":
          tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
      "next_sentence_labels":
          tf.FixedLenFeature([1], tf.int64),
  }

  # For training, we want a lot of parallel reading and shuffling.
  # For eval, we want no shuffling and parallel reading doesn't matter.
  if is_training:
| tensorflow.FixedLenFeature | 10,729 |
import tensorflow as tf
box_xy, box_wh, obj, cls = tf.split(logits, (2, 2, 1, num_classes), axis=-1)
box_xy = tf.sigmoid(box_xy)
obj = tf.sigmoid(obj)
cls = tf.sigmoid(cls)
anchors = anchors.astype(np.float32)
grid_shape = x_shape[1:3]
# print(grid_shape)
grid_h, grid_w = grid_shape[0], grid_shape[1]
# print(grid_h,tf.range(grid_h))
grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, dtype)) * stride
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2.
box_x2y2 = box_xy + box_wh / 2.
box = tf.concat([box_x1y1, box_x2y2], axis=-1)
boxes.append(tf.reshape(box, (x_shape[0], -1, 1, 4)))
objects.append(tf.reshape(obj, (x_shape[0], -1, 1)))
| tensorflow.range | 10,730 |
import tensorflow as tf
attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask') # bs,bn,bl,bl
# attention
f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
dependent_head = linear(
rep_map, 2 * ivec, False, 0., 'linear_dependent_head', False, wd, keep_prob, is_train) # bs,bn,bl,2vec
| tensorflow.constant_initializer | 10,731 |
import tensorflow as tf
# patches = tf.gather(patches, rand_idx, axis=0)
rows = tf.split(patches,n_col//self.size,axis=0)
| tensorflow.split | 10,732 |
import tensorflow as tf
del reduction_axes[axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[axis] = input_shape[axis]
# case: train mode (uses stats of the current batch)
mean = tf.reduce_mean(_x, axis=reduction_axes)
brodcast_mean = tf.reshape(mean, broadcast_shape)
std = tf.reduce_mean(tf.square(_x - brodcast_mean) + epsilon, axis=reduction_axes)
std = tf.sqrt(std)
brodcast_std = tf.reshape(std, broadcast_shape)
x_normed = (_x - brodcast_mean) / (brodcast_std + epsilon)
# x_normed = tf.layers.batch_normalization(_x, center=False, scale=False)
x_p = tf.sigmoid(x_normed)
return alphas * (1.0 - x_p) * _x + x_p * _x
| tensorflow.sqrt | 10,733 |
import tensorflow as tf
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
    all(isinstance(dim, int) for dim in shape_b)):
  if shape_a != shape_b:
    raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
  else: return tf.no_op()
else:
  return tf.assert_equal(shape_a, shape_b)
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
"""Asserts that shape_a and shape_b are the same along the 0th-dimension.
If the shapes are static, raises a ValueError when the shapes
mismatch.
| tensorflow.no_op | 10,734 |
import tensorflow as tf
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
    # returns a random value drawn from a truncated normal distribution
    initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
    return tf.Variable(initial)

def bias_varibale(shape):
    initial = tf.constant(0.1, shape=shape, name='Bias')
    return tf.Variable(initial)

# Convolution and Pooling
def conv2d(x, W):
    # Must have `strides[0] = strides[3] = 1`.
    # For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
    return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')

def max_pool_2x2(x):
| tensorflow.truncated_normal | 10,735 |
import tensorflow as tf
q_online_tp1 = q_online(obs_tp1_float)
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='online_q_func')
q_target = q_func(obs_tp1_float,num_actions,scope="target_q_func",reuse=False)
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='target_q_func')
# Bellman training error
if double_q:
    q_max = gather_2d(q_target, tf.argmax(q_online_tp1, axis=1, output_type=tf.int32))
else:
    q_max = tf.reduce_max(q_target, axis=1)
target = rew_t_ph + gamma * q_max * (1.0 - done_mask_ph)
q_t_act = gather_2d(q_online_t,act_t_ph)
total_error = tf.reduce_mean(huber_loss(target - q_t_act))
######
# construct optimization op (with gradient clipping)
learning_rate = tf.placeholder(tf.float32, (), name="learning_rate")
| tensorflow.reduce_max | 10,736 |
import tensorflow as tf
)
# Episodes index
self.episode_count = tf.get_variable(
name='episode-count',
dtype=util.tf_dtype('int'),
initializer=0,
trainable=False
)
def tf_store(self, states, internals, actions, terminal, reward):
    # Memory indices to overwrite.
    num_instances = tf.shape(input=terminal)[0]
    with tf.control_dependencies([tf.assert_less_equal(num_instances, self.capacity)]):
        indices = tf.range(self.memory_index, self.memory_index + num_instances) % self.capacity

    # Remove episode indices.
    num_episodes = tf.count_nonzero(
        input_tensor=tf.gather(params=self.terminal_memory, indices=indices),
        axis=0,
        dtype=util.tf_dtype('int')
    )
    num_episodes = tf.minimum(x=num_episodes, y=self.episode_count)
    assignment = tf.assign(
        ref=self.episode_indices[:self.episode_count - num_episodes],
        value=self.episode_indices[num_episodes: self.episode_count]
| tensorflow.assert_less_equal | 10,737 |
import tensorflow as tf
backward_cell = tf.nn.rnn_cell.LSTMCell(128)
encoder_outputs,_ = tf.nn.bidirectional_dynamic_rnn(
forward_cell,
backward_cell,
logits,
dtype=tf.float32
)
encoder_outputs = tf.concat(encoder_outputs, axis=2)
logits = tf.reshape(tf.layers.dense(encoder_outputs, units=num_classes), [-1, bil_lstm_win_size, num_classes])
return logits
def cnn_model(x, amp_factor=1):
    with tf.variable_scope('model'):
        conv1 = tf.layers.conv2d(x, filters=32*amp_factor, kernel_size=[5, 3],
                                 data_format='channels_last', padding="same",
| tensorflow.layers.dense | 10,738 |
import tensorflow as tf
box_diff = bbox_pred - bbox_targets
in_box_diff = bbox_inside_weights * box_diff  # rows belonging to the foreground are non-zero; all other rows are zero
abs_in_box_diff = tf.abs(in_box_diff)
| tensorflow.abs | 10,739 |
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
def begin(self):
  self._last_step = None
  self._global_step_tensor = contrib_variables.get_global_step()
  for m in self._monitors:
| tensorflow.contrib.framework.python.ops.variables.get_global_step | 10,740 |
import tensorflow as tf
lookup_table = tf.get_variable(
"lookup_table",
dtype=tf.float32,
shape=[len(vocab), size_layers],
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
)
lookup_table = tf.concat((tf.zeros(shape=[1, size_layers]), lookup_table[1:, :]), 0)
forward = tf.nn.embedding_lookup(lookup_table, self.X)
self.Y = tf.placeholder(tf.float32, (None, None, n_mels * resampled))
self.decoder_inputs = tf.concat((tf.zeros_like(self.Y[:, :1, :]), self.Y[:, :-1, :]), 1)
self.decoder_inputs = self.decoder_inputs[:, :, -n_mels:]
self.Z = tf.placeholder(tf.float32, (None, None, fourier_window_size // 2 + 1))
batch_size = tf.shape(self.X)[0]
seq_lens = tf.count_nonzero(tf.reduce_sum(self.decoder_inputs, -1), 1, dtype=tf.int32) + 1
def cells(reuse=False):
    return tf.contrib.rnn.DropoutWrapper(
        tf.nn.rnn_cell.LSTMCell(
            size_layers, initializer=tf.orthogonal_initializer(), reuse=reuse
        ),
        state_keep_prob=dropout,
        output_keep_prob=dropout,
    )

def attention(encoder_out, seq_len, reuse=False):
| tensorflow.shape | 10,741 |
import tensorflow as tf
valid_image_batch,valid_label_batch=get_valid_batch(valid_image,valid_label,validnum)
valid_inf=work.valid_inference(valid_image_batch)
valid_labels=tf.one_hot(valid_label_batch,classnum)
#train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
valid_pre = tf.reshape(valid_inf, [validnum, classnum])
valid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1))
valid_accuracy=tf.reduce_mean(tf.cast(valid_correct_prediction,tf.float32))
valid_pre = tf.argmax(valid_pre, 1)
valid_true = tf.argmax(valid_labels, 1)
| tensorflow.reshape | 10,742 |
import tensorflow as tf
attn_score = tf.nn.softmax(logits_masked, 3) # bs,bn,bl,bl,vec
attn_score = mask_for_high_rank(attn_score, attn_mask) # bs,bn,bl,bl,vec
self_attn_result = tf.reduce_sum(attn_score * rep_map_tile, 3) # bs,bn,bl,vec
with tf.variable_scope('source2token_self_attn'):
    inter_block_logits = bn_dense_layer(self_attn_result, ivec, True, 0., 'bn_dense_map', 'linear',
                                        False, wd, keep_prob, is_train)  # bs,bn,bl,vec
    inter_block_logits_masked = exp_mask_for_high_rank(inter_block_logits, rep_mask_split)  # bs,bn,bl,vec
    inter_block_soft = tf.nn.softmax(inter_block_logits_masked, 2)  # bs,bn,bl,vec
    inter_block_attn_output = tf.reduce_sum(self_attn_result * inter_block_soft, 2)  # bs,bn,vec

with tf.variable_scope('self_attn_inter_block'):
    inter_block_attn_output_mask = tf.cast(tf.ones([bs, bn], tf.int32), tf.bool)
    block_ct_res = directional_attention_with_dense(
        inter_block_attn_output, inter_block_attn_output_mask, direction, 'disa',
        keep_prob, is_train, wd, activation
    )  # [bs,bn,vec]
    block_ct_res_tile = tf.tile(tf.expand_dims(block_ct_res, 2), [1, 1, bl, 1])  # [bs,bn,vec]->[bs,bn,bl,vec]

with tf.variable_scope('combination'):
    # input: 1.rep_map[bs,bn,bl,vec]; 2.self_attn_result[bs,bn,bl,vec]; 3.rnn_res_tile[bs,bn,bl,vec]
    rep_tensor_with_ct = tf.concat([rep_map, self_attn_result, block_ct_res_tile], -1)  # [bs,bn,bl,3vec]
    new_context_and_gate = linear(rep_tensor_with_ct, 2 * ivec, True, 0., 'linear_new_context_and_gate',
| tensorflow.ones | 10,743 |
import tensorflow as tf
rconst.MASK_START_INDEX: tf.FixedLenFeature([1], dtype=tf.string),
"labels": tf.FixedLenFeature([], dtype=tf.string),
}
_EVAL_FEATURE_MAP = {
movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
rconst.DUPLICATE_MASK: tf.FixedLenFeature([], dtype=tf.string)
}
| tensorflow.FixedLenFeature | 10,744 |
import tensorflow as tf
ch_emb = tf.reshape(tf.nn.embedding_lookup(
self.char_mat, self.ch), [N * PL * self.max_p_num, CL, dc])
qh_emb = tf.reshape(tf.nn.embedding_lookup(
self.char_mat, self.qh), [N * QL * self.max_p_num, CL, dc])
ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
ch_emb = conv(ch_emb, d,
bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=None)
qh_emb = conv(qh_emb, d,
| tensorflow.nn.dropout | 10,745 |
import tensorflow as tf
Return:
A tuple of `SparseTensor` (neibors, weights).
neighbors: A `SparseTensor` of `int64`.
weights: A `SparseTensor` of `float`.
types: A `SparseTensor` of `int32`
"""
sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types)
return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
tf.SparseTensor(*sp_returns[6:])
def get_sorted_full_neighbor(nodes, edge_types):
"""
Args:
| tensorflow.SparseTensor | 10,746 |
import tensorflow as tf
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = tf_util.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
tf.summary.scalar("td_error", tf.reduce_mean(td_error))
tf.summary.scalar("loss", weighted_error)
if full_tensorboard_log:
    tf.summary.histogram("td_error", td_error)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
| tensorflow.reduce_mean | 10,747 |
import tensorflow as tf
segment_ids.append(0)
label_ids.append(0)
# print(len(input_ids))
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
  tf.logging.info("*** Example ***")
  tf.logging.info("guid: %s" % (example.guid))
  tf.logging.info("tokens: %s" % " ".join(
      [tokenization.printable_text(x) for x in tokens]))
  tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
  tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
  tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
  tf.logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
| tensorflow.logging.info | 10,748 |
import tensorflow as tf
def _word_embedding(self, inputs, reuse=False):
    with tf.variable_scope('word_embedding', reuse=reuse):
        w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
| tensorflow.variable_scope | 10,749 |
import tensorflow as tf
a `float` `scalar`, KL divergence.
"""
logits = tf.stop_gradient(logits)
weights = _end_of_seq_mask(labels, vocab_size)
d = _mask_by_length(tf.random_normal(shape=tf.shape(embedded)), length)
for _ in range(num_power_iteration):
    d = _scale_l2(d, small_constant_for_finite_diff)
    d_logits = logits_from_embedding_fn(embedded + d)
    kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)
    d, = tf.gradients(kl, d, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
    d = tf.stop_gradient(d)
perturb = _scale_l2(_mask_by_length(d, length), perturb_norm_length)
vadv_logits = logits_from_embedding_fn(embedded + perturb)
return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)
def random_perturbation_loss_brnn(embedded, length, loss_fn, perturb_norm_length=0.1):
    """Adds noise to embeddings and recomputes classification loss for
    bidirectional rnn models.
| tensorflow.gradients | 10,750 |
import tensorflow as tf
location_pred = tf.reshape(location_pred, [-1, 4])
glabels = tf.reshape(glabels, [-1])
gscores = tf.reshape(gscores, [-1])
gtargets = tf.reshape(gtargets, [-1, 4])
# raw mask for positive > 0.5, and for negetive < 0.3
| tensorflow.reshape | 10,751 |
import tensorflow as tf
def _norm(x, g=None, b=None, e=1e-5, axis=[1]):
    u = tf.reduce_mean(x, axis=axis, keep_dims=True)
| tensorflow.reduce_mean | 10,752 |
import tensorflow as tf
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
'match_threshold', 0.56, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
    'neg_threshold', 0.4, 'Matching threshold for the negative examples in the loss function.')
# optimizer related configuration
tf.app.flags.DEFINE_float(
'weight_decay', 0.0005, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
| tensorflow.app.flags.DEFINE_float | 10,753 |
import tensorflow as tf
self.strides = [1, d_h, d_w, 1]
self.padding = padding
self.epsilon = epsilon
def __call__(self, input_var, name=None, **kwargs):
    def _init():
        v_norm = tf.nn.l2_normalize(self.v, axis=[0, 1, 2])
        t = tf.nn.conv2d(input_var, v_norm, self.strides, self.padding, data_format='NHWC')
        mu, var = tf.nn.moments(t, axes=[0, 1, 2])
        std = tf.sqrt(var + self.epsilon)
        return [tf.assign(self.g, 1/std), tf.assign(self.b, -1.*mu/std)]

    require_init = tf.reduce_any(tf.is_nan(self.g))
    init_ops = tf.cond(require_init, _init, lambda: [self.g, self.b])

    with tf.control_dependencies(init_ops):
| tensorflow.nn.conv2d | 10,754 |
import tensorflow as tf
# hss(s): eta * (\varphi(s)^T * K^T * \Sigma^{-1} * K * \varphi(s))
varphisKt = tf.matmul(varphis, Kt)
| tensorflow.matmul | 10,755 |
import tensorflow as tf
while_loop = tf.contrib.tpu.while_loop if params['use_tpu'] else tf.while_loop
# train the discriminator 100 steps
inputs = [tf.constant(0), tf.constant(0.0)]
cond = lambda i, x: tf.less(i, 100)
def body(i, x):
| tensorflow.constant | 10,756 |
import tensorflow as tf
items = sorted(items, key=lambda x: x[0])
existing = {}
for name, params in items:
  outdir = params.save_episode_dir
  tf.gfile.MakeDirs(outdir)
  if outdir not in existing:
    existing[outdir] = len(tf.gfile.Glob(os.path.join(outdir, '*.npz')))
  if params.num_episodes <= existing[outdir]:
| tensorflow.gfile.MakeDirs | 10,757 |
from tensorflow.python.ops import array_ops
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros)
neg_p_sub = array_ops.where(labels > zeros, zeros, logits)
cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(logits, 1e-8, 1.0)) \
| tensorflow.python.ops.array_ops.where | 10,758 |
import tensorflow as tf
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self):
  test_dir = self._TestDir("saver_collection")
  filename = os.path.join(test_dir, "metafile")
  saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
  saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
  with self.test_session(graph=tf.Graph()) as sess:
    # Creates a graph.
    v0 = tf.Variable(10.0, name="v0")
    v1 = tf.Variable(11.0, name="v1")
    # Creates 2 savers.
    saver0 = tf.train.Saver({"v0": v0}, name="saver0")
    saver1 = tf.train.Saver({"v1": v1}, name="saver1")
    tf.add_to_collection("savers", saver0)
    tf.add_to_collection("savers", saver1)
    tf.initialize_all_variables().run()
    # Saves to different checkpoints.
    saver0.save(sess, saver0_ckpt)
    saver1.save(sess, saver1_ckpt)
    # Generates MetaGraphDef.
    meta_graph_def = tf.train.export_meta_graph(filename)
    meta_graph_def0 = saver0.export_meta_graph()
    meta_graph_def1 = saver1.export_meta_graph()
    # Verifies that there is no saver_def in meta_graph_def.
    self.assertFalse(meta_graph_def.HasField("saver_def"))
    # Verifies that there is saver_def in meta_graph_def0 and 1.
| tensorflow.add_to_collection | 10,759 |
import tensorflow as tf
if is_training:
  # I.e., 0.1 dropout
  output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
| tensorflow.nn.softmax | 10,760 |
import tensorflow as tf
shape = np.array(shape)
tt_rank = np.array(tt_rank)
_validate_input_parameters(is_tensor=True, shape=shape, tt_rank=tt_rank,
batch_size=batch_size)
num_dims = shape.size
if tt_rank.size == 1:
  tt_rank = tt_rank * np.ones(num_dims - 1)
  tt_rank = np.insert(tt_rank, 0, 1)
  tt_rank = np.append(tt_rank, 1)
tt_rank = tt_rank.astype(int)
cr_exponent = -1.0 / (2 * num_dims)
var = np.prod(tt_rank ** cr_exponent)
cr_stddev = stddev ** (1.0 / num_dims) * var
with tf.name_scope(name):
  tt = tensor_batch_with_random_cores(shape, tt_rank=tt_rank, stddev=cr_stddev,
                                      batch_size=batch_size, dtype=dtype)
if np.abs(mean) < 1e-8:
  return tt
else:
  raise NotImplementedError('non-zero mean is not supported yet')

def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
                  dtype=tf.float32, name='t3f_random_matrix'):
  """Generate a random TT-matrix of the given shape with given mean and stddev.
| tensorflow.name_scope | 10,761 |
import tensorflow as tf
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
  raise ValueError(
      "Cannot use sequence length %d because the BERT model_bak "
      "was only trained up to sequence length %d" %
      (FLAGS.max_seq_length, bert_config.max_position_embeddings))

tf.gfile.MakeDirs(FLAGS.output_dir)

task_name = FLAGS.task_name.lower()
if task_name not in processors:
  raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
| tensorflow.gfile.MakeDirs | 10,762 |
import tensorflow as tf
def testInvalidConcentration(self):
  scale = 1.
  invalid_concentrations = [-.01, 0., -2.]
  for concentration in invalid_concentrations:
    with self.assertRaisesOpError("Condition x > 0"):
      pareto = tfd.Pareto(concentration, scale, validate_args=True)
      self.evaluate(pareto.concentration)

def testParetoLogPdf(self):
  batch_size = 6
  scale = tf.constant([3.] * batch_size)
  scale_v = 3.
  concentration = tf.constant([2.])
  concentration_v = 2.
  x = [3., 3.1, 4., 5., 6., 7.]
  pareto = tfd.Pareto(concentration, scale)
  log_prob = pareto.log_prob(x)
  self.assertEqual(log_prob.shape, (6,))
  self.assertAllClose(
      self.evaluate(log_prob),
      self._scipy_pareto(concentration_v, scale_v).logpdf(x))
| tensorflow.constant | 10,763 |
import tensorflow as tf
# Read image
img = Image.open(image_path)
img = img.resize([width,height], Image.ANTIALIAS)
img = np.array(img).astype('float32')
img = np.expand_dims(np.asarray(img), axis = 0)
# Create a placeholder for the input image
input_node = tf.placeholder(tf.float32, shape=(None, height, width, channels))
# Construct the network
net = models.ResNet50UpProj({'data': input_node}, batch_size)
with tf.Session() as sess:
    # Load the converted parameters
| tensorflow.placeholder | 10,764 |
import tensorflow as tf
val_losses_warmup = deque(maxlen=warmup_n_model_iters // FLAGS.model.validation_freq)
val_losses_slbo = deque(maxlen=slbo_n_model_iters // FLAGS.model.validation_freq)
# NOTE: For each test task, we should reset model to the loaded one, and randomly initialize policy and vfn
#if test:
# saver.load_state_dict(np.load(model_load, allow_pickle=True)[()])
# logger.warning('Load model from %s', model_load)
if test:
    logger.info("################################################## TESTING TASK %d ################################################", TEST_TASK_NUM)
    logger.info(f'TEST_TASK_NUM={TEST_TASK_NUM}, TASK_NUM={TASK_NUM}')
    logger.warning('Revert model and normalizers')
    tf.get_default_session().run(sync_model_from_lazymodel)
    tf.get_default_session().run(revert_normalizers)
else:
    logger.info("################################################## TRAINING TASK %d ################################################", TASK_NUM)

if test:
    test_returns = []
    test_summary['warmupprocess'].append([])
    test_summary['slbo'].append([])

if not test:  # and FLAGS.task.method == 'random':
    if inittask != 'none' and TASK_NUM == 1:
| tensorflow.get_default_session | 10,765 |
from tensorflow.python.framework import ops
arguments are always checked.)
name: `String`. The name prepended to Ops created by this class.
Raises:
ValueError: if either `batch_ndims` or `event_ndims` are: `None`,
negative, not `int32`.
"""
if batch_ndims is None: raise ValueError("batch_ndims cannot be None")
if event_ndims is None: raise ValueError("event_ndims cannot be None")
self._batch_ndims = batch_ndims
self._event_ndims = event_ndims
self._validate_args = validate_args
with ops.name_scope(name) as ns:
self._name = ns
with ops.name_scope("init"):
self._batch_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
batch_ndims, name="batch_ndims"))
self._batch_ndims_static, self._batch_ndims_is_0 = (
self._introspect_ndims(self._batch_ndims))
self._event_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
event_ndims, name="event_ndims"))
self._event_ndims_static, self._event_ndims_is_0 = (
| tensorflow.python.framework.ops.name_scope | 10,766 |
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
def fit(self,
x=None,
y=None,
input_fn=None,
steps=None,
batch_size=None,
monitors=None,
max_steps=None):
"""See trainable.Trainable."""
# TODO(roumposg): Remove when deprecated monitors are removed.
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
self._estimator.fit(x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=hooks,
max_steps=max_steps)
return self
def evaluate(self,
| tensorflow.contrib.learn.python.learn.monitors.replace_monitors_with_hooks | 10,767 |
import tensorflow as tf
outputs = tf.nn.conv3d(inputs, kernel,
[1, stride_d, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
| tensorflow.constant_initializer | 10,768 |
import tensorflow as tf
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
if clip:
  log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6))
else:
  log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
| tensorflow.nn.bias_add | 10,769 |
import tensorflow as tf
self.a_grads, _ = tf.clip_by_global_norm(self.a_grads, 20.0)
self.c_grads, _ = tf.clip_by_global_norm(self.c_grads, 20.0)
| tensorflow.clip_by_global_norm | 10,770 |
import tensorflow as tf
activation=None,
name='V'
)
# compute the loss
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# Actor
# build the network
| tensorflow.square | 10,771 |
import tensorflow as tf
from metric import tf_metrics
from optimizer import distributed_optimizer as optimizer
from model_io import model_io
from distillation import knowledge_distillation as distill
def correlation(x, y):
    x = x - tf.reduce_mean(x, axis=-1, keepdims=True)
    y = y - tf.reduce_mean(y, axis=-1, keepdims=True)
    x = tf.nn.l2_normalize(x, -1)
    y = tf.nn.l2_normalize(y, -1)
    return -tf.reduce_sum(x*y, axis=-1)  # higher the better

def kd(x, y):
    x_prob = tf.nn.softmax(x)
| tensorflow.reduce_mean | 10,772 |
import tensorflow as tf
if decoder.use_lstm is False:
    decoder.cell_type = 'GRU'

embedding_shape = [decoder.vocab_size, decoder.embedding_size]
weight_scale = decoder.embedding_weight_scale or decoder.weight_scale

if weight_scale is None:
    initializer = None  # FIXME
elif decoder.embedding_initializer == 'uniform' or (decoder.embedding_initializer is None
                                                    and decoder.initializer == 'uniform'):
    initializer = tf.random_uniform_initializer(minval=-weight_scale, maxval=weight_scale)
else:
    initializer = tf.random_normal_initializer(stddev=weight_scale)

with tf.device('/cpu:0'):
    embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape, initializer=initializer)

input_shape = tf.shape(decoder_inputs)
batch_size = input_shape[0]
time_steps = input_shape[1]

scope_name = 'decoder_{}'.format(decoder.name)
scope_name += '/' + '_'.join(encoder.name for encoder in encoders)

def embed(input_):
    embedded_input = tf.nn.embedding_lookup(embedding, input_)
    if decoder.use_dropout and decoder.word_keep_prob is not None:
| tensorflow.device | 10,773 |
import tensorflow as tf
rightmost_transposed_ndims = tf.size(
input=perm, name='rightmost_transposed_ndims')
rightmost_transposed_ndims_ = tf.get_static_value(
rightmost_transposed_ndims)
| tensorflow.get_static_value | 10,774 |
import tensorflow as tf
# set some important options
if self._gpu == -1:
    sess_config = tf.ConfigProto(device_count={'GPU': 0},
                                 allow_soft_placement=True)
else:
    # config = tf.ConfigProto(log_device_placement=True)
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True
# self.sess = tf.Session(config=config)
# self.sess = tf.InteractiveSession()
| tensorflow.ConfigProto | 10,775 |
import tensorflow as tf
if trainable:
    self.lr = tf.minimum(config.learning_rate, 0.001 / tf.log(999.) * tf.log(tf.cast(self.global_step, tf.float32) + 1))
    self.opt = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.8, beta2=0.999, epsilon=1e-7)
    grads = self.opt.compute_gradients(self.loss)
    gradients, variables = zip(*grads)
    capped_grads, _ = tf.clip_by_global_norm(
        gradients, config.grad_clip)
    self.train_op = self.opt.apply_gradients(
        zip(capped_grads, variables), global_step=self.global_step)

def forward(self):
| tensorflow.clip_by_global_norm | 10,776 |
import tensorflow as tf
final_loss = tf.reduce_mean(loss)
return final_loss, cstr_pct
def contra_traj_lossV7(pred, tgt, horizon=12, temp=100):
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
    pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2
    geq = tf.cast(tgt_dif > 0, tf.bool)
    tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
    pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
    loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
    cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
| tensorflow.reshape | 10,777 |
import tensorflow as tf
tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]),
[depth, height, width])
y_t = tf.reshape(
tf.tile(tf.linspace(-1.0, 1.0, height), [width * depth]),
[depth, width, height])
y_t = tf.transpose(y_t, [0, 2, 1])
sample_grid = tf.tile(
tf.linspace(float(z_near), float(z_far), depth), [width * height])
z_t = tf.reshape(sample_grid, [height, width, depth])
z_t = tf.transpose(z_t, [2, 0, 1])
z_t = 1 / z_t
d_t = 1 / z_t
x_t /= z_t
y_t /= z_t
| tensorflow.reshape | 10,778 |
import tensorflow as tf
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2
geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
unorm_w = tf.exp((tgt_flat1 + tgt_flat2)/temp)
loss = unorm_w * loss / (tf.reduce_sum(unorm_w))
a = tf.print(tf.reduce_sum(unorm_w))
| tensorflow.shape | 10,779 |
import tensorflow as tf
mask_neg = tf.equal(class_true, 0)
mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
# landm loss (smooth L1)
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
tf.boolean_mask(landm_pred, mask_landm_b))
loss_landm = tf.reduce_mean(loss_landm)
# localization loss (smooth L1)
mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true))
loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
tf.boolean_mask(loc_pred, mask_pos_b))
loss_loc = tf.reduce_mean(loss_loc)
# classification loss (crossentropy)
# 1. compute max conf across batch for hard negative mining
loss_class = tf.where(mask_neg,
1 - class_pred[:, 0][..., tf.newaxis], 0)
# 2. hard negative mining
loss_class = tf.reshape(loss_class, [num_batch, num_prior])
loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING')
loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
num_pos_per_batch = tf.reduce_sum(
| tensorflow.reduce_mean | 10,780 |
import tensorflow as tf
qh_emb = conv(qh_emb, d,
bias = True, activation = tf.nn.relu, kernel_size = 5, name = "char_conv", reuse = True)
ch_emb = tf.reduce_max(ch_emb, axis = 1)
qh_emb = tf.reduce_max(qh_emb, axis = 1)
ch_emb = tf.reshape(ch_emb, [N, PL, ch_emb.shape[-1]])
qh_emb = tf.reshape(qh_emb, [N, QL, ch_emb.shape[-1]])
c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout)
q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout)
c_emb = tf.concat([c_emb, ch_emb], axis=2)
q_emb = tf.concat([q_emb, qh_emb], axis=2)
c_emb = highway(c_emb, size = d, scope = "highway", dropout = self.dropout, reuse = None)
q_emb = highway(q_emb, size = d, scope = "highway", dropout = self.dropout, reuse = True)
with tf.variable_scope("Embedding_Encoder_Layer"):
c = residual_block(c_emb,
num_blocks = 1,
num_conv_layers = 4,
kernel_size = 7,
mask = self.c_mask,
num_filters = d,
num_heads = nh,
| tensorflow.concat | 10,781 |
import tensorflow as tf
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
    scores = tf.nn.softmax(scores)  # [B, 1, T]

# Weighted sum
if mode == 'SUM':
    output = tf.matmul(scores, facts)  # [B, 1, H]
    # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
    scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
    output = facts * tf.expand_dims(scores, -1)
    output = tf.reshape(output, tf.shape(facts))
return output

def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
    if isinstance(facts, tuple):
| tensorflow.matmul | 10,782 |
import tensorflow as tf
max_val = params.initializer_gain
return tf.random_uniform_initializer(-max_val, max_val)
elif params.initializer == "normal":
return tf.random_normal_initializer(0.0, params.initializer_gain)
elif params.initializer == "normal_unit_scaling":
return tf.variance_scaling_initializer(params.initializer_gain,
| tensorflow.random_normal_initializer | 10,783 |
import tensorflow as tf
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
| tensorflow.constant | 10,784 |
import tensorflow as tf
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
  with self.test_session() as sess:
    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
      dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
      cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
| tensorflow.constant_initializer | 10,785 |
import tensorflow.contrib.graph_editor as ge
debug_print("Processing list %s", ts)
checkpoints_other = [r for r in checkpoints if r not in ts]
checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other]
# copy part of the graph below current checkpoint node, stopping at
# other checkpoints nodes
ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other)
debug_print("Found %s ops to copy within %s, seed %s, stop_at %s",
len(ops_to_copy), fwd_ops, [r.op for r in ts],
checkpoints_other)
debug_print("ops_to_copy = %s", ops_to_copy)
if not ops_to_copy:  # we're done!
    break
copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for origin_op, op in info._transformed_ops.items():
    op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print("Copied %s to %s", ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops)
debug_print("Rewired %s in place of %s restricted to %s",
checkpoints_disconnected_other, checkpoints_other, copied_ops)
# gradient flowing through the checkpointed node
boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts]
substitute_backprops = [d_checkpoints[r] for r in ts]
dv = tf_gradients(boundary,
| tensorflow.contrib.graph_editor.sgv | 10,786 |
import tensorflow as tf
if self.inputs.get_shape().ndims != 2:
    raise Exception("The input dimension must be rank 2")
n_in = int(self.inputs.get_shape()[-1])
self.n_units = n_units
with tf.variable_scope(name):
    W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
    b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args)
    # self.outputs = act(tf.matmul(self.inputs, W) + b)
| tensorflow.variable_scope | 10,787 |
import tensorflow as tf
def _tower_loss_semi_supervised(self, inputs, targets, gpu_idx=0, num_classes=11,
                                is_fm_loss=False):
    with tf.variable_scope("train_specific"):
        avg_error_rate = tf.get_variable(
            'avg_error_rate', [], initializer=tf.constant_initializer(0.), trainable=False)
        num_error_rate = tf.get_variable(
            'num_error_rate', [], initializer=tf.constant_initializer(0.), trainable=False)

    batch_size_train = self.cnf['batch_size_train']
| tensorflow.constant_initializer | 10,788 |
import tensorflow as tf
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
perturbed_stochastic_actions = tf.where(chose_random, random_actions, perturbed_deterministic_actions)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
perturbed_output_actions = tf.cond(stochastic_ph, lambda: perturbed_stochastic_actions,
lambda: deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
| tensorflow.cond | 10,789 |
import tensorflow as tf
logger.info('Update all copies! (lazymodel, normalizers_copy)')
tf.get_default_session().run(sync_model_to_lazymodel)
tf.get_default_session().run(copy_normalizers)
| tensorflow.get_default_session | 10,790 |
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", "2", "batch size for training")
tf.flags.DEFINE_string("logs_dir", r"E:\work\01-Myproject\imag_division\FCN.tensorflow-master\logs", "path to logs directory")
tf.flags.DEFINE_string("data_dir", r"E:\work\01-Myproject\imag_division\FCN.tensorflow-master\Data_zoo\STEM", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", r"E:\work\01-Myproject\imag_division\FCN.tensorflow-master\Model_zoo", "Path to vgg model mat")
| tensorflow.flags.DEFINE_string | 10,791 |
import tensorflow as tf
p.input.cur_iter_in_seed = False
p.input.bucket_batch_limit = [
b * 2 / num_splits for b in p.input.bucket_batch_limit
]
with cluster_factory.ForTestingWorker(gpus=num_splits):
mdl = p.Instantiate()
metrics = mdl.FPropDefaultTheta()[0]
tf.global_variables_initializer().run()
return sess.run(metrics['loss'])
res1, res2 = Run(1), Run(2)
self.assertAllClose(res1[0], res2[0])
self.assertAllEqual(res1[1], res2[1])
| tensorflow.global_variables_initializer | 10,792 |
import tensorflow as tf
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
    scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
    output = facts * tf.expand_dims(scores, -1)
    output = tf.reshape(output, tf.shape(facts))
if return_alphas:
    return output, scores
return output

def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    def cond(batch, output, i):
        return tf.less(i, tf.shape(batch)[1])

    def body(batch, output, i):
        self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
                                               ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
                                               mode='LIST')
        self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
        output = output.write(i, self_attention_tmp)
        return batch, output, i + 1

    output_ta = tf.TensorArray(dtype=tf.float32,
                               size=0,
                               dynamic_size=True,
                               element_shape=(facts[:, 0, :].get_shape()))
| tensorflow.shape | 10,793 |
import tensorflow as tf
the dual variable is scaled by this factor.
Returns:
dual_value: An op that computes the absolute value of the dual variable
and reverses its gradient.
dual_variable: The underlying variable itself.
"""
# We disable partitioning while constructing dual variables because they will
# be updated with assign, which is not available for partitioned variables.
partitioner = tf.get_variable_scope().partitioner
try:
  tf.get_variable_scope().set_partitioner(None)
  dual_variable = tf.contrib.framework.model_variable(
      name=name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      collections=collections,
      trainable=trainable)
finally:
  tf.get_variable_scope().set_partitioner(partitioner)
| tensorflow.get_variable_scope | 10,794 |
from tensorflow.python.framework import ops
if self._grad_func:
  self._grad_func.add_to_graph(g)

def __call__(self, *args, **kwargs):
  self.add_to_graph(ops.get_default_graph())
  args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
  return _call(self._definition.signature, *args, **kwargs)
| tensorflow.python.framework.ops.get_default_graph | 10,795 |
import tensorflow as tf
return gauc / pv_sum
def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)
    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    mask = tf.equal(mask, tf.ones_like(mask))
    hidden_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    input_size = query.get_shape().as_list()[-1]

    # Trainable parameters
    w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
    b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
| tensorflow.array_ops.transpose | 10,796 |
import tensorflow as tf
def get_valid_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
            self.w1 = tf.get_variable('w1', [4096, 2048], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w2 = tf.get_variable('w2', [2048, 3072], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w3 = tf.get_variable('w3', [3072, 512], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w4 = tf.get_variable('w4', [512, classnum], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.b1 = tf.get_variable('b1', [2048], initializer=tf.constant_initializer(0.0))
            self.b2 = tf.get_variable('b2', [3072], initializer=tf.constant_initializer(0.0))
| tensorflow.variable_scope | 10,797 |
import tensorflow as tf
return input_tensor
with tf.variable_scope(name_or_scope=name):
| tensorflow.variable_scope | 10,798 |
import tensorflow as tf
# Step-wise contrastive loss
even = [2 * i for i in range(25)]
odd = [2 * i + 1 for i in range(25)]
pred1 = tf.gather(pred, even)
pred2 = tf.gather(pred, odd)
tgt1 = tf.gather(tgt, even)
tgt2 = tf.gather(tgt, odd)
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
# loss = tf.maximum(0.0, tf.math.abs(tgt_larg - pred_larg) - tf.math.abs(tgt_small - pred_small))
loss = tf.reduce_mean(loss)
return loss
def contra_step_lossV5(pred, tgt, resample=1):
    # p = tf.print('begin loss v5', [resample, pred.shape, tgt.shape])
    # with tf.control_dependencies([p]):
    pred_flat = tf.reshape(pred, [-1])
    tgt_flat = tf.reshape(tgt, [-1])
    batch = tf.stack([pred_flat, tgt_flat], 1)
    num_sam = tools.shape(batch)[0]
    index = tf.range(num_sam)
| tensorflow.maximum | 10,799 |