seed (string, 25–2.89k chars) | seed_api (string, 14–102 chars) | index (int64, 0–14.8k)
---|---|---|
import tensorflow as tf
dtype=tf.float32,
initializer=tf.constant_initializer(
self.vocab.word_embeddings[:2],
dtype=tf.float32),
trainable=True)
self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)
self.pretrained_char_mat = tf.get_variable("char_emb_mat",
[self.vocab.char_size() - 2, self.vocab.char_embed_dim],
dtype=tf.float32,
initializer=tf.constant_initializer(
self.vocab.char_embeddings[2:],
dtype=tf.float32),
trainable=False)
self.char_pad_unk_mat = tf.get_variable("char_unk_pad",
[2, self.pretrained_char_mat.get_shape()[1]],
dtype=tf.float32,
initializer=tf.constant_initializer(
self.vocab.char_embeddings[:2],
dtype=tf.float32),
trainable=True)
| tensorflow.constant_initializer | 12,300 |
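The snippet above follows a common pattern: keep the pad/unk rows of an embedding matrix trainable while freezing the pretrained rows, then concatenate the two pieces into one lookup table. A minimal sketch of that pattern, assuming a hypothetical 100x50 pretrained matrix whose first two rows stand in for the pad/unk tokens:

```python
import numpy as np
import tensorflow as tf

# Hypothetical pretrained embeddings; rows 0-1 stand in for <pad>/<unk>.
pretrained = np.random.randn(100, 50).astype(np.float32)

pad_unk_mat = tf.get_variable(
    "word_unk_pad", [2, 50], dtype=tf.float32,
    initializer=tf.constant_initializer(pretrained[:2]),
    trainable=True)          # pad/unk rows keep learning
pretrained_mat = tf.get_variable(
    "word_emb_mat", [98, 50], dtype=tf.float32,
    initializer=tf.constant_initializer(pretrained[2:]),
    trainable=False)         # pretrained rows stay frozen
word_mat = tf.concat([pad_unk_mat, pretrained_mat], axis=0)  # full [100, 50] table
```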
from tensorflow.python.platform import gfile
output.append(self.vocabulary_.reverse(class_id))
yield ' '.join(output)
def save(self, filename):
"""Saves vocabulary processor into given file.
Args:
filename: Path to output file.
"""
with gfile.Open(filename, 'wb') as f:
f.write(pickle.dumps(self))
@classmethod
def restore(cls, filename):
"""Restores vocabulary processor from given file.
Args:
filename: Path to file to load from.
| tensorflow.python.platform.gfile.Open | 12,301 |
import tensorflow as tf
opt = tf.train.AdamOptimizer(self.LR)
self.update_a_op = opt.apply_gradients(zip(self.a_grads, self.pi_params))
self.update_c_op = opt.apply_gradients(zip(self.c_grads, self.vf_params))
self.sess.run(tf.global_variables_initializer())
# Tensorboard
if summary_dir is not None:
self.writer = tf.summary.FileWriter(summary_dir)
tf.summary.scalar('Loss/Policy', loss_pg)
tf.summary.scalar('Loss/Value', loss_vf)
tf.summary.scalar('Loss/Entropy', - 0.01 * tf.reduce_mean(pi.entropy()))
tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
# AC net
def build_anet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
# sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
sigma = tf.clip_by_value(sigma, 0.0, 1.0)
| tensorflow.reduce_mean | 12,302 |
import tensorflow as tf
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
with tf.variable_scope("input", reuse=reuse):
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
with tf.variable_scope(scope, reuse=reuse):
if param_noise:
act_f, obs_phs = build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess,
param_noise_filter_func=param_noise_filter_func)
else:
| tensorflow.variable_scope | 12,303 |
import tensorflow as tf
for l in range(0,num_layers-1):
W = self.xavier_init(size=[layers[l], layers[l+1]])
b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
weights.append(W)
biases.append(b)
return weights, biases
def xavier_init(self, size):
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2/(in_dim + out_dim))
return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
def neural_net(self, X, weights, biases):
num_layers = len(weights) + 1
H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
for l in range(0,num_layers-2):
W = weights[l]
b = biases[l]
H = tf.tanh(tf.add(tf.matmul(H, W), b))
W = weights[-1]
| tensorflow.truncated_normal | 12,304 |
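The xavier_init helper above scales tf.truncated_normal by sqrt(2 / (fan_in + fan_out)). A standalone sketch of the same initialization, with hypothetical layer sizes:

```python
import numpy as np
import tensorflow as tf

in_dim, out_dim = 64, 32                                   # hypothetical layer sizes
xavier_stddev = np.sqrt(2.0 / (in_dim + out_dim))          # Glorot/Xavier scaling
W = tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev),
                dtype=tf.float32)
b = tf.Variable(tf.zeros([1, out_dim], dtype=tf.float32), dtype=tf.float32)
```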
import tensorflow as tf
if encoder.bidir:
rnn = lambda reuse: stack_bidirectional_dynamic_rnn(
cells_fw=[get_cell(input_size if j == 0 else 2 * cell_output_size, reuse=reuse)
for j in range(encoder.layers)],
cells_bw=[get_cell(input_size if j == 0 else 2 * cell_output_size, reuse=reuse)
for j in range(encoder.layers)],
initial_states_fw=[get_initial_state('initial_state_fw')] * encoder.layers,
initial_states_bw=[get_initial_state('initial_state_bw')] * encoder.layers,
time_pooling=encoder.time_pooling, pooling_avg=encoder.pooling_avg,
**parameters)
initializer = CellInitializer(encoder.cell_size) if encoder.orthogonal_init else None
with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
try:
encoder_outputs_, _, encoder_states_ = rnn(reuse=False)
except ValueError: # Multi-task scenario where we're reusing the same RNN parameters
encoder_outputs_, _, encoder_states_ = rnn(reuse=True)
else:
if encoder.time_pooling or encoder.final_state == 'concat_last':
raise NotImplementedError
if encoder.layers > 1:
cell = MultiRNNCell([get_cell(input_size if j == 0 else cell_output_size)
for j in range(encoder.layers)])
initial_state = (get_initial_state(),) * encoder.layers
| tensorflow.get_variable_scope | 12,305 |
import tensorflow as tf
def log_prob_from_logits(x):
"""numerically stable log_softmax implementation that prevents overflow."""
axis = len(x.get_shape()) - 1
m = tf.reduce_max(x, axis, keep_dims=True)
return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))
| tensorflow.reduce_max | 12,306 |
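log_prob_from_logits subtracts the per-row maximum before exponentiating, so large logits do not overflow. A small sketch, assuming TF 1.x graph mode, comparing the same computation against tf.nn.log_softmax on logits that would break a naive exp:

```python
import tensorflow as tf

logits = tf.constant([[1000.0, 1001.0, 1002.0]])   # naive exp(logits) would overflow
m = tf.reduce_max(logits, axis=-1, keep_dims=True)
stable = logits - m - tf.log(tf.reduce_sum(tf.exp(logits - m), axis=-1, keep_dims=True))
reference = tf.nn.log_softmax(logits)

with tf.Session() as sess:
    print(sess.run([stable, reference]))   # both are finite and agree
```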
from tensorflow.python.framework import tensor_util
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch))
normal = dists.Normal([mu], [sigma],
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([mu], [sigma],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([[mu]], [[sigma]],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = tf.placeholder(dtype=tf.int32, shape=[])
| tensorflow.python.framework.tensor_util.constant_value | 12,307 |
from tensorflow.python.framework import ops
TypeError: if `alpha` and `beta` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[alpha, beta]) as ns:
with ops.control_dependencies([
check_ops.assert_positive(alpha),
check_ops.assert_positive(beta),
| tensorflow.python.framework.ops.name_scope | 12,308 |
import tensorflow as tf
for _, var in grads_and_vars:
with self._maybe_colocate(var):
op = self._func(var)
if op is not None:
assert isinstance(op, tf.Operation), op
ops.append(op)
update_op = tf.group(update_op, *ops, name=name)
return update_op
@contextmanager
def _maybe_colocate(self, var):
G = tf.get_default_graph()
| tensorflow.group | 12,309 |
import tensorflow as tf
s = tf.matmul(g, f, transpose_b=True) # # [bs, N, N]
beta = tf.nn.softmax(s) # attention map
print('attention beta dims: ' + str(s.get_shape().as_list()))
| tensorflow.nn.softmax | 12,310 |
import tensorflow as tf
print('feats_cnn: {}'.format(feats_conv.get_shape()))
print('feats_all: {}'.format(feats_all.get_shape()))
# Project to RNN size
rnn_output = feats_all
rnn_output_size = nfeats_tot
if do_rnn:
with tf.variable_scope('rnn_proj'):
rnn_proj_w = tf.get_variable('W', [nfeats_tot, rnn_size], initializer=tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype), dtype=dtype)
rnn_proj_b = tf.get_variable('b', [rnn_size], initializer=tf.constant_initializer(0.0), dtype=dtype)
rnn_inputs = tf.nn.bias_add(tf.matmul(feats_all, rnn_proj_w), rnn_proj_b)
rnn_inputs = tf.reshape(rnn_inputs, [batch_size, rnn_nunroll, rnn_size])
rnn_inputs = tf.split(rnn_inputs, rnn_nunroll, axis=1)
rnn_inputs = [tf.squeeze(input_, [1]) for input_ in rnn_inputs]
if rnn_cell_type == 'rnn':
cell_fn = tf.nn.rnn_cell.BasicRNNCell
elif rnn_cell_type == 'gru':
cell_fn = tf.nn.rnn_cell.GRUCell
elif rnn_cell_type == 'lstm':
cell_fn = tf.nn.rnn_cell.BasicLSTMCell
else:
raise NotImplementedError()
cell = cell_fn(rnn_size)
if mode == 'train' and rnn_keep_prob < 1.0:
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=rnn_keep_prob)
| tensorflow.squeeze | 12,311 |
import tensorflow as tf
with tf.gfile.Open(p_name) as fd:
tf.logging.info("Restoring hyper parameters from %s" % p_name)
json_str = fd.readline()
| tensorflow.logging.info | 12,312 |
import tensorflow as tf
{'x': tf.compat.v1.placeholder(tf.int64, (None,))},
feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)}),
dict(
testcase_name='fixed_len_string',
make_tensors_fn=lambda:
{'x': tf.compat.v1.placeholder(tf.string, (None,))},
feature_spec={'x': tf.io.FixedLenFeature([], tf.string)}),
dict(
testcase_name='fixed_len_float',
make_tensors_fn=lambda:
{'x': tf.compat.v1.placeholder(tf.float32, (None,))},
feature_spec={'x': tf.io.FixedLenFeature([], tf.float32)}),
dict(
testcase_name='override',
make_tensors_fn=_make_tensors_with_override,
feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)},
domains={'x': schema_pb2.IntDomain(is_categorical=True)}),
dict(
testcase_name='override_with_session',
make_tensors_fn=_make_tensors_with_override,
feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)},
domains={
| tensorflow.io.FixedLenFeature | 12,313 |
import tensorflow as tf
[max_predictions_per_seq], tf.int64
),
"masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights": tf.FixedLenFeature(
[max_predictions_per_seq], tf.float32
),
| tensorflow.FixedLenFeature | 12,314 |
import tensorflow as tf
reduction_indices,
use_batch_stats)
self._build_update_ops_variance(mean, variance, is_training)
# Set up optional scale and offset factors.
if self._offset:
self._set_default_initializer(self.BETA)
self._beta = tf.get_variable(
self.BETA,
shape=self._mean_shape,
initializer=self._initializers[self.BETA])
else:
self._beta = None
if self._scale:
| tensorflow.get_variable | 12,315 |
import tensorflow as tf
epsilon=self.rprop_epsilon)
_opt_op = trainer.apply_gradients(grads)
# so when you call _train, you first do the gradient step, then you apply ema
with tf.control_dependencies([_opt_op]):
_train = tf.group(ema_apply_op)
# Ops/Summaries to run, and their names for logging
assert norm_grads is not None
run_ops = [_train, loss, loss_q, entropy, loss_policy, loss_f, loss_bc, explained_variance, norm_grads]
| tensorflow.group | 12,316 |
from tensorflow.python.ops import array_ops
@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
x = self._assert_valid_sample(x, check_integer=False)
return math_ops.igammac(math_ops.floor(x + 1), self.rate)
def _log_normalization(self):
return self.rate
def _log_unnormalized_prob(self, x):
x = self._assert_valid_sample(x, check_integer=True)
return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)
def _mean(self):
return array_ops.identity(self.rate)
def _variance(self):
return array_ops.identity(self.rate)
@distribution_util.AppendDocstring(
"""Note: when `rate` is an integer, there are actually two modes: `rate`
and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
return math_ops.floor(self.rate)
def _assert_valid_sample(self, x, check_integer=True):
if not self.validate_args:
return x
| tensorflow.python.ops.array_ops.identity | 12,317 |
import tensorflow as tf
maxnorm: the maximum Euclidean norm
Returns:
An operation that will update var_matrix when run in a Session
'''
row_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))
scaling = maxnorm / tf.maximum(row_norms, maxnorm)
scaled = var_matrix * tf.expand_dims(scaling, 1)
return tf.assign(var_matrix, scaled)
def dense_maxnorm(var_matrix, maxnorm=1.0):
'''Similar to dense_maxnorm_update(), except this returns a new Tensor
instead of an operation that modifies var_matrix.
Args:
| tensorflow.assign | 12,318 |
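dense_maxnorm_update projects each row of an embedding matrix back onto an L2 ball of radius maxnorm, leaving rows that already satisfy the constraint untouched. A minimal sketch with a toy 2x2 variable:

```python
import tensorflow as tf

var_matrix = tf.Variable([[3.0, 4.0],     # row norm 5.0 -> gets rescaled
                          [0.3, 0.4]])    # row norm 0.5 -> left as-is
maxnorm = 1.0

row_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))
scaling = maxnorm / tf.maximum(row_norms, maxnorm)
update_op = tf.assign(var_matrix, var_matrix * tf.expand_dims(scaling, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(update_op))   # first row now has unit norm
```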
import tensorflow as tf
tf.app.flags.DEFINE_string('pm', '66661', 'pooling scheme across scales. Each number specifies the number of scales remaining at each layer. The first number has to be the same as used in --num_scales.')
tf.app.flags.DEFINE_integer('conv_kernel', 5, 'Size of convolutional kernel')
tf.app.flags.DEFINE_integer('pool_kernel', 3, 'Size of spatial pooling kernel')
tf.app.flags.DEFINE_integer('feats_per_layer', 32, 'Number of feature channels at each layer')
tf.app.flags.DEFINE_boolean('total_pool', True, 'If true, pool all feature maps to 1x1 size in final layer')
tf.app.flags.DEFINE_integer('pool_stride', '1', 'If 2, we get progressive pooling - with overlap pooling, AlexNet style')
TRAIN_FILE = 'train_{}.tfrecords'.format(records.tfrecord_name())
VALIDATION_FILE = 'validation_{}.tfrecords'.format(records.tfrecord_name())
| tensorflow.app.flags.DEFINE_integer | 12,319 |
import tensorflow as tf
# TODO(koz4k): Translate it to T2TModel or remove.
def feed_forward_gaussian_fun(action_space, config, observations):
"""Feed-forward Gaussian."""
if not isinstance(action_space, gym.spaces.box.Box):
raise ValueError("Expecting continuous action space.")
mean_weights_initializer = tf.initializers.variance_scaling(
scale=config.init_mean_factor)
logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
flat_observations = tf.reshape(observations, [
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope("network_parameters"):
with tf.variable_scope("policy"):
x = flat_observations
for size in config.policy_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
mean = tf.layers.dense(
x, action_space.shape[0], activation=tf.tanh,
kernel_initializer=mean_weights_initializer)
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
| tensorflow.shape | 12,320 |
import tensorflow as tf
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
| tensorflow.variable_scope | 12,321 |
import tensorflow as tf
import TensorflowUtils as utils
import read_MITSceneParsingData as scene_parsing
import datetime
import BatchDatsetReader as dataset
from six.moves import xrange
import os.path as osp
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", "2", "batch size for training")
tf.flags.DEFINE_string("logs_dir", r"E:\work\01-Myproject\imag_division\FCN.tensorflow-master\logs", "path to logs directory")
tf.flags.DEFINE_string("data_dir", r"E:\work\01-Myproject\imag_division\FCN.tensorflow-master\Data_zoo\STEM", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", r"E:\work\01-Myproject\imag_division\FCN.tensorflow-master\Model_zoo", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False")
tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize")
MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
MAX_ITERATION = 100 # maximum number of training steps
NUM_OF_CLASSESS = 3 # number of classes
IMAGE_SIZE = 2048 # image size
| tensorflow.flags.DEFINE_float | 12,322 |
from tensorflow.python.framework import ops
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
name: Name of operation.
Returns:
A [D1, ... DN] `Tensor` of true positive counts.
"""
with ops.name_scope(name, 'true_positives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(
labels, predictions_idx, class_id)
tp = set_ops.set_size(set_ops.set_intersection(predictions_idx, labels))
tp = math_ops.to_double(tp)
if weights is not None:
weights = math_ops.to_double(weights)
tp = math_ops.mul(tp, weights)
return tp
| tensorflow.python.framework.ops.name_scope | 12,323 |
import tensorflow as tf
#
# hello = tf.constant('Hello, TensorFlow!')
# print(sess.run(hello))
#
# a = tf.constant(10)
# b = tf.constant(32)
# print(sess.run(a + b))
#
# c = tf.constant('haHa')
# print(sess.run(c))
#
# sess.close()
identity_matrix = tf.diag([1.0, 3.0, 1.0])
A = tf.truncated_normal([2, 3])
B = tf.fill([2, 3], 5.0)
C = tf.random_uniform([3, 2], maxval=100)
D = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]]))
sess = tf.Session()
# sess.run(tf.global_variables_initializer())
# print(sess.run(tf.random_normal(mean=10, shape=[10])))
# A = tf.Variable(tf.random_normal(shape=[1, 1]))
# sess.run(tf.global_variables_initializer())
# print(sess.run(A))
print('\nI=')
print(sess.run(identity_matrix))
| tensorflow.truncated_normal | 12,324 |
import tensorflow as tf
self.docid_inputs = [] # a list of top documents
self.letor_features = tf.placeholder(tf.float32, shape=[None, self.feature_size],
| tensorflow.placeholder | 12,325 |
import tensorflow as tf
tf.float32, size=self.num_cells + 2, clear_after_read=False)
arc_seq = tf.TensorArray(tf.int32, size=self.num_cells * 4)
if prev_c is None:
assert prev_h is None, "prev_c and prev_h must both be None"
prev_c = [tf.zeros([1, self.lstm_size], tf.float32)
for _ in range(self.lstm_num_layers)]
prev_h = [tf.zeros([1, self.lstm_size], tf.float32)
for _ in range(self.lstm_num_layers)]
inputs = self.g_emb
for layer_id in range(2):
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
| tensorflow.zeros | 12,326 |
from tensorflow.contrib import framework as contrib_framework
def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None):
if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'):
return
checkpoint_path = saver.latest_checkpoint(self._model_dir)
eval_dir = os.path.join(self._model_dir, 'eval')
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, targets = input_fn()
self._check_inputs(features, targets)
eval_dict = self._get_eval_ops(features, targets, metrics or
self._get_default_metric_functions())
eval_results, _ = evaluate(
graph=g,
output_dir=eval_dir,
| tensorflow.contrib.framework.create_global_step | 12,327 |
import tensorflow as tf
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
return output
| tensorflow.matmul | 12,328 |
import tensorflow as tf
a0 = logits - tf.reduce_max(logits, 1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, 1, keepdims=True)
p0 = ea0 / z0
| tensorflow.reduce_sum | 12,329 |
import tensorflow as tf
tf.expand_dims(stacked, -1), max_outputs=30)
sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')
logits = (LinearWrap(sampled)
.FullyConnected('fc1', out_dim=256, nl=tf.nn.relu)
.FullyConnected('fc2', out_dim=128, nl=tf.nn.relu)
.FullyConnected('fct', out_dim=19, nl=tf.identity)())
tf.nn.softmax(logits, name='prob')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector')
summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
| tensorflow.nn.softmax | 12,330 |
import tensorflow as tf
)
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32
)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions
)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss
)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
| tensorflow.metrics.mean | 12,331 |
import tensorflow as tf
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testEmbeddingTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
| tensorflow.nn.rnn_cell.BasicLSTMCell | 12,332 |
import tensorflow as tf
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
name: A string used as the name for this variable scope.
Returns:
(tf.Tensor) A single value tensor containing the loss.
(tf.Tensor) A tensor containing the propensity weights.
"""
loss = None
with tf.name_scope(name, "click_weighted_pairwise_loss",[output]):
sliced_output = tf.unstack(output, axis=1)
sliced_label = tf.unstack(labels, axis=1)
sliced_propensity = tf.unstack(propensity_weights, axis=1)
for i in range(len(sliced_output)):
for j in range(i+1, len(sliced_output)):
cur_label_weight = tf.math.sign(sliced_label[i] - sliced_label[j])
cur_propensity = sliced_propensity[i] * sliced_label[i] + sliced_propensity[j] * sliced_label[j]
cur_pair_loss = -tf.exp(sliced_output[i]) / (tf.exp(sliced_output[i]) + tf.exp(sliced_output[j]))
if loss is None:
loss = cur_label_weight * cur_pair_loss * cur_propensity
else:
loss += cur_label_weight * cur_pair_loss * cur_propensity
batch_size = tf.shape(labels[0])[0]
return tf.reduce_sum(loss) / tf.cast(batch_size, dtypes.float32) #/ (tf.reduce_sum(propensity_weights)+1)
def click_weighted_log_loss(self, output, labels, propensity_weights, name=None):
| tensorflow.unstack | 12,333 |
import tensorflow as tf
train_op = opt.apply_gradients(grads_and_vars, global_step)
# Validation
'''
if params.validation and params.references[0]:
files = [params.validation] + list(params.references)
eval_inputs = files
eval_input_fn = dataset.get_evaluation_input
else:
print("Don't evaluate")
eval_input_fn = None
'''
# Add hooks
train_hooks = [
tf.train.StopAtStepHook(last_step=params.train_steps),
tf.train.NanTensorHook(loss), # Monitors the loss tensor and stops training if loss is NaN
tf.train.LoggingTensorHook(
{
"step": global_step,
"loss": loss,
"chars": tf.shape(features["chars"]),
"source": tf.shape(features["source"]),
#"bert": tf.shape(features["bert"]),
"lr": learning_rate
},
every_n_iter=1
),
tf.train.CheckpointSaverHook(
| tensorflow.train.StopAtStepHook | 12,334 |
import tensorflow as tf
false_fn=lambda: tf.constant(0, dtype=tf.int64)
)
kk = tf.math.add(kk, to_add)
kernel[l][m] = kk
return tf.convert_to_tensor(kernel, dtype=tf.int64)
def dim(self):
return self._dim
| tensorflow.convert_to_tensor | 12,335 |
import tensorflow as tf
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, x in enumerate(xs):
c = c
h = h
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
| tensorflow.split | 12,336 |
import tensorflow as tf
`[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
types: A list of `Tensor` of `int32`, with shapes
`[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
"""
neighbors_list = [tf.reshape(nodes, [-1])]
weights_list = []
type_list = []
for hop_edge_types, count in zip(edge_types, counts):
| tensorflow.reshape | 12,337 |
import tensorflow as tf
# Implement an exponential learning rate decay every 1000 epochs
#Implement a dynamical learning rate
global_step = tf.Variable(0., trainable=False)
rate = tf.train.exponential_decay(starter_learning, global_step, 500, 0.9) #exponential learning rate decay
#rate = starter_learning
| tensorflow.Variable | 12,338 |
from tensorflow.contrib.framework import deprecated_args
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_percentage_less(values, threshold, ignore_mask=None, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
| tensorflow.contrib.framework.deprecated_args | 12,339 |
import tensorflow as tf
else:
entropy = - targets * np.log(targets) - \
(1. - targets) * np.log(1. - targets)
return tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(logits) * targets, logits=logits) - entropy
def _setup_model_loss(self, update_ops=None, num_classes=6):
self.learning_rate_d = tf.placeholder(tf.float32, shape=[], name="learning_rate_placeholder")
self.learning_rate_g = tf.placeholder(tf.float32, shape=[], name="learning_rate_placeholder")
d_optimizer = self._optimizer(
self.learning_rate_d,
optname=self.cnf.get('optname', 'momentum'),
**self.cnf.get('opt_kwargs', {'decay': 0.9}))
| tensorflow.placeholder | 12,340 |
import tensorflow as tf
Returns
----
summaryWriter: FileWriter, the summary log writer
"""
if tf.gfile.Exists(logPath):
tf.gfile.DeleteRecursively(logPath)
summaryWriter = tf.summary.FileWriter(logPath, graph=tf.get_default_graph())
return summaryWriter
| tensorflow.gfile.DeleteRecursively | 12,341 |
import tensorflow as tf
tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')
tf.app.flags.DEFINE_float('epsilon', 0.000001,
'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5')
tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances')
tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance')
tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective')
tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')
| tensorflow.app.flags.DEFINE_float | 12,342 |
import tensorflow as tf
# once per loop.
with (tf.contrib.summary.create_file_writer(
params['model_dir'],
max_queue=params['iterations_per_loop']).as_default()):
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar(
'total_loss', tf.reduce_mean(total_loss), step=global_step)
tf.contrib.summary.scalar(
'total_rpn_loss', tf.reduce_mean(total_rpn_loss),
step=global_step)
tf.contrib.summary.scalar(
'rpn_score_loss', tf.reduce_mean(rpn_score_loss),
step=global_step)
tf.contrib.summary.scalar(
'rpn_box_loss', tf.reduce_mean(rpn_box_loss), step=global_step)
tf.contrib.summary.scalar(
'total_fast_rcnn_loss', tf.reduce_mean(total_fast_rcnn_loss),
step=global_step)
tf.contrib.summary.scalar(
'fast_rcnn_class_loss', tf.reduce_mean(fast_rcnn_class_loss),
step=global_step)
tf.contrib.summary.scalar(
'fast_rcnn_box_loss', tf.reduce_mean(fast_rcnn_box_loss),
step=global_step)
if params['include_mask']:
tf.contrib.summary.scalar(
'mask_loss', tf.reduce_mean(mask_loss), step=global_step)
tf.contrib.summary.scalar(
| tensorflow.reduce_mean | 12,343 |
import tensorflow as tf
a_mask = tf.matrix_band_part(ones, -1, 0)
s_ex12 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 2)
s_ex13 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 3)
a_mask = (1 - s_ex13) * (1 - s_ex12) + s_ex13 * a_mask
# generate mask of batch x seq_len x seq_len
a_mask = tf.reshape(a_mask, (-1, seq_len, seq_len))
out_mask = attention_mask * a_mask
else:
ones = tf.ones_like(attention_mask[:1])
mask = (tf.matrix_band_part(ones, -1, 0))
out_mask = attention_mask * mask
else:
out_mask = attention_mask
return out_mask | tensorflow.matrix_band_part | 12,344 |
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
hparams = get_default_hparams()
for sample_size in [10, 25, 50, 100, 200]:
hparams.n_samples = sample_size
energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
| tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.Dynamics | 12,345 |
import tensorflow as tf
# main
with tf.Session() as sess:
| tensorflow.Session | 12,346 |
import tensorflow as tf
# Ensure maxnorm constraints are initially satisfied
entity_init = dense_maxnorm(entity_init, self.maxnorm)
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
# Relationship vector acts as a translation in entity embedding space
diff_vec = tail_embed - (head_embed + rel_embed)
# negative dist so higher scores are better (important for pairwise loss)
if self.dist == 'manhattan':
raw_output = -tf.reduce_sum(tf.abs(diff_vec), 1)
elif self.dist == 'euclidean':
# +eps because gradients can misbehave for small values in sqrt
raw_output = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + self.EPS)
elif self.dist == 'sqeuclidean':
raw_output = -tf.reduce_sum(tf.square(diff_vec), 1)
else:
raise Exception('Unknown distance type')
# Model output
self.output, self.loss = ranking_margin_objective(raw_output, self.margin)
# Optimization with postprocessing to limit embedding vars to L2 ball
self.train_step = self.opt.minimize(self.loss)
| tensorflow.abs | 12,347 |
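The block above scores a (head, rel, tail) triple by how close tail lands to head + rel, negating the distance so that larger scores mean better fits. A small sketch of the three distance variants on toy embeddings, assuming TF 1.x:

```python
import tensorflow as tf

head = tf.constant([[0.1, 0.2]])
rel  = tf.constant([[0.3, 0.1]])
tail = tf.constant([[0.4, 0.3]])
EPS  = 1e-8   # guards the sqrt gradient near zero

diff_vec = tail - (head + rel)
score_manhattan   = -tf.reduce_sum(tf.abs(diff_vec), 1)
score_euclidean   = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + EPS)
score_sqeuclidean = -tf.reduce_sum(tf.square(diff_vec), 1)

with tf.Session() as sess:
    print(sess.run([score_manhattan, score_euclidean, score_sqeuclidean]))
```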
import tensorflow as tf
from planet.tools import filter_variables_lib
Objective = collections.namedtuple(
'Objective', 'name, value, goal, include, exclude')
def set_up_logging():
"""Configure the TensorFlow logger."""
tf.logging.set_verbosity(tf.logging.INFO)
logging.getLogger('tensorflow').propagate = False
logging.getLogger('tensorflow').format = '%(message)s'
logging.basicConfig(level=logging.INFO, format='%(message)s')
def save_config(config, logdir=None):
"""Save a new configuration by name.
| tensorflow.logging.set_verbosity | 12,348 |
import tensorflow as tf
with tf.gfile.Open(m_name) as fd:
tf.logging.info("Restoring model parameters from %s" % m_name)
json_str = fd.readline()
params.parse_json(json_str)
return params
def export_params(output_dir, name, params):
if not tf.gfile.Exists(output_dir):
tf.gfile.MkDir(output_dir)
# Save params as params.json
filename = os.path.join(output_dir, name)
with tf.gfile.Open(filename, "w") as fd:
fd.write(params.to_json())
def collect_params(all_params, params):
collected = tf.contrib.training.HParams()
for k in params.values().keys():
collected.add_hparam(k, getattr(all_params, k))
return collected
def merge_parameters(params1, params2):
| tensorflow.gfile.Open | 12,349 |
import tensorflow as tf
ch_emb = conv(ch_emb, d,
bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=None)
qh_emb = conv(qh_emb, d,
bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=True)
ch_emb = tf.reduce_max(ch_emb, axis=1)
qh_emb = tf.reduce_max(qh_emb, axis=1)
ch_emb = tf.reshape(ch_emb, [N * self.max_p_num, PL, -1])
qh_emb = tf.reshape(qh_emb, [N * self.max_p_num, QL, -1])
c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout)
q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout)
c_emb = tf.concat([c_emb, ch_emb], axis=2)
q_emb = tf.concat([q_emb, qh_emb], axis=2)
self.c_emb = highway(c_emb, size=d, scope="highway", dropout=self.dropout, reuse=None)
self.q_emb = highway(q_emb, size=d, scope="highway", dropout=self.dropout, reuse=True)
def _encode(self):
N, PL, QL, CL, d, dc, nh = self._params()
if self.config.fix_pretrained_vector:
dc = self.char_mat.get_shape()[-1]
with tf.variable_scope("Embedding_Encoder_Layer"):
self.c_embed_encoding = residual_block(self.c_emb,
num_blocks=1,
num_conv_layers=2,
kernel_size=7,
| tensorflow.concat | 12,350 |
import tensorflow as tf
logits1, logits2 = [l for l in self.logits]
outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
tf.expand_dims(tf.nn.softmax(logits2), axis=1))
outer = tf.matrix_band_part(outer, 0, config.ans_limit)
self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits1, labels=self.y1)
losses2 = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits2, labels=self.y2)
| tensorflow.reduce_max | 12,351 |
import tensorflow as tf
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train ops.
labels = tf.constant(0, tf.int32, shape=[100], name="labels")
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, 10]), 1.0, 0.0)
logits = tf.get_collection("logits")[0]
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
onehot_labels,
name="xentropy")
loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
| tensorflow.concat | 12,352 |
import tensorflow as tf
self.rnn_step_scan,
tf.transpose(self.x, [1, 0, 2]),
| tensorflow.transpose | 12,353 |
import tensorflow as tf
self.sess.run(tf.global_variables_initializer())
# Tensorboard
if summary_dir is not None:
self.writer = tf.summary.FileWriter(summary_dir)
tf.summary.scalar('Loss/Policy', loss_pg)
tf.summary.scalar('Loss/Value', loss_vf)
tf.summary.scalar('Loss/Entropy', loss_entropy)
tf.summary.scalar('Loss/Total', loss)
tf.summary.scalar('Var/Epsilon', epsilon_decay)
tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
# AC net
def build_anet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
| tensorflow.summary.scalar | 12,354 |
import tensorflow as tf
rows = [tf.concat(tf.unstack(x),axis=1) for x in rows]
x_aug = tf.concat(rows,axis=0)
| tensorflow.concat | 12,355 |
import tensorflow as tf
tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
tf.app.flags.DEFINE_integer('max_steps', 100000, '')
tf.app.flags.DEFINE_integer('loss_scale', 1024, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
tf.app.flags.DEFINE_boolean('allow_mix_precision', False, 'whether to allow mix precision')
tf.app.flags.DEFINE_boolean('auto_tune', False, 'whether to autotune')
tf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data')
tf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets')
import model
import icdar
| tensorflow.app.flags.DEFINE_string | 12,356 |
import tensorflow as tf
a `float`, center loss
"""
with tf.variable_scope(name):
num_features = features.get_shape()[1]
centers = tf.get_variable(
'centers', [num_classes, num_features],
dtype=tf.float32,
initializer=tf.constant_initializer(0),
trainable=False)
label = tf.reshape(label, [-1])
centers_batch = tf.gather(centers, label)
diff = (1 - alpha) * (centers_batch - features)
centers = tf.scatter_sub(centers, label, diff)
loss = tf.nn.l2_loss(features - centers_batch)
| tensorflow.constant_initializer | 12,357 |
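The snippet above implements a center loss: a non-trainable centers matrix holds one prototype per class, each batch pulls the relevant centers toward the batch features via tf.scatter_sub, and the L2 distance between features and their class centers is the loss. A compact sketch with toy values, assuming TF 1.x:

```python
import tensorflow as tf

num_classes, num_features, alpha = 3, 4, 0.5
features = tf.constant([[1.0, 0.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0]])
label = tf.constant([0, 2])

centers = tf.get_variable('centers', [num_classes, num_features], dtype=tf.float32,
                          initializer=tf.constant_initializer(0), trainable=False)
centers_batch = tf.gather(centers, label)              # center of each sample's class
diff = (1 - alpha) * (centers_batch - features)        # how far to move each center
centers_update = tf.scatter_sub(centers, label, diff)  # move centers toward features
loss = tf.nn.l2_loss(features - centers_batch)         # pull features toward centers

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([loss, centers_update]))
```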
import tensorflow as tf
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
train_hook=tf.train.LoggingTensorHook(['loss/train_loss'],every_n_iter=100)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps,hooks=[train_hook])
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
| tensorflow.train.LoggingTensorHook | 12,358 |
import tensorflow as tf
return default_params
def bilstm_layer(self, embeddings, nwords):
t = tf.transpose(embeddings, perm=[1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size'])
| tensorflow.transpose | 12,359 |
import tensorflow as tf
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if (FLAG == 'train') : # If it is the training mode
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
f.write("---Save model \n")
| tensorflow.initialize_all_variables | 12,360 |
import tensorflow as tf
# 2nd part of minimize: apply_gradient
optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step)
update_ops = tf.group(*self.update_ops)
self.training_op = tf.group(update_ops, optimizer_step)
def set_check_ops(self):
self._check_ops = 1
# TODO argo2 This is not working anymore with the new session
#with self.sess.graph.as_default():
self._numerics_ops = tf.add_check_numerics_ops()
def release(self):
super().release()
self.sess.close()
tf.reset_default_graph()
def set_summaries(self):
"""This function sets summaries and summaryFileWriters, it needs to be invoked before
training to keep track of the summaries.
(cannot be invoked in create_and_init_network because the FileWriter will corrupt data in the logfolder
at each initialization)
"""
| tensorflow.add_check_numerics_ops | 12,361 |
import tensorflow as tf
self.build_predictive_model() # builds on top of predictive model. Reuses triplet encoding
# build denoising objective
models = self.models
self.loss_dn = self._noisy_decode(models[1])
self.losses = [self.loss_reco, self.loss_pred, self.loss_dist, self.loss_dn]
def _noisy_decode(self, model):
"""Distort middle encoding with [<= 1/3*dist(neigbour)] and demand good reconstruction"""
# dist = l2(x1 - x2)
# noise = dist * self.epsilon_sphere_noise()
# tf.stop_gradient(noise)
noise = tf.random_normal(self.model.encode.get_shape().as_list()) * FLAGS.epsilon
noisy_encoding = noise + self.models[1].encode
tf.stop_gradient(noisy_encoding) # or maybe here, who knows
noisy_decode = interpreter.build_decoder(noisy_encoding, model.config, reuse=True, masks=model.mask_list)
loss = interpreter.l2_loss(noisy_decode, self.raw_targets[1], alpha=FLAGS.beta)
self.models += [noisy_decode]
return loss
def _tensor_to_image(self, net):
with tf.name_scope('to_image'):
if FLAGS.new_blur:
net = net[..., :self.batch_shape[-1]]
net = tf.nn.relu(net)
net = tf.cast(net <= 1, net.dtype) * net * 255
net = tf.cast(net, tf.uint8)
return net
| tensorflow.stop_gradient | 12,362 |
import tensorflow as tf
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([seq_length], tf.int64),
"is_real_example": tf.FixedLenFeature([1], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
| tensorflow.FixedLenFeature | 12,363 |
import tensorflow as tf
initializer=tf.constant_initializer(0.0))
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
return tf.matmul(tf.reshape(input_var,[-1,dims]),w) + b
else :
return tf.matmul(input_var,w)+b
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormLinear(object):
def __init__(self,name,input_dim,output_dim,stddev=0.02,epsilon=1e-10) :
| tensorflow.reshape | 12,364 |
import tensorflow as tf
def testStrippedOpListDef(self):
with self.test_session():
# Creates a graph.
v0 = tf.Variable(0.0)
var = tf.Variable(10.0)
tf.add(v0, var)
@function.Defun(x=tf.float32)
def minus_one(x):
return x - 1
minus_one(tf.identity(v0))
save = tf.train.Saver({"v0": v0})
tf.initialize_all_variables()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
self.assertEqual(ops, ["Add", "Assign", "Const", "Identity", "NoOp",
"RestoreSlice", "SaveSlices", "Sub", "Variable"])
if __name__ == "__main__":
tf.test.main()
| tensorflow.initialize_all_variables | 12,365 |
import tensorflow as tf
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
| tensorflow.train.Features | 12,366 |
import tensorflow as tf
Return:
A tuple of `SparseTensor` (neighbors, weights, types).
neighbors: A `SparseTensor` of `int64`.
weights: A `SparseTensor` of `float`.
types: A `SparseTensor` of `int32`
"""
sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types)
return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
tf.SparseTensor(*sp_returns[6:])
def get_sorted_full_neighbor(nodes, edge_types):
"""
Args:
nodes: A `Tensor` of `int64`.
edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing
edges.
| tensorflow.SparseTensor | 12,367 |
import tensorflow as tf
self.sample_action = tf.squeeze(pi_eval.sample(1), axis=0)
self.eval_action = pi_eval.mode()
self.global_step = tf.train.get_or_create_global_step()
self.saver = tf.train.Saver()
| tensorflow.train.get_or_create_global_step | 12,368 |
import tensorflow as tf
predict_nor, tsne_logit_nor = models(hps, image, FLAGS.RCE_train, logits=False, tsne_logits=True)
predict_adv, tsne_logit_adv = models(hps, adv_image, FLAGS.RCE_train, logits=False, tsne_logits=True)
# Calculate entropy
argmax_y_onehot = tf.one_hot(tf.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1)
normalized_y_nonmaximal = tf.reduce_sum(predict * argmax_y_onehot, 1)
entropy = tf.reduce_sum(-tf.log(predict) * predict * argmax_y_onehot, 1) / normalized_y_nonmaximal + tf.log(
normalized_y_nonmaximal)
| tensorflow.argmax | 12,369 |
import tensorflow as tf
:param name:
:return: tf.Tensor: a NC tensor named ``output`` with attribute `variables`.
"""
shape = inputdata.get_shape().as_list()[1:]
if None not in shape:
inputdata = tf.reshape(inputdata, [-1, int(np.prod(shape))])
else:
inputdata = tf.reshape(inputdata, tf.stack([tf.shape(inputdata)[0], -1]))
if w_init is None:
w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
| tensorflow.shape | 12,370 |
import tensorflow as tf
def add_dense_layer(layer, filter_dims, act_func=tf.nn.relu, scope='dense_layer',
use_bn=True, bn_phaze=False, use_bias=False, dilation=[1, 1, 1, 1]):
with tf.variable_scope(scope):
l = layer
if use_bn:
| tensorflow.variable_scope | 12,371 |
import tensorflow as tf
def _forward(self, x1, x2, **kwargs):
log_sigmas = self.parameterizer(x1)
z2, fldj = half_gaussianize(x2, log_sigmas)
return z2, fldj
def _inverse(self, x1, z2, **kwargs):
log_sigmas = self.parameterizer(x1)
x2, ildj = half_gaussianize(z2, log_sigmas, inverse=tf.constant(True))
return x2, ildj
def exponentiate(x, log_lambdas, inverse=tf.constant(False)):
if not inverse:
z = tf.math.exp(log_lambdas)*x
ldj = tf.math.reduce_sum(log_lambdas, axis=[1,2,3])
else:
z = x*tf.math.exp(-log_lambdas)
ldj = -tf.math.reduce_sum(log_lambdas, axis=[1,2,3])
return z, ldj
class Exponentiate(Parameterize):
"""
Implementation of parameterize for an exponetial prior.
"""
def __init__(self, input_shape=None, name='gaussianize', *args, **kwargs):
| tensorflow.math.exp | 12,372 |
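exponentiate is an invertible element-wise transform of the kind used by normalizing-flow layers: the forward pass multiplies by exp(log_lambdas) and its log-det-Jacobian is the sum of log_lambdas; the inverse divides and negates the ldj. A sketch, assuming TF 1.x graph mode, checking that the round trip recovers the input and the two ldjs cancel:

```python
import tensorflow as tf

x = tf.random_normal([2, 4, 4, 1])
log_lambdas = tf.fill(tf.shape(x), 0.5)

z    = tf.math.exp(log_lambdas) * x                    # forward
fldj = tf.math.reduce_sum(log_lambdas, axis=[1, 2, 3])
x_rt = z * tf.math.exp(-log_lambdas)                   # inverse
ildj = -tf.math.reduce_sum(log_lambdas, axis=[1, 2, 3])

with tf.Session() as sess:
    x_np, x_rt_np, f_np, i_np = sess.run([x, x_rt, fldj, ildj])
    # x_rt_np matches x_np up to float error; f_np + i_np == 0
```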
from tensorflow.python.framework import ops
total = _create_local('total', shape=[])
count = _create_local('count', shape=[])
if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
num_values = math_ops.reduce_sum(_broadcast_weights(weights, values))
else:
num_values = math_ops.to_float(array_ops.size(values))
total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
count_compute_op = state_ops.assign_add(count, num_values)
mean = _safe_div(total, count, 'value')
with ops.control_dependencies([total_compute_op, count_compute_op]):
update_op = _safe_div(total, count, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean, update_op
def streaming_mean_tensor(values, weights=None, metrics_collections=None,
updates_collections=None, name=None):
| tensorflow.python.framework.ops.control_dependencies | 12,373 |
import tensorflow as tf
reshaped_train_labels = tf.transpose(tf.convert_to_tensor(train_labels)) # reshape from [rank_list_size, ?] to [?, rank_list_size]
self.propensity_weights = self.get_normalized_weights(self.logits_to_prob(self.propensity))
self.rank_loss = self.loss_func(train_output, reshaped_train_labels, self.propensity_weights)
pw_list = tf.unstack(self.propensity_weights, axis=1) # Compute propensity weights
self.click_metrics=self.click_loglikelihood(reshaped_train_labels,\
self.propensity,train_output)
tf.summary.scalar('click_metrics',self.click_metrics,collections=['train'])
for i in range(len(pw_list)):
tf.summary.scalar('Inverse Propensity weights %d' % i, tf.reduce_mean(pw_list[i]), collections=['train'])
tf.summary.scalar('Rank Loss', tf.reduce_mean(self.rank_loss), collections=['train'])
# Compute examination loss
| tensorflow.summary.scalar | 12,374 |
import tensorflow as tf
self.IRK_times = tmp[q**2+q:]
# tf placeholders and graph
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))
self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))
self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))
self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))
self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients
self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients
self.U0_pred = self.net_U0(self.x0_tf) # N0 x q
self.U1_pred = self.net_U1(self.x1_tf) # N1 x q
| tensorflow.placeholder | 12,375 |
import tensorflow as tf
if init:
x = tf.nn.conv2d(x, tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2]), [1] + list(stride) + [1], pad)
init_scale=.01
m_init, v_init = tf.nn.moments(x, [0,1,2])
scale_init = init_scale / tf.sqrt(v_init + 1e-10)
with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):
x = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x - tf.reshape(m_init, [1, 1, 1, num_filters]))
else:
V = maybe_avg(V)
g = maybe_avg(g)
b = maybe_avg(b)
# use weight normalization (Salimans & Kingma, 2016)
W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, [0, 1, 2])
# calculate convolutional layer output
x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + list(stride) + [1], pad), b)
return x
def gated_resnet(x, aux, dim=(32, [3, 3], [1, 1]), activation=tf.nn.elu, scope="gated_resnet", residual=True, dropout=.0, conv=conv2d, training=True, ema=None, init=False):
out = conv(activation(x), [dim[0], dim[1], [1, 1]], scope="%s_conv_in"%scope, training=training, ema=ema, init=init)
in_shp = x.get_shape().as_list()
assert in_shp[1] == in_shp[2]
if aux is not None:
aux_shp = aux.get_shape().as_list()
| tensorflow.nn.l2_normalize | 12,376 |
import tensorflow as tf
if self.dale_ratio:
new_output = tf.matmul(tf.nn.relu(new_state),
tf.matmul(tf.abs(self.W_out) * self.output_Connectivity,
self.Dale_out, name="in_2"), transpose_b=True, name="3") \
+ self.b_out
else:
new_output = tf.matmul(tf.nn.relu(new_state),
self.W_out * self.output_Connectivity, transpose_b=True, name="3") \
+ self.b_out
return new_output
| tensorflow.nn.relu | 12,377 |
import tensorflow as tf
print('\ndeterminant(D)={:.1f}'.format(sess.run(tf.matrix_determinant(D))))
print('\ncholesky(D):')
print(sess.run(tf.cholesky(identity_matrix)))
print('\nselfAdjointEig(D):')
print(sess.run(tf.self_adjoint_eig(D)))
print(sess.run(tf.div(13, 4)))
print(sess.run(tf.truediv(13, 4)))
print(sess.run(tf.floordiv(13, 4)))
print(sess.run(tf.mod(13.2, 4)))
print(sess.run(tf.cross([1, 0, 0], [0, 1, 0])))
print(sess.run(tf.square([1, 2, 3])))
def custom_polynomial(local_tf, value):
return local_tf.subtract(3 * local_tf.square(value), value) + 10
print((sess.run(custom_polynomial(tf, 11))))
alpha = 0.1
val = tf.constant([[2, 3], [1, 4]], dtype=tf.float32)
l1 = tf.contrib.layers.l1_regularizer(alpha)(val)
| tensorflow.cross | 12,378 |
import tensorflow as tf
tf.flags.DEFINE_float('num_epochs_per_decay', 0,
"""Steps after which learning rate decays.""")
tf.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
"""Learning rate decay factor.""")
tf.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""")
tf.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""")
tf.flags.DEFINE_float('rmsprop_momentum', 0.9, """Momentum in RMSProp.""")
tf.flags.DEFINE_float('rmsprop_epsilon', 1.0, """Epsilon term for RMSProp.""")
| tensorflow.flags.DEFINE_float | 12,379 |
import tensorflow as tf
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
| tensorflow.logging.info | 12,380 |
import tensorflow as tf
x_blend_expected_np = sess.run(layers.upscale(layers.downscale(x, 2), 2))
self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)
def test_blend_images_in_transition_stage(self):
x_np = np.random.normal(size=[2, 8, 8, 3])
x = tf.constant(x_np, tf.float32)
x_blend = networks.blend_images(
x,
tf.constant(0.2),
resolution_schedule=networks.ResolutionSchedule(
scale_base=2, num_resolutions=2),
num_blocks=2)
with self.test_session(use_gpu=True) as sess:
x_blend_np = sess.run(x_blend)
x_blend_expected_np = 0.8 * sess.run(
layers.upscale(layers.downscale(x, 2), 2)) + 0.2 * x_np
| tensorflow.constant | 12,381 |
import tensorflow as tf
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
| tensorflow.logging.info | 12,382 |
import tensorflow as tf
# Need to prepare a mask to zero out the padding symbols.
# Make a batch_size x max_sequence_len matrix where each
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])
# Make a matrix where each row contains [0, 1, ..., max_sequence_len]
r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])
| tensorflow.tile | 12,383 |
import tensorflow as tf
# L2 weight regularization
reg += self.L2_in * tf.reduce_mean(tf.square(tf.abs(self.W_in) * self.input_Connectivity))
reg += self.L2_rec * tf.reduce_mean(tf.square(tf.abs(self.W_rec) * self.rec_Connectivity))
if self.dale_ratio:
reg += self.L2_out * tf.reduce_mean(tf.square(
tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out)))
else:
reg += self.L2_out * tf.reduce_mean(tf.square(tf.abs(self.W_out) * self.output_Connectivity))
# L2 firing rate regularization
reg += self.L2_firing_rate * tf.reduce_mean(tf.square(tf.nn.relu(self.states)))
| tensorflow.abs | 12,384 |
import tensorflow as tf
idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
idx = tf.reshape(idx, [-1, attn_length])
low = pos - encoder.attn_window_size
high = pos + encoder.attn_window_size
mlow = tf.to_float(idx < low)
mhigh = tf.to_float(idx > high)
m = mlow + mhigh
m += tf.to_float(idx >= encoder_input_length)
mask = tf.to_float(tf.equal(m, 0.0))
| tensorflow.to_float | 12,385 |
import tensorflow as tf
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
loss = tf.reduce_mean(loss)
return loss
def contra_step_lossV3(pred, tgt, margin=1.0):
# Step-wise contrastive loss
pred1, pred2 = tf.split(pred, 2, axis=0)
tgt1, tgt2 = tf.split(tgt, 2, axis=0)
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small) + margin)
loss = tf.reduce_mean(loss)
return loss
def contra_step_lossV4(pred, tgt):
# 50*50
# Step-wise contrastive loss
| tensorflow.cast | 12,386 |
import tensorflow as tf
# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
| tensorflow.ConfigProto | 12,387 |
import tensorflow as tf
layer_name = 'layer' + str(i+1)
with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
weights = tf.get_variable('weights', [prev_node, hidden_layers_node[i]],
initializer=tf.truncated_normal_initializer(stddev=0.1))
self.nnweights.append(weights)
biases = tf.get_variable('biases', [hidden_layers_node[i]],
initializer=tf.constant_initializer(0.0))
layer_out = tf.nn.dropout(tf.matmul(prev_x, weights) + biases, dropout_keep_prob)
if activation == 'relu':
layer_out = tf.nn.relu(layer_out)
elif activation == 'sigmoid':
layer_out = tf.nn.sigmoid(layer_out)
elif activation == 'tanh':
layer_out = tf.nn.tanh(layer_out)
else:
| tensorflow.matmul | 12,388 |
import tensorflow as tf
def uint8_resize_bicubic(image, shape):
ret = tf.image.resize_bicubic([image], shape)
return tf.cast(tf.clip_by_value(ret, 0, 255), tf.uint8)[0]
| tensorflow.clip_by_value | 12,389 |
import tensorflow as tf
# Add episode indices.
with tf.control_dependencies(control_inputs=assignments):
num_episodes = tf.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int'))
assignment = tf.assign(
ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes],
value=tf.boolean_mask(tensor=indices, mask=terminal)
)
# Increment episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign_add(ref=self.episode_count, value=num_episodes)
# Increment memory index.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(
ref=self.episode_indices[-1],
value=tf.where(self.memory_index + num_instances > self.capacity,
self.episode_indices[self.episode_count - 1], self.capacity - 1)
| tensorflow.control_dependencies | 12,390 |
import tensorflow as tf
with tf.variable_scope('q1', reuse=True):
q1_pi, q1_pi_reg = mlp_variational(tf.concat([x, pi[0]], axis=-1), q1_dropout_mask_phs, list(hidden_sizes) + [1],
activation, None, dropout_rate)
q1_pi = tf.squeeze(q1_pi, axis=2)
with tf.variable_scope('q2'):
q2_in_ph = tf.concat([x, a], axis=-1)
q2_in_dim = q2_in_ph.shape.as_list()[1]
q2_dropout_mask_generator = DropoutMaskGenerator(q2_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
q2_dropout_mask_phs = q2_dropout_mask_generator.generate_dropout_mask_placeholders()
q2, q2_reg = mlp_variational(q2_in_ph, q2_dropout_mask_phs, list(hidden_sizes) + [1],
activation, None, dropout_rate)
q2 = tf.squeeze(q2, axis=2)
else:
raise ValueError('Please choose a proper nn_type!')
return pi, pi_reg, pi_dropout_mask_generator, pi_dropout_mask_phs,\
q1, q1_reg, q1_dropout_mask_generator, q1_dropout_mask_phs, q1_pi, q1_pi_reg,\
q2, q2_reg, q2_dropout_mask_generator, q2_dropout_mask_phs
| tensorflow.squeeze | 12,391 |
import tensorflow as tf
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
true_image_shapes = [inputs.shape[:-1].as_list()
for _ in range(inputs.shape[-1])]
return tf.image.resize_images(inputs, [28, 28]), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
"""Prediction tensors from inputs tensor.
Args:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
| tensorflow.image.resize_images | 12,392 |
from tensorflow.python.platform import tf_logging as logging
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we return "should stop" so we stop, but
# without an exception.
return True
| tensorflow.python.platform.tf_logging.warning | 12,393 |
import tensorflow as tf
def squeeze_targets_preprocess(dataset, training):
"""Pre-processing function that squeezes last axis of targets."""
del training
def squeeze(features, targets):
if targets.shape[-1] == 1:
targets = tf.squeeze(targets, axis=-1)
return features, targets
dataset = dataset.map(squeeze)
return dataset
| tensorflow.squeeze | 12,394 |
import tensorflow as tf
"""
Used to create a dense layer.
:param x: input tensor to the dense layer
:param n1: no. of input neurons
:param n2: no. of output neurons
:param name: name of the entire dense layer, i.e., the variable scope name.
:return: tensor with shape [batch_size, n2]
"""
with tf.variable_scope(name, reuse=None):
weights = tf.get_variable("weights", shape=[n1, n2],
initializer=tf.random_normal_initializer(mean=0., stddev=0.01))
bias = tf.get_variable("bias", shape=[n2], initializer=tf.constant_initializer(0.0))
out = tf.add(tf.matmul(x, weights), bias, name='matmul')
return out
| tensorflow.variable_scope | 12,395 |
from tensorflow.python.ops import math_ops
delta_mean_prediction = _safe_div(
(batch_mean_prediction - mean_prediction) * batch_count, update_count,
'delta_mean_prediction')
update_mean_prediction = state_ops.assign_add(mean_prediction,
delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction
# batch_mean_label is E[y_B] in the update equation
batch_mean_label = _safe_div(
math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
update_count, 'delta_mean_label')
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = (
(predictions - batch_mean_prediction) * (labels - batch_mean_label))
# batch_comoment is C_B in the update equation
| tensorflow.python.ops.math_ops.reduce_sum | 12,396 |
import tensorflow as tf
if input_layer is None:
input_layer = self.top_layer
self.top_layer = tf.reshape(input_layer, shape)
self.top_size = shape[-1] # HACK This may not always work
| tensorflow.reshape | 12,397 |
import tensorflow as tf
"""
matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
blocked_rows = tf.Dimension(0)
blocked_cols = tf.Dimension(0)
batch_shape = tf.TensorShape(None)
| tensorflow.Dimension | 12,398 |
import tensorflow as tf
with tf.variable_scope('lstm', reuse=(t!=0)):
_, (c, h) = lstm_cell(inputs=tf.concat(axis=1, values=[x[:,t,:], context]), state=[c, h])
logits = self._decode_lstm(x[:,t,:], h, context, dropout=self.dropout, reuse=(t!=0))
loss += tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=captions_out[:, t]) * mask[:, t])
if self.alpha_c > 0:
alphas = tf.transpose(tf.stack(alpha_list), (1, 0, 2)) # (N, T, L)
alphas_all = tf.reduce_sum(alphas, 1) # (N, L)
alpha_reg = self.alpha_c * tf.reduce_sum((16./196 - alphas_all) ** 2)
loss += alpha_reg
return loss / tf.to_float(batch_size)
| tensorflow.stack | 12,399 |