seed (string, 25 to 2.89k chars) | seed_api (string, 14 to 102 chars) | index (int64, 0 to 14.8k) |
---|---|---|
import tensorflow as tf
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
# For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
| tensorflow.nn.max_pool | 11,300 |
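Below is a minimal, self-contained sketch (assuming TensorFlow 1.x; the input and filter shapes are illustrative) of how the `conv2d` and `max_pool_2x2` helpers above are typically wired together:

```python
# Illustrative sketch (TensorFlow 1.x): wiring conv2d and max_pool_2x2 from the
# row above into a tiny graph with a dummy NHWC input and a 5x5 filter.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 1])            # NHWC input
W = tf.get_variable('W_conv', [5, 5, 1, 32],                  # 5x5 kernel, 1 -> 32 channels
                    initializer=tf.truncated_normal_initializer(stddev=0.1))
conv = tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME')
pool = tf.nn.max_pool(value=conv, ksize=[1, 2, 2, 1],
                      strides=[1, 2, 2, 1], padding='SAME')   # halves H and W
print(pool.shape)  # (?, 14, 14, 32)
```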
import tensorflow as tf
Returns:
the cropped (and resized) image.
Raises:
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
cropped_shape = control_flow_ops.with_dependencies(
[rank_assertion],
tf.pack([crop_height, crop_width, original_shape[2]]))
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
# define the crop size.
image = control_flow_ops.with_dependencies(
[size_assertion],
tf.slice(image, offsets, cropped_shape))
| tensorflow.pack | 11,301 |
import tensorflow as tf
tf.app.flags.DEFINE_float(
'weight_decay', 0.0005, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.00005,
'The minimal end learning rate used by a polynomial decay learning rate.')
# for learning rate exponential_decay
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.96, 'Learning rate decay factor.')
| tensorflow.app.flags.DEFINE_float | 11,302 |
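A small, runnable sketch (TensorFlow 1.x assumed) of the flag-definition pattern above; the flag names mirror the row only as examples:

```python
# Illustrative sketch (TensorFlow 1.x): defining float flags and reading them
# back after tf.app.run() has parsed the command line.
import tensorflow as tf

tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
tf.app.flags.DEFINE_float('weight_decay', 0.0005, 'The weight decay on the model weights.')
FLAGS = tf.app.flags.FLAGS

def main(_):
    print('lr=%g, wd=%g' % (FLAGS.learning_rate, FLAGS.weight_decay))

if __name__ == '__main__':
    tf.app.run()  # parses command-line flags, then calls main()
```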
import tensorflow as tf
:param seed:
:return:
"""
def f1():
input_shape = input_tensor.get_shape().as_list()
noise_shape = tf.constant(value=[input_shape[0], 1, 1, input_shape[3]])
return tf.nn.dropout(input_tensor, keep_prob, noise_shape, seed=seed, name="spatial_dropout")
def f2():
return input_tensor
with tf.variable_scope(name_or_scope=name):
| tensorflow.nn.dropout | 11,303 |
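A hedged sketch (TensorFlow 1.x; shapes are examples) of the spatial-dropout idea above, where `noise_shape` broadcasts one keep/drop decision across each feature map:

```python
# Illustrative sketch (TensorFlow 1.x): spatial dropout -- one keep/drop
# decision per channel, broadcast over the height and width dimensions.
import tensorflow as tf

x = tf.placeholder(tf.float32, [8, 32, 32, 64])   # NHWC activations
noise_shape = tf.constant([8, 1, 1, 64])           # drop whole feature maps
dropped = tf.nn.dropout(x, keep_prob=0.75, noise_shape=noise_shape,
                        name='spatial_dropout')
```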
import tensorflow as tf
# placeholder for next observation (or state)
self.obs_tp1_ph = tf.placeholder(
tf.float32 if lander else tf.uint8, [None] + list(input_shape))
# placeholder for end of episode mask
# this value is 1 if the next state corresponds to the end of an episode,
# in which case there is no Q-value at the next state; at the end of an
# episode, only the current state reward contributes to the target, not the
# next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
self.done_mask_ph = tf.placeholder(tf.float32, [None])
# casting to float on GPU ensures lower data transfer times.
if lander:
obs_t_float = self.obs_t_ph
obs_tp1_float = self.obs_tp1_ph
else:
obs_t_float = tf.cast(self.obs_t_ph, tf.float32) / 255.0
| tensorflow.placeholder | 11,304 |
import tensorflow as tf
f.close()
def weight_variable(shape):
return tf.get_variable('W', shape, initializer=tf.random_normal_initializer(0., 0.02))
def bias_variable(shape):
return tf.get_variable('b', shape, initializer=tf.constant_initializer(0.))
def keep_prob(dropout, train):
return tf.cond(train, lambda: tf.constant(dropout), lambda: tf.constant(1.))
def softmax_ce_with_logits(logits, labels):
return tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
def sigmoid_ce_with_logits(logits, labels):
return tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
def sigmoid_kl_with_logits(logits, targets):
assert isinstance(targets, float)
if targets in [0., 1.]:
entropy = 0.
else:
entropy = - targets*tf.log(targets) - (1. - targets)*tf.log(1. - targets)
return sigmoid_ce_with_logits(logits, tf.ones_like(logits)*targets) - entropy
| tensorflow.nn.softmax_cross_entropy_with_logits | 11,305 |
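For context, `sigmoid_kl_with_logits` above is the soft-target sigmoid cross-entropy minus the target's entropy, i.e. the KL divergence from Bernoulli(target) to Bernoulli(sigmoid(logits)). A small sketch (TensorFlow 1.x, example numbers) spelling that out:

```python
# Illustrative check (TensorFlow 1.x): soft-target sigmoid cross-entropy minus
# the target entropy gives an elementwise KL divergence, as in the row above.
import tensorflow as tf

logits = tf.constant([0.0, 2.0, -1.0])
t = 0.7  # scalar soft target in (0, 1)
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits) * t,
                                              logits=logits)
entropy = -t * tf.log(t) - (1. - t) * tf.log(1. - t)
kl = ce - entropy  # KL(Bernoulli(t) || Bernoulli(sigmoid(logits)))
with tf.Session() as sess:
    print(sess.run(kl))
```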
import tensorflow as tf
>>> samples.dtype
dtype('float64')
>>> m.dtype = tf.float32
>>> samples = m.compute_posterior_samples(X, Y, test_points, 2)
>>> samples.dtype
dtype('float32')
"""
mu, var = self.build_posterior_mean_var(X, Y, test_points, True)
jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06
L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter)
V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples]
V = tf.random_normal(V_shape, dtype=L.dtype)
samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
return tf.transpose(samples)
#samples = []
#for i in range(self.num_latent_functions):
# L = tf.cholesky(var[:, :, i] + jitter)
# V = tf.random_normal([tf.shape(L)[0], num_samples], dtype=L.dtype)
# samples.append(mu[:, i:i + 1] + tf.matmul(L, V)) # broadcast
| tensorflow.transpose | 11,306 |
import tensorflow as tf
self.w1=tf.get_variable('w1', [4096,2048],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.w2=tf.get_variable('w2', [2048,3072],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.w3=tf.get_variable('w3', [3072,512],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.w4=tf.get_variable('w4', [512,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.b1 = tf.get_variable('b1', [2048],initializer=tf.constant_initializer(0.0))
self.b2 = tf.get_variable('b2', [3072],initializer=tf.constant_initializer(0.0))
self.b3 = tf.get_variable('b3', [512],initializer=tf.constant_initializer(0.0))
self.b4 = tf.get_variable('b4', [classnum],initializer=tf.constant_initializer(0.0))
def inference(self,images):
| tensorflow.constant_initializer | 11,307 |
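A minimal sketch (TensorFlow 1.x; layer sizes and names are examples) of one fully connected layer built with `tf.get_variable`, a Xavier weight initializer, and a zero-initialized bias, in the spirit of the row above:

```python
# Illustrative sketch (TensorFlow 1.x): one dense layer with a Xavier-initialized
# weight matrix and a constant-zero bias, then a ReLU non-linearity.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4096])
w = tf.get_variable('w1', [4096, 2048],
                    initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable('b1', [2048],
                    initializer=tf.constant_initializer(0.0))
h = tf.nn.relu(tf.nn.bias_add(tf.matmul(x, w), b))
```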
import tensorflow as tf
'wav_raw': tf.FixedLenFeature([], tf.string),
'noisy_raw': tf.FixedLenFeature([], tf.string),
})
wave = tf.decode_raw(features['wav_raw'], tf.int32)
wave.set_shape(canvas_size)
wave = (2./65535.) * tf.cast((wave - 32767), tf.float32) + 1.
noisy = tf.decode_raw(features['noisy_raw'], tf.int32)
noisy.set_shape(canvas_size)
noisy = (2./65535.) * tf.cast((noisy - 32767), tf.float32) + 1.
| tensorflow.cast | 11,308 |
import tensorflow as tf
word_probs = _clip_and_normalize(word_probs, epsilon)
one_hot_spare_rep = tf.one_hot(answers, vsize)
| tensorflow.one_hot | 11,309 |
import tensorflow as tf
def build_anet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
# sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
sigma = tf.clip_by_value(sigma, 0.0, 1.0)
norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def build_cnet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
| tensorflow.distributions.Normal | 11,310 |
import tensorflow as tf
loss_summary = tf.summary.scalar('Loss', cross_entropy)
acc_summary = tf.summary.scalar('Accuracy', accuracy)
# summaries for TensorBoard visualisation
validation_summary = tf.summary.merge([img_summary, acc_summary])
training_summary = tf.summary.merge([img_summary, loss_summary])
test_summary = tf.summary.merge([img_summary, acc_summary])
# saver for checkpoints
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5)
summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Training and validation
for step in range(FLAGS.max_steps):
# Training: Backpropagation using train set
(trainImages, trainLabels) = cifar.getTrainBatch()
| tensorflow.Session | 11,311 |
import tensorflow as tf
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
| tensorflow.concat | 11,312 |
import tensorflow as tf
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.is_training = tf.placeholder(tf.bool)
initializer = tf.contrib.layers.variance_scaling_initializer()
# Embedding Lookup 16
with tf.device('/cpu:0'), tf.name_scope("embedding"):
if use_he_uniform:
self.embedding_W = tf.get_variable(name='lookup_W', shape=[num_quantized_chars, embedding_size],
initializer=tf.contrib.layers.variance_scaling_initializer())
else:
self.embedding_W = tf.Variable(tf.random_uniform([num_quantized_chars, embedding_size], -1.0, 1.0),name="embedding_W")
self.embedded_characters = tf.nn.embedding_lookup(self.embedding_W, self.input_x)
embedded_text_expand = tf.expand_dims(self.embedded_characters, -1)
with tf.device('/cpu:0'), tf.name_scope("embedding_tags"):
W_tags = tf.get_variable("embed_W_tags", [tags_vocab_size, embedding_size], initializer=initializer)
embedded_tags = tf.nn.embedding_lookup(W_tags, self.input_tags)
embedded_tags_expanded = tf.expand_dims(embedded_tags, -1)
with tf.device('/cpu:0'), tf.name_scope("embedding_deps"):
W_deps = tf.get_variable("embed_W_deps", [deps_vocab_size, embedding_size], initializer=initializer)
embedded_deps = tf.nn.embedding_lookup(W_deps, self.input_deps)
embedded_deps_expanded = tf.expand_dims(embedded_deps, -1)
with tf.device('/cpu:0'), tf.name_scope("embedding_head"):
| tensorflow.expand_dims | 11,313 |
import tensorflow as tf
classnum=12
testnum = tf.placeholder(tf.int32)
trainnum = tf.placeholder(tf.int32)
validnum = tf.placeholder(tf.int32)
learnrate = tf.placeholder(tf.float32)
def getinputs(path):
filename_queue=tf.train.string_input_producer([path])
reader=tf.TFRecordReader()
_,serialized_example=reader.read(filename_queue)
features=tf.parse_single_example(serialized_example,
features={
'label':tf.FixedLenFeature([], tf.int64),
'img_raw' : tf.FixedLenFeature([], tf.string),
})
image=tf.decode_raw(features['img_raw'],tf.uint8)
| tensorflow.TFRecordReader | 11,314 |
import tensorflow as tf
images, labels = input_name.build_input(
FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200
Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
Res.build_graph()
saver = tf.train.Saver()
adv_images = adv_craft_func(hps, images, FLAGS.attack_method, eps=FLAGS.eps, RCE_train=FLAGS.RCE_train)
model_nor = model_name.ResNet(hps, images, FLAGS.mode, Reuse=True)
model_nor.build_graph()
model_adv = model_name.ResNet(hps, adv_images, FLAGS.mode, Reuse=True)
model_adv.build_graph()
# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
logits_nor = model_nor.t_SNE_logits
logits_adv = model_adv.t_SNE_logits
dim_logits = logits_nor.shape[1]
if hps.batch_size!=logits_nor.shape[0]:
print('Error!!!!!')
return
logits_all = np.reshape(np.array([]),(0,dim_logits))
| tensorflow.global_variables_initializer | 11,315 |
from tensorflow.contrib.eager.python import tfe
def loss(labels, predictions):
"""Computes mean squared loss."""
return tf.reduce_mean(tf.squared_difference(predictions, labels))
def test(model, eval_data):
"""Computes the average loss on eval_data, which should be a Dataset."""
avg_loss = tfe.metrics.Mean("loss")
for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
predictions = model((chars, sequence_length), training=False)
avg_loss(loss(labels, predictions))
print("eval/loss: %.6f\n" % avg_loss.result())
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", avg_loss.result())
| tensorflow.contrib.eager.python.tfe.metrics.Mean | 11,316 |
from tensorflow.contrib import layers
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(examples,
self._feature_columns)
return self._estimator.export(
export_dir=export_dir,
| tensorflow.contrib.layers.parse_feature_columns_from_examples | 11,317 |
import tensorflow as tf
if encoder.position_bias and input_length is not None and time is not None:
src_pos = tf.tile(tf.expand_dims(tf.range(time_steps), axis=0), [batch_size, 1])
trg_pos = tf.tile(tf.reshape(time, [1, 1]), [batch_size, time_steps])
src_len = tf.tile(tf.expand_dims(input_length, axis=1), [1, time_steps]) # - 1
| tensorflow.reshape | 11,318 |
import tensorflow as tf
elif vocab_path.endswith('tfrecord.gz'):
dataset = tf.data.TFRecordDataset(vocab_path, compression_type='GZIP')
vocab_tensor = dataset.batch(tf.int32.max).reduce(
tf.constant([], dtype=tf.string),
lambda state, elem: tf.concat([state, elem], axis=-1))
# Using as_numpy_iterator only works when executing eagerly.
| tensorflow.constant | 11,319 |
import tensorflow as tf
tf.flags.DEFINE_integer('autotune_threshold', None,
'The autotune threshold for the models')
tf.flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on')
tf.flags.DEFINE_integer('display_every', 10,
"""Number of local steps after which progress is printed
out""")
tf.flags.DEFINE_string('data_dir', None, """Path to dataset in TFRecord format
(aka Example protobufs). If not specified,
synthetic data will be used.""")
tf.flags.DEFINE_string('data_name', None,
"""Name of dataset: imagenet or flowers.
If not specified, it is automatically guessed
based on --data_dir.""")
tf.flags.DEFINE_string('resize_method', 'bilinear',
"""Method for resizing input images:
crop,nearest,bilinear,bicubic or area.
The 'crop' mode requires source images to be at least
as large as the network input size,
| tensorflow.flags.DEFINE_string | 11,320 |
import tensorflow as tf
if b_init is None:
b_init = tf.constant_initializer()
w = tf.get_variable('W', filter_shape, initializer=w_init)
b = None
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=b_init)
if split == 1:
conv = tf.nn.conv2d(inputdata, w, strides, padding, data_format=data_format)
else:
inputs = tf.split(inputdata, split, channel_axis)
kernels = tf.split(w, split, 3)
outputs = [tf.nn.conv2d(i, k, strides, padding, data_format=data_format)
for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, channel_axis)
ret = tf.identity(tf.nn.bias_add(conv, b, data_format=data_format)
if use_bias else conv, name=name)
| tensorflow.nn.conv2d | 11,321 |
import tensorflow as tf
batch_size = tf.shape(attention_weights)[0]
src_len = tf.shape(attention_weights)[2]
trg_len = tf.shape(attention_weights)[1]
src_indices = tf.tile(tf.reshape(tf.range(src_len), shape=[1, 1, src_len]), [batch_size, trg_len, 1])
trg_indices = tf.tile(tf.reshape(tf.range(trg_len), shape=[1, trg_len, 1]), [batch_size, 1, src_len])
source_length = encoder_input_length[0]
target_length = tf.to_int32(tf.reduce_sum(trg_mask, axis=1))
true_src_len = tf.reshape(source_length, shape=[batch_size, 1, 1]) - 1
true_trg_len = tf.reshape(target_length, shape=[batch_size, 1, 1]) - 1
src_mask = tf.to_float(tf.sequence_mask(source_length, maxlen=src_len))
mask = tf.matmul(tf.expand_dims(trg_mask, axis=2), tf.expand_dims(src_mask, axis=1))
monotonous = tf.sqrt(((true_trg_len * src_indices - true_src_len * trg_indices) ** 2)
/ (true_trg_len**2 + true_src_len**2))
monotonous = tf.to_float(monotonous < monotonicity_dist)
non_monotonous = (1 - monotonous) * mask
attn_loss = tf.reduce_sum(attention_weights * tf.stop_gradient(non_monotonous)) / tf.to_float(batch_size)
if monotonicity_decay:
decay = tf.stop_gradient(0.5 ** (tf.to_float(global_step) / monotonicity_decay))
else:
decay = 1.0
xent_loss += monotonicity_weight * decay * attn_loss
losses = [xent_loss, reinforce_loss, baseline_loss_]
| tensorflow.sqrt | 11,322 |
import tensorflow as tf
z0_valid = tf.to_float(
tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))
z1_valid = tf.to_float(
tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0))
w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *
(z1_f - z) * x1_valid * y1_valid * z1_valid),
1)
w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) *
(z1_f - z) * x0_valid * y1_valid * z1_valid),
1)
w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) *
(z1_f - z) * x1_valid * y0_valid * z1_valid),
1)
w_z0_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) *
(z1_f - z) * x0_valid * y0_valid * z1_valid),
1)
w_z1_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *
(z - z0_f) * x1_valid * y1_valid * z0_valid),
1)
w_z1_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) *
(z - z0_f) * x0_valid * y1_valid * z0_valid),
1)
w_z1_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) *
(z - z0_f) * x1_valid * y0_valid * z0_valid),
1)
w_z1_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) *
(z - z0_f) * x0_valid * y0_valid * z0_valid),
| tensorflow.expand_dims | 11,323 |
from tensorflow.python.framework import ops
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._mu_t = None
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
self._mu_t = ops.convert_to_tensor(self._mu, name="prox_mu")
def _create_slots(self, var_list):
# Create slots for the global solution.
for v in var_list:
self._zeros_slot(v, "vstar", self._name)
self._zeros_slot(v, "gold", self._name)
| tensorflow.python.framework.ops.convert_to_tensor | 11,324 |
import tensorflow as tf
expand_W = tf.get_variable("W_%d" % i, [current_size, output_sizes[i]])
expand_b = tf.get_variable("b_%d" % i, [output_sizes[i]])
output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
output_data = tf.nn.elu(output_data)
current_size = output_sizes[i]
#expand_W = tf.get_variable("final_W", [current_size, 1])
| tensorflow.nn.elu | 11,325 |
import tensorflow as tf
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
shape = control_flow_ops.with_dependencies([rank_assertions[i]],
tf.shape(image))
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
| tensorflow.equal | 11,326 |
import tensorflow as tf
depthwise_filter_shape = [kernel_size, kernel_size] + [in_channel, depth_multiplier]
w_init = tf.contrib.layers.variance_scaling_initializer()
| tensorflow.contrib.layers.variance_scaling_initializer | 11,327 |
import tensorflow as tf
num_features = tensor.get_shape()[-1].value
weight_init = tf.truncated_normal([num_features, size], stddev=0.01)
if bias_init is None:
bias_init = tf.zeros([size])
with tf.name_scope(name, 'fully_connected', [tensor]):
w = tf.Variable(weight_init, name='w', dtype=tf.float32)
b = tf.Variable(bias_init, name='b', dtype=tf.float32)
return tf.nn.xw_plus_b(tensor, w, b)
| tensorflow.name_scope | 11,328 |
import tensorflow as tf
with tf.name_scope(scope, 'focal_loss', [cls_preds, onehot_labels]) as sc:
logits = tf.convert_to_tensor(cls_preds)
onehot_labels = tf.convert_to_tensor(onehot_labels)
precise_logits = tf.cast(logits, tf.float32) if (
logits.dtype == tf.float16) else logits
onehot_labels = tf.cast(onehot_labels, precise_logits.dtype)
predictions = tf.nn.sigmoid(logits)
| tensorflow.cast | 11,329 |
import tensorflow as tf
def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
print ("querry_size mismatch")
query = tf.concat(values = [
query,
query,
], axis=1)
| tensorflow.concat | 11,330 |
import tensorflow as tf
import config
# hardware related configuration
tf.app.flags.DEFINE_integer(
'num_readers', 16,#16
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 48,#48
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
| tensorflow.app.flags.DEFINE_integer | 11,331 |
import tensorflow as tf
cross_stitch = tf.get_variable("cross_stitch", shape=(input.shape[1], input.shape[1]), dtype=tf.float32,
collections=['cross_stitches', tf.GraphKeys.GLOBAL_VARIABLES],
initializer=tf.initializers.identity())
output = tf.matmul(input, cross_stitch)
# need to call .value to convert Dimension objects to normal value
| tensorflow.matmul | 11,332 |
import tensorflow as tf
global_step,
decay_steps,
cifar10.LEARNING_RATE_DECAY_FACTOR,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
for i in xrange(FLAGS.num_gpus):
| tensorflow.train.GradientDescentOptimizer | 11,333 |
import tensorflow as tf
self.all_params = tf.trainable_variables()
if self.config.l2_norm is not None:
self.logger.info("applying l2 loss")
variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables)
self.loss += l2_loss
if self.config.decay is not None:
| tensorflow.get_collection | 11,334 |
import tensorflow as tf
return fn_with_timing
def _create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False)
# These metrics are implemented with py_funcs and therefore do not work with TPU
TPU_METRIC_BLACKLIST = set([
metrics.Metrics.APPROX_BLEU,
metrics.Metrics.ROUGE_2_F,
metrics.Metrics.ROUGE_L_F,
])
def _create_tpu_eval_metrics_fn(problem, hparams):
"""Create the metrics_fn that TPUEstimatorSpec expects."""
tm = problem.get_hparams().target_modality
| tensorflow.get_variable | 11,335 |
import tensorflow as tf
def build_network(self, u, v, w, u_mag, v_mag, w_mag, low_resblock=8, hi_resblock=4, channel_nr=64):
channel_nr = 64
speed = (u ** 2 + v ** 2 + w ** 2) ** 0.5
mag = (u_mag ** 2 + v_mag ** 2 + w_mag ** 2) ** 0.5
pcmr = mag * speed
phase = tf.keras.layers.concatenate([u,v,w])
pc = tf.keras.layers.concatenate([pcmr, mag, speed])
pc = conv3d(pc,3,channel_nr, 'SYMMETRIC', 'relu')
pc = conv3d(pc,3,channel_nr, 'SYMMETRIC', 'relu')
phase = conv3d(phase,3,channel_nr, 'SYMMETRIC', 'relu')
| tensorflow.keras.layers.concatenate | 11,336 |
import tensorflow as tf
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
| tensorflow.contrib.tpu.TPUEstimator | 11,337 |
import tensorflow as tf
loss_loc = tf.reduce_mean(loss_loc)
# classification loss (crossentropy)
# 1. compute max conf across batch for hard negative mining
loss_class = tf.where(mask_neg,
1 - class_pred[:, 0][..., tf.newaxis], 0)
# 2. hard negative mining
loss_class = tf.reshape(loss_class, [num_batch, num_prior])
loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING')
loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
num_pos_per_batch = tf.reduce_sum(
tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
tf.cast(num_prior, tf.float32) - 1)
mask_hard_neg = tf.reshape(
tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
[num_batch * num_prior, 1])
# 3. classification loss including positive and negative examples
| tensorflow.reshape | 11,338 |
import tensorflow as tf
else:
f1 = 'CE'
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNE_' + f1, tsne_return)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNElabels_' + f1, labels_all)
np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNEisadv_' + f1, is_adv_all)
return None
def tSNE_visual_carliniLi(hps, num_batch):
# Construct graph
images, labels = input_name.build_input(
FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200
Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
Res.build_graph()
saver = tf.train.Saver()
# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
model_carlini = models_carlini(hps)
if FLAGS.attack_method == 'carliniLi':
attack_carlini = attacks.carliniLi.CarliniLi(sess, model_carlini, largest_const=10 ** -3)
| tensorflow.train.Saver | 11,339 |
import tensorflow as tf
loss_class_mask)
filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
| tensorflow.boolean_mask | 11,340 |
import tensorflow as tf
print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \
"True Class:", np.argmax(Yte[i]))
# Calculate accuracy
if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
accuracy += 1. / len(Xte)
print("Accuracy:", accuracy)
# Make the log files in TensorBoard
logs_path = "./logs"
logWriter = tf.summary.FileWriter(logs_path, sess.graph)
| tensorflow.summary.FileWriter | 11,341 |
import tensorflow as tf
x_labels = []
for i in range(num_bits):
x_labels.append(
tf.floormod(
tf.floordiv(tf.to_int32(x_l),
tf.to_int32(base)**i), tf.to_int32(base)))
res = tf.concat(x_labels, axis=-1)
return tf.to_float(res)
def embed(self, x):
"""Embedding function that takes discrete latent and returns embedding.
| tensorflow.concat | 11,342 |
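A self-contained sketch (TensorFlow 1.x; the base and values are examples) of the digit-decomposition loop above, which splits integers into base-`base` digits with `tf.floordiv`/`tf.floormod`:

```python
# Illustrative sketch (TensorFlow 1.x): decompose integers into base-`base`
# digits (least-significant digit first), then concatenate along the last axis.
import tensorflow as tf

x_l = tf.constant([[6], [11]])      # integers to decompose
base, num_bits = 2, 4               # four binary digits each

digits = [tf.floormod(tf.floordiv(tf.to_int32(x_l), tf.to_int32(base) ** i),
                      tf.to_int32(base))
          for i in range(num_bits)]
res = tf.concat(digits, axis=-1)    # [[0, 1, 1, 0], [1, 1, 0, 1]]
with tf.Session() as sess:
    print(sess.run(tf.to_float(res)))
```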
import tensorflow as tf
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
| tensorflow.train.Scaffold | 11,343 |
import tensorflow as tf
# match attn_dist[batch_size, passage_length] to sparse one-hot representation [batch_size, passage_length, extended_vsize]
batch_nums = tf.range(0, limit=batch_size) # shape (batch_size)
batch_nums = tf.expand_dims(batch_nums, axis=1) # shape (batch_size, 1)
batch_nums = tf.tile(batch_nums, [1, passage_length]) # shape (batch_size, passage_length)
step_nums = tf.range(0, limit=passage_length) # [passage_length]
| tensorflow.expand_dims | 11,344 |
import tensorflow as tf
loss = tf.maximum(0.0, soft_sign * ((tgt1 - tgt2) - (pred1 - pred2)))
loss = tf.reduce_mean(loss)
return loss
def contra_step_lossV2(pred, tgt):
# Step-wise contrastive loss
pred1, pred2 = tf.split(pred, 2, axis=0)
tgt1, tgt2 = tf.split(tgt, 2, axis=0)
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
loss = tf.reduce_mean(loss)
return loss
| tensorflow.cast | 11,345 |
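A runnable sketch (TensorFlow 1.x; the prediction and target values are made up) of the pairwise hinge idea in `contra_step_lossV2` above: split the batch into two halves and penalise prediction gaps that are smaller than the target gaps:

```python
# Illustrative sketch (TensorFlow 1.x): step-wise contrastive loss -- for each
# pair (first half vs second half), hinge on (target gap) - (prediction gap).
import tensorflow as tf

pred = tf.constant([2.0, 1.0, 0.5, 0.4])   # predictions, halves paired up
tgt = tf.constant([3.0, 1.0, 0.0, 2.0])    # targets

pred1, pred2 = tf.split(pred, 2, axis=0)
tgt1, tgt2 = tf.split(tgt, 2, axis=0)
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg, tgt_small = tf.where(geq, tgt1, tgt2), tf.where(geq, tgt2, tgt1)
pred_larg, pred_small = tf.where(geq, pred1, pred2), tf.where(geq, pred2, pred1)
loss = tf.reduce_mean(tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small)))
with tf.Session() as sess:
    print(sess.run(loss))
```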
import tensorflow as tf
w_scale = strides[2]
scale = tf.stack([1, h_scale, w_scale])
| tensorflow.stack | 11,346 |
import tensorflow as tf
seq_lengths = tf.fill([batch_size], seq_length)
# Perform beam search
indices, values, shape, indices_u, values_u, shape_u, log_probs = ctc_ext_beam_search_decoder(
inputs=inputs, sequence_length=seq_lengths,
beam_width=beam_width, blank_index=blank_index, top_paths=1,
blank_label=0)
decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0])
decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32)
decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0])
decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32)
# Adjust event vals according to representation
decoded = tf.where(tf.not_equal(decoded, 0), decoded+shift, decoded)
decoded_u = tf.where(tf.not_equal(decoded_u, 0), decoded_u+shift, decoded_u)
# Set default vals
decoded = tf.where(tf.equal(decoded, 0), def_val, decoded)
decoded_u = tf.where(tf.equal(decoded_u, 0), def_val, decoded_u)
# We know the shape of decoded_u, and the first dim for decoded
decoded_u.set_shape([batch_size, seq_length])
decoded = tf.reshape(decoded, [batch_size, -1])
return decoded_u, decoded
| tensorflow.not_equal | 11,347 |
import tensorflow as tf
use_feature_trans=self.use_feature_trans)
self.get_loss = partial(self.model_pred.get_loss, use_trans_loss=self.use_trans_loss)
with tf.variable_scope(name):
self.noise = tf.placeholder(tf.float32, shape=[self.batch_size, self.noise_dim], name='noise') # Noise vector.
self.real_pc = tf.placeholder(tf.float32, shape=[self.batch_size] + self.n_output, name='real_pc') # Ground-truth.
with tf.variable_scope('rotation'):
self.rot_label_pl = tf.placeholder(tf.int32, shape=self.batch_size, name='rot_label_pl')
self.real_pc_rotated = self.rotate_n_angles(self.real_pc, self.rot_label_pl)
self.real_pc_pred, real_pc_end_points = self.get_pred(self.real_pc_rotated)
self.real_pc_rot_loss = self.get_loss(self.real_pc_pred, self.rot_label_pl, real_pc_end_points)
with tf.variable_scope('generator'):
| tensorflow.placeholder | 11,348 |
import tensorflow as tf
rectified = lrelu(convolved, 0.2)
layers.append(rectified)
for i in range(n_layers):
with tf.variable_scope('layer_%d' % (len(layers) + 1)):
out_channels = ndf * min(2 ** (i + 1), 8)
stride = 1 if i == n_layers - 1 else 2
convolved = discrim_conv(
layers[-1], out_channels, stride=stride
)
normalized = batchnorm(convolved)
rectified = lrelu(normalized, 0.2)
layers.append(rectified)
with tf.variable_scope('layer_%d' % (len(layers) + 1)):
convolved = discrim_conv(rectified, out_channels=1, stride=1)
output = tf.sigmoid(convolved)
layers.append(output)
self.logits = layers[-1]
| tensorflow.sigmoid | 11,349 |
import tensorflow as tf
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
with tf.variable_scope("ffn_1"):
with tf.variable_scope("intermediate"):
intermediate_output = dense_layer_2d(
attention_output,
| tensorflow.variable_scope | 11,350 |
import tensorflow as tf
querry_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
| tensorflow.shape | 11,351 |
import tensorflow as tf
span_text_emb = tf.gather(head_emb, span_indices) # [k, max_span_width, emb]
with tf.variable_scope("head_scores"):
self.head_scores = util.projection(context_outputs, 1) # [num_words, 1]
span_head_scores = tf.gather(self.head_scores, span_indices) # [k, max_span_width, 1]
span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2) # [k, max_span_width, 1]
span_head_scores += tf.log(span_mask) # [k, max_span_width, 1]
| tensorflow.gather | 11,352 |
import tensorflow as tf
def get_masked_lm_output(
bert_config, input_tensor, output_weights, positions, label_ids, label_weights
):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range
),
)
input_tensor = modeling.layer_norm(input_tensor)
| tensorflow.variable_scope | 11,353 |
from tensorflow.python.framework import constant_op
def _ranking_train_input_fn():
features = {
"a.f1": constant_op.constant([[3.], [0.3], [1.]]),
"a.f2": constant_op.constant([[0.1], [3.], [1.]]),
"b.f1": constant_op.constant([[13.], [0.4], [5.]]),
"b.f2": constant_op.constant([[1.], [3.], [0.01]]),
}
| tensorflow.python.framework.constant_op.constant | 11,354 |
import tensorflow as tf
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = tf.train.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train ops.
labels = tf.constant(0, tf.int32, shape=[100], name="labels")
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, 10]), 1.0, 0.0)
logits = tf.get_collection("logits")[0]
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
onehot_labels,
name="xentropy")
loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
tf.scalar_summary(loss.op.name, loss)
# Creates the gradient descent optimizer with the given learning rate.
| tensorflow.range | 11,355 |
import tensorflow as tf
t_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/target_hyper')
with tf.variable_scope('soft_replacement'):
self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/' + current_time
| tensorflow.Session | 11,356 |
import tensorflow as tf
self.resnet_size = resnet_size
if not data_format:
data_format = (
'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')
data_format = "channels_last" #ki: added to make cpu runnable model
self.resnet_version = resnet_version
| tensorflow.test.is_built_with_cuda | 11,357 |
import tensorflow as tf
with tf.device('/cpu:0'), tf.name_scope("embedding_head"):
W_head = tf.get_variable("embed_W_head", [num_quantized_chars, embedding_size], initializer=initializer)
embedded_head = tf.nn.embedding_lookup(W_head, self.input_head)
| tensorflow.get_variable | 11,358 |
import tensorflow as tf
bias = np.zeros([filter_in_sizes[-1]], dtype=np.float32)
no_strides = [1, 1, 1, 1, 1]
[t1ind, t1val, t1sh] = sp.createRandomSparseTensor(rho_data, tensor_in_sizes, -3, 3)
s1 = tf.SparseTensor(indices=t1ind, values=t1val, dense_shape=t1sh)
d1 = sp.sparse_to_dense(t1ind, t1val, t1sh)
[t2ind, t2val, t2sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes)
s2 = tf.SparseTensor(indices=t2ind, values=t2val, dense_shape=t2sh)
d2 = sp.sparse_to_dense(t2ind, t2val, t2sh)
print("strides: \n", strides)
print("input shape", tensor_in_sizes)
print("filter shape", filter_in_sizes)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7
with tf.device("/gpu:0"):
convd = sc_module.direct_sparse_data_conversion(t1ind, t1val, t1sh)
convf = sc_module.direct_sparse_filter_conversion(t2ind, t2val, t2sh, t1sh)
with tf.Session(config=config) as sess:
pd = sess.run(convd)
pf = sess.run(convf)
tf.reset_default_graph()
ts = 0
with tf.device("/gpu:0"):
approx_scskconv = sc_module.direct_sparse_conv_kd(pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping, pf.out_indices, pf.out_values, pf.out_shape, pf.out_channel_mapping, bias, strides, padding, out_entry_count, dim, max_density, filter_type);
| tensorflow.ConfigProto | 11,359 |
from tensorflow.python.ops import array_ops
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
| tensorflow.python.ops.array_ops.split | 11,360 |
import tensorflow as tf
"""
with tf.variable_scope("attention_1"):
with tf.variable_scope("self"):
| tensorflow.variable_scope | 11,361 |
import tensorflow as tf
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
| tensorflow.reshape | 11,362 |
import tensorflow as tf
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
| tensorflow.reshape | 11,363 |
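A short sketch (TensorFlow 1.x; shapes are illustrative) of the flatten/restore pattern that `reshape_to_matrix` and `reshape_from_matrix` above implement: collapse the leading dims into one, apply a rank-2 op, then reshape back:

```python
# Illustrative sketch (TensorFlow 1.x): flatten a rank-3 tensor to rank 2,
# apply a dense projection, then restore the original leading dimensions.
import tensorflow as tf

x = tf.placeholder(tf.float32, [8, 128, 768])     # [batch, seq_len, width]
width = x.shape[-1].value
flat = tf.reshape(x, [-1, width])                  # [8 * 128, 768]
projected = tf.layers.dense(flat, 768)             # rank-2 operation
restored = tf.reshape(projected, [8, 128, 768])    # back to rank 3
```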
import tensorflow as tf
['image size must be >= {} in both height and width.'.format(min_dim)])
with tf.control_dependencies([shape_assert]):
| tensorflow.control_dependencies | 11,364 |
import tensorflow as tf
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
# The code to modify output nodes
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
| tensorflow.contrib.tpu.TPUEstimatorSpec | 11,365 |
import tensorflow as tf
# Optimizer
with tf.name_scope("training_op"):
self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
| tensorflow.compat.v1.train.AdamOptimizer | 11,366 |
from tensorflow.python.ops import array_ops
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
| tensorflow.python.ops.array_ops.reshape | 11,367 |
import tensorflow as tf
with tf.name_scope(name):
inputs_shape = list(map(int, inputs.get_shape()))
predictions_shape = list(map(int, predictions.get_shape()))
nr_mix = int(predictions_shape[-1] / 10)
logit_probs = predictions[:, :, :, :nr_mix]
predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3])
means = predictions[:, :, :, :, :nr_mix]
log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.)
coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix])
inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix])
m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :],
[inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
m3 = tf.reshape(
means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] +
coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :],
[inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
means = tf.concat([
tf.reshape(means[:, :, :, 0, :],
| tensorflow.zeros | 11,368 |
import tensorflow as tf
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
| tensorflow.matmul | 11,369 |
import tensorflow as tf
tf.constant(2, dtype=tf.int32, name="layer_id"),
inputs,
prev_c,
prev_h,
anchors,
anchors_w_1,
arc_seq,
tf.constant([0.0], dtype=tf.float32, name="entropy"),
tf.constant([0.0], dtype=tf.float32, name="log_prob"),
]
loop_outputs = tf.while_loop(_condition, _body, loop_vars,
parallel_iterations=1)
| tensorflow.constant | 11,370 |
from tensorflow.python.framework import constant_op
var0 = variables.Variable([[0.0, 1.0], [2.0, 3.0]], dtype=dtype)
var1 = variables.Variable([4.0, 5.0], dtype=dtype)
with self._maybeWithDevice("/job:worker" if is_distributed else None):
grads0 = constant_op.constant([[0.1, 0.1], [0.1, 0.1]], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd = gradient_descent.GradientDescentOptimizer(3.0)
| tensorflow.python.framework.constant_op.constant | 11,371 |
import tensorflow as tf
if __name__ == '__main__':
tf.test.main()
| tensorflow.test.main | 11,372 |
import tensorflow as tf
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
x = (x-u)/tf.sqrt(s+e)
x = x*g+b
return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
| tensorflow.sqrt | 11,373 |
from tensorflow.python.ops import math_ops
metric = math_ops.div(tp, math_ops.add(tp, fn), name=scope)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fn_update), name='update')
if metrics_collections:
ops.add_to_collections(metrics_collections, metric)
| tensorflow.python.ops.math_ops.add | 11,374 |
import tensorflow as tf
return (logits, top_1_op, top_5_op)
loss = loss_function(logits, labels)
params = self.variable_mgr.trainable_variables_on_device(device_num)
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in params])
weight_decay = FLAGS.weight_decay
if weight_decay is not None and weight_decay != 0.:
loss += weight_decay * l2_loss
aggmeth = tf.AggregationMethod.DEFAULT
grads = tf.gradients(loss, params, aggregation_method=aggmeth)
if FLAGS.staged_vars:
grad_dtypes = [grad.dtype for grad in grads]
grad_shapes = [grad.shape for grad in grads]
grad_stage = data_flow_ops.StagingArea(grad_dtypes, grad_shapes)
grad_stage_op = grad_stage.put(grads)
# In general, this decouples the computation of the gradients and
# the updates of the weights.
| tensorflow.gradients | 11,375 |
import tensorflow as tf
im_flat = tf.to_float(im_flat)
i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0)
i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1)
i_z0_y1_x0 = tf.gather(im_flat, idx_z0_y1_x0)
i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1)
i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0)
i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1)
i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0)
i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1)
# Finally calculate interpolated values.
x0_f = tf.to_float(x0)
x1_f = tf.to_float(x1)
y0_f = tf.to_float(y0)
y1_f = tf.to_float(y1)
z0_f = tf.to_float(z0)
z1_f = tf.to_float(z1)
# Check the out-of-boundary case.
x0_valid = tf.to_float(
tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))
x1_valid = tf.to_float(
tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0))
y0_valid = tf.to_float(
tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0))
y1_valid = tf.to_float(
tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))
z0_valid = tf.to_float(
tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))
| tensorflow.to_float | 11,376 |
import tensorflow as tf
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def serving_input_fn():
label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids')
input_mask = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_mask')
segment_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'label_ids': label_ids,
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids,
})()
return input_fn
| tensorflow.placeholder | 11,377 |
import tensorflow as tf
e_fmean_mean = tf.einsum("nqm,mz->nqz", e_mean_Kuf, Lit_q_mu) # N x D x D
e_related_to_mean = e_fmean_mean + tf.matrix_transpose(e_fmean_mean) + e_mean_mean
if full_output_cov:
fvar = (
tf.matrix_diag(tf.tile((eKff - tf.trace(Li_eKuffu_Lit))[:, None], [1, num_func])) +
tf.matrix_diag(tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov)) +
# tf.matrix_diag(tf.trace(tf.matmul(Li_eKuffu_Lit, cov))) +
tf.einsum("ig,nij,jh->ngh", q_mu, Li_eKuffu_Lit, q_mu) -
# tf.matmul(q_mu, tf.matmul(Li_eKuffu_Lit, q_mu), transpose_a=True) -
fmean[:, :, None] * fmean[:, None, :] +
e_related_to_mean
)
else:
fvar = (
(eKff - tf.trace(Li_eKuffu_Lit))[:, None] +
tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) +
tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) -
fmean ** 2 +
tf.matrix_diag_part(e_related_to_mean)
)
return fmean, fvar
# ---------------------------------------------------------------
########################## HELPERS ##############################
# ---------------------------------------------------------------
def _sample_mvn(mean, cov, cov_structure):
| tensorflow.trace | 11,378 |
import tensorflow as tf
dict(
testcase_name='fixed_len_float',
make_tensors_fn=lambda:
{'x': tf.compat.v1.placeholder(tf.float32, (None,))},
feature_spec={'x': tf.io.FixedLenFeature([], tf.float32)}),
dict(
testcase_name='override',
make_tensors_fn=_make_tensors_with_override,
feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)},
domains={'x': schema_pb2.IntDomain(is_categorical=True)}),
dict(
testcase_name='override_with_session',
make_tensors_fn=_make_tensors_with_override,
feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)},
domains={
'x': schema_pb2.IntDomain(min=5, max=6, is_categorical=True)
},
create_session=True))
# pylint: enable=g-long-lambda
def test_infer_feature_schema(self,
make_tensors_fn,
feature_spec,
domains=None,
create_session=False):
with tf.compat.v1.Graph().as_default() as graph:
tensors = make_tensors_fn()
| tensorflow.io.FixedLenFeature | 11,379 |
import tensorflow as tf
def dropout_layer(self, data):
training = self.mode == tf.estimator.ModeKeys.TRAIN
output = tf.layers.dropout(data, rate=self.params["dropout"], training=training)
return output
def layer_normalization_layer(self, data):
output = tf.contrib.layers.layer_norm(data)
return output
def dense_layer(self, data, num_tags):
logits = tf.layers.dense(data, num_tags)
return logits
def load_tag_data(self):
# data = np.loadtxt(self.params['tags'], dtype=np.unicode, encoding=None)
data = self.params["tags_data"]
mapping_strings = tf.Variable(data)
return mapping_strings
def load_word_data(self):
| tensorflow.layers.dense | 11,380 |
import tensorflow as tf
conv = tf.nn.conv2d(bottom, filt, [1,stride,stride,1], padding='SAME')
bias = tf.nn.bias_add(conv, conv_biases)
tf.summary.histogram('weight', filt)
tf.summary.histogram('bias', conv_biases)
return bias
def conv_bn_relu(self, bottom,name, kernel_size, output_channels, initializer,stride=1, bn=False,training=False,relu=True):
input_channels = bottom.get_shape().as_list()[-1]
with tf.variable_scope(name) as scope:
kernel = self.variable('weights', [kernel_size, kernel_size, input_channels, output_channels], initializer, regularizer=tf.contrib.layers.l2_regularizer(0.0005))
conv = tf.nn.conv2d(bottom, kernel, [1, stride, stride, 1], padding='SAME')
biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0))
conv_layer = tf.nn.bias_add(conv, biases)
if bn:
conv_layer = self.batch_norm_layer('batch_norm_layer',conv_layer,training)
if relu:
conv_layer = tf.nn.relu(conv_layer, name=scope.name)
print('Conv layer {0} -> {1}'.format(bottom.get_shape().as_list(),conv_layer.get_shape().as_list()))
return conv_layer
def batch_norm_layer(self, name, input_tensor,training):
with tf.variable_scope(name) as scope:
return tf.contrib.layers.batch_norm(input_tensor,scope=scope,is_training=training,decay=0.99)
def deconv_bn_relu(self, bottom, name, kernel_size, output_channels, initializer, stride = 1, bn=False, training=False, relu=True):
input_shape = bottom.get_shape().as_list()
| tensorflow.nn.bias_add | 11,381 |
import tensorflow as tf
[self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3))
h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3),
[self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2))
h3 = lrelu(deconv2d(tf.concat([h2, skip_h1], 3),
[self.batch_size, s_h0, s_w0, nf0], name='d_h3', d_h=ns1, d_w=ns1))
print(h3.get_shape())
h4 = deconv2d(tf.concat([h3, skip_h0], 3),
[self.batch_size, s_h, s_w, self.c_dim], name='d_h4', d_h=ns0, d_w=ns0)
return h4
with tf.variable_scope("deconv") as scope:
output_h4 = decode(trans_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)
scope.reuse_variables()
truthoutput_h4 = decode(tgtimg_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)
self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3
print(tgtimg_z.get_shape())
self.out = output_h4
self.out2 = truthoutput_h4
| tensorflow.variable_scope | 11,382 |
import tensorflow as tf
return res + bias_term
def _clip_and_normalize(word_probs, epsilon):
'''
word_probs: 1D tensor of [vsize]
'''
word_probs = tf.clip_by_value(word_probs, epsilon, 1.0 - epsilon)
return word_probs / tf.reduce_sum(word_probs, axis=-1, keep_dims=True) # scale preds so that the class probas of each sample sum to 1
def CE_loss(word_probs, answers, loss_weights):
'''
| tensorflow.clip_by_value | 11,383 |
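A tiny sketch (TensorFlow 1.x; the probabilities are examples) of what `_clip_and_normalize` above does: clip probabilities away from 0 and 1, then renormalize each row to sum to 1:

```python
# Illustrative sketch (TensorFlow 1.x): clip word probabilities into
# [epsilon, 1 - epsilon], then rescale so each row sums to 1 again.
import tensorflow as tf

word_probs = tf.constant([[0.0, 0.5, 0.5],
                          [1.0, 0.0, 0.0]])
epsilon = 1e-6
clipped = tf.clip_by_value(word_probs, epsilon, 1.0 - epsilon)
normalized = clipped / tf.reduce_sum(clipped, axis=-1, keep_dims=True)
with tf.Session() as sess:
    print(sess.run(normalized))
```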
import tensorflow as tf
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 48,#48
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
| tensorflow.app.flags.DEFINE_integer | 11,384 |
import tensorflow as tf
masked_lm_ids,
masked_lm_weights,
next_sentence_example_loss,
next_sentence_log_probs,
next_sentence_labels,
):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(
masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]
)
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32
)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
| tensorflow.reshape | 11,385 |
import tensorflow as tf
the smallest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
new_width: and int32 scalar tensor indicating the new width.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
smallest_side = tf.to_float(smallest_side)
scale = tf.cond(tf.greater(height, width),
lambda: smallest_side / width,
lambda: smallest_side / height)
new_height = tf.to_int32(height * scale)
new_width = tf.to_int32(width * scale)
return new_height, new_width
def _aspect_preserving_resize(image, smallest_side):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
| tensorflow.to_int32 | 11,386 |
import tensorflow as tf
with tf.variable_scope("num_{}".format(k)):
output = tf.layers.conv1d(inputs, embed_size // 2, k, padding="SAME")
outputs = tf.concat((outputs, output), -1)
outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=is_training))
return outputs
| tensorflow.layers.batch_normalization | 11,387 |
import tensorflow as tf
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
| tensorflow.flags.DEFINE_string | 11,388 |
import tensorflow as tf
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
| tensorflow.tanh | 11,389 |
import tensorflow as tf
See the top of the file for details.
"""
with tf.variable_scope(scope, reuse=reuse):
observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0.0))
| tensorflow.placeholder | 11,390 |
import tensorflow as tf
import numpy as np
import tensorflow as tf
import layers
import networks
def _get_grad_norm(ys, xs):
"""Compute 2-norm of dys / dxs."""
return tf.sqrt(
tf.add_n([tf.reduce_sum(tf.square(g)) for g in tf.gradients(ys, xs)]))
def _num_filters_stub(block_id):
return networks.num_filters(block_id, 8, 1, 8)
class NetworksTest(tf.test.TestCase):
def test_resolution_schedule_correct(self):
rs = networks.ResolutionSchedule(
| tensorflow.square | 11,391 |
import tensorflow as tf
def tearDownClass(cls):
shutil.rmtree(FLAGS.data_dir)
def setUp(self):
# Reset FLAGS
FLAGS.rnn_num_layers = 1
FLAGS.sync_replicas = False
FLAGS.adv_training_method = None
FLAGS.num_candidate_samples = -1
FLAGS.num_classes = 2
FLAGS.use_seq2seq_autoencoder = False
# Reset Graph
tf.reset_default_graph()
def testClassifierGraph(self):
FLAGS.rnn_num_layers = 2
model = graphs.VatxtModel()
train_op, _, _ = model.classifier_training()
# Pretrained vars: embedding + LSTM layers
self.assertEqual(
len(model.pretrained_variables), 1 + 2 * FLAGS.rnn_num_layers)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess)
sess.run(train_op)
| tensorflow.reset_default_graph | 11,392 |
import tensorflow as tf
input_props = []
input_props.append((tf.string, [None, None])) # Tokens.
input_props.append((tf.float32, [None, None, self.context_embeddings.size])) # Context embeddings.
input_props.append((tf.float32, [None, None, self.head_embeddings.size])) # Head embeddings.
input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers])) # LM embeddings.
input_props.append((tf.int32, [None, None, None])) # Character indices.
input_props.append((tf.int32, [None])) # Text lengths.
input_props.append((tf.int32, [None])) # Speaker IDs.
input_props.append((tf.int32, [])) # Genre.
input_props.append((tf.bool, [])) # Is training.
input_props.append((tf.int32, [None])) # Gold starts.
input_props.append((tf.int32, [None])) # Gold ends.
input_props.append((tf.int32, [None])) # Cluster ids.
self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.reset_global_step = tf.assign(self.global_step, 0)
learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
self.config["decay_frequency"], self.config["decay_rate"], staircase=True)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
optimizers = {
| tensorflow.placeholder | 11,393 |
import tensorflow as tf
#Construct predictions
image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size,
num_channel])  ############ MNIST and CIFAR10 are different here
adv_image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size,
num_channel])  ############ MNIST and CIFAR10 are different here
predict = tf.placeholder(tf.float32,shape=[hps.batch_size, 10])
logit_nor,tsne_logit_nor = model_carlini_adv.predict(image,tsne_logits=True)
logit_adv,tsne_logit_adv = model_carlini_adv.predict(adv_image,tsne_logits=True)
predict_nor = tf.nn.softmax(logit_nor)
predict_adv = tf.nn.softmax(logit_adv)
# Calculate entropy
argmax_y_onehot = tf.one_hot(tf.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1)
normalized_y_nonmaximal = tf.reduce_sum(predict * argmax_y_onehot, 1)
entropy = tf.reduce_sum(-tf.log(predict) * predict * argmax_y_onehot,1) / normalized_y_nonmaximal + tf.log(normalized_y_nonmaximal)
for k in range(1):
result_dict = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_for_attack_' + f1 + '.mat')
result_dict_median = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_median_for_attack_' + f1 + '.mat')
# e_mean = result_dict['mean_logits_' + f1] # 10X64
# e_invcovar = result_dict['inv_covar_' + f1] # 64X64X10
e_kernel_train = result_dict['kernel_'+f1+'_for_attack'] #100X64X10
e_median = result_dict_median['median_out'] # 10X1
if FLAGS.attack_method == 'carliniL2':
attack1 = attacks.carliniL2.CarliniL2(sess, model_carlini_adv, batch_size=10, max_iterations=10,targeted=True,
confidence=0, initial_const=1.0,binary_search_steps=9)
attack2 = None
| tensorflow.log | 11,394 |
import tensorflow as tf
with tf.Session() as sess:
self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testProblemHparamsModality(self):
problem = problem_hparams.TestProblem(input_vocab_size=2,
| tensorflow.contrib.eager.run_test_in_graph_and_eager_modes | 11,395 |
from tensorflow.python.client import session
def testDenseDistributed(self):
worker, unused_ps = self._setupCluster()
for dtype in [dtypes.float64, dtypes.half, dtypes.float32]:
with session.Session(worker.target):
var0, var1, update_op = self._setupDense(True, dtype)
self._assertDenseCorrect(var0, var1, update_op)
| tensorflow.python.client.session.Session | 11,396 |
import tensorflow as tf
for i in range(2): # op_1, op_2
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
prev_c, prev_h = next_c, next_h
logits = tf.matmul(next_h[-1], self.w_soft) + self.b_soft
if self.temperature is not None:
logits /= self.temperature
if self.tanh_constant is not None:
op_tanh = self.tanh_constant / self.op_tanh_reduce
logits = op_tanh * tf.tanh(logits)
if use_bias:
logits += self.b_soft_no_learn
op_id = tf.multinomial(logits, 1)
op_id = tf.to_int32(op_id)
op_id = tf.reshape(op_id, [1])
arc_seq = arc_seq.write(start_id + 2 * i + 1, op_id)
curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=op_id)
log_prob += curr_log_prob
curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=tf.nn.softmax(logits)))
entropy += curr_ent
inputs = tf.nn.embedding_lookup(self.w_emb, op_id)
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
| tensorflow.to_int32 | 11,397 |
import tensorflow as tf
def restore_param(self):
Saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
checkpoint = tf.train.get_checkpoint_state("saved_networks")
if checkpoint and checkpoint.model_checkpoint_path:
Saver.restore(self.sess,checkpoint.model_checkpoint_path)
print("Successfully loaded",checkpoint.model_checkpoint_path)
else:
print("Could not find old network weights")
| tensorflow.train.get_checkpoint_state | 11,398 |
import tensorflow as tf
"b",
bias_var_shape,
initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(
x,
w,
strides=strides,
padding=pad,
| tensorflow.nn.conv2d | 11,399 |