| seed (string, 25-2.89k chars) | seed_api (string, 14-102 chars) | index (int64, 0-14.8k) |
|---|---|---|
import tensorflow as tf
return model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"ner": NerProcessor
}
if not FLAGS.do_train and not FLAGS.do_eval:
| tensorflow.logging.set_verbosity | 12,200 |
import tensorflow as tf
with tf.variable_scope("layer_{}".format(layer)):
with tf.variable_scope("fw_cell"):
cell_fw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
with tf.variable_scope("bw_cell"):
cell_bw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
(fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
inputs=current_inputs,
| tensorflow.tile | 12,201 |
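The snippet above tiles a single learned initial LSTM state across every sentence in the batch. A minimal sketch of that `tf.tile` broadcast on toy tensors (shapes and values here are illustrative, not taken from the model):

```python
import tensorflow as tf

# One learned initial state of width 4, shared by every sequence in a batch of 3.
initial_c = tf.zeros([1, 4])   # stands in for cell_fw.initial_state.c
initial_h = tf.ones([1, 4])    # stands in for cell_fw.initial_state.h
num_sentences = 3

tiled_c = tf.tile(initial_c, [num_sentences, 1])  # -> shape (3, 4)
tiled_h = tf.tile(initial_h, [num_sentences, 1])  # -> shape (3, 4)
print(tiled_c.shape, tiled_h.shape)
```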
import tensorflow as tf
output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1')
return output_
if decoder.use_dropout: # FIXME: why no pervasive dropout here?
initial_state = tf.nn.dropout(initial_state, keep_prob=decoder.initial_state_keep_prob)
with tf.variable_scope(scope_name):
activation_fn = None if decoder.initial_state == 'linear' else tf.nn.tanh
if decoder.initial_state == 'trained':
initial_state = get_variable(shape=[cell_state_size], name='initial_state')
initial_state = tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1])
elif decoder.initial_state == 'zero':
initial_state = tf.zeros(shape=[batch_size, cell_state_size])
elif decoder.layer_norm:
initial_state = dense(initial_state, cell_state_size, use_bias=False, name='initial_state_projection')
initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=activation_fn,
scope='initial_state_layer_norm')
else:
initial_state = dense(initial_state, cell_state_size, use_bias=True, name='initial_state_projection',
activation=activation_fn)
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
initial_output = initial_state
else:
# Last layer's state is the right-most part. Output is the left-most part of an LSTM's state.
initial_output = initial_state[:, -cell_output_size:]
time = tf.constant(0, dtype=tf.int32, name='time')
outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
| tensorflow.contrib.layers.layer_norm | 12,202 |
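The projection branch above finishes with `tf.contrib.layers.layer_norm`, which only exists in TF1's contrib. A hedged sketch of the normalization it performs, written by hand with `tf.nn.moments` (TF2-style, no learned scale or shift):

```python
import tensorflow as tf

def manual_layer_norm(x, epsilon=1e-6):
    # Zero-mean / unit-variance over the last axis, the core of layer norm
    # before any learned gain and bias are applied.
    mean, variance = tf.nn.moments(x, axes=[-1], keepdims=True)
    return (x - mean) * tf.math.rsqrt(variance + epsilon)

x = tf.constant([[1.0, 2.0, 3.0, 4.0]])
print(manual_layer_norm(x))  # roughly [[-1.34, -0.45, 0.45, 1.34]]
```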
import tensorflow as tf
per_example_loss, logits = modeling.classification_loss(
hidden=summary,
labels=label,
n_class=n_class,
initializer=xlnet_model.get_initializer(),
scope=cls_scope,
return_logits=True)
total_loss = tf.reduce_mean(per_example_loss)
return total_loss, per_example_loss, logits
def get_regression_loss(
FLAGS, features, is_training):
"""Loss for downstream regression tasks."""
| tensorflow.reduce_mean | 12,203 |
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
conv2c = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv2b)
pool2 = MaxPool2D(pool_size=[2,2])(conv2c)
# Block 3
conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool2)
conv3b = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3a)
conv3c = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3b)
pool3 = MaxPool2D(pool_size=[2,2])(conv3c)
# final convolutional layer
#removed GOAL_SIZE
conv4 = Conv2D(padding="valid", filters=RNN_SIZE-loc_layer_size, kernel_size=[2, 2], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=None)(pool3)
# FC layers
flat1a = Flatten(data_format='channels_last')(conv4)
#removed GOAL_SIZE
flat1b = Dense(units=RNN_SIZE-loc_layer_size)(flat1a)
# FC layers for goal_pos input
# goal_layer1 = Dense(units=GOAL_SIZE)(goal_pos)
# goal_layer2 = Dense(units=GOAL_SIZE)(goal_layer1)
# FC layers to find next location
loc_layer1 = Dense(units=loc_layer_size)(prev_loc)
loc_layer2 = Dense(units=loc_layer_size)(loc_layer1)
# Concatenation of above layers, followed by FC layer
concat = tf.concat([flat1b, loc_layer2],1) # goal_layer2
| tensorflow.keras.layers.Flatten | 12,204 |
import tensorflow as tf
def CE_loss(word_probs, answers, loss_weights):
'''
word_probs: [batch_size, max_dec_steps, vocab]
answers: [batch_size, max_dec_steps]
loss_weights: [batch_size, max_dec_steps]
'''
#word_probs = tf.nn.softmax(word_probs, dim=-1)
input_shape = tf.shape(word_probs)
vsize = input_shape[2]
epsilon = 1.0e-6
word_probs = _clip_and_normalize(word_probs, epsilon)
one_hot_sparse_rep = tf.one_hot(answers, vsize)
xent = -tf.reduce_sum(one_hot_sparse_rep * tf.log(word_probs), axis=-1) # [batch_size, max_dec_steps]
| tensorflow.shape | 12,205 |
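`CE_loss` above calls a `_clip_and_normalize` helper that is not shown. A hedged guess at what such a helper typically does (the behaviour below is an assumption, not the original code): clip probabilities away from zero so the later `tf.log` cannot return `-inf`, then renormalize each distribution:

```python
import tensorflow as tf

def _clip_and_normalize(word_probs, epsilon):
    # Hypothetical reconstruction: bound every probability to [epsilon, 1 - epsilon]
    # and rescale so each vocabulary distribution still sums to one.
    word_probs = tf.clip_by_value(word_probs, epsilon, 1.0 - epsilon)
    return word_probs / tf.reduce_sum(word_probs, axis=-1, keepdims=True)
```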
import tensorflow as tf
# 'golden': (golden, tf.zeros([], tf.int32)),
# 'predict': (predict, tf.zeros([], tf.int32))
}
for metric_name, op in metrics.items():
tf.summary.scalar(metric_name, op[1])
return metrics
def call(self, embeddings, nwords):
| tensorflow.summary.scalar | 12,206 |
import tensorflow as tf
Args:
* is_train: whether to restore a model for training
"""
save_path = tf.train.latest_checkpoint(os.path.dirname(FLAGS.ws_save_path))
if is_train:
self.saver_train.restore(self.sess_train, save_path)
else:
self.saver_eval.restore(self.sess_eval, save_path)
tf.logging.info('model restored from ' + save_path)
def __monitor_progress(self, summary, log_rslt, idx_iter, time_step):
"""Monitor the training progress.
Args:
* summary: summary protocol buffer
* log_rslt: logging operations' results
* idx_iter: index of the training iteration
* time_step: time step between two summary operations
| tensorflow.logging.info | 12,207 |
import tensorflow as tf
A = tf.reduce_sum(phi_sampling(A0/(4*y), D))
B0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(Y, 0), tf.expand_dims(Y, 1)), axis=2)
B = tf.reduce_sum(phi_sampling(B0/(4*y), D))
C0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(Y, 1)), axis=2)
C = tf.reduce_sum(phi_sampling(C0/(4*y), D))
return T*(A + B - 2*C)
| tensorflow.expand_dims | 12,208 |
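The `euclidean_norm_squared(tf.subtract(tf.expand_dims(..., 0), tf.expand_dims(..., 1)), axis=2)` pattern above computes all pairwise squared distances in a single broadcast. A small sketch of the same trick on a toy matrix:

```python
import tensorflow as tf

X = tf.constant([[0.0, 0.0],
                 [3.0, 4.0]])                        # two 2-D points
# [1, N, D] minus [N, 1, D] broadcasts to every pairwise difference.
diff = tf.expand_dims(X, 0) - tf.expand_dims(X, 1)   # shape (N, N, D)
sq_dist = tf.reduce_sum(tf.square(diff), axis=2)     # shape (N, N)
print(sq_dist)  # [[0., 25.], [25., 0.]]
```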
import tensorflow as tf
# Randomly set epoch to 0 in some cases as that's the inference value.
rand = tf.random.uniform([x_shape[0]])
epoch = tf.where(rand < 0.1, tf.zeros_like(epoch), epoch)
# Embed the epoch number.
emb_epoch = common_layers.embedding(epoch, 32, 32) # [batch, 32]
flat_x = tf.concat([flat_x, emb_epoch], axis=1)
flat_x = tf.layers.dropout(flat_x, rate=dropout)
x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu)
logits = tf.layers.dense(
x, self.hparams.problem.num_actions, name="dense2"
)
| tensorflow.layers.dropout | 12,209 |
import tensorflow as tf
import numpy as np
import tensorflow as tf
import layers as L
import vat
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('device', '/gpu:0', "device")
tf.app.flags.DEFINE_string('dataset', 'cifar10', "{cifar10, svhn}")
tf.app.flags.DEFINE_string('log_dir', "", "log_dir")
tf.app.flags.DEFINE_integer('seed', 1, "initial random seed")
tf.app.flags.DEFINE_bool('validation', False, "")
tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch")
tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch")
tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch")
tf.app.flags.DEFINE_integer('eval_freq', 5, "")
tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training")
tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay")
tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch")
tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial learning rate")
tf.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate")
| tensorflow.app.flags.DEFINE_string | 12,210 |
import tensorflow as tf
with self.test_session():
v0 = tf.Variable(123, name="v0")
save = tf.train.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
| tensorflow.train.Saver | 12,211 |
from tensorflow.python.ops import math_ops
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
| tensorflow.python.ops.math_ops.matmul | 12,212 |
import tensorflow as tf
mask_ratio = slide_window / (update_mask + 1e-8)
update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)
mask_ratio = mask_ratio * update_mask
with tf.variable_scope('parconv'):
x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding="SAME", name='zero-conv_' + id,
dilations=(1, dilation, dilation, 1))
x = x * mask_ratio
if use_bias:
bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
return x * update_mask
x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding=padding, name='zero-conv_' + id,
dilations=(1, dilation, dilation, 1))
if use_bias:
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
return x
def t_conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0):
# good old t-conv. I love it!
assert padding in ["SAME", "VALID"], 'valid paddings are "SAME", "VALID"'
if type(size) == int:
size = [size, size]
| tensorflow.nn.conv2d | 12,213 |
import tensorflow as tf
# choose the appropriate moments
if train:
used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm")
cur_mean, cur_var = used_mean, used_var
| tensorflow.nn.moments | 12,214 |
import tensorflow as tf
as_text = filename.endswith('txt')
log_fn('Writing GraphDef as %s to %s' % (
'text' if as_text else 'binary', self.graph_file))
tf.train.write_graph(sess.graph_def, path, filename, as_text)
log_fn('Running warm up')
| tensorflow.train.write_graph | 12,215 |
import tensorflow as tf
images = images.reshape((num_images, 3, 32, 32))
labels = data['labels']
with tf.Graph().as_default():
image_placeholder = tf.placeholder(dtype=tf.uint8)
encoded_image = tf.image.encode_png(image_placeholder)
with tf.Session('') as sess:
| tensorflow.placeholder | 12,216 |
import tensorflow as tf
valid_pre = tf.reshape(valid_inf, [validnum, classnum])
valid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1))
| tensorflow.argmax | 12,217 |
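The row above compares argmaxed predictions with argmaxed one-hot labels. A minimal sketch of turning that element-wise `tf.equal` mask into an accuracy scalar (TF2 eager execution is assumed for the `.numpy()` call):

```python
import tensorflow as tf

logits = tf.constant([[2.0, 1.0], [0.1, 0.9], [3.0, 0.0]])
labels = tf.constant([[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]])   # one-hot
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
print(accuracy.numpy())  # 2 of 3 rows match -> ~0.667
```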
import tensorflow as tf
def tearDown(self):
shutil.rmtree(self._temp_data_dir)
super(SpinnTest, self).tearDown()
def testBundle(self):
with tf.device(self._test_device):
lstm_iter = [np.array([[0, 1], [2, 3]], dtype=np.float32),
np.array([[0, -1], [-2, -3]], dtype=np.float32),
np.array([[0, 2], [4, 6]], dtype=np.float32),
np.array([[0, -2], [-4, -6]], dtype=np.float32)]
| tensorflow.device | 12,218 |
import tensorflow as tf
self._placeholders = [
input_placeholder,
target_placeholder,
tf.placeholder(tf.int32, shape=(None, ), name='input_lengths'),
]
queue_types = [tf.float32, target_type, tf.int32]
if self.local_condition:
self._placeholders.append(tf.placeholder(tf.float32, shape=(None, hparams.num_mels, None), name='local_condition_features'))
queue_types.append(tf.float32)
if self.global_condition:
self._placeholders.append(tf.placeholder(tf.int32, shape=(None, 1), name='global_condition_features'))
queue_types.append(tf.int32)
# Create queue for buffering data
queue = tf.FIFOQueue(8, queue_types, name='input_queue')
self._enqueue_op = queue.enqueue(self._placeholders)
variables = queue.dequeue()
self.inputs = variables[0]
self.inputs.set_shape(self._placeholders[0].shape)
self.targets = variables[1]
self.targets.set_shape(self._placeholders[1].shape)
self.input_lengths = variables[2]
| tensorflow.placeholder | 12,219 |
import tensorflow as tf
hidden_size = input_shape[2]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[hidden_size, output_size],
initializer=initializer)
b = tf.get_variable(
name="bias", shape=[output_size], initializer=tf.zeros_initializer)
ret = tf.einsum("BFH,HO->BFO", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
| tensorflow.einsum | 12,220 |
import tensorflow as tf
in_ch = 3 # Num channels of input images
train_images_ph = tf.placeholder(tf.int32, name='train_images_ph', shape=(None, w, h, in_ch)) # Train images
pred_images_ph = tf.placeholder(tf.int32, name='pred_images_ph', shape=(None, w, h, in_ch)) # Predict images
train_classes_ph = tf.placeholder(tf.int32, name='train_classes_ph', shape=(None,)) # Train classes
pred_classes_ph = tf.placeholder(tf.int32, name='pred_classes_ph', shape=(None,)) # Predict classes
| tensorflow.placeholder | 12,221 |
import tensorflow as tf
nearest_idx = tf.gather(
top_k_idx,
tf.random_uniform(
[1],
minval=0,
maxval=self.hparams.random_top_k - 1,
dtype=tf.int32),
axis=-1)
else:
if self.hparams.use_scales:
dist /= tf.reshape(self.hparams.scales,
[1, 1, self.hparams.moe_num_experts])
nearest_idx = tf.argmax(-dist, axis=-1)
nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size)
return nearest_hot
def embedding_lookup(self, x, means):
"""Compute nearest neighbors and loss for training the embeddings.
Args:
| tensorflow.reshape | 12,222 |
import tensorflow as tf
t1, t2 = tf.split(traj_tgt, 2, axis=0)
soft_sign = tf.tanh((t1 - t2) * temp)
loss = tf.maximum(0.0, soft_sign * ((t1 - t2) - (p1 - p2)))
loss = tf.reduce_mean(loss)
| tensorflow.maximum | 12,223 |
import tensorflow as tf
# init epoch counter for the queue
local_init_op = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(local_init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(a.workers):
t = threading.Thread(target=worker, args=(coord,))
t.start()
| tensorflow.train.Coordinator | 12,224 |
import tensorflow as tf
# tf.gradients will calculate dys/dxs with initial gradients for ys, so this is dq/da * da/dparams
self.policy_grads_and_vars = tf.gradients(ys=self.a, xs=self.e_params, grad_ys=a_grads)
with tf.variable_scope('A_train'):
opt = tf.train.RMSPropOptimizer(-self.lr) # (- learning rate) for ascent policy
self.train_op = opt.apply_gradients(zip(self.policy_grads_and_vars, self.e_params), global_step=GLOBAL_STEP)
| tensorflow.train.RMSPropOptimizer | 12,225 |
import tensorflow as tf
for _ in xrange(1000):
outputs.append(f(tf.ones([1, 10]), tf.ones([1, 10])))
| tensorflow.ones | 12,226 |
import tensorflow as tf
wd=weight_decay)
stride_h, stride_w = stride
outputs = tf.nn.conv2d(inputs, kernel,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
def conv2d_transpose(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1],
padding='SAME',
use_xavier=False,
stddev=1e-3,
| tensorflow.nn.leaky_relu | 12,227 |
import tensorflow as tf
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
output_layer = tf.reshape(output_layer, [-1, hidden_size])
logits = tf.matmul(output_layer, output_weight, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, 11])
| tensorflow.matmul | 12,228 |
import tensorflow as tf
elif depth == 49:
num_layers = [16,16,10,6]
else:
raise ValueError('depth=%g is a not a valid setting!' % depth)
# input tensors
self.input_x = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_x")
self.input_tags = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_tags")
self.input_deps = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_dependency")
self.input_head = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_head")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.is_training = tf.placeholder(tf.bool)
initializer = tf.contrib.layers.variance_scaling_initializer()
| tensorflow.placeholder | 12,229 |
import tensorflow as tf
Args:
nodes: A `Tensor` of `int64`.
edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing
edges.
Return:
A tuple of `SparseTensor` (neibors, weights).
neighbors: A `SparseTensor` of `int64`.
weights: A `SparseTensor` of `float`.
types: A `SparseTensor` of `int32`
"""
sp_returns = base._LIB_OP.get_sorted_full_neighbor(nodes, edge_types)
return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
tf.SparseTensor(*sp_returns[6:])
def sample_fanout(nodes, edge_types, counts, default_node=-1):
"""
Sample multi-hop neighbors of nodes according to weight in graph.
Args:
nodes: A 1-D `Tensor` of `int64`.
edge_types: A list of 1-D `Tensor` of int32. Specify edge types to filter
outgoing edges in each hop.
| tensorflow.SparseTensor | 12,230 |
from tensorflow.python.ops import math_ops
if specificity < 0 or specificity > 1:
raise ValueError('`specificity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
[predictions, labels]):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
(tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op,
fp_update_op) = _tp_fn_tn_fp(predictions, labels, thresholds, weights)
assert array_ops.squeeze(fp).get_shape().as_list()[0] == num_thresholds
def compute_sensitivity_at_specificity(name):
specificities = math_ops.div(tn, tn + fp + kepsilon)
tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the sensitivity:
return math_ops.div(tp[tf_index],
tp[tf_index] + fn[tf_index] + kepsilon,
name)
sensitivity = compute_sensitivity_at_specificity('value')
with ops.control_dependencies(
[tp_update_op, fn_update_op, tn_update_op, fp_update_op]):
update_op = compute_sensitivity_at_specificity('update_op')
if metrics_collections:
| tensorflow.python.ops.math_ops.div | 12,231 |
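The threshold list above places `num_thresholds - 2` evenly spaced values inside (0, 1) and pads both ends with `kepsilon` so the 0 and 1 operating points are always covered. A quick check of what it produces for `num_thresholds = 5` (plain Python, no TensorFlow needed):

```python
num_thresholds = 5
kepsilon = 1e-7
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
              for i in range(num_thresholds - 2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
print(thresholds)  # [-1e-07, 0.25, 0.5, 0.75, 1.0000001]
```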
import tensorflow as tf
self.w2=tf.get_variable('w2', [1024,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.b1 = tf.get_variable('b1', [1024],initializer=tf.constant_initializer(0.0))
self.b2 = tf.get_variable('b2', [classnum],initializer=tf.constant_initializer(0.0))
def inference(self,images):
images=tf.cast(images,tf.float32)/255.0
l1 = tf.matmul(images, self.w1)+self.b1
l1=tf.nn.relu(l1)
out = tf.matmul(l1, self.w2)+self.b2
return out
| tensorflow.cast | 12,232 |
import tensorflow as tf
]
loop_outputs = tf.while_loop(_condition, _body, loop_vars,
parallel_iterations=1)
arc_seq = loop_outputs[-3].stack()
arc_seq = tf.reshape(arc_seq, [-1])
entropy = tf.reduce_sum(loop_outputs[-2])
log_prob = tf.reduce_sum(loop_outputs[-1])
last_c = loop_outputs[-7]
last_h = loop_outputs[-6]
return arc_seq, entropy, log_prob, last_c, last_h
def build_trainer(self, child_model):
| tensorflow.reduce_sum | 12,233 |
import tensorflow as tf
anchor_negative_distances, anchor_negative_mining_distances = (
compute_hard_negative_distances(
anchor_match_distance_matrix,
anchor_match_negative_indicator_matrix,
use_semi_hard=use_semi_hard,
anchor_positive_mining_distances=anchor_positive_mining_distances,
anchor_match_mining_distance_matrix=(
anchor_match_mining_distance_matrix)))
def compute_triplet_loss(positive_distances, negative_distances):
losses = tf.nn.relu(positive_distances + margin - negative_distances)
losses = tf.where(
tf.stop_gradient(losses < losses.dtype.max), losses,
tf.zeros_like(losses))
num_nonzero_losses = tf.math.count_nonzero(losses)
loss = tf.math.reduce_mean(losses)
return loss, num_nonzero_losses
loss, num_active_triplets = compute_triplet_loss(anchor_positive_distances,
anchor_negative_distances)
| tensorflow.nn.relu | 12,234 |
import tensorflow as tf
if label.shape.ndims == 1:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
| tensorflow.nn.sparse_softmax_cross_entropy_with_logits | 12,235 |
import tensorflow as tf
def export_params(output_dir, name, params):
if not tf.gfile.Exists(output_dir):
tf.gfile.MkDir(output_dir)
| tensorflow.gfile.Exists | 12,236 |
from tensorflow.contrib.layers.python.layers import initializers
def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64):
with tf.variable_scope(scope, reuse=reuse):
out = img_in
gauss_initializer = initializers.xavier_initializer(uniform=False) # stddev = 1/n
with tf.variable_scope("convnet"):
out = layers.convolution2d(
out, num_outputs=num_filters, kernel_size=8, stride=4,
| tensorflow.contrib.layers.python.layers.initializers.xavier_initializer | 12,237 |
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(self.seed)
self.user_id = tf.placeholder(shape=[None, ], dtype=tf.int32, name='user_id')
self.item_id = tf.placeholder(shape=[None, ], dtype=tf.int32, name='item_id')
self.labels = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='labels')
self.interaction = gmf(uid=self.user_id, iid=self.item_id, num_users=self.train_set.num_users,
num_items=self.train_set.num_items, emb_size=self.num_factors,
reg_user=self.regs[0], reg_item=self.regs[1], seed=self.seed)
logits = tf.layers.dense(self.interaction, units=1, name='logits',
| tensorflow.placeholder | 12,238 |
import tensorflow as tf
Parameters
----------
shape: Tuple of integers, shape of returned Keras variable
dtype: Tensorflow dtype
name: String, name of returned Keras variable
Returns
-------
A variable (including Keras metadata), filled with `0.0`.
"""
shape = tuple(map(int, shape))
return tf.Variable(
tf.constant_initializer(0., dtype=dtype)(shape), dtype, name)
def cosine_distances(test, support):
"""Computes pairwise cosine distances between provided tensors
Parameters
----------
test: tf.Tensor
Of shape (n_test, n_feat)
support: tf.Tensor
Of shape (n_support, n_feat)
| tensorflow.constant_initializer | 12,239 |
import tensorflow as tf
return uint8_resize_bicubic(image, shape)
def center_crop(image, size):
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
| tensorflow.shape | 12,240 |
import tensorflow as tf
def _meshgrid(depth, height, width, z_near, z_far):
with tf.variable_scope('_meshgrid'):
x_t = tf.reshape(
tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]),
[depth, height, width])
y_t = tf.reshape(
tf.tile(tf.linspace(-1.0, 1.0, height), [width * depth]),
[depth, width, height])
y_t = tf.transpose(y_t, [0, 2, 1])
sample_grid = tf.tile(
tf.linspace(float(z_near), float(z_far), depth), [width * height])
z_t = tf.reshape(sample_grid, [height, width, depth])
z_t = tf.transpose(z_t, [2, 0, 1])
z_t = 1 / z_t
d_t = 1 / z_t
x_t /= z_t
y_t /= z_t
x_t_flat = tf.reshape(x_t, (1, -1))
y_t_flat = tf.reshape(y_t, (1, -1))
d_t_flat = tf.reshape(d_t, (1, -1))
ones = tf.ones_like(x_t_flat)
| tensorflow.transpose | 12,241 |
import tensorflow as tf
False -> Create variables or search for existing ones before creating
:return: tensor of shape [batch_size, 1]
"""
if reuse:
tf.get_variable_scope().reuse_variables()
with tf.name_scope('Discriminator_Gauss'):
dc_den1 = tf.nn.relu(dense(x, z_dim, n_l1, name='dc_g_den1'))
dc_den2 = tf.nn.relu(dense(dc_den1, n_l1, n_l2, name='dc_g_den2'))
output = dense(dc_den2, n_l2, 1, name='dc_g_output')
return output
| tensorflow.name_scope | 12,242 |
import tensorflow as tf
logits.get_shape().assert_is_compatible_with(labels.get_shape())
with tf.name_scope(name):
num_classes = labels.get_shape()[-1].value
labels = tf.cast(labels, logits.dtype)
if label_smoothing > 0:
smooth_positives = 1.0 - label_smoothing
| tensorflow.cast | 12,243 |
import tensorflow as tf
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
| tensorflow.app.flags.DEFINE_float | 12,244 |
import tensorflow as tf
"""
Check that K=Identity and K=None give same answer
"""
chol_from_diag = tf.stack([tf.diag(sqrt_diag[:, i]) for i in range(Datum.N)]) # N x M x M
s = sqrt_diag if diag else chol_from_diag
| tensorflow.diag | 12,245 |
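The list comprehension above turns an `[M, N]` matrix of per-datum standard deviations into an `N x M x M` stack of diagonal Cholesky factors. A hedged TF2-style equivalent doing the same in one call with `tf.linalg.diag` (`tf.diag` is the older TF1 name for the unbatched op); the values are illustrative:

```python
import tensorflow as tf

sqrt_diag = tf.constant([[1.0, 2.0],
                         [3.0, 4.0]])            # [M, N]: M latents, N data points
# Transpose to [N, M]; tf.linalg.diag then builds one M x M diagonal
# matrix per datum, i.e. the same N x M x M stack as the comprehension.
chol_from_diag = tf.linalg.diag(tf.transpose(sqrt_diag))
print(chol_from_diag.shape)  # (2, 2, 2)
```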
import tensorflow as tf
mask loss.
learning_rate: `Tensor` with shape `[batch, ]` for the learning_rate.
Returns:
List of summary ops to run on the CPU host.
"""
# Outfeed supports int32 but global_step is expected to be int64.
global_step = tf.reduce_mean(global_step)
# Host call fns are executed FLAGS.iterations_per_loop times after one
# TPU loop is finished, setting max_queue value to the same as number of
# iterations will make the summary writer only flush the data to storage
# once per loop.
with (tf.contrib.summary.create_file_writer(
params['model_dir'],
| tensorflow.reduce_mean | 12,246 |
import tensorflow as tf
init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
else:
init = tf.truncated_normal_initializer(stddev=init_stddev)
return tf.layers.conv2d_transpose(input, channels, kernel_size=size, strides=[stride, stride],
padding=padding, kernel_initializer=init, name='tr_conv' + id, use_bias=use_bias)
| tensorflow.layers.conv2d_transpose | 12,247 |
from tensorflow.python.ops import array_ops
else:
pool_conv = pool
net.append(pool_conv)
incept = array_ops.concat(net, 3, name=name)
return incept
| tensorflow.python.ops.array_ops.concat | 12,248 |
import tensorflow as tf
# Reshape patches.
p = tf.reshape(p, [blk_shape[0], blk_shape[1], blk_shape[2], -1])
# Convolution on patches.
q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True)
# Paste convolution results.
q_shape = tf.shape(q)
| tensorflow.nn.conv2d | 12,249 |
import tensorflow as tf
"""
batch_size = tf.shape(rewards)[0]
| tensorflow.shape | 12,250 |
import tensorflow as tf
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
| tensorflow.nn.relu | 12,251 |
import tensorflow as tf
eps=hparams.eps)
x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
x_, v_, x_accept_prob, x_out = dynamics.apply_transition(x)
samples = npr.normal(size=[hparams.n_samples, hparams.x_dim])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
np_x_, np_v_, np_x_accept_prob, np_x_out = sess.run(
[x_, v_, x_accept_prob, x_out], feed_dict={x: samples})
| tensorflow.Session | 12,252 |
import tensorflow as tf
embedded_input = tf.nn.embedding_lookup(embedding, input_)
if decoder.use_dropout and decoder.word_keep_prob is not None:
noise_shape = [1, 1] if decoder.pervasive_dropout else [tf.shape(input_)[0], 1]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.word_keep_prob, noise_shape=noise_shape)
if decoder.use_dropout and decoder.embedding_keep_prob is not None:
| tensorflow.shape | 12,253 |
import tensorflow as tf
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
def linear(x, shape, name, bias=False):
with tf.variable_scope(name):
W = weight_variable(shape)
h = tf.matmul(x, W)
if bias:
b = bias_variable([shape[-1]])
h = h + b
return h
def conv2d(x, shape, name, bias=False, stride=2, padding='SAME'):
| tensorflow.matmul | 12,254 |
import tensorflow as tf
num_to_add = int(max(self.batch_size/self.num_angles, 1))
idx = tf.range(0, self.batch_size, 1)
idx = tf.random_shuffle(idx)[0:num_to_add]
self.fake_to_add = tf.gather(self.generator_out, idx)
self.mixed_pc = tf.concat([self.real_pc_rotated, self.fake_to_add], 0)
self.mixed_label = tf.concat([self.rot_label_pl, tf.constant(self.num_angles, shape = (num_to_add,))], axis = 0)
mixed_idx = tf.range(0, self.mixed_label.get_shape().as_list()[0], 1)
mixed_idx = tf.random_shuffle(mixed_idx)[0:self.batch_size]
self.mixed_pc = tf.gather(self.mixed_pc, mixed_idx)
self.mixed_label = tf.gather(self.mixed_label, mixed_idx)
self.mixed_pred, mixed_end_points = self.get_pred(self.mixed_pc)
self.mixed_loss = self.get_loss(self.mixed_pred, self.mixed_label, mixed_end_points)
with tf.variable_scope('discriminator') as scope:
self.real_prob, self.real_logit = self.discriminator(self.real_pc_rotated, scope=scope, **disc_kwargs)
| tensorflow.gather | 12,255 |
from tensorflow.contrib.eager.python.examples.revnet import config as config_
return logits, loss
class RevNetTest(tf.test.TestCase):
def setUp(self):
super(RevNetTest, self).setUp()
config = config_.get_hparams_cifar_38()
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
# Reconstruction could cause numerical error, use double precision for tests
config.dtype = tf.float64
config.fused = False # Fused batch norm does not support tf.float64
# Reduce the batch size for tests because the OSS version runs
# in constrained GPU environment with 1-2GB of memory.
| tensorflow.contrib.eager.python.examples.revnet.config.get_hparams_cifar_38 | 12,256 |
import tensorflow as tf
def body(self, features):
observations = features["inputs"]
obs_shape = observations.shape.as_list()
# Just so Saver doesn't complain because of no variables.
tf.get_variable("dummy_var", initializer=0.0)
num_actions = self.hparams.problem.num_actions
logits = tf.constant(
1. / float(num_actions),
shape=(obs_shape[:1] + [1, num_actions])
| tensorflow.get_variable | 12,257 |
import tensorflow as tf
return input_
shape = input_.get_shape().as_list()
batch_size = shape[0]
height = shape[1]
width = shape[2]
channels = shape[3]
if channels % 4 != 0:
raise ValueError("Number of channels not divisible by 4.")
res = tf.reshape(input_, [batch_size, height, width, channels // 4, 2, 2])
res = tf.transpose(res, [0, 1, 4, 2, 5, 3])
res = tf.reshape(res, [batch_size, 2 * height, 2 * width, channels // 4])
return res
# batch norm
def batch_norm(input_,
dim,
| tensorflow.transpose | 12,258 |
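The reshape/transpose/reshape above moves groups of four channels into 2x2 spatial blocks, doubling height and width. TensorFlow ships this rearrangement as `tf.nn.depth_to_space`; a hedged shape check follows (the built-in op interleaves elements in a different order than the manual transpose, so only the shapes are expected to match):

```python
import tensorflow as tf

x = tf.random.normal([1, 4, 4, 8])          # [batch, height, width, channels]
y = tf.nn.depth_to_space(x, block_size=2)   # channels -> 2x2 spatial blocks
print(y.shape)                              # (1, 8, 8, 2): 2x spatial, channels // 4
```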
import tensorflow as tf
"""See base class."""
del shape # Unused.
a, b = decode_params[self.A_PARAM_KEY], decode_params[self.B_PARAM_KEY]
if num_summands is not None:
shift = b * tf.cast(num_summands, b.dtype)
else:
shift = b
return (encoded_tensors[self.ENCODED_VALUES_KEY] - shift) / a
| tensorflow.cast | 12,259 |
import tensorflow as tf
with tf.variable_scope("no_tuple"):
cell1 = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
| tensorflow.nn.rnn_cell.BasicLSTMCell | 12,260 |
import tensorflow as tf
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
| tensorflow.to_int32 | 12,261 |
import tensorflow as tf
# matrix vector -> tile concat
sequence_max_length = hidden.shape[1]
multipliers = tf.concat(
[[1], [sequence_max_length], [1]],
0
)
tiled_representation = tf.tile(
tf.expand_dims(dependency_final_hidden, 1),
multipliers
)
# todo future: maybe modify this with TF2 mask mechanics
sequence_length = sequence_length_3D(hidden)
mask = tf.sequence_mask(
sequence_length,
sequence_max_length
)
tiled_representation = tf.multiply(
tiled_representation,
tf.cast(mask[:, :, tf.newaxis], dtype=tf.float32)
)
dependencies_hidden.append(tiled_representation)
else:
if len(dependency_final_hidden.shape) > 2:
| tensorflow.sequence_mask | 12,262 |
import tensorflow as tf
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
| tensorflow.logging.info | 12,263 |
import tensorflow as tf
x = tf.get_variable('x', shape=[6])
cost = tf.reduce_sum(tf.abs(x), name='cost')
opt = tf.train.GradientDescentOptimizer(0.01)
opt = AccumGradOptimizer(opt, 5)
min_op = opt.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
with sess.as_default():
for k in range(20):
min_op.run()
print(x.eval())
| tensorflow.global_variables_initializer | 12,264 |
import tensorflow as tf
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
'data_dir', '../PASCAL/VOC_TF/VOC0712TF/',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
'dataset_name', 'pascalvoc_0712', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
'num_classes', 21, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'model_dir', './logs_v3/',
'The directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summary_steps', 500,
'The frequency with which summaries are saved, in steps.')
tf.app.flags.DEFINE_integer(
'save_checkpoints_secs', 7200,
'The frequency with which the model is saved, in seconds.')
| tensorflow.app.flags.DEFINE_string | 12,265 |
from tensorflow.python.framework import ops
with _handle_graph(self.handle), self._assign_dependencies():
assign_add_op = gen_resource_variable_ops.assign_add_variable_op(
self.handle,
ops.convert_to_tensor(delta, dtype=self.dtype),
name=name)
if read_value:
return self._read_variable_op()
return assign_add_op
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
del use_locking
with _handle_graph(self.handle), self._assign_dependencies():
assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(
self.handle,
ops.convert_to_tensor(delta, dtype=self.dtype),
name=name)
if read_value:
return self._read_variable_op()
return assign_sub_op
def get(self):
return self._primary_var
@property
def _in_graph_mode(self):
return self._primary_var._in_graph_mode # pylint: disable=protected-access
def _should_act_as_resource_variable(self):
| tensorflow.python.framework.ops.convert_to_tensor | 12,266 |
import tensorflow as tf
bias = np.zeros([filter_in_sizes[-1]], dtype=np.float32)
no_strides = [1, 1, 1, 1, 1]
[t1ind, t1val, t1sh] = sp.createRandomSparseTensor(rho_data, tensor_in_sizes, -3, 3)
s1 = tf.SparseTensor(indices=t1ind, values=t1val, dense_shape=t1sh)
d1 = sp.sparse_to_dense(t1ind, t1val, t1sh)
[t2ind, t2val, t2sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes)
s2 = tf.SparseTensor(indices=t2ind, values=t2val, dense_shape=t2sh)
d2 = sp.sparse_to_dense(t2ind, t2val, t2sh)
print("strides: \n", strides)
print("input shape", tensor_in_sizes)
print("filter shape", filter_in_sizes)
config = tf.ConfigProto()
| tensorflow.SparseTensor | 12,267 |
import tensorflow as tf
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
| tensorflow.constant | 12,268 |
import tensorflow as tf
def build_placeholders(self):
# standard for all policies
self.obs = tf.placeholder(tf.float32, [None,
self.obs_space]) # ! self.obs = tf.placeholder(tf.float32, [None] + list(self.obs_space))
# ! self.obs_space = env.observation_space.shape
self.r = tf.placeholder(tf.float32, (None,1))
self.ac = tf.placeholder(tf.float32, (None, self.act_space))
self.adv = tf.placeholder(tf.float32, [None]) # unused
# specific to FeUdal
self.prev_g = tf.placeholder(tf.float32, (None, None, self.g_dim))
self.ri = tf.placeholder(tf.float32, (None,))
self.s_diff = tf.placeholder(tf.float32, (None, self.g_dim))
| tensorflow.placeholder | 12,269 |
import tensorflow as tf
class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class])
loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4])
| tensorflow.reshape | 12,270 |
from tensorflow.python.framework import tensor_shape
# NOTE(mrry): We could in principle work out the shape from the
# gradients and the attrs, but if we do not know input_shape
# statically, then we are unlikely to know the shape of the
# gradients either.
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("DepthwiseConv2dNativeBackpropFilter")
| tensorflow.python.framework.tensor_shape.unknown_shape | 12,271 |
import tensorflow as tf
self.weights, self.biases = self.initialize_NN(layers)
# Initialize parameters
self.lambda_1 = tf.Variable([0.0], dtype=tf.float32)
self.lambda_2 = tf.Variable([-6.0], dtype=tf.float32)
# Load IRK weights
tmp = np.float32(np.loadtxt('../../Utilities/IRK_weights/Butcher_IRK%d.txt' % (q), ndmin = 2))
weights = np.reshape(tmp[0:q**2+q], (q+1,q))
self.IRK_alpha = weights[0:-1,:]
self.IRK_beta = weights[-1:,:]
self.IRK_times = tmp[q**2+q:]
# tf placeholders and graph
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))
self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))
self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))
self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))
self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients
self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients
self.U0_pred = self.net_U0(self.x0_tf) # N0 x q
self.U1_pred = self.net_U1(self.x1_tf) # N1 x q
self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \
| tensorflow.ConfigProto | 12,272 |
import tensorflow as tf
}
with tf.variable_scope(tf.get_variable_scope(), reuse=None):
outputs_to_scales_to_logits = multi_scale_logits(
images,
model_options=model_options,
is_training=False,
fine_tune_batch_norm=False)
if add_flipped_images:
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
outputs_to_scales_to_logits_reversed = multi_scale_logits(
tf.reverse_v2(images, [2]),
model_options=model_options,
is_training=False,
fine_tune_batch_norm=False)
for output in sorted(outputs_to_scales_to_logits):
scales_to_logits = outputs_to_scales_to_logits[output]
logits = _resize_bilinear(
scales_to_logits[MERGED_LOGITS_SCOPE],
tf.shape(images)[1:3],
scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
outputs_to_predictions[output].append(
| tensorflow.reverse_v2 | 12,273 |
import tensorflow as tf
with tf.gfile.Open(filename, 'r') as f:
data = cPickle.load(f)
images = data['data']
num_images = images.shape[0]
images = images.reshape((num_images, 3, 32, 32))
labels = data['labels']
with tf.Graph().as_default():
image_placeholder = tf.placeholder(dtype=tf.uint8)
encoded_image = tf.image.encode_png(image_placeholder)
with tf.Session('') as sess:
for j in range(num_images):
sys.stdout.write('\r>> Reading file [%s] image %d' % (
filename, offset + 1))
sys.stdout.flush()
if ('train' == name) and ( math.floor(offset / images_per_shard) > shard) :
tfrecord_writer.close()
shard = shard + 1
record_filename = _get_output_filename(dataset_dir, name, shard, FLAGS.train_shards)
tfrecord_writer = tf.python_io.TFRecordWriter(record_filename)
| tensorflow.Session | 12,274 |
import tensorflow as tf
# Local entropy and confidence for nor_img
(entropy_test_nor_help, labels_nor_help, confidence_test_nor_help) = sess.run(
[entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)], feed_dict={predict: predict_NOR}
)
| tensorflow.reduce_max | 12,275 |
import tensorflow as tf
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
| tensorflow.expand_dims | 12,276 |
import tensorflow as tf
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
| tensorflow.argmax | 12,277 |
import tensorflow as tf
pred_mat = tf.get_variable('pred_mat',
[in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction on every word.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
#self.preds_by_word1 = tf.pack([GetWordPred(o_) for o_ in mats])
#self.preds_by_word = tf.reshape(self.preds_by_word1, self.y.get_shape())
#self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_word)
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.preds_by_instance = tf.pack([self.preds_by_word[:,i,:] for i in range(self.preds_by_word.get_shape()[1])])
self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_instance)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights, sumd=[1,2])
def _SafeXEnt(y, probs, eps=0.0001, class_weights=None, sumd=[1]):
"""Version of cross entropy loss that should not produce NaNs.
If the predicted probability for the true class is near zero then when
taking the log it can produce a NaN, which ruins everything. This
function ensures each probability is at least eps and no more than one
before taking the log.
Args:
y: matrix of true probabilities same size as probs
| tensorflow.expand_dims | 12,278 |
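The `_SafeXEnt` docstring explains that every probability is clamped before the log so a near-zero prediction for the true class cannot produce NaN. A minimal sketch of that clamp-then-log idea on the worst case (TF2 eager assumed):

```python
import tensorflow as tf

y = tf.constant([[0.0, 1.0]])         # true distribution
probs = tf.constant([[1.0, 0.0]])     # zero mass on the true class
eps = 1e-4

safe_probs = tf.clip_by_value(probs, eps, 1.0)
xent = -tf.reduce_sum(y * tf.math.log(safe_probs), axis=-1)
print(xent.numpy())                   # ~9.21 instead of inf or NaN
```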
import tensorflow as tf
lm_h = tf.reshape(h[:, :-1], [-1, n_embd])
lm_logits = tf.matmul(lm_h, we, transpose_b=True)
lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1]))
lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1]-1])
lm_losses = tf.reduce_sum(lm_losses*M[:, 1:], 1)/tf.reduce_sum(M[:, 1:], 1)
clf_h = tf.reshape(h, [-1, n_embd])
pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32)
clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx)
clf_h = tf.reshape(clf_h, [-1, 2, n_embd])
if train and clf_pdrop > 0:
shape = shape_list(clf_h)
shape[1] = 1
| tensorflow.equal | 12,279 |
import tensorflow as tf
mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]
z2, fldj = gaussianize(x2, mus, log_sigmas)
return z2, fldj
def _inverse(self, x1, z2, **kwargs):
params = self.parameterizer(x1)
mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]
x2, ildj = gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True))
return x2, ildj
def log_gaussianize(x, mus, log_sigmas, inverse=tf.constant(False)):
"""
Standardize log normal random variable x using mus and log_sigmas.
"""
if inverse:
scales = tf.math.exp(log_sigmas)
log_x = tf.math.log(x)
ldj = log_x
log_y = log_x*scales + mus
ldj += log_sigmas
z = tf.math.exp(log_y)
return z, ldj
else:
scales = tf.math.exp(-log_sigmas)
log_x = tf.math.log(x)
ldj = -log_x
log_y = (log_x - mus)*scales
ldj -= log_sigmas
z = tf.math.exp(log_y)
return z, ldj
| tensorflow.math.exp | 12,280 |
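The two branches of `log_gaussianize` above are inverses of each other. A quick hedged round-trip check of that claim with arbitrary scalar values (the log-det terms are ignored here):

```python
import tensorflow as tf

x = tf.constant([2.0])
mu, log_sigma = tf.constant([0.3]), tf.constant([-0.5])

# Forward branch: z = exp((log x - mu) * exp(-log_sigma))
z = tf.math.exp((tf.math.log(x) - mu) * tf.math.exp(-log_sigma))
# Inverse branch applied to z: x' = exp(log z * exp(log_sigma) + mu)
x_back = tf.math.exp(tf.math.log(z) * tf.math.exp(log_sigma) + mu)
print(x_back.numpy())  # ~[2.0], recovering the original x
```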
import tensorflow as tf
test_data.decoded_x.name)
if is_adaptive_stage(stage):
# The property should have keys matching those of state_update_tensors.
self.assertSameElements(stage.state_update_aggregation_modes.keys(),
test_data.state_update_tensors.keys())
for mode in six.itervalues(stage.state_update_aggregation_modes):
self.assertIn(mode, encoding_stage.StateAggregationMode)
for tensor in six.itervalues(test_data.initial_state):
self.assertTrue(tf.is_tensor(tensor))
for tensor in six.itervalues(test_data.state_update_tensors):
self.assertTrue(tf.is_tensor(tensor))
for tensor in six.itervalues(test_data.updated_state):
self.assertTrue(tf.is_tensor(tensor))
# The state related Tensors should have appropriate substrings in their
# names.
for tensor in six.itervalues(test_data.initial_state):
self.assertIn(encoding_stage.INITIAL_STATE_SCOPE_SUFFIX, tensor.name)
for tensor in six.itervalues(test_data.updated_state):
self.assertIn(encoding_stage.UPDATE_STATE_SCOPE_SUFFIX, tensor.name)
for tensor in six.itervalues(test_data.state_update_tensors):
self.assertIn(encoding_stage.ENCODE_SCOPE_SUFFIX, tensor.name)
def asserts_for_test_many_to_one_encode_decode(self, data):
"""Additional asserts for `test_many_to_one_encode_decode` method.
| tensorflow.is_tensor | 12,281 |
import tensorflow as tf
self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn["keep_prob"], training=self.train)
self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2)
self.conv3 = tf.layers.conv2d(self.pool2,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)
self.conv4 = tf.layers.conv2d(self.pool3,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn["keep_prob"], training=self.train)
| tensorflow.layers.max_pooling2d | 12,282 |
import tensorflow as tf
mu = tf.layers.dense(input_tf, num_action, tf.nn.tanh, kernel_initializer=w_init, name='mu') # estimated action value
sigma = tf.layers.dense(input_tf, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma') # estimated variance
return mu,sigma;
class PolicyEstimator_MountainCarContinuous():
def __init__(self, entropy_beta=0.1, learning_rate=0.001, par_idx=0,scope="policy_estimator"):
w_init = tf.random_normal_initializer(0.,.1);
with tf.variable_scope(scope+"_"+str(par_idx)):
# state, target and action
self.state = tf.placeholder(tf.float32, [None,400], name="state")
self.target = tf.placeholder(tf.float32,[None,1], name="target")
self.a_his = tf.placeholder(tf.float32, [None, num_action], name="action_hist")
# layers
# wrap output
self.mu = self.mu * action_bound[1];
self.sigma = self.sigma + 1e-5
self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
| tensorflow.placeholder | 12,283 |
import tensorflow as tf
self.loss = tf.reduce_mean(losses + losses2)
if config.l2_norm is not None:
variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables)
self.loss += l2_loss
if config.decay is not None:
self.var_ema = tf.train.ExponentialMovingAverage(config.decay)
ema_op = self.var_ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
self.assign_vars = []
for var in tf.global_variables():
v = self.var_ema.average(var)
if v:
self.assign_vars.append(tf.assign(var,v))
def get_loss(self):
return self.loss
| tensorflow.identity | 12,284 |
from tensorflow.contrib.distributions.python.ops import distribution_util
x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
sample_shape, batch_shape, event_shape = self.get_shape(x)
event_shape = distribution_util.pick_vector(
self._event_ndims_is_0, (1,), event_shape)
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, (1,), batch_shape)
new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape))
x = array_ops.reshape(x, shape=new_shape)
| tensorflow.contrib.distributions.python.ops.distribution_util.pick_vector | 12,285 |
import tensorflow as tf
new_saver.restore(sess, saver0_ckpt)
# Addes loss and train.
labels = tf.constant(0, tf.int32, shape=[100], name="labels")
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
| tensorflow.expand_dims | 12,286 |
import tensorflow as tf
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
operation = tf.assign(perturbed_var,
var + tf.random_normal(shape=tf.shape(var), mean=0.,
stddev=param_noise_scale))
else:
# Do not perturb, just assign.
operation = tf.assign(perturbed_var, var)
perturb_ops.append(operation)
assert len(perturb_ops) == len(all_vars)
| tensorflow.shape | 12,287 |
import tensorflow as tf
"""Model function for CNN."""
features = tf.placeholder(
tf.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE], name='features')
labels = tf.placeholder(tf.int64, shape=[None], name='labels')
input_layer = tf.reshape(features, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
conv1 = tf.layers.conv2d(
| tensorflow.placeholder | 12,288 |
import tensorflow as tf
# sum mean over each batch by time steps
Omega = tf.square(bounded - 1.0)
Omega = tf.reduce_sum(tf.reduce_mean(Omega, axis=1)) / (1.0 * tf.reduce_sum(nelems))
| tensorflow.reduce_mean | 12,289 |
import tensorflow as tf
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting to evaluate.')
try:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
hooks=eval_hooks,
checkpoint_path=checkpoint)
tf.logging.info('Evaluation results: %s' % eval_results)
except tf.errors.NotFoundError:
# skip checkpoint if it gets deleted prior to evaluation
tf.logging.info('Checkpoint %s no longer exists ... skipping')
elif mode == 'train_and_eval':
current_step = _load_global_step_from_checkpoint_dir(model_dir)
tf.logging.info('Starting training at step=%d.' % current_step)
train_steps_per_eval = int(
hparams.num_epochs_per_eval * train_steps_per_epoch)
# Final Evaluation if training is finished.
if current_step >= hparams.num_epochs * train_steps_per_epoch:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
while current_step < hparams.num_epochs * train_steps_per_epoch:
image_classifier.train(
input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)
current_step += train_steps_per_eval
tf.logging.info('Starting evaluation at step=%d.' % current_step)
eval_results = image_classifier.evaluate(
| tensorflow.logging.info | 12,290 |
from tensorflow.python.ops import gen_math_ops
TypeError: If `x` cannot be cast to the `dtype`.
"""
with ops.op_scope([x], name, "Cast") as name:
if isinstance(x, ops.SparseTensor):
values_cast = cast(x.values, dtype, name=name)
return ops.SparseTensor(x.indices, values_cast, x.shape)
else:
# TODO(touts): Handle what Josh said.
#
# Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype == dtype:
return x
return gen_math_ops.cast(x, dtype, name=name)
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
| tensorflow.python.ops.gen_math_ops.cast | 12,291 |
import tensorflow as tf
two = tf.ones_like(n) * 2
result = tf.ones_like(n)
_, result, _ = tf.while_loop(
cond=_double_factorial_loop_condition,
body=_double_factorial_loop_body,
loop_vars=[n, result, two])
return result
def factorial(n: TensorLike) -> TensorLike:
n = tf.convert_to_tensor(value=n)
return tf.exp(tf.math.lgamma(n + 1))
def generate_l_m_permutations(
max_band: int,
name: str = "spherical_harmonics_generate_l_m_permutations") -> Tuple[TensorLike, TensorLike]:
with tf.name_scope(name):
degree_l = []
order_m = []
| tensorflow.convert_to_tensor | 12,292 |
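The `factorial` helper above computes n! as exp(lgamma(n + 1)), which also extends smoothly to non-integer n. A quick sanity check:

```python
import tensorflow as tf

n = tf.constant([0.0, 1.0, 5.0])
factorials = tf.exp(tf.math.lgamma(n + 1))
print(factorials.numpy())  # ~[1., 1., 120.]
```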
import tensorflow as tf
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.max_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
| tensorflow.nn.max_pool3d | 12,293 |
import tensorflow as tf
Compute loss
'''
with tf.name_scope('loss') as scope:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross-entropy')
loss = tf.reduce_mean(cross_entropy, name='loss')
tf.summary.scalar(scope+'/loss', loss)
return loss
def accuracy(logits, labels):
| tensorflow.summary.scalar | 12,294 |
import tensorflow as tf
for metric in self.exp_settings['metrics']:
for topn in self.exp_settings['metrics_topn']:
list_weights = tf.reduce_mean(self.propensity_weights * clipped_labels, axis=1, keep_dims=True)
metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, None)
tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['train'])
weighted_metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, list_weights)
tf.summary.scalar('Weighted_%s_%d' % (metric, topn), weighted_metric_value, collections=['train'])
self.train_summary = tf.summary.merge_all(key='train')
self.eval_summary = tf.summary.merge_all(key='eval')
self.saver = tf.train.Saver(tf.global_variables())
def separate_gradient_update(self):
denoise_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "denoising_model")
ranking_model_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "ranking_model")
self.weighs_propen=denoise_params
if self.hparams.l2_loss > 0:
for p in denoise_params:
# self.weighs_propen=p
# p=tf.Print(p,[p],message="show the weights")
| tensorflow.global_variables | 12,295 |
import tensorflow as tf
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', '',
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
#'blouse', 'dress', 'outwear', 'skirt', 'trousers', 'all'
'model_scope', None,
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
'Whether to ignore missing variables when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'run_on_cloud', False,
'Whether we will train on the cloud.')
tf.app.flags.DEFINE_boolean(
'seq_train', False,
| tensorflow.app.flags.DEFINE_string | 12,296 |
import tensorflow as tf
w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)
| tensorflow.get_variable | 12,297 |
import tensorflow as tf
# Below:
# * a token is a wordpiece ID.
# * the tokens array will be merged in-place.
# * the candidates array is an array of size len(tokens) - 1.
# It contains the token for the merged wordpiece, if it exists,
# -1 otherwise. For instance, candidate[3] = id(token[3] + token[4]).
# First, split into basic UTF-8 characters (letters).
chars = tf.strings.unicode_split(word, 'UTF-8')
tokens = self._StringToToken(chars)
tokens = tf.where(
tf.equal(tokens, NO_TOKEN),
# Unseen character.
tf.broadcast_to(self.unk_id, tf.shape(tokens)),
tokens)
# Create initial candidate list.
candidates = tf.map_fn(
self._MergeTokens, (tokens[:-1], tokens[1:]), dtype=tokens.dtype)
def _ShouldMerge(unused_tokens, candidates):
"""Merge until not possible, or we abort early according to merge_prob."""
return tf.logical_and(
tf.reduce_any(tf.not_equal(candidates, NO_TOKEN)),
tf.random.uniform([]) < self._merge_prob)
def _MergeOneToken(tokens, i):
return tf.expand_dims(
self._MergeTokens((tokens[i], tokens[i + 1])), axis=-1)
def _MergeCandidates(tokens, candidates):
"""Merge in the reverse binary tree."""
| tensorflow.map_fn | 12,298 |
import tensorflow as tf
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 3]),
fields.InputDataFields.groundtruth_boxes:
tf.placeholder(tf.float32, [None, 4]),
fields.InputDataFields.groundtruth_classes:
tf.placeholder(tf.int32, [None, 3]),
fields.InputDataFields.true_image_shape:
tf.placeholder(tf.int32, [3]),
fields.InputDataFields.original_image_spatial_shape:
tf.placeholder(tf.int32, [2])
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
| tensorflow.placeholder | 12,299 |