| seed (string, lengths 25–2.89k) | seed_api (string, lengths 14–102) | index (int64, 0–14.8k) |
---|---|---|
import tensorflow as tf
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.train.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name='new_learning_rate')
self._lr_update = tf.assign(self._lr, self._new_lr)
self.saver = tf.train.Saver(tf.global_variables())
def _get_lstm_cell(self, config, is_training):
if config.rnn_mode == BASIC:
| tensorflow.assign | 11,200 |
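For reference, a minimal TF 1.x sketch of the `tf.assign` learning-rate-update pattern in the row above (the names `lr` and `new_lr` are hypothetical):

```python
import tensorflow as tf

lr = tf.Variable(1.0, trainable=False, name='lr')
new_lr = tf.placeholder(tf.float32, shape=[], name='new_lr')
lr_update = tf.assign(lr, new_lr)  # op that overwrites lr with the fed value

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(lr_update, feed_dict={new_lr: 0.5})
    print(sess.run(lr))  # 0.5
```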
import tensorflow as tf
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.)
#cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy_loss')
tf.summary.scalar('cross_entropy_loss', cross_entropy)
loc_loss = tf.cond(n_positives > 0., lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.), lambda: tf.zeros_like(location_pred))
#loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1))
loc_loss = tf.identity(loc_loss, name='location_loss')
tf.summary.scalar('location_loss', loc_loss)
tf.losses.add_loss(loc_loss)
# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
loss = cross_entropy + loc_loss + params['weight_decay'] * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
if 'batch_normalization' not in v.name])
total_loss = tf.identity(loss, name='total_loss')
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
| tensorflow.summary.scalar | 11,201 |
import tensorflow as tf
with tf.Graph().as_default():
config = tf.ConfigProto(inter_op_parallelism_threads=args.num_inter_threads,
intra_op_parallelism_threads=args.num_intra_threads)
with tf.Session(config = config) as sess:
# Read the file containing the pairs used for testing
pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
# Get the paths for the corresponding images
paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
nrof_preprocess_threads = 4
image_size = (args.image_size, args.image_size)
eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
dtypes=[tf.string, tf.int32, tf.int32],
shapes=[(1,), (1,), (1,)],
shared_name=None, name=None)
eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
# Load the model
| tensorflow.placeholder | 11,202 |
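A minimal sketch of `tf.placeholder` in graph mode, with an open batch dimension as in the row above:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 1], name='x')  # batch size left open
y = 2.0 * x

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [[1.0], [2.0]]}))  # [[2.] [4.]]
```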
import tensorflow as tf
for var in self._train_summaries: # add tf.trainable_variables() so tensor-distribution summaries track how values change over training iterations
self._add_train_summary(var)
self._summary_op = tf.summary.merge_all() # tf.summary.merge_all() gathers every summary-generating op into one
if not testing:
self._summary_op_val = tf.summary.merge(val_summaries)
| tensorflow.summary.merge_all | 11,203 |
import tensorflow as tf
#accuracy1 = tf.reduce_sum(
# tf.nn.in_top_k(tf.cast(tf.Variable(predictions2), tf.float32),
# tf.cast((tf.constant(np_labels), 1), tf.float32)))
accuracy1 = tf.reduce_sum(
input_tensor=tf.cast(tf.nn.in_top_k(predictions=tf.constant(predictions1),
targets=tf.constant(np_labels), k=1), tf.float32))
accuracy5 = tf.reduce_sum(
| tensorflow.constant | 11,204 |
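A minimal sketch of the top-k accuracy idiom from the row above, using small hand-made constants:

```python
import tensorflow as tf

logits = tf.constant([[0.1, 0.8, 0.1],
                      [0.6, 0.3, 0.1]])
labels = tf.constant([1, 2])  # second example is misclassified
hits = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
correct = tf.reduce_sum(tf.cast(hits, tf.float32))

with tf.Session() as sess:
    print(sess.run(correct))  # 1.0
```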
import tensorflow as tf
return two_stream_loss(FLAGS, features, labels, mems, is_training)
def get_classification_loss(
FLAGS, features, n_class, is_training):
"""Loss for downstream classification tasks."""
bsz_per_core = tf.shape(features["input_ids"])[0]
inp = tf.transpose(features["input_ids"], [1, 0])
seg_id = tf.transpose(features["segment_ids"], [1, 0])
inp_mask = tf.transpose(features["input_mask"], [1, 0])
label = tf.reshape(features["label_ids"], [bsz_per_core])
xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
run_config = xlnet.create_run_config(is_training, True, FLAGS)
xlnet_model = xlnet.XLNetModel(
xlnet_config=xlnet_config,
run_config=run_config,
input_ids=inp,
seg_ids=seg_id,
input_mask=inp_mask)
| tensorflow.reshape | 11,205 |
import tensorflow as tf
:param mask: [Tensor] Binary mask, 3D or 4D, [N, H, W] or [N, H, W, 1], dtype float32.
:param strides: [list] List of 4 int. Convolution strides.
:param padding: [string] Convolution padding method, `VALID` or `SAME`.
"""
assert len(mask.get_shape()) in [3, 4], 'Mask shape must be 3D or 4D.'
if len(mask.get_shape()) == 3:
mask_ = tf.expand_dims(mask, 3)
elif len(mask.get_shape()) == 4:
mask_ = mask
assert mask.get_shape()[-1] == 1, '4D mask last dimension must be 1.'
ksize = [int(ss) for ss in w.get_shape()]
psize = [1, ksize[0], ksize[1], 1]
mask_ = tf.nn.max_pool(mask_, psize, strides, padding)
return tf.nn.conv2d(x, w, strides, padding) * mask_
| tensorflow.nn.conv2d | 11,206 |
import tensorflow as tf
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
| tensorflow.train.init_from_checkpoint | 11,207 |
import tensorflow as tf
# Currently we do not support strides > 1 in this matrix multiplication mode. Could be supported
# in the future.
assert_strides = tf.assert_equal(
tf.cast(tf.stack([strides[1], strides[2]]), tf.int64),
tf.constant([1, 1], dtype=tf.int64),
message='Strides > 1 not supported.')
# Convolution when number of indices is larger than zero.
def _conv_nonzero():
# Gather patches.
p = tf.gather_nd(x_, blk_indices_)
p_ = tf.reshape(p, [-1, ksize[0] * ksize[1] * ksize[2]])
# Convolution on patches.
w_ = tf.reshape(w, [ksize[0] * ksize[1] * ksize[2], -1])
q = tf.matmul(p_, w_)
# Center locations.
blk_indices_crop = blk_indices[:, 0, 0, :]
# Project back to an image.
| tensorflow.gather_nd | 11,208 |
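A minimal sketch of `tf.gather_nd` with full per-row indices, the same mechanism used to gather patches above:

```python
import tensorflow as tf

params = tf.constant([[1, 2],
                      [3, 4]])
indices = tf.constant([[0, 1],
                       [1, 0]])  # each row is a complete index into params
out = tf.gather_nd(params, indices)

with tf.Session() as sess:
    print(sess.run(out))  # [2 3]
```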
import tensorflow as tf
with tf.name_scope('data'):
batch_histories = tf.Variable(data.batch_histories, name='histories',
trainable=False)
batch_actions_template = tf.Variable(data.batch_actions_template, name='actions',
trainable=False)
batch_action_arguments = tf.Variable(data.batch_actions_arguments, name='actions_arguments',
trainable=False)
histories = tf.gather(batch_histories, self.batch_idx)
actions_template = tf.gather(batch_actions_template, self.batch_idx)
actions_arguments = tf.gather(batch_action_arguments, self.batch_idx)
| tensorflow.Variable | 11,209 |
import tensorflow as tf
with self.graph.as_default():
with tf.device('/cpu:0'):
embedding = tf.get_variable(
'embedding', [vocab_size, hidden_size], dtype=tf.float32)
inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
output, state = self._build_rnn_graph(inputs, config, is_training)
softmax_w = tf.get_variable(
'softmax_w', [hidden_size, vocab_size], dtype=tf.float32)
softmax_b = tf.get_variable('softmax_b', [vocab_size], dtype=tf.float32)
logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size])
loss = tf.contrib.seq2seq.sequence_loss(
logits,
input_.targets,
tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),
average_across_timesteps=False,
average_across_batch=True)
self._cost = tf.reduce_sum(loss)
self._final_state = state
| tensorflow.get_variable | 11,210 |
import tensorflow as tf
matched_gt_boxes)
matched_gt_classes = tf.where(
background_indicator,
tf.zeros_like(matched_gt_classes),
matched_gt_classes)
matched_gt_indices = tf.where(
background_indicator,
-tf.ones_like(matched_gt_indices),
matched_gt_indices)
return (matched_gt_boxes, matched_gt_classes, matched_gt_indices,
positive_matches, negative_matches, ignored_matches)
def get_config(self):
| tensorflow.ones_like | 11,211 |
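A minimal sketch of the background-masking idiom above: `tf.where` with `tf.zeros_like` / `-tf.ones_like` supplying the replacement values:

```python
import tensorflow as tf

matched_classes = tf.constant([3, 5, 2])
matched_indices = tf.constant([7, 1, 4])
background = tf.constant([True, False, True])

classes_out = tf.where(background, tf.zeros_like(matched_classes), matched_classes)
indices_out = tf.where(background, -tf.ones_like(matched_indices), matched_indices)

with tf.Session() as sess:
    print(sess.run([classes_out, indices_out]))  # [0 5 0] and [-1 1 -1]
```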
import tensorflow as tf
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.)
#negtive_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
fnegtive_mask = tf.cast(negtive_mask, tf.float32)
n_negtives = tf.reduce_sum(fnegtive_mask)
n_neg_to_select = tf.cast(params['negative_ratio'] * n_positives, tf.int32)
| tensorflow.reduce_sum | 11,212 |
from tensorflow.python.ops import array_ops
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
(tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op,
fp_update_op) = _tp_fn_tn_fp(predictions, labels, thresholds, weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
assert array_ops.squeeze(fp).get_shape().as_list()[0] == num_thresholds
def compute_auc(tp, fn, tn, fp, name):
"""Computes the roc-auc or pr-auc based on confusion counts."""
recall = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
x = fp_rate
y = recall
| tensorflow.python.ops.array_ops.squeeze | 11,213 |
import tensorflow as tf
self.color_matrix = tf.convert_to_tensor([[1, 0, 0], [0.494207, 0, 1.24827], [0, 0, 1]])
elif type == 'P':
# protanope
self.color_matrix = tf.convert_to_tensor([[0, 2.02344, -2.52581], [0, 1, 0], [0, 0, 1]])
elif type == 'T':
# tritanope
self.color_matrix = tf.convert_to_tensor([[1, 0, 0], [0, 1, 0], [-0.395913, 0.801109, 0]])
else:
raise("ERROR: invalid type passed into Simulator class (only accepts 'D', 'P', or 'T')")
self.rgb2lms = tf.convert_to_tensor([[17.8824, 43.5161, 4.11935], [3.45565, 27.1554, 3.86714], [0.0299566, 0.184309, 1.46709]])
def simulate_image(self, image):
# passes an image through the color-blindness simulator
inverted_rgb2lms = tf.linalg.inv(self.rgb2lms)
product1 = tf.matmul(inverted_rgb2lms, self.color_matrix)
product2 = tf.matmul(product1, self.rgb2lms)
| tensorflow.convert_to_tensor | 11,214 |
import tensorflow as tf
reconstructor.append(self.call(x))
return tf.stack(reconstructor)
| tensorflow.stack | 11,215 |
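A minimal sketch of `tf.stack`, which packs a Python list of tensors along a new leading axis as the row above does with `reconstructor`:

```python
import tensorflow as tf

parts = [tf.constant([1.0, 2.0]), tf.constant([3.0, 4.0])]
stacked = tf.stack(parts)  # shape [2, 2]: a new axis 0 is added

with tf.Session() as sess:
    print(sess.run(stacked))  # [[1. 2.] [3. 4.]]
```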
import tensorflow as tf
# We have to wrap this code into a function due to the way tf.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))
else:
# Do not perturb, just assign.
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
| tensorflow.assign | 11,216 |
import tensorflow as tf
return logits
def create_base_discriminator_network(self, X, params):
"""Creates base discriminator network.
Args:
X: tensor, input image to discriminator.
params: dict, user passed parameters.
Returns:
Final logits tensor of discriminator.
"""
print_obj("\ncreate_base_discriminator_network", "X", X)
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# Only need the first fromRGB conv layer & block for base network.
from_rgb_conv_layer = self.from_rgb_conv_layers[0]
block_layers = self.conv_layer_blocks[0]
# Pass inputs through layer chain.
from_rgb_conv = from_rgb_conv_layer(inputs=X)
print_obj(
"create_base_discriminator_network",
"from_rgb_conv",
from_rgb_conv
)
| tensorflow.variable_scope | 11,217 |
import tensorflow as tf
# max pool
pooled = tf.nn.max_pool(H,
| tensorflow.nn.max_pool | 11,218 |
import tensorflow as tf
table_values = np.arange(len(vocab), dtype=np.int64)
table = tf.lookup.StaticVocabularyTable(
tf.lookup.KeyValueTensorInitializer(vocab, table_values),
num_oov_buckets=1)
def to_ids(example):
sentence = tf.reshape(example['tokens'], shape=[1])
words = tf.strings.split(sentence, sep=' ').values
truncated_words = words[:max_seq_len]
tokens = table.lookup(truncated_words) + 1
tokens = tf.cond(
tf.less(tf.size(tokens), max_seq_len),
lambda: tf.concat([tokens, [eos]], 0), lambda: tokens)
return tf.concat([[bos], tokens], 0)
return to_ids
def batch_and_split(dataset, max_seq_len, batch_size):
return dataset.padded_batch(
batch_size, padded_shapes=[max_seq_len + 1]).map(
| tensorflow.size | 11,219 |
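A minimal sketch of the `tf.cond` / `tf.size` append-EOS-if-short idiom from the row above (the token values are made up):

```python
import tensorflow as tf

tokens = tf.constant([4, 7], dtype=tf.int64)
eos = tf.constant([2], dtype=tf.int64)
max_seq_len = 4

# Append EOS only when the sequence is shorter than max_seq_len.
tokens = tf.cond(tf.less(tf.size(tokens), max_seq_len),
                 lambda: tf.concat([tokens, eos], 0),
                 lambda: tokens)

with tf.Session() as sess:
    print(sess.run(tokens))  # [4 7 2]
```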
import tensorflow as tf
trainable=False)
with tf.device(worker_device):
with tf.variable_scope("local"):
self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n)
pi.global_step = self.global_step
self.ac = tf.placeholder(tf.float32, [None, env.action_space.n], name="ac")
self.adv = tf.placeholder(tf.float32, [None], name="adv")
self.r = tf.placeholder(tf.float32, [None], name="r")
log_prob_tf = tf.nn.log_softmax(pi.logits)
prob_tf = tf.nn.softmax(pi.logits)
# the "policy gradients" loss: its derivative is precisely the policy gradient
# notice that self.ac is a placeholder that is provided externally.
# adv will contain the advantages, as calculated in process_rollout
pi_loss = - tf.reduce_sum(tf.reduce_sum(log_prob_tf * self.ac, [1]) * self.adv)
# loss of value function
vf_loss = 0.5 * tf.reduce_sum(tf.square(pi.vf - self.r))
entropy = - tf.reduce_sum(prob_tf * log_prob_tf)
| tensorflow.nn.log_softmax | 11,220 |
import tensorflow as tf
total_loss = tf.reduce_mean(per_example_loss)
return total_loss, per_example_loss, logits
def get_qa_outputs(FLAGS, features, is_training):
"""Loss for downstream span-extraction QA tasks such as SQuAD."""
inp = tf.transpose(features["input_ids"], [1, 0])
seg_id = tf.transpose(features["segment_ids"], [1, 0])
inp_mask = tf.transpose(features["input_mask"], [1, 0])
cls_index = tf.reshape(features["cls_index"], [-1])
seq_len = tf.shape(inp)[0]
xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
run_config = xlnet.create_run_config(is_training, True, FLAGS)
xlnet_model = xlnet.XLNetModel(
xlnet_config=xlnet_config,
| tensorflow.transpose | 11,221 |
import tensorflow as tf
# create saver
saver = tf.train.Saver()
| tensorflow.train.Saver | 11,222 |
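A minimal runnable sketch of the `tf.train.Saver` save/restore round trip (the checkpoint path is hypothetical and assumed writable):

```python
import tensorflow as tf

v = tf.Variable(1.0, name='v')
saver = tf.train.Saver()  # defaults to all saveable variables

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    path = saver.save(sess, '/tmp/model.ckpt')  # hypothetical location
    saver.restore(sess, path)  # reloads v from the checkpoint
```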
from tensorflow.python.ops import variable_scope
w_c = None
if options.use_coverage:
with variable_scope.variable_scope("coverage"):
w_c = variable_scope.get_variable("w_c", [options.attention_vec_size])
w_c = tf.expand_dims(tf.expand_dims(w_c, axis=0), axis=0)
| tensorflow.python.ops.variable_scope.get_variable | 11,223 |
import tensorflow as tf
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small))
if hard_ratio < 1.0:
hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32)
loss = tf.reshape(loss, [-1])
hard_loss, _ = tf.math.top_k(loss, k=hard_num)
return hard_loss
return loss
def sample_pair(batch):
num_sam = tools.shape(batch)[0]
index = tf.range(num_sam)
tgt1 = tf.slice(batch, [0, 1], [num_sam, 1])
pred1 = tf.slice(batch, [0, 0], [num_sam, 1])
def uniform():
batch2 = tf.gather(batch, tf.random.shuffle(index))
pred2 = tf.slice(batch2, [0, 0], [num_sam, 1])
tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1])
return pred1, pred2, tgt1, tgt2
return uniform
def contra_traj_lossV5(pred, tgt, horizon=12, resample=1, hard_ratio=1.0):
| tensorflow.range | 11,224 |
from tensorflow.python.platform import tf_logging as logging
continue
logging.info("op=%s name=%s.", node.op, node.name)
| tensorflow.python.platform.tf_logging.info | 11,225 |
import tensorflow as tf
import tensorflow_probability as tfp  # needed for tfp.distributions.Normal below
self.paddings = [[size,size],[size,size],[0,0]]
elif type=='no_op':
self.augment = self.no_op
def gaussian_kernel(self,size,mean,std):
"""Makes 2D gaussian Kernel for convolution."""
d = tfp.distributions.Normal(mean, std)
vals = d.prob(tf.range(start = -size, limit = size + 1, dtype = tf.float32))
gauss_kernel = tf.einsum('i,j->ij',vals,vals)
return gauss_kernel / tf.reduce_sum(gauss_kernel)
def get_random_patch_size(self):
return np.random.choice([1,2,4,8])
def scramble(self,x):
| tensorflow.range | 11,226 |
import tensorflow as tf
qh_emb = tf.reshape(tf.nn.embedding_lookup(
self.char_mat, self.qh), [N * QL, CL, dc])
ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
# Bidaf style conv-highway encoder
ch_emb = conv(ch_emb, d,
bias = True, activation = tf.nn.relu, kernel_size = 5, name = "char_conv", reuse = None)
qh_emb = conv(qh_emb, d,
bias = True, activation = tf.nn.relu, kernel_size = 5, name = "char_conv", reuse = True)
ch_emb = tf.reduce_max(ch_emb, axis = 1)
qh_emb = tf.reduce_max(qh_emb, axis = 1)
ch_emb = tf.reshape(ch_emb, [N, PL, ch_emb.shape[-1]])
qh_emb = tf.reshape(qh_emb, [N, QL, ch_emb.shape[-1]])
c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout)
q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout)
c_emb = tf.concat([c_emb, ch_emb], axis=2)
q_emb = tf.concat([q_emb, qh_emb], axis=2)
c_emb = highway(c_emb, size = d, scope = "highway", dropout = self.dropout, reuse = None)
q_emb = highway(q_emb, size = d, scope = "highway", dropout = self.dropout, reuse = True)
with tf.variable_scope("Embedding_Encoder_Layer"):
c = residual_block(c_emb,
num_blocks = 1,
num_conv_layers = 4,
| tensorflow.reshape | 11,227 |
import tensorflow as tf
pad_shape = tf.cond(
tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
| tensorflow.greater | 11,228 |
import tensorflow as tf
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(
" name = %s, shape = %s%s", var.name, var.shape, init_string
)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
| tensorflow.logging.info | 11,229 |
import tensorflow as tf
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
| tensorflow.reduce_sum | 11,230 |
import tensorflow as tf
if __name__ == '__main__':
import numpy as np
bboxes = tf.placeholder(tf.float32)
bboxes_val = [[10, 10, 20, 22]]
gt_boxes = tf.placeholder(tf.float32)
gt_boxes_val = [[11, 13, 34, 31]]
| tensorflow.placeholder | 11,231 |
import tensorflow as tf
cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1])
dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1)
gcut = tf.stop_gradient(self.g)
mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001
dcos = dot / mag
manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)
cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1])
log_p = tf.reduce_sum(self.log_pi * self.ac, [1])
worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p
worker_loss = -tf.reduce_sum(worker_loss, axis=0)
Am = self.r - self.manager_vf
manager_vf_loss = .5 * tf.reduce_sum(tf.square(Am))
Aw = (self.r + self.alpha * self.ri) - self.worker_vf
worker_vf_loss = .5 * tf.reduce_sum(tf.square(Aw))
entropy = -tf.reduce_sum(self.pi * self.log_pi)
| tensorflow.reduce_sum | 11,232 |
import tensorflow as tf
estimator=entropy_bottleneck)
status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))
x = tf.convert_to_tensor(x_color, "float32")
x_coori = tf.convert_to_tensor(x_coori, "float32")
def loop_analysis(element):
x = tf.expand_dims(element[0], 0)
x_coori = tf.expand_dims(element[1], 0)
y = analysis_transform(x_coori,x)
return tf.squeeze(y,axis=0)
element = [x,x_coori]
ys = tf.map_fn(loop_analysis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False)
print("Analysis Transform")
def loop_hyper_encoder(y):
y = tf.expand_dims(y, 0)
z = hyper_encoder(y)
return tf.squeeze(z,axis=0)
zs = tf.map_fn(loop_hyper_encoder, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False)
print("Hyper Encoder")
z_hats, _ = entropy_bottleneck(zs, False)
print("Quantize hyperprior")
| tensorflow.map_fn | 11,233 |
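A minimal sketch of `tf.map_fn`, which maps a per-element subgraph over the leading axis the way the row above runs its analysis transform per example:

```python
import tensorflow as tf

xs = tf.constant([[1.0, 2.0], [3.0, 4.0]])

# parallel_iterations=1 processes rows serially, as in the snippet above.
ys = tf.map_fn(lambda x: 2.0 * x, xs, dtype=tf.float32, parallel_iterations=1)

with tf.Session() as sess:
    print(sess.run(ys))  # [[2. 4.] [6. 8.]]
```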
import tensorflow as tf
Returns:
A tensor of shape [T, B] that contains the loss per example, per time step.
"""
with tf.name_scope("cross_entropy_sequence_loss"):
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets)
loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])
return losses
def dice_loss(predictions, targets, weights=1., name='dice_loss'):
with tf.name_scope(name):
# predictions = tf.to_float(predictions)
targets = tf.to_float(targets)
intersection = 2 * tf.reduce_sum(predictions * targets) + weights
union = weights + tf.reduce_sum(predictions) + tf.reduce_sum(targets)
loss = -(intersection / (union))
return loss
def precision_recall_auc_loss(labels,
logits,
precision_range=(0.0, 1.0),
num_anchors=20,
| tensorflow.name_scope | 11,234 |
import tensorflow as tf
images=tf.cast(images,tf.float32)/255.0
l1 = tf.matmul(images, self.w1)+self.b1
l1=tf.nn.relu(l1)
out = tf.matmul(l1, self.w2)+self.b2
return out
def valid_inference(self,images):
images=tf.cast(images,tf.float32)/255.0
l1 = tf.matmul(images, self.w1)+self.b1
l1=tf.nn.relu(l1)
out = tf.matmul(l1, self.w2)+self.b2
return out
def softmax_loss(self,predicts,labels):
predicts=tf.nn.softmax(predicts)
labels=tf.one_hot(labels,classnum)
loss=-tf.reduce_sum(labels*tf.log(predicts))
return loss
| tensorflow.nn.relu | 11,235 |
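`softmax_loss` above takes `tf.log` of an explicit softmax, which can underflow to `log(0)`; a hedged sketch of the numerically stabler fused op (an alternative, not the author's code):

```python
import tensorflow as tf

logits = tf.constant([[2.0, 0.5], [0.1, 1.5]])
labels = tf.one_hot([0, 1], depth=2)

# Fuses softmax and log internally, avoiding log of an underflowed probability.
loss = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))

with tf.Session() as sess:
    print(sess.run(loss))
```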
import tensorflow as tf
image: 4D tensor of ``self.input_shape`` in ``self.data_format``
Returns:
Nx#class logits
"""
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
tf.summary.scalar('learning_rate-summary', lr)
return tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
def image_preprocess(self, image):
with tf.name_scope('image_preprocess'):
if image.dtype.base_dtype != tf.float32:
image = tf.cast(image, tf.float32)
mean = [0.485, 0.456, 0.406] # rgb
| tensorflow.summary.scalar | 11,236 |
import tensorflow as tf
self.IRK_beta = weights[-1:,:]
self.IRK_times = tmp[q**2+q:]
# tf placeholders and graph
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))
| tensorflow.ConfigProto | 11,237 |
import tensorflow as tf
o_c1 = self.general_conv2d(input_data, self.base_number_of_features, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_1')
o_mp1 = tf.layers.max_pooling2d(o_c1, 2, 2, name = name + '_maxpooling_1')
o_c2 = self.general_conv2d(o_mp1, self.base_number_of_features * 2, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_2')
o_mp2 = tf.layers.max_pooling2d(o_c2, 2, 2, name = name + '_maxpooling_2')
o_c3 = self.general_conv2d(o_mp2, self.base_number_of_features * 4, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_3')
o_mp3 = tf.layers.max_pooling2d(o_c3, 2, 2, name = name + '_maxpooling_3')
o_c4 = self.general_conv2d(o_mp3, self.base_number_of_features * 8, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_4')
o_mp4 = tf.layers.max_pooling2d(o_c4, 2, 2, name = name + '_maxpooling_4')
o_c5 = self.general_conv2d(o_mp4, self.base_number_of_features * 16, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_5')
# Decoder definition
o_d1 = self.general_deconv2d(o_c5, self.base_number_of_features * 8, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_1')
o_me1 = tf.concat([o_d1, o_c4], 3) # Skip connection
o_d2 = self.general_deconv2d(o_me1, self.base_number_of_features * 4, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_2')
o_me2 = tf.concat([o_d2, o_c3], 3) # Skip connection
o_d3 = self.general_deconv2d(o_me2, self.base_number_of_features * 2, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_3')
o_me3 = tf.concat([o_d3, o_c2], 3) # Skip connection
o_d4 = self.general_deconv2d(o_me3, self.base_number_of_features, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_4')
o_me4 = tf.concat([o_d4, o_c1], 3) # Skip connection
logits = tf.layers.conv2d(o_me4, self.args.num_classes, 1, 1, 'SAME', activation = None)
prediction = tf.nn.softmax(logits, name = name + '_softmax')
return logits, prediction
def general_conv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm=True, relu_factor = 0, name="conv2d"):
| tensorflow.concat | 11,238 |
import tensorflow as tf
images, labels = batch['image'], batch['label']
num_channels = tf.shape(images)[3]
image_height, image_width = tf.shape(images)[1], tf.shape(images)[2]
| tensorflow.shape | 11,239 |
import tensorflow as tf
embedding = tf.get_variable(
"embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
output, state = self._build_rnn_graph(inputs, config, is_training)
softmax_w = tf.get_variable(
| tensorflow.nn.dropout | 11,240 |
import tensorflow as tf
l2_loss = []
for v in tf.trainable_variables():
if 'batch_normalization' not in v.name and 'rl_controller' not in v.name:
l2_loss.append(tf.nn.l2_loss(v))
l2_loss = FLAGS.dst_weight_decay * tf.add_n(l2_loss)
enable_pretrain = tf.cast(
tf.greater_equal(global_step, FLAGS.first_pretrain_steps), tf.float32)
| tensorflow.add_n | 11,241 |
import tensorflow as tf
if encoder.final_state == 'concat_last': # concats last states of all backward layers (full LSTM states)
encoder_state_ = tf.concat(encoder_states_, axis=1)
elif encoder.final_state == 'average':
mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32)
mask = tf.expand_dims(mask, axis=2)
encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1)
elif encoder.final_state == 'average_inputs':
mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32)
mask = tf.expand_dims(mask, axis=2)
| tensorflow.expand_dims | 11,242 |
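A minimal sketch of the masked-average idiom above: `tf.expand_dims` turns a `[batch, time]` mask into `[batch, time, 1]` so it broadcasts over the feature axis:

```python
import tensorflow as tf

outputs = tf.ones([2, 3, 4])                                  # [batch, time, units]
lengths = tf.constant([2, 3])
mask = tf.sequence_mask(lengths, maxlen=3, dtype=tf.float32)  # [batch, time]
mask = tf.expand_dims(mask, axis=2)                           # [batch, time, 1]
avg = tf.reduce_sum(mask * outputs, axis=1) / tf.reduce_sum(mask, axis=1)

with tf.Session() as sess:
    print(sess.run(avg).shape)  # (2, 4)
```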
import tensorflow as tf
scales_to_logits = outputs_to_scales_to_logits[output]
logits = tf.image.resize_bilinear(
scales_to_logits[_MERGED_LOGITS_SCOPE],
tf.shape(images)[1:3],
align_corners=True)
predictions[output] = tf.argmax(logits, 3)
return predictions
| tensorflow.argmax | 11,243 |
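A minimal sketch of `tf.argmax` over the class axis, as in the prediction step above (toy logits, `axis=-1`):

```python
import tensorflow as tf

logits = tf.constant([[[0.1, 0.9],
                       [0.8, 0.2]]])   # [batch, pixels, classes]
preds = tf.argmax(logits, axis=-1)     # class index per pixel

with tf.Session() as sess:
    print(sess.run(preds))  # [[1 0]]
```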
import tensorflow as tf
one_hot_spare_rep = one_hot_spare_rep * passage_mask
one_hot_spare_rep = tf.sparse_reduce_sum(one_hot_spare_rep, axis=1) # [batch_size, extended_vsize]
vocab_dist = tf.add(vocab_dist, one_hot_spare_rep)
if self.options.add_first_word_prob_for_phrase:
vocab_dist = tf.nn.softmax(vocab_dist) # normalize
return vocab_dist # [batch_size, extended_vsize]
def linear(args, output_size, bias=True, bias_start=0.0, scope=None):
if args is None or (isinstance(args, (list, tuple)) and not args):
| tensorflow.nn.softmax | 11,244 |
import tensorflow as tf
"""Add prediction layer"""
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [state_size, input_size_y])
b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0))
rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size])
predictions = tf.matmul(rnn_outputs, W) + b
yy = tf.reshape(y, [-1, input_size_y]) # batch_size*num_steps; when you define a placeholder in TensorFlow, the shape fed during the session must match the placeholder's shape
"Mean squared error loss"
loss=tf.reduce_mean(tf.square(tf.reshape(predictions,[-1])-tf.reshape(yy,[-1])))
"Adding regularization"
if lambda_l2_reg > 0 :
cell_l2 = tf.reduce_sum([tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables() if not ("noreg" in tf_var.name or "Bias" in tf_var.name)])
Predict_l2 = tf.nn.l2_loss(W) #+ tf.nn.l2_loss(b)
total_loss = tf.reduce_sum(loss + lambda_l2_reg* tf.reduce_sum(cell_l2+Predict_l2) )
else:
total_loss = loss
"Define the train_step"
train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)
return dict(x=x,
y=y,
| tensorflow.nn.l2_loss | 11,245 |
import tensorflow as tf
def testEmbeddingRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
| tensorflow.constant_initializer | 11,246 |
import tensorflow as tf
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimiser = tf.train.AdamOptimizer(decayed_learning_rate, name="Adam").minimize(cross_entropy)
# calculate the prediction and the accuracy
accuracy, acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1), predictions=tf.argmax(y_conv, axis=1))
loss_summary = tf.summary.scalar('Loss', cross_entropy)
acc_summary = tf.summary.scalar('Accuracy', accuracy)
# summaries for TensorBoard visualisation
validation_summary = tf.summary.merge([img_summary, acc_summary])
training_summary = tf.summary.merge([img_summary, loss_summary])
test_summary = tf.summary.merge([img_summary, acc_summary])
# saver for checkpoints
| tensorflow.summary.scalar | 11,247 |
import tensorflow as tf
aspp_features = split_separable_conv2d(
features,
filters=depth,
rate=rate,
weight_decay=weight_decay,
scope=scope)
else:
aspp_features = slim.conv2d(
features, depth, 3, rate=rate, scope=scope)
branch_logits.append(aspp_features)
# Merge branch logits.
concat_logits = tf.concat(branch_logits, 3)
if model_options.aspp_with_concat_projection:
concat_logits = slim.conv2d(
concat_logits, depth, 1, scope=CONCAT_PROJECTION_SCOPE)
concat_logits = slim.dropout(
concat_logits,
keep_prob=0.9,
is_training=is_training,
scope=CONCAT_PROJECTION_SCOPE + '_dropout')
if model_options.aspp_with_squeeze_and_excitation:
concat_logits *= image_feature
| tensorflow.concat | 11,248 |
import tensorflow as tf
# loss of value function
vf_loss = 0.5 * tf.reduce_sum(tf.square(pi.vf - self.r))
entropy = - tf.reduce_sum(prob_tf * log_prob_tf)
bs = tf.to_float(tf.shape(pi.x)[0])
self.loss = pi_loss + 0.5 * vf_loss - entropy * 0.01
# 20 represents the number of "local steps": the number of timesteps
| tensorflow.shape | 11,249 |
from tensorflow.python.util.deprecation import deprecated
@deprecated("2018-06-30", "TensorLayer relies on TensorFlow to check naming.")
def clear_layers_name():
| tensorflow.python.util.deprecation.deprecated | 11,250 |
import tensorflow as tf
the preference for longer translations.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
batch_size = common_layers.shape_list(features["inputs"])[0]
def symbols_to_logits_fn(ids):
"""Go from ids to logits."""
ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
if "partial_targets" in features:
pt = features["partial_targets"]
pt_length = common_layers.shape_list(pt)[1]
pt = tf.tile(pt, [1, beam_size])
pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
ids = tf.concat([pt, ids], axis=1)
features["targets"] = ids
self._coverage = None
logits, _ = self(features) # pylint: disable=not-callable
# now self._coverage is a coverage tensor for the first datashard.
# it has shape [batch_size] and contains floats between 0 and
# source_length.
if self._problem_hparams:
modality = self._problem_hparams.target_modality
if modality.top_is_pointwise:
return tf.squeeze(logits, axis=[1, 2, 3])
# -1 due to the pad above.
current_output_position = common_layers.shape_list(ids)[1] - 1
| tensorflow.reshape | 11,251 |
import tensorflow as tf
update_second_moment_op = moving_averages.assign_moving_average(
variable=self._moving_second_moment,
value=second_moment,
decay=self._decay_rate,
name="update_moving_second_moment").op
return update_mean_op, update_second_moment_op
def build_no_ops():
return (tf.no_op(), tf.no_op())
# Only make the ops if we know that `is_training=True`, or the value of
# `is_training` is unknown.
is_training_const = utils.constant_value(is_training)
if is_training_const is None or is_training_const:
update_mean_op, update_second_moment_op = utils.smart_cond(
is_training,
build_update_ops,
| tensorflow.no_op | 11,252 |
import tensorflow as tf
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
| tensorflow.gfile.GFile | 11,253 |
import tensorflow as tf
self.s = tf.layers.dense(inputs=self.z,
units=self.g_dim,
activation=tf.nn.elu)
# Calculate manager output g
x = tf.expand_dims(self.s, [0])
self.manager_lstm = SingleStepLSTM(x,
self.g_dim,
step_size=tf.shape(self.obs)[:1])
g_hat = self.manager_lstm.output
self.g = tf.nn.l2_normalize(g_hat, dim=1)
self.manager_vf = self.build_value(g_hat)
def build_worker(self):
with tf.variable_scope('worker'):
num_acts = self.act_space
# Calculate U
self.worker_lstm = SingleStepLSTM(tf.expand_dims(self.z, [0]),
size=num_acts * self.k,
step_size=tf.shape(self.obs)[:1])
flat_logits = self.worker_lstm.output
self.worker_vf = self.build_value(flat_logits)
U = tf.reshape(flat_logits, [-1, num_acts, self.k])
# Calculate w
cut_g = tf.stop_gradient(self.g)
| tensorflow.variable_scope | 11,254 |
import tensorflow as tf
print()
net = []
with tf.variable_scope(name):
with tf.variable_scope('branch1_1x1'):
if o1s>0:
conv1 = conv(inp, inSize, o1s, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay)
| tensorflow.variable_scope | 11,255 |
import tensorflow as tf
def calc_prune_ratio(vars_list):
"""Calculate the overall pruning ratio for the given list of variables.
Args:
* vars_list: list of variables
Returns:
* prune_ratio: overall pruning ratio of the given list of variables
"""
nb_params_nnz = tf.add_n([tf.count_nonzero(var) for var in vars_list])
nb_params_all = tf.add_n([tf.size(var) for var in vars_list])
prune_ratio = 1.0 - tf.cast(nb_params_nnz, tf.float32) / tf.cast(nb_params_all, tf.float32)
return prune_ratio
class WeightSparseLearner(AbstractLearner): # pylint: disable=too-many-instance-attributes
"""Weight sparsification learner."""
def __init__(self, sm_writer, model_helper):
"""Constructor function.
Args:
* sm_writer: TensorFlow's summary writer
| tensorflow.cast | 11,256 |
import tensorflow as tf
inputs, labels = train_iterator.get_next()
graph_results = _train_step(inputs, labels, training=True)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
if "plugin" in args.optimizer:
init_op = tf.group(init_op, emb_opt.initializer)
| tensorflow.local_variables_initializer | 11,257 |
import tensorflow as tf
return h
def minibatch_discrimination(x, n_kernels, dim_per_kernel, name):
with tf.variable_scope(name):
batch_size, nf = x.get_shape().as_list()
h = linear(x, [nf, n_kernels*dim_per_kernel], 'h1')
activation = tf.reshape(h, (batch_size, n_kernels, dim_per_kernel))
big = tf.eye(batch_size)
big = tf.expand_dims(big, 1)
abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2)
mask = 1. - big
masked = tf.exp(-abs_dif) * mask
def half(tens, second):
| tensorflow.eye | 11,258 |
import tensorflow as tf
with tf.variable_scope("translate") as scope:
trans_h0 = lrelu(linear(tf.concat([srcimg_z, tgtctx_z], 1), featsize, 'trans_h0'))
trans_z = linear(trans_h0, featsize, 'trans_z')
self.translated_z = trans_z
with tf.variable_scope("deconv") as scope:
s_h, s_w = self.output_height, self.output_width
s_h2, s_h4, s_h8, s_h16 = \
int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16)
s_w2, s_w4, s_w8, s_w16 = \
int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16)
output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3),
[self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3),
[self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3),
[self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))
output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3),
[self.batch_size, s_h, s_w, self.c_dim], name='d_h4')
scope.reuse_variables()
truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3),
| tensorflow.concat | 11,259 |
import tensorflow as tf
with tf.name_scope('S_'):
S_ = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s_')
############################### Actor ####################################
class Actor(object):
def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter):
self.sess = sess
self.a_dim = action_dim
self.action_bound = action_bound
self.lr = learning_rate
self.t_replace_iter = t_replace_iter
self.t_replace_counter = 0
with tf.variable_scope('Actor'):
# input s, output a
self.a = self._build_net(S, scope='eval_net', trainable=True)
# input s_, output a, get a_ for critic
self.a_ = self._build_net(S_, scope='target_net', trainable=False)
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
def _build_net(self, s, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
| tensorflow.variable_scope | 11,260 |
import tensorflow as tf
box_xy, box_wh, obj, cls = tf.split(logits, (2, 2, 1, num_classes), axis=-1)
box_xy = tf.sigmoid(box_xy)
obj = tf.sigmoid(obj)
cls = tf.sigmoid(cls)
anchors = anchors.astype(np.float32)
| tensorflow.sigmoid | 11,261 |
import tensorflow as tf
self.num_a = num_a
self.lr = lr
self.gamma = gamma
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.epsilon_min = 0.1
self.learn_step_cnt = 0 # total learning step
self.episode_cnt = 0
self.memory = []
self.memory_counter = 0
self._build_net()
t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/target_net')
e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/eval_net')
e_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/eval_hyper')
t_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/target_hyper')
with tf.variable_scope('soft_replacement'):
self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/' + current_time
self.summary_writer = tf.summary.FileWriter(train_log_dir, self.sess.graph)
| tensorflow.get_collection | 11,262 |
import tensorflow as tf
if max_iterations:
pbar.close()
# List of dicts to dict of lists
metrics = dict(zip(metrics[0], zip(*[m.values() for m in metrics])))
metrics = {m: np.nanmean(metrics[m], axis=0) for m in metrics}
return metrics
def _checkpoint_var_search(self, checkpoint_path):
reader = tf.train.NewCheckpointReader(checkpoint_path)
saved_shapes = reader.get_variable_to_shape_map()
model_names = tf.model_variables() # Used by tf.slim layers
if not len(tf.model_variables()):
model_names = tf.global_variables() # Fallback when slim is not used
model_names = set([v.name.split(':')[0] for v in model_names])
checkpoint_names = set(saved_shapes.keys())
found_names = model_names & checkpoint_names
missing_names = model_names - checkpoint_names
| tensorflow.train.NewCheckpointReader | 11,263 |
import tensorflow as tf
self.D_B_loss_real = binary_cross_entropy_loss(tf.ones_like(self.D_B_real),self.D_B_real)
self.D_B_loss_fake = binary_cross_entropy_loss(tf.zeros_like(self.D_B_fake),self.D_B_fake)
self.D_B_loss = (self.D_B_loss_real + self.D_B_loss_fake) / 2.0
self.D_A_loss_real = binary_cross_entropy_loss(tf.ones_like(self.D_A_real),self.D_A_real)
self.D_A_loss_fake = binary_cross_entropy_loss(tf.zeros_like(self.D_A_fake),self.D_A_fake)
self.D_A_loss = (self.D_A_loss_real + self.D_A_loss_fake) / 2.0
self.discriminator_loss = self.D_B_loss + self.D_A_loss
self.loss_GABA_sum = tf.summary.scalar("g_loss_a2b", self.loss_GABA)
| tensorflow.zeros_like | 11,264 |
import tensorflow as tf
'a',
py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
var_a = task.theta.a
# Infinite gradient.
var_grads = py_utils.NestedMap(a=(var_a, tf.log(0.)))
has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)
with self.session():
tf.global_variables_initializer().run()
self.assertTrue(has_nan_or_inf.eval())
self.assertEqual(0., grad_scale.eval())
# The final gradient must be finite.
self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
def testScaleGradientsNaN(self):
FLAGS.enable_check_numerics = False
p = self.TestParams()
p.input = base_input_generator.BaseSequenceInputGenerator.Params()
task = p.cls(p)
task.CreateVariable(
'a',
py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
var_a = task.theta.a
| tensorflow.is_nan | 11,265 |
import tensorflow as tf
# first_token_tensor = tf.squeeze(sequence_output[:, -1:, :], axis=1)
last_token_tensor = tf.squeeze(sequence_output[:, -1:, :], axis=1)
output_layer = tf.layers.dense(
last_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=modeling.create_initializer(config.initializer_range))
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
if task_name != "sts-b":
| tensorflow.truncated_normal_initializer | 11,266 |
import tensorflow as tf
preprocessed_resized_image, true_image_shape = model_preprocess_fn(
tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0))
if use_bfloat16:
preprocessed_resized_image = tf.cast(
preprocessed_resized_image, tf.bfloat16)
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
preprocessed_resized_image, axis=0)
tensor_dict[fields.InputDataFields.true_image_shape] = tf.squeeze(
true_image_shape, axis=0)
if fields.InputDataFields.groundtruth_instance_masks in tensor_dict:
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
_, resized_masks, _ = image_resizer_fn(image, masks)
if use_bfloat16:
resized_masks = tf.cast(resized_masks, tf.bfloat16)
| tensorflow.squeeze | 11,267 |
import tensorflow as tf
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
| tensorflow.shape | 11,268 |
from tensorflow.python.framework import ops
assert isinstance(x, ops.Tensor)
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
ops.Tensor._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
def r_binary_op_wrapper(y, x):
| tensorflow.python.framework.ops.Tensor._override_operator | 11,269 |
import tensorflow as tf
#For Imitation Learning Part
# self.bc_loss = 0.5 * tf.reduce_mean(tf.contrib.keras.backend.categorical_crossentropy(self.optimal_actions_onehot,self.policy))
# self.next_loc_loss_il = 0.2 * tf.reduce_sum(tf.sqrt(tf.square(self.next_loc_mean[:-1,:] - self.il_nextloc)))
# self.imitation_loss = self.bc_loss #+ self.next_loc_loss_il
# Get gradients from local network using local losses and
# normalize the gradients using clipping
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope+'/qvalues')
self.gradients = tf.gradients(self.loss, local_vars)
self.var_norms = tf.global_norm(local_vars)
grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, GRAD_CLIP)
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GLOBAL_NET_SCOPE+'/qvalues')
self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
#now the gradients for imitation loss
# self.i_gradients = tf.gradients(self.imitation_loss, local_vars)
# self.i_var_norms = tf.global_norm(local_vars)
# i_grads, self.i_grad_norms = tf.clip_by_global_norm(self.i_gradients, GRAD_CLIP)
# Apply local gradients to global network
| tensorflow.clip_by_global_norm | 11,270 |
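A minimal sketch of `tf.clip_by_global_norm`, with values chosen so the global norm is easy to check by hand:

```python
import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([0.0, 12.0])]
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)

with tf.Session() as sess:
    print(sess.run(global_norm))  # 13.0 = sqrt(3^2 + 4^2 + 12^2)
    print(sess.run(clipped))      # each gradient scaled by 5/13
```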
from tensorflow.python.ops import state_ops
with ops.op_scope(inputs, name, "AccumulateN") as name:
var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
var_name = var.op.name
var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
update_ops = []
for input_tensor in inputs:
op = state_ops.assign_add(var, input_tensor, use_locking=True)
update_ops.append(op)
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(var,
var_name=var_name,
name=name)
| tensorflow.python.ops.state_ops.assign_add | 11,271 |
import tensorflow as tf
env_floor=0) for i in range(N_WORKER)]
# Observer worker
# workers.append(Worker(envpath='./ObstacleTower/obstacletower.exe',
# wid=N_WORKER + 1,
# retro=False,
# realtime_mode=True,
# env_seed=0,
# env_floor=0))
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
# Declare the shared memory (queue)
QUEUE = queue.Queue()
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start() # training
threads.append(t)
# Create the thread that updates the model
threads.append(threading.Thread(target=GLOBAL_KPRUN.update, ))
threads[-1].start()
COORD.join(threads)
| tensorflow.train.Coordinator | 11,272 |
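A minimal runnable sketch of `tf.train.Coordinator` managing worker threads, as in the row above:

```python
import threading
import tensorflow as tf

coord = tf.train.Coordinator()

def work():
    # Each worker loops until some thread requests a stop.
    while not coord.should_stop():
        coord.request_stop()

threads = [threading.Thread(target=work) for _ in range(2)]
for t in threads:
    t.start()
coord.join(threads)  # blocks until all threads have exited
```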
import tensorflow as tf
reduction_arch = [cell_archs[(4 * i):(4 * i + 4)] for i in range(b, b + b)]
return (normal_arch, reduction_arch)
def _add_aux_head(self, X, in_w, in_h, in_ch, K, is_train):
pool_ksize = 5
pool_stride = 3
conv_ch = 128
global_conv_ch = 768
w = in_w
h = in_h
ch = in_ch
# Pool
with tf.variable_scope('pool'):
X = tf.nn.relu(X)
X = tf.nn.avg_pool(X, ksize=(1, pool_ksize, pool_ksize, 1), strides=(1, pool_stride, pool_stride, 1),
padding='VALID')
w //= pool_stride
h //= pool_stride
# Conv 1x1
with tf.variable_scope('conv_0'):
X = self._do_conv(X, w, h, ch, conv_ch, filter_size=1, no_reg=True, is_train=is_train)
ch = conv_ch
# Global conv
with tf.variable_scope('conv_1'):
X = self._do_conv(X, w, h, ch, global_conv_ch, filter_size=w, no_reg=True, is_train=is_train)
ch = global_conv_ch
| tensorflow.nn.relu | 11,273 |
import tensorflow as tf
weighted_loss = loss_weight * loss
summaries = {
'regularization_loss/KL/PriorMean/Mean':
tf.math.reduce_mean(tf.constant(prior_mean)),
'regularization_loss/KL/PriorVar/Mean':
tf.math.reduce_mean(tf.constant(prior_stddev)**2),
| tensorflow.constant | 11,274 |
import tensorflow as tf
def get_conv_dimension(filter_list):
with tf.Graph().as_default():
with tf.Session() as sess:
"""Model function for CNN."""
features = tf.placeholder(
tf.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE], name='features')
labels = tf.placeholder(tf.int64, shape=[None], name='labels')
input_layer = tf.reshape(features, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=filter_list[0],
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
conv2 = tf.layers.conv2d(
| tensorflow.layers.conv2d | 11,275 |
import tensorflow as tf
if ltype == 'conv':
self.conv(*args, **kwargs)
elif ltype == 'mpool':
self.mpool(*args, **kwargs)
elif ltype == 'apool':
self.apool(*args, **kwargs)
elif ltype == 'share': # Share matching layer from previous column
self.top_layer = col_layers[c - 1][l]
self.top_size = col_layer_sizes[c - 1][l]
else:
raise KeyError('Invalid layer type for inception module: \'%s\'' %
ltype)
col_layers[c].append(self.top_layer)
col_layer_sizes[c].append(self.top_size)
catdim = 3 if self.data_format == 'NHWC' else 1
self.top_layer = tf.concat([layers[-1] for layers in col_layers], catdim)
self.top_size = sum([sizes[-1] for sizes in col_layer_sizes])
return self.top_layer
def residual(self, nout, net, scale=1.0):
inlayer = self.top_layer
net(self)
self.conv(nout, 1, 1, activation=None)
self.top_layer = tf.nn.relu(inlayer + scale * self.top_layer)
def spatial_mean(self, keep_dims=False):
name = 'spatial_mean' + str(self.counts['spatial_mean'])
self.counts['spatial_mean'] += 1
axes = [1, 2] if self.data_format == 'NHWC' else [2, 3]
self.top_layer = tf.reduce_mean(
| tensorflow.concat | 11,276 |
import tensorflow as tf
self.update_c_op = opt.apply_gradients(zip(self.c_grads, self.vf_params))
self.sess.run(tf.global_variables_initializer())
# Tensorboard
if summary_dir is not None:
self.writer = tf.summary.FileWriter(summary_dir)
tf.summary.scalar('Loss/Policy', loss_pg)
tf.summary.scalar('Loss/Value', loss_vf)
tf.summary.scalar('Loss/Entropy', - 0.01 * tf.reduce_mean(pi.entropy()))
tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
| tensorflow.summary.scalar | 11,277 |
import tensorflow as tf
mapping_strings, name=name
)
word_strings = reverse_vocab_tags.lookup(tf.to_int64(word_ids))
return word_strings
def loss_layer(self, preds, ground_true, nwords, crf_params):
with tf.name_scope("CRF_log_likelihood"):
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
preds, ground_true, nwords, crf_params
)
loss = tf.reduce_mean(-log_likelihood)
# regularizer = tf.contrib.layers.l2_regularizer(0.001)
# reg = regularizer(embedding_variable)
# loss += reg
return loss
def crf_decode_layer(self, logits, crf_params, nwords):
with tf.name_scope("CRF_decode"):
pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords)
return pred_ids
| tensorflow.reduce_mean | 11,278 |
import tensorflow as tf
for i, xs in enumerate(zip(*xs)):
do_reuse = True if i > 0 else None
with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse):
clf_logits, clf_losses, lm_losses = model(*xs, train=True, reuse=do_reuse)
if lm_coef > 0:
train_loss = tf.reduce_mean(clf_losses) + lm_coef*tf.reduce_mean(lm_losses)
else:
train_loss = tf.reduce_mean(clf_losses)
params = find_trainable_variables("model")
grads = tf.gradients(train_loss, params)
grads = list(zip(grads, params))
gpu_grads.append(grads)
gpu_ops.append([clf_logits, clf_losses, lm_losses])
ops = [tf.concat(op, 0) for op in zip(*gpu_ops)]
| tensorflow.reduce_mean | 11,279 |
import tensorflow as tf
axes = [np.arange(-cc, int(ss) - cc).astype(np.int32) for cc, ss in zip(center, shape)]
if len(shape) > 1:
for jj in range(len(shape)):
for ii in range(len(shape) + 1):
if ii != jj:
axes[jj] = np.expand_dims(axes[jj], ii)
for jj in range(len(shape)):
shape_ = [int(ss) for ss in shape] + [1]
shape_[jj] = 1
axes[jj] = np.tile(axes[jj], shape_)
offset = np.concatenate(axes, len(shape))
return tf.constant(offset)
else:
return tf.constant(axes[0])
def _calc_block_strides(bsize, ksize, strides):
"""Calculates strides for blocks.
:param bsize: [list] List of 4 int. Size of blocks, or downsample ratio.
:param ksize: [list] List of 4 int. Sparse convolution kernel size.
:param strides: [list] List of 4 int. Sparse convolution strides.
| tensorflow.constant | 11,280 |
import tensorflow as tf
if w > 1:
right = (w - 1) // 2
left = (w - 1) - right
pad_right = tf.tile(pad, [1, right, 1])
pad_left = tf.tile(pad, [1, left, 1])
inputs_ = tf.concat([pad_left, encoder_inputs_, pad_right], axis=1)
else:
inputs_ = encoder_inputs_
inputs_ = tf.nn.convolution(inputs_, filter=filter_, padding='VALID')
inputs.append(inputs_)
encoder_inputs_ = tf.concat(inputs, axis=2)
# if encoder.convolution_activation.lower() == 'relu':
encoder_inputs_ = tf.nn.relu(encoder_inputs_)
if encoder.maxout_stride:
if encoder.binary:
raise NotImplementedError
| tensorflow.nn.convolution | 11,281 |
import tensorflow as tf
input_var = tf.reshape(input_var,[-1,dims])
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=0)
t = tf.matmul(input_var,v_norm)
mu,var = tf.nn.moments(t,axes=[0])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0)
return tf.matmul(input_var,w)+self.b
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
class SymPadConv2d(object): #Resize and Convolution(upsacle by 2)
def __init__(self,name,input_dim,output_dim,
k_h=3,k_w=3,stddev=0.02) :
assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'
with tf.variable_scope(name) :
self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
| tensorflow.matmul | 11,282 |
import numpy as np
import tensorflow as tf

accuracy_1 = tf.reduce_mean(tf.cast(tf.equal(
    tf.argmax(output_1, axis=-1),
    tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1")
accuracy_2 = tf.reduce_mean(tf.cast(tf.equal(
    tf.argmax(output_2, axis=-1),
    tf.argmax(y_2, axis=-1)), tf.float32), name="accuracy_2")
accuracy = tf.divide(accuracy_1 + accuracy_2, 2.0, name="accuracy")

with tf.variable_scope("train"):
    global_step = tf.get_variable("global_step", shape=(), dtype=tf.int32, trainable=False)
    train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total, global_step=global_step)

with tf.variable_scope("summary"):
    summary_loss_total = tf.summary.scalar("loss_total", loss_total)
    summary_accuracy_test = tf.summary.scalar("accuracy_test", accuracy)
    summary_accuracy_train = tf.summary.scalar("accuracy_train", accuracy)

# standardization
train_X_reshaped = train_X.reshape([train_X.shape[0], -1])
train_X_means = np.mean(train_X_reshaped, axis=0, keepdims=True)
| tensorflow.train.AdamOptimizer | 11,283 |
import tensorflow as tf


class PadInputDataToStaticShapesFnTest(test_case.TestCase):

  def test_pad_images_boxes_and_classes(self):
    input_tensor_dict = {
        fields.InputDataFields.image:
            tf.placeholder(tf.float32, [None, None, 3]),
        fields.InputDataFields.groundtruth_boxes:
            tf.placeholder(tf.float32, [None, 4]),
        fields.InputDataFields.groundtruth_classes:
            tf.placeholder(tf.int32, [None, 3]),
        fields.InputDataFields.true_image_shape:
            tf.placeholder(tf.int32, [3]),
        fields.InputDataFields.original_image_spatial_shape:
            tf.placeholder(tf.int32, [2])
    }
    padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
| tensorflow.placeholder | 11,284 |
import tensorflow as tf

if encoder.mult_attn:
    state = dense(state, encoder.attn_size, use_bias=False, name='state')
    hidden = dense(hidden, encoder.attn_size, use_bias=False, name='hidden')
    return tf.einsum('ijk,ik->ij', hidden, state)

y = dense(state, encoder.attn_size, use_bias=not encoder.layer_norm, name='W_a')
y = tf.expand_dims(y, axis=1)

if encoder.layer_norm:
    y = tf.contrib.layers.layer_norm(y, scope='layer_norm_state')
    hidden = tf.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden')

y += dense(hidden, encoder.attn_size, use_bias=False, name='U_a')

if encoder.position_bias and input_length is not None and time is not None:
    src_pos = tf.tile(tf.expand_dims(tf.range(time_steps), axis=0), [batch_size, 1])
    trg_pos = tf.tile(tf.reshape(time, [1, 1]), [batch_size, time_steps])
    src_len = tf.tile(tf.expand_dims(input_length, axis=1), [1, time_steps])  # - 1
    pos_feats = tf.to_float(tf.stack([src_pos, trg_pos, src_len], axis=2))
    pos_feats = tf.log(1 + pos_feats)
| tensorflow.contrib.layers.layer_norm | 11,285 |
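The y built above is the W_a·state + U_a·hidden term of additive (Bahdanau-style) attention; the usual next step squashes it with tanh and projects each source position to a scalar score. A hedged sketch of that continuation (v_a is an assumed variable name, not part of the seed):

v_a = tf.get_variable('v_a', [encoder.attn_size])
scores = tf.reduce_sum(v_a * tf.tanh(y), axis=2)          # [batch, time_steps]
weights = tf.nn.softmax(scores)
context = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden, axis=1)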
import tensorflow as tf

rgb = tf.image.decode_png(tf.read_file(img_path), channels=3, dtype=tf.uint8)
h = config.model.data.dimensions.h
w = config.model.data.dimensions.w
c = config.model.data.dimensions.c
# rgb.set_shape([h, w, c])  # variable size per example

# crop for lsun 96
rgb = tf.image.random_crop(rgb, size=[h, w, c])

# crop for patch training
crop_h = h // self.crop_factor
crop_w = w // self.crop_factor
rgb = tf.image.random_crop(rgb, size=[crop_h, crop_w, c])

# cast, bit conversion, compress domain, center
rgb = tf.cast(rgb, tf.float32)
if n_bits < 8:
    rgb = tf.floor(rgb / (2**(8 - n_bits)))
rgb = rgb / n_bins - 0.5
return rgb

def post_process(self, rgb, add_dequantization_noise=True):
    n_bits = config.model.data.n_bits
    n_bins = 2**n_bits
| tensorflow.image.random_crop | 11,286 |
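A quick numeric check of the bit-depth reduction above, assuming n_bits = 5 (so n_bins = 32): pixel 200 becomes floor(200/8) = 25, and 25/32 - 0.5 = 0.28125, so inputs land in [-0.5, 0.5):

import numpy as np

n_bits = 5
n_bins = 2 ** n_bits
pixel = np.array([0., 200., 255.])
out = np.floor(pixel / 2 ** (8 - n_bits)) / n_bins - 0.5   # [-0.5, 0.28125, 0.46875]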
import tensorflow as tf

z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3*s_h3*s_w3, 'd_h0_lin'))
h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob)

h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3),
                    [self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3))
h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3),
                    [self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2))
h3 = lrelu(deconv2d(tf.concat([h2, skip_h1], 3),
                    [self.batch_size, s_h0, s_w0, nf0], name='d_h3', d_h=ns1, d_w=ns1))
print(h3.get_shape())
h4 = deconv2d(tf.concat([h3, skip_h0], 3),
| tensorflow.concat | 11,287 |
import tensorflow as tf
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature([1], tf.int64),
'image/width': tf.FixedLenFeature([1], tf.int64),
'image/channels': tf.FixedLenFeature([1], tf.int64),
'image/shape': tf.FixedLenFeature([3], tf.int64),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
| tensorflow.FixedLenFeature | 11,288 |
import tensorflow as tf

    features.append(feature)
  return features


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
| tensorflow.logging.set_verbosity | 11,289 |
import tensorflow as tf
labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)
| tensorflow.cast | 11,290 |
import tensorflow as tf

def neural_net(self, X, weights, biases):
    num_layers = len(weights) + 1

    H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
    for l in range(0, num_layers - 2):
        W1, W2 = weights[l]
        b = biases[l]
        H1 = tf.add(tf.matmul(H, W1), b)
        H2 = tf.matmul(H, W2)
        H = tf.tanh(tf.add(H1 * H2, H1))

    W1, W2 = weights[-1]
    b = biases[-1]
    H1 = tf.add(tf.matmul(H, W1), b)
    H2 = tf.matmul(H, W2)
    Y = tf.add(H1 * H2, H1)
| tensorflow.matmul | 11,291 |
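The first line of the body rescales inputs to [-1, 1] using the domain bounds lb/ub, which keeps the tanh units well-conditioned. A tiny worked example with made-up bounds:

import numpy as np

lb, ub = np.array([0.0, -5.0]), np.array([1.0, 5.0])   # illustrative bounds
X = np.array([[0.5, 0.0],
              [1.0, 5.0]])
H = 2.0 * (X - lb) / (ub - lb) - 1.0                   # [[0., 0.], [1., 1.]]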
import tensorflow as tf

    self.assertTrue(gfile.Exists(s4))


class SaveRestoreWithVariableNameMap(tf.test.TestCase):

  def testNonReshape(self):
    save_path = os.path.join(self.get_temp_dir(), "basics")
    with self.test_session() as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = tf.Variable(10.0, name="v0")
      v1 = tf.Variable(20.0, name="v1")
      save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      tf.initialize_all_variables().run()

      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())

      # Save the initialized values in the file at "save_path".
      # Use a variable name map to set the saved tensor names.
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
| tensorflow.Variable | 11,292 |
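The dict passed to tf.train.Saver maps checkpoint tensor names to variables, so the file stores "save_prefix/v0" rather than "v0"; restoring therefore needs the same map. A hedged sketch of the matching restore:

restorer = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
restorer.restore(sess, save_path)   # looks up "save_prefix/v0" in the checkpoint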
import numpy as np
import tensorflow as tf

def test_prob_and_grad_gives_finite_results_for_common_events(self):
    with self.test_session():
        mu = tf.Variable(0.0, name="mu")
        sigma = tf.Variable(1.0, name="sigma")
        qdist = distributions.QuantizedDistribution(
            base_dist_cls=distributions.Normal,
            mu=mu,
            sigma=sigma)
        x = tf.ceil(4 * self._rng.rand(100).astype(np.float32) - 2)
        tf.initialize_all_variables().run()

        proba = qdist.prob(x)
        self._assert_all_finite(proba.eval())

        grads = tf.gradients(proba, [mu, sigma])
        self._assert_all_finite(grads[0].eval())
        self._assert_all_finite(grads[1].eval())

def test_lower_cutoff_must_be_below_upper_cutoff_or_we_raise(self):
    with self.test_session():
| tensorflow.initialize_all_variables | 11,293 |
import tensorflow as tf
# Categorical Discrimminator Loss
dc_c_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_real), logits=d_c_real))
dc_c_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_c_fake), logits=d_c_fake))
dc_c_loss = dc_c_loss_fake + dc_c_loss_real
# Generator loss
generator_g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_g_fake), logits=d_g_fake))
generator_c_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_fake), logits=d_c_fake))
generator_loss = generator_c_loss + generator_g_loss
# Supervised Encoder Loss
supervised_encoder_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_input, logits=encoder_output_label_))
all_variables = tf.trainable_variables()
| tensorflow.ones_like | 11,294 |
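With labels fixed to ones, sigmoid_cross_entropy_with_logits reduces to -log(sigmoid(z)), so the discriminator loss above is the standard -log D(real) - log(1 - D(fake)). A small check (the logit values are illustrative):

import tensorflow as tf

z = tf.constant([0.0, 2.0])
a = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(z), logits=z)
b = -tf.log(tf.sigmoid(z))   # equal to `a` up to floating-point error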
import tensorflow as tf

)
assert mode == tf.estimator.ModeKeys.TRAIN

with tf.variable_scope('learning_rate'):
    global_step = tf.train.get_global_step()
    learning_rate = tf.train.cosine_decay(
        params['initial_learning_rate'],
        global_step, decay_steps=params['num_steps']
    )
    tf.summary.scalar('learning_rate', learning_rate)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops), tf.variable_scope('optimizer'):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    grads_and_vars = optimizer.compute_gradients(total_loss)
    train_op = optimizer.apply_gradients(grads_and_vars, global_step)

for g, v in grads_and_vars:
    tf.summary.histogram(v.name[:-2] + '_hist', v)
    tf.summary.histogram(v.name[:-2] + '_grad_hist', g)

with tf.control_dependencies([train_op]), tf.name_scope('ema'):
    ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
    train_op = ema.apply(tf.trainable_variables())

return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)
| tensorflow.train.AdamOptimizer | 11,295 |
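Because train_op is wrapped by ema.apply(...), every step also updates shadow copies of the weights; at evaluation time those shadow values are usually what gets loaded. A common restore pattern (hedged sketch):

ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY)
saver = tf.train.Saver(ema.variables_to_restore())   # maps shadow names -> variables
saver.restore(sess, checkpoint_path)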
import tensorflow as tf

    of the bboxes are outside the image.
    """
    with tf.name_scope('BoundingBoxTransform/clip_bboxes'):
        bboxes = tf.cast(bboxes, dtype=tf.float32)
        imshape = tf.cast(imshape, dtype=tf.float32)

        x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
        width = imshape[1]
        height = imshape[0]

        x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)
        x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)
| tensorflow.split | 11,296 |
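The seed cuts off after the x-coordinates are clipped; a plausible completion mirrors it for y and re-assembles the boxes (a sketch, not necessarily the original code):

y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)
y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)
bboxes = tf.concat([x1, y1, x2, y2], axis=1)
return bboxes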
import tensorflow as tf
from tensorflow.python.client import timeline
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import gfile
import benchmark_storage
import cnn_util
import datasets
import model_config
import preprocessing
import variable_mgr
tf.flags.DEFINE_string('model', 'trivial', 'name of the model to run')
# The code will first check if it's running under benchmarking mode
# or evaluation mode, depending on FLAGS.eval:
# Under the evaluation mode, this script will read a saved model,
# and compute the accuracy of the model against a validation dataset.
# Additional ops for accuracy and top_k predictors are only used under this
# mode.
# Under the benchmarking mode, the user can specify whether or not to use
# the forward-only option, which will only compute the loss function.
# forward-only cannot be enabled with eval at the same time.
tf.flags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking')
tf.flags.DEFINE_boolean('forward_only', False, """whether use forward-only or
training for benchmarking""")
| tensorflow.flags.DEFINE_string | 11,297 |
import tensorflow as tf

if reuse:
    tf.get_variable_scope().reuse_variables()
else:
    assert tf.get_variable_scope().reuse is False

d = tf.contrib.layers.conv2d(layer_input, filters, kernel_size=f_size, stride=stride, padding=padding)
if norm:
    d = tf.contrib.layers.batch_norm(d)
d = lrelu(d, alpha=0.2)
return d

# def common_deconv2d(layer_input, skip_input, filters, f_size=4, stride=2, dropout_rate=0, name='common_deconv2d'):
def common_deconv2d(layer_input, filters, f_size=4, stride=2, padding='SAME', dropout_rate=0, name='common_deconv2d'):
    """Layers used during upsampling"""
    with tf.variable_scope(name):
| tensorflow.contrib.layers.batch_norm | 11,298 |
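lrelu is called but never defined in the seed; the conventional helper it implies looks like this (an assumed implementation, not taken from the source repo):

def lrelu(x, alpha=0.2):
    # Leaky ReLU: identity for positive inputs, small slope for negative ones.
    return tf.maximum(x, alpha * x)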
import tensorflow as tf
import tensorflow.contrib.slim as slim

img_h = img_h_batch[i]
img_w = img_w_batch[i]

inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w])

tower_grads = []
biases_regularizer = tf.no_regularizer
weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)

with tf.variable_scope(tf.get_variable_scope()):
    for i in range(num_gpu):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i):
                with slim.arg_scope(
                        [slim.model_variable, slim.variable],
                        device='/device:CPU:0'):
                    with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,
                                         slim.conv2d_transpose, slim.separable_conv2d,
                                         slim.fully_connected],
                                        weights_regularizer=weights_regularizer,
                                        biases_regularizer=biases_regularizer,
                                        biases_initializer=tf.constant_initializer(0.0)):
                        gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(
                            self.get_gtboxes_and_label,
                            inp=[inputs_list[i][1],
                                 inputs_list[i][2],
| tensorflow.contrib.slim.arg_scope | 11,299 |