seed (string, lengths 25–1.88k) | seed_api (string, lengths 14–102) | index (int64, 0–1.05k)
---|---|---|
import tensorflow as tf
| tensorflow.GradientTape | 900 |
from tensorflow.python.ops import random_ops
"""
with ops.op_scope([x], name, "dropout") as name:
x = ops.convert_to_tensor(x, name="x")
if isinstance(keep_prob, float) and not 0 < keep_prob <= 1:
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(
keep_prob, dtype=x.dtype, name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
noise_shape = noise_shape or array_ops.shape(x)
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob
random_tensor += random_ops.random_uniform(
noise_shape, seed=seed, dtype=x.dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = x * math_ops.inv(keep_prob) * binary_tensor
ret.set_shape(x.get_shape())
return ret
def top_k(input, k=1, sorted=True, name=None):
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank-1), finds the `k` largest entries in the vector
| tensorflow.python.ops.random_ops.random_uniform | 901 |
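The dropout seed above uses the floor trick: a uniform draw in [keep_prob, 1.0 + keep_prob) floors to 1 with probability keep_prob and to 0 otherwise. A minimal self-contained sketch of the same idea using the public tf.random.uniform and tf.floor ops (the function name is illustrative, not part of the dataset row):

import tensorflow as tf

def dropout_via_floor(x, keep_prob, seed=None):
    x = tf.convert_to_tensor(x)
    # uniform in [keep_prob, 1 + keep_prob); floor() is 1 with probability
    # keep_prob and 0 otherwise, giving a Bernoulli(keep_prob) mask
    random_tensor = keep_prob + tf.random.uniform(tf.shape(x), seed=seed, dtype=x.dtype)
    binary_mask = tf.floor(random_tensor)
    # rescale so the expected value of the output matches the input
    return x * (1.0 / keep_prob) * binary_mask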
import tensorflow as tf
else:
raise ValueError("invalid type passed into Simulator class (only accepts 'D', 'P', or 'T')")
self.rgb2lms = tf.convert_to_tensor([[17.8824, 43.5161, 4.11935], [3.45565, 27.1554, 3.86714], [0.0299566, 0.184309, 1.46709]])
def simulate_image(self, image):
# passes an image through the color-blindness simulator
inverted_rgb2lms = tf.linalg.inv(self.rgb2lms)
product1 = tf.matmul(inverted_rgb2lms, self.color_matrix)
product2 = tf.matmul(product1, self.rgb2lms)
original_image_shape = image.shape
simulated_image = tf.transpose(tf.matmul(product2, tf.reshape(tf.transpose(image, perm=[2, 0, 1]), (image.shape[2], image.shape[0] * image.shape[1]))), perm=[1, 0])
| tensorflow.linalg.inv | 902 |
from tensorflow.contrib.learn.python.learn.estimators import composable_model
model_dir=model_dir, config=config)
num_ps_replicas = config.num_ps_replicas if config else 0
self._linear_model = composable_model.LinearComposableModel(
num_label_columns=target_column.num_label_columns,
optimizer=linear_optimizer,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas)
self._dnn_model = composable_model.DNNComposableModel(
num_label_columns=target_column.num_label_columns,
hidden_units=dnn_hidden_units,
optimizer=dnn_optimizer,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas) if dnn_hidden_units else None
self._linear_feature_columns = linear_feature_columns
| tensorflow.contrib.learn.python.learn.estimators.composable_model.DNNComposableModel | 903 |
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
input_fn = test_data.iris_input_logistic_fn
metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
input_fn=input_fn, steps=100)
self._assertSingleClassMetrics(metrics)
def benchmarkMultiClass(self):
iris = base.load_iris()
cont_feature = feature_column.real_valued_column('feature', dimension=4)
bucketized_feature = feature_column.bucketized_column(
cont_feature, test_data.get_quantile_based_buckets(iris.data, 10))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=(bucketized_feature,),
dnn_feature_columns=(cont_feature,),
dnn_hidden_units=(3, 3))
input_fn = test_data.iris_input_multiclass_fn
metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
input_fn=input_fn, steps=100)
self._assertCommonMetrics(metrics)
def benchmarkPartitionedVariables(self):
| tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined.DNNLinearCombinedClassifier | 904 |
from tensorflow.python.ops import array_ops
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
sample_shape, batch_shape, event_shape = self.get_shape(x)
event_shape = distribution_util.pick_vector(
self._event_ndims_is_0, (1,), event_shape)
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, (1,), batch_shape)
new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape))
x = array_ops.reshape(x, shape=new_shape)
x = distribution_util.rotate_transpose(x, shift=-1)
return x, sample_shape
def undo_make_batch_of_event_sample_matrices(
self, x, sample_shape, name="undo_make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
| tensorflow.python.ops.array_ops.concat | 905 |
import tensorflow as tf
with tf.variable_scope(name):
W = get_variable("W", shape=[size, size, in_channels, out_channels], dtype=tf.float32,
initializer=initializer, regularizer=tf.nn.l2_loss)
b = get_variable("b", shape=[1, 1, 1, out_channels], dtype=tf.float32,
initializer=tf.zeros_initializer(),trainable=bias)
if dilation:
assert(strides == [1, 1, 1, 1])
out = tf.add(tf.nn.atrous_conv2d(inp, W, rate=dilation, padding=padding), b, name='convolution')
out.set_shape([batch_size, res1, res2, out_channels])
else:
out = tf.add(tf.nn.conv2d(inp, W, strides=strides, padding=padding), b, name='convolution')
if apply_relu:
out = relu(out, alpha=alpha, name='relu')
| tensorflow.nn.atrous_conv2d | 906 |
import tensorflow as tf
self._moving_variance = tf.get_variable(
"moving_variance",
shape=self._mean_shape,
collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
tf.GraphKeys.GLOBAL_VARIABLES],
initializer=tf.ones_initializer(),
trainable=False)
def build_batch_stats():
"""Builds the batch statistics calculation ops."""
# We use the moving mean as an estimate of the mean in order to perform
# a more numerically stable calculation of the batch mean.
# Copy for better stability.
shift = tf.add(self._moving_mean, 0)
counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(
input_batch,
reduction_indices,
keep_dims=True,
shift=shift,
name="batch_norm_ss")
mean, variance = tf.nn.normalize_moments(counts,
shifted_sum_x,
shifted_sum_x2,
shift,
name="normalize_moments")
return mean, variance
| tensorflow.nn.sufficient_statistics | 907 |
import tensorflow as tf
trainable_vars = tf.trainable_variables()
if self.config.clip_weight:
# clip_weight
tvars = tf.trainable_variables()
grads = tf.gradients(self.loss, tvars)
grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.config.max_norm_grad)
grad_var_pairs = zip(grads, tvars)
self.train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad')
else:
self.train_op = self.optimizer.minimize(self.loss)
| tensorflow.clip_by_global_norm | 908 |
import tensorflow as tf
add_weight_decay(params['weight_decay'])
regularization_loss = tf.losses.get_regularization_loss()
# create localization and classification losses
losses = ssd.loss(labels, params)
tf.losses.add_loss(params['localization_loss_weight'] * losses['localization_loss'])
tf.losses.add_loss(params['classification_loss_weight'] * losses['classification_loss'])
tf.summary.scalar('regularization_loss', regularization_loss)
tf.summary.scalar('localization_loss', losses['localization_loss'])
tf.summary.scalar('classification_loss', losses['classification_loss'])
total_loss = tf.losses.get_total_loss(add_regularization_losses=True)
if mode == tf.estimator.ModeKeys.EVAL:
| tensorflow.summary.scalar | 909 |
import tensorflow as tf
def testEmbeddingAttentionDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.GRUCell(2)
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_attention_decoder(
dec_inp, enc_state, attn_states, cell, num_symbols=4,
embedding_size=2, output_size=3)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
| tensorflow.nn.seq2seq.embedding_attention_decoder | 910 |
import tensorflow as tf
z = tf.py_func(my_func, [x, y], [tf.float64])
self.assertAllEqual(
z[0].eval(),
my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
# a bit exotic type (complex64)
with self.test_session():
x = tf.constant(1+2j, tf.complex64)
y = tf.constant(3+4j, tf.complex64)
z, = tf.py_func(my_func, [x, y], [tf.complex64])
self.assertAllClose(z.eval(), my_func(1+2j, 3+4j))
# a bit exotic function (rfft)
with self.test_session():
x = tf.constant([1., 2., 3., 4.], tf.float32)
def rfft(x):
return np.fft.rfft(x).astype(np.complex64)
y, = tf.py_func(rfft, [x], [tf.complex64])
| tensorflow.py_func | 911 |
import tensorflow as tf
with tf.name_scope('weight_decay'):
add_weight_decay(params['weight_decay'])
regularization_loss = tf.losses.get_regularization_loss()
| tensorflow.losses.get_regularization_loss | 912 |
from tensorflow.contrib.slim.python.slim import queues
width = 280
with self.cached_session():
test_dataset = _create_tfrecord_dataset(dataset_dir)
provider = dataset_data_provider.DatasetDataProvider(test_dataset)
key, image, label = provider.get(['record_key', 'image', 'label'])
image = _resize_image(image, height, width)
with session.Session('') as sess:
with queues.QueueRunners(sess):
key, image, label = sess.run([key, image, label])
split_key = key.decode('utf-8').split(':')
self.assertEqual(2, len(split_key))
self.assertEqual(test_dataset.data_sources[0], split_key[0])
self.assertTrue(split_key[1].isdigit())
self.assertListEqual([height, width, 3], list(image.shape))
self.assertListEqual([1], list(label.shape))
| tensorflow.contrib.slim.python.slim.queues.QueueRunners | 913 |
import tensorflow as tf
var = tf.concat(tf.unstack(var), axis=0)
var = tf.expand_dims(var, dim=0)
color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max)
var = tf.expand_dims(var[..., 3], dim=3)
bw_s = tf.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max)
return tf.summary.merge([color_s, bw_s])
# TRAINING PROGRESS EVENTS
| tensorflow.summary.merge | 914 |
from tensorflow.python.ops import array_ops
from tensorflow.contrib.slim.python.slim.data import test_utils
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _resize_image(image, height, width):
image = array_ops.expand_dims(image, 0)
image = image_ops.resize_bilinear(image, [height, width])
return array_ops.squeeze(image, [0])
def _create_tfrecord_dataset(tmpdir):
if not gfile.Exists(tmpdir):
gfile.MakeDirs(tmpdir)
data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
keys_to_features = {
'image/encoded':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
| tensorflow.python.ops.array_ops.squeeze | 915 |
from tensorflow.contrib.rnn.python.ops import core_rnn
]
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell.LSTMCell(
num_units=num_units, initializer=initializer, state_is_tuple=True)
multi_cell = rnn_cell.MultiRNNCell(
[cell() for _ in range(num_layers)])
outputs, final_state = core_rnn.static_rnn(
multi_cell, inputs, dtype=dtypes.float32)
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
training_op = control_flow_ops.group(*gradients)
| tensorflow.contrib.rnn.python.ops.core_rnn.static_rnn | 916 |
from tensorflow.python.ops import array_ops
outer = tf.matrix_band_part(outer, 0, self.max_a_len)
self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
def _compute_loss(self):
def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2):
logits = tf.nn.sigmoid(logits)
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros)
neg_p_sub = array_ops.where(labels > zeros, zeros, logits)
cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(logits, 1e-8, 1.0)) \
- (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - logits, 1e-8, 1.0))
return tf.reduce_sum(cross_ent, 1)
start_label = tf.one_hot(self.start_label, tf.shape(self.logits1)[1], axis=1)
end_label = tf.one_hot(self.end_label, tf.shape(self.logits2)[1], axis=1)
| tensorflow.python.ops.array_ops.where | 917 |
import tensorflow as tf
sp_indices = [sp_m.indices for sp_m in sp_matrices]
sp_values = [sp_m.values for sp_m in sp_matrices]
sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]
return self.b_module.bspmm(sp_ids = sp_indices, sp_values = sp_values, sp_shape = sp_shape, rhs = dense_matrices, adjoint_a = adjoint_a, adjoint_b = adjoint_b)
class BatchedSpMDT:
def __init__(self):
self.b_module = tf.load_op_library('./batched.so')
def call(self, sp_matrices, dense_matrices, adjoint_a=False, adjoint_b=False):
sp_indices = [sp_m.indices for sp_m in sp_matrices]
sp_values = [sp_m.values for sp_m in sp_matrices]
sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]
| tensorflow.load_op_library | 918 |
import tensorflow as tf
means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod
if self.hparams.soft_em:
nearest_idx = tf.stack(
[
tf.multinomial(
-dist[:, i, :], num_samples=self.hparams.num_samples)
for i in range(self.hparams.num_blocks)
],
axis=1)
nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size)
nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)
else:
if self.hparams.random_top_k > 1:
_, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)
nearest_idx = tf.gather(
top_k_idx,
tf.random_uniform(
[1],
minval=0,
maxval=self.hparams.random_top_k - 1,
dtype=tf.int32),
axis=-1)
else:
if self.hparams.use_scales:
dist /= tf.reshape(self.hparams.scales,
[1, 1, self.hparams.moe_num_experts])
nearest_idx = tf.argmax(-dist, axis=-1)
| tensorflow.nn.top_k | 919 |
from tensorflow.contrib.util import make_tensor_proto
# Set request objects using the tf-serving `CopyFrom` setter method
request.model_spec.name = '0'
request.model_spec.signature_name = 'serving_default'
# This is correct (default constant).
request.inputs['input'].CopyFrom(make_tensor_proto(input_data,
shape=input_data.shape))
# Boiler-Plate
| tensorflow.contrib.util.make_tensor_proto | 920 |
import tensorflow.contrib.layers as layers
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
| tensorflow.contrib.layers.fully_connected | 921 |
import tensorflow as tf
]
else:
return self.create_accumulator()
def extract_output(self, accumulator):
# For each output, cast that output to the specified type. Note there
# will be one output for each input tensor to the analyzer.
return [
sub_accumulator.astype(output_dtype) for sub_accumulator, output_dtype
in zip(accumulator, self._output_dtypes)
]
def output_tensor_infos(self):
return [
analyzer_nodes.TensorInfo(tf.as_dtype(dtype), shape, None)
for dtype, shape in zip(self._output_dtypes, self._output_shapes)
]
def _get_output_shape_from_input(x):
if isinstance(x, tf.SparseTensor):
return x.get_shape().as_list()[1:]
# When reducing over batch dimensions, with known shape, the result will be
# the same shape as the input, but without the batch.
if x.shape.rank is not None:
return x.shape.as_list()[1:]
return (None,)
| tensorflow.as_dtype | 922 |
from tensorflow.python.ops import math_ops
return moving_averages.assign_moving_average(
moving_average_variable, value, decay, zero_debias=False)
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.cast(global_step, dtypes.float32)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
variance = sq_mean - math_ops.square(mean)
std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
max_norms = math_ops.exp(mean + std_factor * std)
return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
decay=0.95,
static_max_norm=None,
global_step=None,
report_summary=False,
epsilon=1e-8,
name=None):
"""Adapt the clipping value using statistics on the norms.
| tensorflow.python.ops.math_ops.maximum | 923 |
from tensorflow.contrib.boosted_trees.proto import learner_pb2
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
regressor.fit(input_fn=_train_input_fn, steps=15)
regressor.evaluate(input_fn=_eval_input_fn, steps=1)
regressor.export(self._export_dir_base)
def testRankingDontThrowExceptionForForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
model = estimator.GradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
num_trees=1,
| tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig | 924 |
from tensorflow.python.ops import nn
def __init__(self,
alpha,
beta,
validate_args=False,
allow_nan_stats=True,
name="InverseGammaWithSoftplusAlphaBeta"):
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[alpha, beta]) as ns:
super(InverseGammaWithSoftplusAlphaBeta, self).__init__(
alpha=nn.softplus(alpha, name="softplus_alpha"),
beta=nn.softplus(beta, name="softplus_gamma"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
| tensorflow.python.ops.nn.softplus | 925 |
import tensorflow as tf
Returns:
(tf.Tensor) A single value tensor containing the loss.
(tf.Tensor) A tensor containing the propensity weights.
"""
loss = None
with tf.name_scope(name, "click_weighted_pairwise_loss",[output]):
sliced_output = tf.unstack(output, axis=1)
sliced_label = tf.unstack(labels, axis=1)
sliced_propensity = tf.unstack(propensity_weights, axis=1)
for i in range(len(sliced_output)):
for j in range(i+1, len(sliced_output)):
| tensorflow.name_scope | 926 |
from tensorflow.python.ops import gen_math_ops
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
| tensorflow.python.ops.gen_math_ops._sigmoid | 927 |
from tensorflow.python.ops import array_ops
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
self._num_units,
| tensorflow.python.ops.array_ops.split | 928 |
from tensorflow.python.ops import gradients_impl
input_c=input_c,
params=params)
all_grads = gradients_impl.gradients(
[output, output_h, output_c],
| tensorflow.python.ops.gradients_impl.gradients | 929 |
import tensorflow as tf
head_embed_row = tf.expand_dims(head_embed, 1) # embeddings as row vectors
tail_embed_col = tf.expand_dims(tail_embed, 2) # embeddings as column vectors
head_rel_mult = tf.batch_matmul(head_embed_row, rel_embed_square)
# Output needs a squeeze into a 1d vector
| tensorflow.batch_matmul | 930 |
import tensorflow as tf
out_depth = out_size[0]
out_height = out_size[1]
out_width = out_size[2]
zero = tf.zeros([], dtype='int32')
# 0 <= z < depth, 0 <= y < height & 0 <= x < width.
max_z = tf.to_int32(tf.shape(im)[1] - 1)
max_y = tf.to_int32(tf.shape(im)[2] - 1)
max_x = tf.to_int32(tf.shape(im)[3] - 1)
# Converts scale indices from [-1, 1] to [0, width/height/depth].
x = (x + 1.0) * (width_f) / 2.0
y = (y + 1.0) * (height_f) / 2.0
z = (z + 1.0) * (depth_f) / 2.0
x0 = tf.to_int32(tf.floor(x))
x1 = x0 + 1
y0 = tf.to_int32(tf.floor(y))
y1 = y0 + 1
z0 = tf.to_int32(tf.floor(z))
z1 = z0 + 1
x0_clip = tf.clip_by_value(x0, zero, max_x)
x1_clip = tf.clip_by_value(x1, zero, max_x)
y0_clip = tf.clip_by_value(y0, zero, max_y)
y1_clip = tf.clip_by_value(y1, zero, max_y)
z0_clip = tf.clip_by_value(z0, zero, max_z)
z1_clip = tf.clip_by_value(z1, zero, max_z)
dim3 = width
| tensorflow.floor | 931 |
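The sampling seed above converts normalized coordinates from [-1, 1] into pixel space before flooring and clipping. A small worked example of that conversion (plain Python, values illustrative):

def to_pixel(coord, size_f):
    # maps a coordinate in [-1, 1] to [0, size_f], as done for x, y and z above
    return (coord + 1.0) * size_f / 2.0

assert to_pixel(-1.0, 64.0) == 0.0   # left edge
assert to_pixel(0.0, 64.0) == 32.0   # centre
assert to_pixel(1.0, 64.0) == 64.0   # right edge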
import tensorflow as tf
with tf.contrib.summary.create_file_writer(
logdir=model_dir, filename_suffix=".host_call").as_default():
with tf.contrib.summary.always_record_summaries():
for i, name in enumerate(metric_names):
| tensorflow.contrib.summary.always_record_summaries | 932 |
import tensorflow as tf
_phase_train = _phase.assign(True)
_phase_infer = _phase.assign(False)
# TODO: move to ops
def _rank(x):
return len(x.get_shape())
def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
binary_mask = tf.floor(random_tensor)
if normalize:
binary_mask = tf.reciprocal(keep_prob) * binary_mask
return binary_mask
def _global_keep_prob(keep_prob):
keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)
keep_prob = tf.cond(_phase, lambda: keep_prob, lambda: keep_prob * 0.0 + 1.0)
return keep_prob
def layer(func):
class Layer(object):
| tensorflow.reciprocal | 933 |
from tensorflow.python.ops import state_ops
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))
var = contrib_variables.local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fn, name='update')
def streaming_mean_absolute_error(predictions, labels, weights=None,
metrics_collections=None,
updates_collections=None,
| tensorflow.python.ops.state_ops.assign_add | 934 |
import tensorflow as tf
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.global_variables_initializer()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
| tensorflow.nn.seq2seq.embedding_tied_rnn_seq2seq | 935 |
from tensorflow.python.ops import gen_state_ops
update_ops.append(op)
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(var,
var_name=var_name,
| tensorflow.python.ops.gen_state_ops._destroy_temporary_variable | 936 |
import tensorflow as tf
def make_cell():
cell = self._get_lstm_cell(config, is_training)
if is_training and config.keep_prob < 1:
cell = tf.contrib.rnn.DropoutWrapper(
cell, output_keep_prob=config.keep_prob)
return cell
| tensorflow.contrib.rnn.DropoutWrapper | 937 |
from tensorflow.python.ops import math_ops
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
default_name = _at_k_name('average_precision', k)
with ops.name_scope(name, default_name, (predictions, labels)) as scope:
# Calculate per-example average precision, and apply weights.
average_precision = sparse_average_precision_at_k(
predictions=predictions, labels=labels, k=k)
if weights is not None:
weights = math_ops.to_double(weights)
average_precision = math_ops.mul(average_precision, weights)
# Create accumulation variables and update ops for max average precision and
# total average precision.
with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
# `max` is the max possible precision. Since max for any row is 1.0:
# - For the unweighted case, this is just the number of rows.
# - For the weighted case, it's the sum of the weights broadcast across
# `average_precision` rows.
max_var = contrib_variables.local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=max_scope)
if weights is None:
batch_max = math_ops.to_double(
array_ops.size(average_precision, name='batch_max'))
| tensorflow.python.ops.math_ops.mul | 938 |
import tensorflow as tf
name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args)
embed = tf.nn.embedding_lookup(embeddings, self.inputs)
# Construct the variables for the NCE loss (i.e. negative sampling)
nce_weights = tf.get_variable(
name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, dtype=LayersConfig.tf_dtype, **nce_W_init_args)
nce_biases = tf.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype, **nce_b_init_args)
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels
# each time we evaluate the loss.
self.nce_cost = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
inputs=embed,
labels=train_labels,
num_sampled=num_sampled,
num_classes=vocabulary_size,
**nce_loss_args))
self.outputs = embed
self.normalized_embeddings = tf.nn.l2_normalize(embeddings, 1)
| tensorflow.nn.nce_loss | 939 |
import tensorflow as tf
'input_node': input_node,
'hidden_layers_node': hidden_layers_node,
'output_node': output_node,
'learning_rate': learning_rate,
'learning_rate_decay': learning_rate_decay,
'activation': activation,
'L1_reg': L1_reg,
'L2_reg': L2_reg,
'optimizer': optimizer,
'dropout': dropout_keep_prob
}
# Set random state
tf.set_random_seed(seed)
# create new Session for the DeepSurv Class
self.sess = tf.Session(graph=G)
# Initialize all global variables
self.sess.run(init_op)
def train(self, num_epoch=5000, iteration=-1,
plot_train_loss=False, plot_train_CI=False):
"""
Training DeepSurv network.
Parameters:
| tensorflow.set_random_seed | 940 |
import tensorflow as tf
use_hvd=FLAGS.use_hvd)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_input_fn = input_fn_builder(
input_files=validation_input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False,
batch_size=FLAGS.eval_batch_size,
use_hvd=FLAGS.use_hvd)
if FLAGS.auto_recover:
hooks.append(tf.data.experimental.CheckpointInputPipelineHook(estimator))
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=hooks)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
if __name__ == "__main__":
# flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| tensorflow.estimator.TrainSpec | 941 |
from tensorflow.python.framework import function
def testGraphExtension(self):
self._testGraphExtensionSave()
self._testGraphExtensionRestore()
def testStrippedOpListDef(self):
with self.test_session():
# Creates a graph.
v0 = tf.Variable(0.0)
var = tf.Variable(10.0)
tf.add(v0, var)
@function.Defun(x=tf.float32)
def minus_one(x):
return x - 1
minus_one(tf.identity(v0))
save = tf.train.Saver({"v0": v0})
tf.initialize_all_variables()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
| tensorflow.python.framework.function.Defun | 942 |
import tensorflow as tf
decode_data = None, capacity = 1024, batch_size = 32, scope = None):
encode = tf.placeholder(tf.int32, shape=[None], name="encode")
decode = tf.placeholder(tf.int32, shape=[decode_max_length + 2], name="decode")
weight = tf.placeholder(tf.float32, shape=[decode_max_length + 1], name="weight")
queue = tf.PaddingFIFOQueue(capacity = capacity,
dtypes = [tf.int32, tf.int32, tf.float32],
shapes = [[None], [decode_max_length + 2], [decode_max_length + 1]],
name = 'FIFOQueue')
enqueue_op = queue.enqueue([encode, decode, weight])
| tensorflow.PaddingFIFOQueue | 943 |
import tensorflow as tf
"""Select a subset of features from the example dict."""
feature_list = feature_list or ['inputs', 'targets']
return {f: example[f] for f in feature_list if f in example}
def _eager_dataset_iterator(dataset):
for item in dataset:
flat = tf.nest.flatten(item)
flat = [el.numpy() for el in flat]
yield tf.nest.pack_sequence_as(item, flat)
def _train_and_eval_dataset_v1(problem_name, data_dir, train_shuffle_files,
eval_shuffle_files):
| tensorflow.nest.flatten | 944 |
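The eager iterator above flattens each nested dataset element, converts the leaves to numpy, and packs them back into the original structure. A minimal tf.nest round trip under an assumed toy dict (runs in eager mode):

import tensorflow as tf

item = {'inputs': tf.constant([1, 2]), 'targets': tf.constant([3])}
flat = tf.nest.flatten(item)                    # leaves in a flat list (sorted by key)
flat = [el.numpy() for el in flat]              # convert leaves eagerly, as in the iterator above
rebuilt = tf.nest.pack_sequence_as(item, flat)  # same {'inputs': ..., 'targets': ...} structure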
import tensorflow as tf
'member/age': tf.io.FixedLenFeature([], tf.int64),
'member/height': tf.io.VarLenFeature(tf.float32),
'member/prefer_prods': tf.io.VarLenFeature(tf.int64)}
features = tf.io.parse_single_example(example_proto, features)
images = tf.image.decode_png(features['member/encoded'], channels=3)
# Note: the PNG originally has 4 channels, but the processing below fails with 4 channels, so the previous line reduces it to 3.
images = tf.image.random_brightness(images, 0.1)
images = tf.image.random_saturation(images, 0.7, 1.3)
images = tf.image.random_contrast(images, 0.6, 1.5)
images = tf.image.random_flip_left_right(images)
return features, images
| tensorflow.image.random_saturation | 945 |
import tensorflow as tf
import numpy as np
import tensorflow as tf
class VariationalDense:
"""Variational Dense Layer Class"""
def __init__(self, n_in, n_out, dropout_mask_ph,
model_prob=0.9, model_lam=3e-4, activation=None, name="hidden"):
self.model_prob = model_prob # probability to keep units
self.model_lam = model_lam # l^2 / 2*tau: l=1e-2, tau=[0.1, 0.15, 0.2]
self.dropout_mask_ph = dropout_mask_ph # placeholder: p_s * i_s
self.p_s = tf.shape(self.dropout_mask_ph)[0] # post sample size
self.DM = tf.zeros(shape=[self.p_s, n_in, n_in]) # Dropout masks: p_s * i_s * i_s
self.DM = tf.linalg.set_diag(self.DM, self.dropout_mask_ph)
kernel_initializer = tf.initializers.truncated_normal(mean=0.0, stddev=0.01)
self.model_W = tf.get_variable("{}_W".format(name), initializer=kernel_initializer([n_in, n_out])) # variational parameters
self.model_b = tf.get_variable("{}_b".format(name), initializer=tf.zeros([n_out]))
self.model_DMW = tf.einsum('pij,jk->pik', self.DM, self.model_W) # Masked weight: p_s * i_s * o_s
self.model_tiled_b = tf.tile(tf.reshape(self.model_b, [1, n_out]), [self.p_s, 1])
if activation is None:
self.activation = tf.identity
else:
self.activation = activation
| tensorflow.linalg.set_diag | 946 |
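The einsum equation 'pij,jk->pik' used above is a batched matrix product: each per-sample dropout mask multiplies the shared weight matrix. A small sketch with assumed toy shapes:

import tensorflow as tf

p_s, n_in, n_out = 4, 3, 2
# batch of diagonal masks, one per posterior sample
DM = tf.linalg.set_diag(tf.zeros([p_s, n_in, n_in]), tf.ones([p_s, n_in]))
W = tf.random.normal([n_in, n_out])
# shape [p_s, n_in, n_out]: one masked copy of W per sample
masked_W = tf.einsum('pij,jk->pik', DM, W)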
import tensorflow as tf
# Convolution bank: concatenate on the last axis to stack channels from all convolutions
conv_outputs = tf.concat(
[conv1d(inputs, k, 128, tf.nn.relu, is_training, 'conv1d_%d' % k) for k in range(1, K + 1)],
axis=-1
)
# Maxpooling:
maxpool_output = tf.layers.max_pooling1d(
conv_outputs,
pool_size=2,
strides=1,
padding='same')
# Two projection layers:
| tensorflow.layers.max_pooling1d | 947 |
import tensorflow as tf
import tensorflow as tf
from tensorflow import keras as tf_keras
# prevent Keras from using up all gpu memory
if tf.executing_eagerly():
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
else:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
| tensorflow.config.experimental.set_memory_growth | 948 |
import tensorflow as tf
if not inverse:
z = tf.math.exp(log_lambdas)*x
ldj = tf.math.reduce_sum(log_lambdas, axis=[1,2,3])
else:
z = x*tf.math.exp(-log_lambdas)
ldj = -tf.math.reduce_sum(log_lambdas, axis=[1,2,3])
return z, ldj
class Exponentiate(Parameterize):
"""
| tensorflow.math.reduce_sum | 949 |
import tensorflow as tf
n_row,n_col,n_channel = x.shape
n_patch = n_row*n_col // (self.size**2)
patches = tf.image.extract_patches(tf.expand_dims(x,0),sizes=[1,self.size,self.size,1],strides=[1,self.size,self.size,1],rates=[1, 1, 1, 1],padding='VALID')
patches = tf.reshape(patches,[n_patch,self.size,self.size,n_channel])
patches = tf.random.shuffle(patches)
# rand_idx = tf.reshape(tf.random.shuffle(tf.range(0,n_patch)),[n_patch])
# patches = tf.gather(patches, rand_idx, axis=0)
rows = tf.split(patches,n_col//self.size,axis=0)
rows = [tf.concat(tf.unstack(x),axis=1) for x in rows]
x_aug = tf.concat(rows,axis=0)
x_aug = tf.convert_to_tensor(x_aug)
return tf.concat([x, x_aug],axis=2)
def mix_scramble(self,x):
# assume square patch
| tensorflow.unstack | 950 |
import tensorflow as tf
resized = tf.nn.conv3d_transpose(
value=x,
filter=kernel,
output_shape=y_size,
strides=[1] + strides + [1],
padding=self.padding,
name='resize_x_to_y')
resized = tf.nn.bias_add(
resized,
bias)
resized = self.ff_nl(resized)
return resized
elif mode == 'replicate_n_transpose':
resized = tf.image.resize_images(
x,
y_size[:-1],
kernel,
align_corners=False)
resized = tf.nn.conv3d_transpose(
value=resized,
filter=kernel,
output_shape=y_size,
strides=[1, 1, 1, 1, 1],
padding='SAME',
name='resize_x_to_y')
resized = tf.nn.bias_add(
| tensorflow.image.resize_images | 951 |
import tensorflow as tf
# Lstm Units.
self.num_units = 16
def buildLstmLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, use_peepholes=True, forget_bias=1.0, name="rnn1"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, num_proj=8, forget_bias=1.0, name="rnn2"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units // 2,
use_peepholes=True,
num_proj=8,
forget_bias=0,
name="rnn3"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, forget_bias=1.0, name="rnn4")
])
| tensorflow.lite.experimental.nn.TFLiteLSTMCell | 952 |
from tensorflow.python.ops import array_ops
with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
fn = _sparse_false_negative_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))
var = contrib_variables.local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fn, name='update')
def streaming_mean_absolute_error(predictions, labels, weights=None,
metrics_collections=None,
updates_collections=None,
| tensorflow.python.ops.array_ops.zeros | 953 |
import tensorflow as tf
def Decode(self, ids):
txt = tf.strings.reduce_join(self._TokenToString(ids))
txt = tf.strings.regex_replace(txt, BOW_STR, ' ')
# Note that this strips spaces from the end of the input as well.
# We assume no inputs rely on the existence of trailing whitespace.
| tensorflow.strings.regex_replace | 954 |
import tensorflow as tf
#########################
print("Hello yes I am in build_act without noise")
print(f"Obs space: {ob_space}")
print(f"policy.obs_ph: {policy.obs_ph}")
print(f"policy.processed_obs: {policy.processed_obs}")
print(f"Obs_phs space: {obs_phs}")
#assert 5 == 1
#######################
for var in tf.all_variables():
print(var)
batch_size = tf.shape(policy.obs_ph)[0]
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
| tensorflow.all_variables | 955 |
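The seed above implements epsilon-greedy action selection: with probability eps, tf.where swaps the deterministic action for a uniformly random one. A stripped-down sketch of that pattern (shapes and values are illustrative):

import tensorflow as tf

eps = 0.1
batch_size, n_actions = 4, 6
deterministic_actions = tf.constant([2, 0, 5, 1], dtype=tf.int64)
random_actions = tf.random.uniform([batch_size], minval=0, maxval=n_actions, dtype=tf.int64)
chose_random = tf.random.uniform([batch_size], minval=0, maxval=1, dtype=tf.float32) < eps
# pick the random action where chose_random is True, otherwise keep the greedy one
actions = tf.where(chose_random, random_actions, deterministic_actions)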
from tensorflow.python.ops import control_flow_ops
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(
new_size > array_size, reallocate, control_flow_ops.no_op)
| tensorflow.python.ops.control_flow_ops.no_op | 956 |
import tensorflow as tf
y, = tf.py_func(bad1, [], [tf.string])
z, = tf.py_func(bad2, [], [tf.float64])
with self.assertRaisesRegexp(errors.UnimplementedError,
"Unsupported numpy type"):
y.eval()
with self.assertRaisesRegexp(errors.UnimplementedError,
"Unsupported object type"):
z.eval()
if __name__ == "__main__":
tf.test.main()
| tensorflow.test.main | 957 |
import tensorflow as tf
tf.logging.info('Warm-starting tensors: %s', sorted(var_names))
vars_to_warm_start = var_names
warm_start_settings = tf.estimator.WarmStartSettings(
ckpt_to_initialize_from=checkpoint_path,
vars_to_warm_start=vars_to_warm_start)
| tensorflow.estimator.WarmStartSettings | 958 |
import tensorflow as tf
def maybe_avg(v):
if ema is not None and not init:
v = tf.cond(training, lambda: v, lambda: ema.average(v))
return v
if init:
x = tf.nn.conv2d(x, tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2]), [1] + list(stride) + [1], pad)
init_scale=.01
m_init, v_init = tf.nn.moments(x, [0,1,2])
scale_init = init_scale / tf.sqrt(v_init + 1e-10)
with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):
x = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x - tf.reshape(m_init, [1, 1, 1, num_filters]))
else:
V = maybe_avg(V)
g = maybe_avg(g)
b = maybe_avg(b)
# use weight normalization (Salimans & Kingma, 2016)
W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, [0, 1, 2])
| tensorflow.sqrt | 959 |
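The else branch above applies weight normalization (Salimans & Kingma, 2016), reparameterizing each filter as W = g * V / ||V|| with the norm taken over all but the output axis. A minimal sketch of that reparameterization (shapes are illustrative):

import tensorflow as tf

num_filters = 16
V = tf.random.normal([3, 3, 8, num_filters])   # direction parameters
g = tf.ones([num_filters])                     # learned per-filter scale
# normalize V over the input axes, then scale each output channel by g
W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, axis=[0, 1, 2])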
import tensorflow.contrib.eager as tfe
def tearDown(self):
shutil.rmtree(self._tmp_logdir)
super(LinearRegressionTest, self).tearDown()
def testSyntheticDataset(self):
true_w = tf.random_uniform([3, 1])
true_b = [1.0]
batch_size = 10
num_batches = 2
noise_level = 0.
dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,
batch_size, num_batches)
it = tfe.Iterator(dataset)
for _ in range(2):
(xs, ys) = it.next()
self.assertEqual((batch_size, 3), xs.shape)
self.assertEqual((batch_size, 1), ys.shape)
self.assertEqual(tf.float32, xs.dtype)
self.assertEqual(tf.float32, ys.dtype)
with self.assertRaises(StopIteration):
it.next()
def testLinearRegression(self):
true_w = [[1.0], [-0.5], [2.0]]
true_b = [1.0]
| tensorflow.contrib.eager.Iterator | 960 |
import tensorflow as tf
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state('./model_pretrain')
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print("loading checkpoint...")
| tensorflow.train.checkpoint_exists | 961 |
import tensorflow as tf
'AB4DEF.GH', 'ABDEF.GH', 'XYZ']
files = [tempfile.NamedTemporaryFile(prefix=c) for c in cases]
with self.test_session():
# Test exact match without wildcards.
for f in files:
self.assertEqual(tf.matching_files(f.name).eval(),
tf.compat.as_bytes(f.name))
# We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
pos = files[0].name.find(cases[0])
pattern = files[0].name[:pos] + 'AB%sDEF.GH*'
self.assertEqual(set(tf.matching_files(pattern % 'z').eval()),
self._subset(files, [1]))
self.assertEqual(set(tf.matching_files(pattern % '?').eval()),
self._subset(files, [0, 1, 3, 4]))
self.assertEqual(set(tf.matching_files(pattern % '*').eval()),
self._subset(files, [0, 1, 2, 3, 4, 5]))
self.assertEqual(set(tf.matching_files(pattern % '[cxz]').eval()),
self._subset(files, [0, 1]))
self.assertEqual(set(tf.matching_files(pattern % '[0-9]').eval()),
self._subset(files, [3, 4]))
if __name__ == '__main__':
| tensorflow.matching_files | 962 |
from tensorflow.python.ops import gen_math_ops
"""
return gen_math_ops._range(start, limit, delta, name=name)
| tensorflow.python.ops.gen_math_ops._range | 963 |
import tensorflow as tf
rightmost_transposed_ndims)
msg = '`rightmost_transposed_ndims` must be non-negative.'
if rightmost_transposed_ndims_ is not None:
if rightmost_transposed_ndims_ < 0:
raise ValueError(msg[:-1] + ', saw: {}.'.format(
rightmost_transposed_ndims_))
elif validate_args:
assertions += [
tf.compat.v1.assert_non_negative(
rightmost_transposed_ndims, message=msg)
]
return assertions
def _maybe_validate_perm(perm, validate_args, name=None):
| tensorflow.compat.v1.assert_non_negative | 964 |
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
# Block 1
conv1a = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8], strides=4, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(self.inputs)
conv1b = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv1a)
conv1c = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv1b)
pool1 = MaxPool2D(pool_size=[2,2])(conv1c)
# Block 2
conv2a = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool1)
| tensorflow.keras.layers.MaxPool2D | 965 |
import tensorflow as tf
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
use_mlir_converter: Whether or not to use MLIRConverter to convert the
model.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
converter.experimental_enable_mlir_converter = use_mlir_converter
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
try:
interpreter.allocate_tensors()
| tensorflow.lite.TFLiteConverter.from_session | 966 |
import tensorflow as tf
expand_W = tf.get_variable("W_%d" % i, [current_size, output_sizes[i]])
expand_b = tf.get_variable("b_%d" % i, [output_sizes[i]])
output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
output_data = tf.nn.elu(output_data)
current_size = output_sizes[i]
#expand_W = tf.get_variable("final_W", [current_size, 1])
| tensorflow.nn.elu | 967 |
import tensorflow as tf
def _get_next_checkpoint():
return tf.contrib.training.checkpoints_iterator(
| tensorflow.contrib.training.checkpoints_iterator | 968 |
import tensorflow as tf
net_1 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name+'_1', phase_train, use_batch_norm, weight_decay)
net_2 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name+'_2', phase_train, use_batch_norm, weight_decay)
out = tf.maximum(net_1, net_2)
return out
def affine(inpOp, nIn, nOut, name, weight_decay=0.0):
with tf.variable_scope(name):
l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)
weights = tf.get_variable("weights", [nIn, nOut],
initializer=tf.truncated_normal_initializer(stddev=1e-1),
regularizer=l2_regularizer, dtype=inpOp.dtype)
biases = tf.get_variable("biases", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype)
affine1 = tf.nn.relu_layer(inpOp, weights, biases)
return affine1
def l2_loss(tensor, weight=1.0, scope=None):
"""Define a L2Loss, useful for regularize, i.e. weight decay.
Args:
tensor: tensor to regularize.
weight: an optional weight to modulate the loss.
scope: Optional scope for op_scope.
Returns:
the L2 loss op.
"""
with tf.name_scope(scope):
| tensorflow.nn.relu_layer | 969 |
import tensorflow as tf
generator_inputs = features
real_data = labels
gan_model = tf.contrib.gan.gan_model(generator_fn, discriminator_fn, real_data, generator_inputs)
predictions = gan_model.generated_data
| tensorflow.contrib.gan.gan_model | 970 |
import tensorflow as tf
if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
if shape_a[0] != shape_b[0]:
raise ValueError('Unequal first dimension {}, {}'.format(
shape_a[0], shape_b[0]))
else: return tf.no_op()
else:
return tf.assert_equal(shape_a[0], shape_b[0])
| tensorflow.no_op | 971 |
import tensorflow as tf
def good():
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(
byte, crop_window, channels=3, **JPEG_OPT)
image = uint8_resize_bicubic(image, [224, 224])
return image
def bad():
| tensorflow.image.decode_and_crop_jpeg | 972 |
import tensorflow as tf
# fixed folder
saved_model_dir = "tf_cnn_model/1/"
target_dir = "tflite_cnn_model"
def convert_tflite():
if not os.path.exists(target_dir):
os.makedirs(target_dir)
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
#converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
tflite_model = converter.convert()
with open(f"{target_dir}/tflite_model.tflite", "wb") as f:
f.write(tflite_model)
def validation():
| tensorflow.lite.TFLiteConverter.from_saved_model | 973 |
import tensorflow as tf
tf.TensorShape(spec['shape']))
# Used for input shapes of the prediction network
if self.data_shape is None:
self.data_shape = output_shapes
# Handle for the feedable iterator
self.handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
self.handle, output_types, output_shapes)
data = iterator.get_next()
# Build the actual training and evaluation models
self._train_graph(data)
self._eval_graph(data)
self.summaries = tf.summary.merge_all()
| tensorflow.data.Iterator.from_string_handle | 974 |
from tensorflow.python.ops import math_ops
iou = math_ops.div(cm_diag, denominator)
return math_ops.reduce_mean(iou, name=name)
| tensorflow.python.ops.math_ops.reduce_mean | 975 |
import tensorflow as tf
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
d = tf.contrib.layers.conv2d(layer_input,filters,kernel_size=f_size,stride=2, padding='SAME')
if norm:
d = tf.contrib.layers.batch_norm(d)
d = lrelu(d,alpha=0.2)
| tensorflow.contrib.layers.conv2d | 976 |
import tensorflow as tf
def testAddCollectionDef(self):
test_dir = self._TestDir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
# Creates a graph.
v0 = tf.Variable(10.0, name="v0")
var = tf.Variable(tf.constant(0, dtype=tf.int64))
count_up_to = var.count_up_to(3)
input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue")
qr = tf.train.QueueRunner(input_queue, [count_up_to])
tf.initialize_all_variables()
# Creates a saver.
save = tf.train.Saver({"v0": v0})
# Adds a set of collections.
tf.add_to_collection("int_collection", 3)
tf.add_to_collection("float_collection", 3.5)
tf.add_to_collection("string_collection", "hello")
tf.add_to_collection("variable_collection", v0)
| tensorflow.train.QueueRunner | 977 |
from tensorflow.python.training import adagrad
linear_feature_columns=(bucketized_feature,),
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=(cont_feature,),
dnn_hidden_units=(3, 3),
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
input_fn = test_data.iris_input_logistic_fn
metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
| tensorflow.python.training.adagrad.AdagradOptimizer | 978 |
import tensorflow as tf
with tf.name_scope("CRF_log_likelihood"):
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
| tensorflow.contrib.crf.crf_log_likelihood | 979 |
from tensorflow.contrib import metrics as contrib_metrics
"eval_accuracy": accuracy,
"eval_loss": loss,
}
elif task_name == "sts-b":
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Pearson correlations for STS-B."""
# Display labels and predictions
concat1 = contrib_metrics.streaming_concat(logits)
concat2 = contrib_metrics.streaming_concat(label_ids)
# Compute Pearson correlation
pearson = contrib_metrics.streaming_pearson_correlation(
logits, label_ids, weights=is_real_example)
# Compute MSE
# mse = tf.metrics.mean(per_example_loss)
mse = tf.metrics.mean_squared_error(
label_ids, logits, weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss,
weights=is_real_example)
| tensorflow.contrib.metrics.streaming_pearson_correlation | 980 |
import tensorflow as tf
print('\ninverse(D)=')
print(sess.run(tf.matrix_inverse(D)))
print('\ndeterminant(D)={:.1f}'.format(sess.run(tf.matrix_determinant(D))))
print('\ncholesky(D):')
print(sess.run(tf.cholesky(identity_matrix)))
print('\nselfAdjointEig(D):')
print(sess.run(tf.self_adjoint_eig(D)))
print(sess.run(tf.div(13, 4)))
print(sess.run(tf.truediv(13, 4)))
print(sess.run(tf.floordiv(13, 4)))
print(sess.run(tf.mod(13.2, 4)))
print(sess.run(tf.cross([1, 0, 0], [0, 1, 0])))
print(sess.run(tf.square([1, 2, 3])))
| tensorflow.self_adjoint_eig | 981 |
from tensorflow.contrib import layers
key=lambda x: x.key) if self._linear_feature_columns else None
def _get_dnn_feature_columns(self):
return sorted(set(
self._dnn_feature_columns)) if self._dnn_feature_columns else None
def _dnn_logits(self, features):
net = layers.input_from_feature_columns(
features,
self._get_dnn_feature_columns(),
weight_collections=[self._dnn_weight_collection])
for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
net = layers.legacy_fully_connected(
net,
num_hidden_units,
activation_fn=self._dnn_activation_fn,
weight_collections=[self._dnn_weight_collection],
bias_collections=[self._dnn_weight_collection],
name="hiddenlayer_%d" % layer_id)
self._add_hidden_layer_summary(net, "hiddenlayer_%d" % layer_id)
logit = layers.legacy_fully_connected(
net,
self._num_label_columns(),
weight_collections=[self._dnn_weight_collection],
| tensorflow.contrib.layers.legacy_fully_connected | 982 |
import tensorflow as tf
message = annotations_pb2.BucketBoundaries()
annotation.Unpack(message)
self.assertAllClose(list(message.boundaries), [1])
def test_infer_feature_schema_with_ragged_tensor(self):
with tf.compat.v1.Graph().as_default() as graph:
outputs = {
'foo': tf.RaggedTensor.from_row_splits(
values=tf.constant([3, 1, 4, 1, 5, 9, 2, 6], tf.int64),
row_splits=[0, 4, 4, 7, 8, 8]),
| tensorflow.compat.v1.Graph | 983 |
import tensorflow as tf
loss = None
train_op = None
eval_metric_ops = None
# 3. Create predictions
predictions_dict = {"predicted": predictions}
# 4. Create export outputs
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions)}
# 4. Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = predictions_dict,
loss = loss,
train_op = train_op,
| tensorflow.estimator.export.PredictOutput | 984 |
from tensorflow.python.ops import math_ops
ValueError: If `weights` is not `None` and has an incomptable shape.
"""
default_name = _at_k_name('false_positive', k, class_id=class_id)
with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
fp = _sparse_false_positive_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))
var = contrib_variables.local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fp, name='update')
| tensorflow.python.ops.math_ops.reduce_sum | 985 |
import tensorflow as tf
if K.backend() == 'tensorflow':
import tensorflow as tf
I, J = tf.meshgrid(i, j, indexing=indexing)
else:
| tensorflow.meshgrid | 986 |
import tensorflow as tf
if labels is not None and not labels.dtype.is_integer:
raise ValueError('expected integer labels but got %r' % labels.dtype)
if (frequency_threshold is None and labels is None and key_fn is None and
not fingerprint_shuffle and top_k is not None and
top_k <= LARGE_VOCAB_TOP_K):
logging.info('If the number of unique tokens is smaller than the provided '
'top_k or approximation error is acceptable, consider using '
'tft.experimental.approximate_vocabulary for a potentially '
'more efficient implementation.')
with tf.compat.v1.name_scope(name, 'vocabulary'):
vocabulary_key = vocab_filename
vocab_filename = _get_vocab_filename(vocab_filename, store_frequency)
informativeness_threshold = float('-inf')
coverage_informativeness_threshold = float('-inf')
if labels is not None:
if weights is not None:
vocab_ordering_type = _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION
else:
vocab_ordering_type = _VocabOrderingType.MUTUAL_INFORMATION
# Correct for the overloaded `frequency_threshold` API.
| tensorflow.compat.v1.name_scope | 987 |
import tensorflow as tf
step = tf.to_float(global_step)
warmup_steps = tf.to_float(params.warmup_steps)
multiplier = params.hidden_size ** -0.5
decay = multiplier * tf.minimum((step + 1) * (warmup_steps ** -1.5),
(step + 1) ** -0.5)
return learning_rate * decay
elif params.learning_rate_decay == "new_warmup_rsqrt_decay":
step = tf.to_float(global_step)
warmup_steps = tf.to_float(params.warmup_steps)
multiplier = params.hidden_size ** -0.5
decay = params.r0 * multiplier * tf.minimum((step + 1) * (warmup_steps ** -1.0) * (warmup_steps ** -0.5),
(step + 1) ** -0.5)
return learning_rate * decay
elif params.learning_rate_decay == "rnnplus_warmup_decay":
step = tf.to_float(global_step)
n = float(len(params.device_list))
warmup_steps = tf.to_float(params.warmup_steps)
decay = tf.minimum(1 + step * (n - 1) / (n * warmup_steps), tf.minimum(n, n * ((2*n) ** ((params.s - n * step) / (params.e - params.s)))))
return tf.maximum(learning_rate * decay, 5e-6)
| tensorflow.minimum | 988 |
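The first branch above encodes the familiar warmup-then-inverse-square-root schedule, lr * hidden_size**-0.5 * min((step + 1) * warmup**-1.5, (step + 1)**-0.5). A plain-Python sketch of the same curve (names are illustrative):

def warmup_rsqrt_decay(step, warmup_steps, hidden_size, base_lr=1.0):
    multiplier = hidden_size ** -0.5
    # rises roughly linearly during warmup, then decays as 1/sqrt(step)
    return base_lr * multiplier * min((step + 1) * warmup_steps ** -1.5,
                                      (step + 1) ** -0.5)

print(warmup_rsqrt_decay(0, 4000, 512), warmup_rsqrt_decay(4000, 4000, 512))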
from tensorflow.python.ops import math_ops
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
x = self._assert_valid_sample(x, check_integer=False)
return math_ops.igammac(math_ops.floor(x + 1), self.rate)
def _log_normalization(self):
return self.rate
def _log_unnormalized_prob(self, x):
x = self._assert_valid_sample(x, check_integer=True)
return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)
def _mean(self):
return array_ops.identity(self.rate)
def _variance(self):
return array_ops.identity(self.rate)
@distribution_util.AppendDocstring(
"""Note: when `rate` is an integer, there are actually two modes: `rate`
and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
return math_ops.floor(self.rate)
| tensorflow.python.ops.math_ops.lgamma | 989 |
import tensorflow as tf
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
| tensorflow.nn.seq2seq.basic_rnn_seq2seq | 990 |
import tensorflow as tf
landm_valid = tf.reshape(y_true[..., 14], [num_batch * num_prior, 1])
class_true = tf.reshape(y_true[..., 15], [num_batch * num_prior, 1])
# define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
# landm_valid = 1 (w landm), 0 (w/o landm)
mask_pos = tf.equal(class_true, 1)
mask_neg = tf.equal(class_true, 0)
mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
# landm loss (smooth L1)
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
tf.boolean_mask(landm_pred, mask_landm_b))
loss_landm = tf.reduce_mean(loss_landm)
# localization loss (smooth L1)
mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true))
loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
tf.boolean_mask(loc_pred, mask_pos_b))
loss_loc = tf.reduce_mean(loss_loc)
# classification loss (crossentropy)
| tensorflow.boolean_mask | 991 |
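The loss above uses boolean masks to keep only positive (and, for landmarks, valid) anchors before averaging. A minimal illustration of how tf.boolean_mask behaves:

import tensorflow as tf

values = tf.constant([[1.0, 2.0], [3.0, 4.0]])
mask = tf.constant([[True, False], [False, True]])
kept = tf.boolean_mask(values, mask)   # -> [1.0, 4.0]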
import tensorflow as tf
self.D = dim_feature[1]
self.M = dim_embed
self.H = dim_hidden
self.T = n_time_step
self._start = word_to_idx['<START>']
self._null = word_to_idx['<NULL>']
self.weight_initializer = tf.contrib.layers.xavier_initializer()
self.const_initializer = tf.constant_initializer(0.0)
self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)
# Place holder for features and captions
self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
self.captions = tf.placeholder(tf.int32, [None, self.T + 1])
def _get_initial_lstm(self, features):
with tf.variable_scope('initial_lstm'):
features_mean = tf.reduce_mean(features, 1)
| tensorflow.random_uniform_initializer | 992 |
import tensorflow as tf
return tags
def id2tag(self, pred_ids, name=None):
mapping_strings = self.load_tag_data()
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_strings, name=name
)
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
| tensorflow.contrib.lookup.index_to_string_table_from_tensor | 993 |
import tensorflow as tf
# calc l2 losses
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
# do logit = W*X+b
logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
predictions = tf.nn.softmax(logit, name="predictions")
#claulate loss and optimizer
| tensorflow.nn.xw_plus_b | 994 |
import tensorflow as tf
class VariationalDense:
"""Variational Dense Layer Class"""
def __init__(self, n_in, n_out, dropout_mask_ph,
model_prob=0.9, model_lam=3e-4, activation=None, name="hidden"):
self.model_prob = model_prob # probability to keep units
self.model_lam = model_lam # l^2 / 2*tau: l=1e-2, tau=[0.1, 0.15, 0.2]
self.dropout_mask_ph = dropout_mask_ph # placeholder: p_s * i_s
self.p_s = tf.shape(self.dropout_mask_ph)[0] # post sample size
self.DM = tf.zeros(shape=[self.p_s, n_in, n_in]) # Dropout masks: p_s * i_s * i_s
self.DM = tf.linalg.set_diag(self.DM, self.dropout_mask_ph)
kernel_initializer = tf.initializers.truncated_normal(mean=0.0, stddev=0.01)
self.model_W = tf.get_variable("{}_W".format(name), initializer=kernel_initializer([n_in, n_out])) # variational parameters
self.model_b = tf.get_variable("{}_b".format(name), initializer=tf.zeros([n_out]))
self.model_DMW = tf.einsum('pij,jk->pik', self.DM, self.model_W) # Masked weight: p_s * i_s * o_s
self.model_tiled_b = tf.tile(tf.reshape(self.model_b, [1, n_out]), [self.p_s, 1])
if activation is None:
self.activation = tf.identity
else:
self.activation = activation
| tensorflow.initializers.truncated_normal | 995 |
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
conv2c = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv2b)
pool2 = MaxPool2D(pool_size=[2,2])(conv2c)
# Block 3
conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool2)
conv3b = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3a)
conv3c = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3b)
pool3 = MaxPool2D(pool_size=[2,2])(conv3c)
# final convolutional layer
#removed GOAL_SIZE
conv4 = Conv2D(padding="valid", filters=RNN_SIZE-loc_layer_size, kernel_size=[2, 2], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=None)(pool3)
# FC layers
flat1a = Flatten(data_format='channels_last')(conv4)
#removed GOAL_SIZE
flat1b = Dense(units=RNN_SIZE-loc_layer_size)(flat1a)
# FC layers for goal_pos input
# goal_layer1 = Dense(units=GOAL_SIZE)(goal_pos)
# goal_layer2 = Dense(units=GOAL_SIZE)(goal_layer1)
# FC layers to find next location
loc_layer1 = Dense(units=loc_layer_size)(prev_loc)
loc_layer2 = Dense(units=loc_layer_size)(loc_layer1)
# Concatenationation of above layers, followed by FC layer
concat = tf.concat([flat1b, loc_layer2],1) # goal_layer2
| tensorflow.keras.layers.Flatten | 996 |
from tensorflow.contrib.learn.python.learn import ops
self.assertEqual(embed_np.shape, embed_tf.shape)
self.assertAllClose(embed_np, embed_tf)
def test_categorical_variable(self):
random_seed.set_random_seed(42)
with self.cached_session() as sess:
cat_var_idx = array_ops.placeholder(dtypes.int64, [2, 2])
embeddings = ops.categorical_variable(
cat_var_idx, n_classes=5, embedding_size=10, name="my_cat_var")
sess.run(variables.global_variables_initializer())
emb1 = sess.run(embeddings,
feed_dict={cat_var_idx.name: [[0, 1], [2, 3]]})
emb2 = sess.run(embeddings,
feed_dict={cat_var_idx.name: [[0, 2], [1, 3]]})
| tensorflow.contrib.learn.python.learn.ops.categorical_variable | 997 |
from tensorflow.python.util import compat
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
hasher = hashlib.sha1()
def _hash_func_def():
"""Hash the function definition agnostic to node/map ordering."""
def update_num(n):
hasher.update(compat.as_bytes("%x" % n))
def update_str(s):
update_num(len(s))
hasher.update(compat.as_bytes(s))
def update_strs(slist):
update_num(len(slist))
for s in slist:
update_str(s)
for adef in self._definition.signature.input_arg:
update_str(adef.SerializeToString())
for adef in self._definition.signature.output_arg:
update_str(adef.SerializeToString())
for n in sorted(self._definition.node_def, key=lambda n: n.name):
| tensorflow.python.util.compat.as_bytes | 998 |
import tensorflow as tf
self.q_maxlen = tf.reduce_max(self.q_len)
self.c = tf.slice(self.c, [0, 0], [N, self.c_maxlen])
| tensorflow.slice | 999 |