| seed (string, lengths 25-1.88k) | seed_api (string, lengths 14-102) | index (int64, 0-1.05k) |
---|---|---|
from tensorflow.contrib.metrics.python.ops import set_ops
Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
ValueError: if inputs have invalid dtypes or values.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
# For SparseTensor, calculate separate count for each row.
if isinstance(labels, (ops.SparseTensor, ops.SparseTensorValue)):
labels_sizes = set_ops.set_size(labels)
return math_ops.minimum(labels_sizes, k, name=scope)
# For dense Tensor, calculate scalar count based on last dimension, and
# tile across labels shape.
labels_shape = array_ops.shape(labels)
labels_size = labels_shape[-1]
num_relevant_scalar = math_ops.minimum(labels_size, k)
return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
| tensorflow.contrib.metrics.python.ops.set_ops.set_size | 800 |
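The dense branch above boils down to min(size of the last labels dimension, k), broadcast over the leading dimensions. A minimal sketch of that computation with core ops only, assuming a dense labels tensor (the SparseTensor branch and the contrib set_ops call are not reproduced):

```python
import tensorflow as tf

labels = tf.constant([[1, 3, 5], [2, 4, 6]])   # assumed sample data, shape [2, 3]
k = 2
# Each row can have at most min(labels_per_row, k) relevant entries.
num_rel = tf.fill(tf.shape(labels)[:-1],
                  tf.minimum(tf.shape(labels)[-1], k))
# num_rel -> [2, 2]
```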
from tensorflow.python.ops import math_ops
Returns:
Masked weights if `mask` and `weights` are not `None`, weights equivalent to
`mask` if `weights` is `None`, and otherwise `weights`.
Raises:
ValueError: If `weights` and `mask` are not `None` and have mismatched
shapes.
"""
if mask is not None:
check_ops.assert_type(mask, dtypes.bool)
if weights is None:
weights = array_ops.ones_like(mask, dtype=dtypes.float32)
weights = math_ops.cast(math_ops.logical_not(mask), weights.dtype) * weights
return weights
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
| tensorflow.python.ops.math_ops.logical_not | 801 |
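The `_safe_div` docstring above is cut off before its body. A hedged sketch of what such a helper typically looks like using core ops; the `tf.where` formulation and the zero fallback are assumptions, not the library's actual implementation:

```python
import tensorflow as tf

def _safe_div(numerator, denominator, name):
    """Divides two values, returning 0 if the denominator is <= 0 (sketch)."""
    return tf.where(denominator > 0,
                    tf.math.divide(numerator, denominator),
                    tf.zeros_like(numerator),
                    name=name)
```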
from tensorflow import keras
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
iterator = dataset.make_one_shot_iterator()
batch_features, batch_labels = iterator.get_next()
return batch_features, batch_labels
return _input_fn
# Create inference model using Keras
# The model here is a dnn regressor
def make_keras_estimator(output_dir):
from tensorflow import keras
model = keras.models.Sequential()
model.add(keras.layers.Dense(32, input_shape=(N_INPUTS,), name=TIMESERIES_INPUT_LAYER))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(1))
model.compile(loss = 'mean_squared_error',
optimizer = 'adam',
metrics = ['mae', 'mape']) # mean absolute [percentage] error
return keras.estimator.model_to_estimator(model, model_dir=output_dir)
# Create the inference model
def simple_rnn(features, labels, mode):
# 0. Reformat input shape to become a sequence
x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)
# 1. Configure the RNN
| tensorflow.keras.models.Sequential | 802 |
import tensorflow as tf
tf.logging.info(eval_results)
tf.logging.info('Finished model {}.'.format(model_scope))
def main(_):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction)
sess_config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options)
# Set up a RunConfig to only save checkpoints once per training cycle.
run_config = tf.estimator.RunConfig().replace(
save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace(
save_checkpoints_steps=None).replace(
| tensorflow.GPUOptions | 803 |
import tensorflow as tf
x_data = tf.placeholder(tf.float32)
m = tf.constant(3.)
# Multiplication
prod = tf.mul(x_data, m)
for x_val in x_vals:
print(sess.run(prod, feed_dict={x_data: x_val}))
| tensorflow.mul | 804 |
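The excerpt above omits the session and the `x_vals` definition. A self-contained variant under those assumptions; `tf.mul` was renamed `tf.multiply` in TF 1.0, so the modern spelling is used here:

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x_vals = [1., 3., 5., 7., 9.]          # assumed sample inputs
x_data = tf.placeholder(tf.float32)
m = tf.constant(3.)
prod = tf.multiply(x_data, m)          # spelled tf.mul in pre-1.0 releases
with tf.Session() as sess:
    for x_val in x_vals:
        print(sess.run(prod, feed_dict={x_data: x_val}))
```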
import tensorflow as tf
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
| tensorflow.nn.bidirectional_rnn | 805 |
import tensorflow as tf
class JointsMSELoss(object):
def __init__(self):
self.mse = tf.losses.MeanSquaredError()
| tensorflow.losses.MeanSquaredError | 806 |
import tensorflow as tf
f = conv(x, scope='f_conv', filter_dims=[1, 1, channels//8], stride_dims=[1, 1], non_linear_fn=act_func)
f = tf.layers.max_pooling2d(f, pool_size=2, strides=2, padding='SAME')
print('attention f dims: ' + str(f.get_shape().as_list()))
g = conv(x, scope='g_conv', filter_dims=[1, 1, channels//8], stride_dims=[1, 1], non_linear_fn=act_func)
print('attention g dims: ' + str(g.get_shape().as_list()))
h = conv(x, scope='h_conv', filter_dims=[1, 1, channels//2], stride_dims=[1, 1], non_linear_fn=act_func)
h = tf.layers.max_pooling2d(h, pool_size=2, strides=2, padding='SAME')
print('attention h dims: ' + str(h.get_shape().as_list()))
# N = h * w
g = tf.reshape(g, shape=[-1, g.shape[1]*g.shape[2], g.get_shape().as_list()[-1]])
print('attention g flat dims: ' + str(g.get_shape().as_list()))
f = tf.reshape(f, shape=[-1, f.shape[1]*f.shape[2], f.shape[-1]])
| tensorflow.layers.max_pooling2d | 807 |
from tensorflow.contrib.learn.python.learn.datasets import base
base.shrink_csv(train_path, 1000)
base.shrink_csv(test_path, 1000)
| tensorflow.contrib.learn.python.learn.datasets.base.shrink_csv | 808 |
from tensorflow.python.ops import nn
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s_activation" % tag, value)
def _dnn_model_fn(features, labels, mode, params, config=None):
"""Deep Neural Net model_fn.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
| tensorflow.python.ops.nn.zero_fraction | 809 |
import tensorflow as tf
A matrix with the input matrices stacked along its main diagonal, having
shape [..., \sum_i N_i, \sum_i M_i].
"""
matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
blocked_rows = tf.Dimension(0)
blocked_cols = tf.Dimension(0)
batch_shape = tf.TensorShape(None)
for matrix in matrices:
full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
blocked_rows += full_matrix_shape[-2]
| tensorflow.Dimension | 810 |
from tensorflow.python.layers import pooling as pooling_layers
input_layer = self.top_layer
else:
self.top_size = num_channels_in
name = 'apool' + str(self.counts['apool'])
self.counts['apool'] += 1
pool = pooling_layers.average_pooling2d(
input_layer, [k_height, k_width], [d_height, d_width],
padding=mode,
data_format=self.channel_pos,
name=name)
| tensorflow.python.layers.pooling.average_pooling2d | 811 |
import tensorflow as tf
tf.saved_model.ASSETS_DIRECTORY,
sanitized_vocab_filename(filename=vocab_filename))
files = tf.io.gfile.glob(prefix) + tf.io.gfile.glob(
'{}.tfrecord.gz'.format(prefix))
| tensorflow.io.gfile.glob | 812 |
import tensorflow as tf
th = 0.008
max_step = 600
lr = 10
elif mode == 'ultra':
if not tf.test.is_gpu_available():
print("Please enable GPU for ultra setting...")
sys.exit(1)
th = 0.01
| tensorflow.test.is_gpu_available | 813 |
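`tf.test.is_gpu_available` is deprecated in TF 2.x; a hedged equivalent of the GPU check above uses the device-listing API:

```python
import tensorflow as tf

# True when at least one GPU is visible to the runtime.
gpu_available = len(tf.config.list_physical_devices('GPU')) > 0
```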
from tensorflow.python.ops import sparse_ops
expand_dims = [dim]
expanded_shape = array_ops.concat(
0, (array_ops.slice(tensor.shape, [0], expand_dims), [1],
array_ops.slice(tensor.shape, expand_dims, [-1])),
name='expanded_shape')
expanded = sparse_ops.sparse_reshape(
tensor, shape=expanded_shape, name='expand')
if multiple == 1:
return expanded
return sparse_ops.sparse_concat(
dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
# Dense.
expanded = array_ops.expand_dims(
tensor, dim if (dim >= 0) else (dim - 1), name='expand')
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
| tensorflow.python.ops.sparse_ops.sparse_concat | 814 |
import tensorflow as tf
return h
def minibatch_discrimination(x, n_kernels, dim_per_kernel, name):
with tf.variable_scope(name):
batch_size, nf = x.get_shape().as_list()
h = linear(x, [nf, n_kernels*dim_per_kernel], 'h1')
activation = tf.reshape(h, (batch_size, n_kernels, dim_per_kernel))
big = tf.eye(batch_size)
big = tf.expand_dims(big, 1)
abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2)
mask = 1. - big
masked = tf.exp(-abs_dif) * mask
def half(tens, second):
| tensorflow.eye | 815 |
import tensorflow as tf
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length,
| tensorflow.contrib.data.parallel_interleave | 816 |
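`tf.contrib.data.parallel_interleave` has a core counterpart in recent TF 2.x. A hedged sketch of the same "sloppy" behaviour with `Dataset.interleave`; the file pattern and cycle length are assumptions:

```python
import tensorflow as tf

files = tf.data.Dataset.list_files('data/*.tfrecord')   # assumed input pattern
d = files.interleave(
    tf.data.TFRecordDataset,
    cycle_length=4,
    num_parallel_calls=tf.data.AUTOTUNE,
    deterministic=False)   # plays the role of sloppy=True above
```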
from tensorflow.contrib.metrics.python.ops import set_ops
labels, predictions_idx = _maybe_select_class_id(labels,
predictions_idx,
class_id)
fn = set_ops.set_size(set_ops.set_difference(predictions_idx,
labels,
aminusb=False))
| tensorflow.contrib.metrics.python.ops.set_ops.set_difference | 817 |
import tensorflow as tf
'member/age': tf.io.FixedLenFeature([], tf.int64),
'member/height': tf.io.VarLenFeature(tf.float32),
'member/prefer_prods': tf.io.VarLenFeature(tf.int64)}
features = tf.io.parse_single_example(example_proto, features)
images = tf.image.decode_png(features['member/encoded'], channels=3)
# Note: the PNG originally has 4 channels, but the processing below fails on 4-channel input, so the line above first drops it to 3 channels.
| tensorflow.io.parse_single_example | 818 |
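A hedged sketch of how a parsing function like the one above is usually wired into a `tf.data` pipeline; the reduced feature set, the wrapper name `_parse_example`, and the record file name are assumptions:

```python
import tensorflow as tf

def _parse_example(example_proto):
    features = {
        'member/encoded': tf.io.FixedLenFeature([], tf.string),
        'member/age': tf.io.FixedLenFeature([], tf.int64),
    }
    parsed = tf.io.parse_single_example(example_proto, features)
    image = tf.image.decode_png(parsed['member/encoded'], channels=3)
    return image, parsed['member/age']

dataset = (tf.data.TFRecordDataset('members.tfrecord')   # assumed file name
           .map(_parse_example)
           .batch(32))
```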
import tensorflow as tf
major, minor, _ = tf.version.VERSION.split('.')
if not (int(major) >= 2 and tf2.enabled()):
tf.compat.v1.logging.warning(
'Tensorflow version (%s) found. TransformFeaturesLayer is supported '
'only for TF 2.x with TF 2.x behaviors enabled and may not work as '
'intended.', tf.version.VERSION)
elif int(major) == 2 and int(minor) < 3:
# TODO(varshaan): Log a more specific warning.
tf.compat.v1.logging.warning(
'Tensorflow version (%s) found. TransformFeaturesLayer may not work '
'as intended if the SavedModel contains an initialization op.',
tf.version.VERSION)
# TODO(b/162055065): Possibly switch back to inherit from Layer when possible.
@_maybe_register_keras_serializable(package='TensorFlowTransform')
| tensorflow.compat.v1.logging.warning | 819 |
import tensorflow as tf
self._testMultiSaverCollectionSave()
self._testMultiSaverCollectionRestore()
def testBinaryAndTextFormat(self):
test_dir = self._TestDir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.test_session(graph=tf.Graph()):
# Creates a graph.
tf.Variable(10.0, name="v0")
# Exports the graph as binary format.
tf.train.export_meta_graph(filename, as_text=False)
with self.test_session(graph=tf.Graph()):
# Imports the binary format graph.
saver = tf.train.import_meta_graph(filename)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.test_session(graph=tf.Graph()):
# Imports the text format graph.
tf.train.import_meta_graph(filename)
# Writes wrong contents to the file.
tf.train.write_graph(saver.as_saver_def(), os.path.dirname(filename),
os.path.basename(filename))
with self.test_session(graph=tf.Graph()):
# Import should fail.
with self.assertRaisesWithPredicateMatch(
IOError, lambda e: "Cannot parse file"):
| tensorflow.train.import_meta_graph | 820 |
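A minimal, hedged round trip with the meta-graph helpers the test above exercises (TF 1.x graph-mode APIs; the temporary path is an assumption):

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

with tf.Graph().as_default():
    tf.Variable(10.0, name="v0")
    # Serialize the current graph to a MetaGraphDef file.
    tf.train.export_meta_graph("/tmp/example.meta", as_text=False)

with tf.Graph().as_default():
    # Rebuild the graph from the exported file inside a fresh Graph.
    tf.train.import_meta_graph("/tmp/example.meta")
```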
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
true_w = [[1.0], [-0.5], [2.0]]
true_b = [1.0]
model = linear_regression.LinearModel()
dataset = linear_regression.synthetic_dataset(
true_w, true_b, noise_level=0., batch_size=64, num_batches=40)
| tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.LinearModel | 821 |
import tensorflow as tf
raise ValueError("Height not divisible by 2.")
if width % 2 != 0:
raise ValueError("Width not divisible by 2.")
weights = numpy.zeros((2, 2, channels, 4 * channels))
for idx_ch in xrange(channels):
slice_2 = slice(idx_ch, (idx_ch + 1))
slice_3 = slice((idx_ch * 4), ((idx_ch + 1) * 4))
weights[:, :, slice_2, slice_3] = SQUEEZE_MATRIX
shuffle_channels = [idx_ch * 4 for idx_ch in xrange(channels)]
shuffle_channels += [idx_ch * 4 + 1 for idx_ch in xrange(channels)]
shuffle_channels += [idx_ch * 4 + 2 for idx_ch in xrange(channels)]
shuffle_channels += [idx_ch * 4 + 3 for idx_ch in xrange(channels)]
shuffle_channels = numpy.array(shuffle_channels)
weights = weights[:, :, :, shuffle_channels].astype("float32")
if reverse:
res = tf.nn.conv2d_transpose(
value=input_,
filter=weights,
output_shape=[batch_size, height * 2, width * 2, channels],
strides=[1, 2, 2, 1],
padding="SAME",
name="unsqueeze_2x2")
else:
res = tf.nn.conv2d(
input=input_,
filter=weights,
strides=[1, 2, 2, 1],
padding="SAME",
name="squeeze_2x2")
| tensorflow.nn.conv2d_transpose | 822 |
import tensorflow as tf
def _create_params(self):
initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
with tf.variable_scope(self.name, initializer=initializer):
with tf.variable_scope("lstm"):
self.w_lstm = []
for layer_id in range(self.lstm_num_layers):
with tf.variable_scope("layer_{}".format(layer_id)):
w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size])
self.w_lstm.append(w)
self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
with tf.variable_scope("emb"):
self.w_emb = tf.get_variable("w", [self.num_branches, self.lstm_size])
with tf.variable_scope("softmax"):
self.w_soft = tf.get_variable("w", [self.lstm_size, self.num_branches])
| tensorflow.get_variable | 823 |
import tensorflow as tf
# Note: this warning is misleading in the context where tokens are ranked
# based on mutual information rather than frequency.
tf.compat.v1.logging.warn(
'frequency_threshold %d <= 1 is a no-op, use None instead.',
| tensorflow.compat.v1.logging.warn | 824 |
from tensorflow.python.ops import random_ops
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
| tensorflow.python.ops.random_ops.truncated_normal | 825 |
from tensorflow.python.ops import gen_math_ops
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor` of type `float`.
imag: A `Tensor` of type `float`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64`.
"""
with ops.op_scope([real, imag], name, "Complex") as name:
return gen_math_ops._complex(real, imag, name=name)
def round(x, name=None):
"""Rounds the values of a tensor to the nearest integer, element-wise.
For example:
```python
# 'a' is [0.9, 2.5, 2.3, -4.4]
tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]
```
| tensorflow.python.ops.gen_math_ops._complex | 826 |
import tensorflow as tf
train_op = None
cls_accuracy = tf.metrics.accuracy(glabels, predictions['classes'])
metrics = {'cls_accuracy': cls_accuracy}
# Create a tensor named train_accuracy for logging purposes.
tf.identity(cls_accuracy[1], name='cls_accuracy')
tf.summary.scalar('cls_accuracy', cls_accuracy[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
| tensorflow.identity | 827 |
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
def __init__(self, every_n_steps=100, output_dir=None,
summary_writer=None):
super(StepCounter, self).__init__(every_n_steps=every_n_steps)
self._summary_tag = "global_step/sec"
self._last_reported_step = None
self._last_reported_time = None
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
def set_estimator(self, estimator):
super(StepCounter, self).set_estimator(estimator)
if self._summary_writer is None:
self._summary_writer = SummaryWriterCache.get(estimator.model_dir)
def every_n_step_end(self, current_step, outputs):
current_time = time.time()
| tensorflow.contrib.learn.python.learn.summary_writer_cache.SummaryWriterCache.get | 828 |
from tensorflow.contrib import seq2seq
if decoder_fn is None:
outputs, final_state = tf.nn.dynamic_rnn(cell, tensor,
sequence_length=sequence_length, initial_state=initial_state, dtype=tf.float32)
final_context_state = None
else:
# TODO: turn off sequence_length?
outputs, final_state, final_context_state = seq2seq.dynamic_rnn_decoder(
cell, decoder_fn, inputs=None, sequence_length=sequence_length)
if return_final_state:
return final_state
else:
| tensorflow.contrib.seq2seq.dynamic_rnn_decoder | 829 |
from tensorflow.python.ops import random_ops
def validateKolmogorovSmirnov(self,
shape,
mean,
stddev,
minval,
maxval,
seed=1618):
try:
import scipy.stats # pylint: disable=g-import-not-at-top
tf.set_random_seed(seed)
with self.test_session(use_gpu=self._use_gpu):
samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
minval,
maxval).eval()
assert (~np.isnan(samples)).all()
minval = max(mean - stddev * 10, minval)
maxval = min(mean + stddev * 10, maxval)
dist = scipy.stats.norm(loc=mean, scale=stddev)
cdf_min = dist.cdf(minval)
cdf_max = dist.cdf(maxval)
def truncated_cdf(x):
return np.clip((dist.cdf(x) - cdf_min) / (cdf_max - cdf_min), 0.0, 1.0)
| tensorflow.python.ops.random_ops.parameterized_truncated_normal | 830 |
import tensorflow as tf
def cross_entropy_layer(tensor, target, **opts):
if _rank(tensor) > 1:
target = tf.reshape(target, shape=(-1, ))
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=tensor, labels=target)
mask = tf.cast(tf.not_equal(target, tf.zeros_like(target)), dtype=tf.float32)
out = cross_entropy * mask
return out
| tensorflow.zeros_like | 831 |
import tensorflow as tf
"""
Build the custom CNN for the CIFAR-10 dataset.
"""
# The input data holders (cf. shapes after preparation)
self.X = tf.compat.v1.placeholder(tf.float32, shape = (None,
self.config.data["image_size"],
self.config.data["image_size"],
self.config.data["num_channels"]), name="X") # ex. (50000, 32, 32, 3)
self.y = tf.compat.v1.placeholder(tf.int32, shape = (None, self.config.data["num_categories"]), name="y") # ex. (50000, 10)
self.train = tf.compat.v1.placeholder(tf.bool)
# The CNN architecture = conv/pool layers + flatten layer + fully connected layers
with tf.name_scope("cnn"):
# a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
self.conv1 = tf.layers.conv2d(self.X,
self.config.cifar10_cnn["num_filters"],
| tensorflow.compat.v1.placeholder | 832 |
import tensorflow as tf
marker='.', c=sdf_values.numpy()[:, 0])
plt.colorbar()
if not tf.math.is_nan(iou):
self.iou_per_class[class_id].append(iou)
| tensorflow.math.is_nan | 833 |
from tensorflow.python.framework import ops
If the default graph is being used to define a function, the
returned list of tensors are those accessed inside the function body
but defined outside the function body so far. Otherwise, returns an
empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_inputs
else:
return []
| tensorflow.python.framework.ops.get_default_graph | 834 |
import tensorflow as tf
'image/object/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'image/object/class/label':
tf.io.VarLenFeature(tf.int64),
'image/object/area':
tf.io.VarLenFeature(tf.float32),
'image/object/is_crowd':
tf.io.VarLenFeature(tf.int64),
}
if include_mask:
self._keys_to_features.update({
'image/object/mask':
tf.io.VarLenFeature(tf.string),
})
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
| tensorflow.io.VarLenFeature | 835 |
import tensorflow as tf
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
| tensorflow.add_n | 836 |
import tensorflow as tf
pass
else:
# add a skip connection
lstm_cell = tf.nn.rnn_cell.ResidualWrapper(lstm_cell)
# collect the input state, run the dynamic rnn, collect
| tensorflow.nn.rnn_cell.ResidualWrapper | 837 |
from tensorflow.python.framework import ops
return math_ops.div(
math_ops.reduce_sum(loss_vec),
math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
name="loss")
def _get_linear_vars(self):
if self._get_linear_feature_columns():
return ops.get_collection(self._linear_weight_collection)
return []
def _get_linear_training_ops(self, linear_grads, linear_vars):
if self._get_linear_feature_columns():
self._linear_optimizer = self._get_optimizer(
self._linear_optimizer,
| tensorflow.python.framework.ops.get_collection | 838 |
import tensorflow as tf
* Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18
"""
def __init__(self, data_set, exp_settings, forward_only=False):
"""Create the model.
Args:
data_set: (Raw_data) The dataset used to build the input layer.
exp_settings: (dictionary) The dictionary containing the model settings.
forward_only: Set true to conduct prediction only, false to conduct training.
"""
print('Build DLA atten')
self.hparams = tf.contrib.training.HParams(
learning_rate=0.05, # Learning rate.
max_gradient_norm=5.0, # Clip gradients to this norm.
loss_func='click_weighted_softmax_cross_entropy', # Select Loss function
logits_to_prob='softmax', # the function used to convert logits to probability distributions
ranker_learning_rate=-1.0, # The learning rate for ranker (-1 means same with learning_rate).
ranker_loss_weight=1.0, # Set the weight of unbiased ranking loss
l2_loss=0.0, # Set strength for L2 regularization.
l1_loss=0.0,
max_propensity_weight = -1, # Set maximum value for propensity weights
constant_propensity_initialization = False, # Set true to initialize propensity with constants.
grad_strategy='ada', # Select gradient strategy
)
print(exp_settings['learning_algorithm_hparams'])
| tensorflow.contrib.training.HParams | 839 |
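A small, hedged illustration of the `HParams` container used above (TF 1.x contrib): values can be overridden from a comma-separated `name=value` string, which is presumably what `exp_settings['learning_algorithm_hparams']` carries:

```python
import tensorflow as tf

hparams = tf.contrib.training.HParams(learning_rate=0.05, max_gradient_norm=5.0)
hparams.parse('learning_rate=0.01,max_gradient_norm=1.0')
print(hparams.learning_rate)   # 0.01 after the override
```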
import tensorflow as tf
request.model_spec.signature_name = 'serving_default'
# This is correct (default constant).
request.inputs['input'].CopyFrom(make_tensor_proto(input_data,
shape=input_data.shape))
# Boiler-Plate
response = stub.Predict(request, timeout)
result = response.outputs['output']
print(tf.make_ndarray(result))
| tensorflow.make_ndarray | 840 |
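The two tensor-proto helpers in the serving client above round-trip cleanly between NumPy arrays and `TensorProto` messages; a hedged, standalone sketch without the gRPC stub:

```python
import numpy as np
import tensorflow as tf

arr = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
proto = tf.make_tensor_proto(arr, shape=arr.shape)   # NumPy -> TensorProto
print(tf.make_ndarray(proto))                        # TensorProto -> NumPy
```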
import tensorflow as tf
"""Imports ops from collections."""
if self._is_training:
self._train_op = tf.get_collection_ref("train_op")[0]
self._lr = tf.get_collection_ref("lr")[0]
self._new_lr = tf.get_collection_ref("new_lr")[0]
self._lr_update = tf.get_collection_ref("lr_update")[0]
rnn_params = tf.get_collection_ref("rnn_params")
if self._cell and rnn_params:
params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
self._cell,
self._cell.params_to_canonical,
self._cell.canonical_to_params,
rnn_params,
base_variable_scope="Model/RNN")
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
self._cost = tf.get_collection_ref(util.with_prefix(self._name, "cost"))[0]
| tensorflow.contrib.cudnn_rnn.RNNParamsSaveable | 841 |
from tensorflow.python.ops import gen_nn_ops
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.op_scope([value], name, "AvgPool") as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops._avg_pool(value, ksize=ksize, strides=strides,
padding=padding,
data_format=data_format,
name=name)
| tensorflow.python.ops.gen_nn_ops._avg_pool | 842 |
from tensorflow.contrib.eager.python.examples.revnet import config as config_
dev = tf.DeviceSpec.from_string(device).device_type.lower()
name = "%s_%s_batch_%d_%s" % (label, dev, batch_size, data_format)
extras = {"examples_per_sec": batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def _benchmark_eager_apply(self,
label,
device_and_format,
defun=False,
execution_mode=None):
config = config_.get_hparams_imagenet_56()
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
model = revnet.RevNet(config=config)
if defun:
# TODO(apassos): reenable after cond lets you return None
model.call = tfe.defun(model.call)
batch_size = 64
num_burn = 5
num_iters = 10
with tf.device(device):
| tensorflow.contrib.eager.python.examples.revnet.config.get_hparams_imagenet_56 | 843 |
import tensorflow as tf
"""
max_time = 8
batch_size = 16
inputs = tf.random_uniform([batch_size, max_time],
maxval=30521, dtype=tf.int32)
| tensorflow.random_uniform | 844 |
import tensorflow as tf
auc += ((x - prev_x) * (y + prev_y) / 2.)
prev_x = x
prev_y = y
return auc
def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
input_size = query.get_shape().as_list()[-1]
# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
| tensorflow.array_ops.transpose | 845 |
import tensorflow as tf
print("***************")
print("Training done!!")
save_path = saver.save(sess, ckpt_name)
print("Model saved in file: %s" % save_path)
print ("creating protobuf...")
g_1 = tf.get_default_graph()
with tf.Session(graph = g_1) as sess:
saver = tf.train.import_meta_graph('save/model.ckpt.meta', clear_devices=True)
saver.restore(sess, ckpt_name)
graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, dst_nodes)
tf.train.write_graph(tf.graph_util.extract_sub_graph(graph_def, dst_nodes), path, fname, as_text=False)
| tensorflow.graph_util.extract_sub_graph | 846 |
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
'image/class/label':
parsing_ops.FixedLenFeature(
shape=[1],
dtype=dtypes.int64,
default_value=array_ops.zeros(
[1], dtype=dtypes.int64))
}
items_to_handlers = {
'image': tfexample_decoder.Image(),
'label': tfexample_decoder.Tensor('image/class/label'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
return dataset.Dataset(
data_sources=data_sources,
reader=io_ops.TFRecordReader,
decoder=decoder,
num_samples=100,
items_to_descriptions=None)
class DatasetDataProviderTest(test.TestCase):
| tensorflow.contrib.slim.python.slim.data.tfexample_decoder.TFExampleDecoder | 847 |
from tensorflow.python.framework import op_def_library as _op_def_library
"""
result = _op_def_lib.apply_op("UnpackPath", path=path,
path_values=path_values, name=name)
return result
def _InitOpDefLibrary():
op_list = _op_def_pb2.OpList()
_text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
_InitOpDefLibrary.op_list_ascii = """op {
name: "HardRoutingFunction"
input_arg {
name: "input_data"
type: DT_FLOAT
| tensorflow.python.framework.op_def_library.OpDefLibrary | 848 |
import tensorflow as tf
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')
with tf.variable_scope('target_q'):
self.target_q = R + self.gamma * self.q_
with tf.variable_scope('abs_TD'):
self.abs_td = tf.abs(self.target_q - self.q)
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('TD_error'):
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))
with tf.variable_scope('C_train'):
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)
with tf.variable_scope('a_grad'):
self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)
def _build_net(self, s, a, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
| tensorflow.squared_difference | 849 |
import tensorflow as tf
As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).
Args:
image: a Tensor.
Returns:
Tensor of the same shape as image.
"""
image = tf.image.resize_with_crop_or_pad(image, 40, 40)
image = tf.image.random_crop(image, [32, 32, 3])
image = tf.image.random_flip_left_right(image)
return image
# Makes the function accessible in gin configs, even with all args denylisted.
@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def cifar10_augmentation_preprocess(dataset, training):
| tensorflow.image.resize_with_crop_or_pad | 850 |
import tensorflow as tf
self.error_rate = 1. - \
tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
self.end_points_D['class_logits'], targets, 1)))
if gpu_idx == 0:
update = tf.assign(num_error_rate, num_error_rate + 1.)
with tf.control_dependencies([update]):
tc = tf.maximum(.01, 1. / num_error_rate)
update = tf.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate)
with tf.control_dependencies([update]):
self.d_loss_class = tf.identity(self.d_loss_class)
self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.end_points_D['D_on_G_logits'],
labels=tf.zeros_like(self.end_points_D['D_on_G_logits']))
self.d_loss_class = tf.reduce_mean(self.d_loss_class)
| tensorflow.assign | 851 |
import tensorflow.contrib as contrib
stitch3_1, stitch3_2 = fc3_1, fc3_2
dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training,
scope="dropout3_1")
| tensorflow.contrib.layers.dropout | 852 |
import tensorflow as tf
generator_inputs = features
real_data = labels
gan_model = tf.contrib.gan.gan_model(generator_fn, discriminator_fn, real_data, generator_inputs)
predictions = gan_model.generated_data
loss = None
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
# define loss
gan_loss = tf.contrib.gan.gan_loss(gan_model, add_summaries=False)
loss = gan_loss.generator_loss
# define train_op
gen_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05)
dis_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05)
# wrapper to make the optimizer work with TPUs
if params['use_tpu']:
gen_optimizer = tf.contrib.tpu.CrossShardOptimizer(gen_optimizer)
dis_optimizer = tf.contrib.tpu.CrossShardOptimizer(dis_optimizer)
| tensorflow.contrib.gan.gan_loss | 853 |
import tensorflow as tf
"""
sess = tf.get_default_session()
if variables is None:
variables = tf.global_variables()
else:
variables = list(variables)
if len(variables) == 0:
return []
if semver.match(tf.__version__, '<1.0.0'):
init_flag = sess.run(
tf.pack([tf.is_variable_initialized(v) for v in variables]))
else:
init_flag = sess.run(
tf.stack([tf.is_variable_initialized(v) for v in variables]))
return [v for v, f in zip(variables, init_flag) if not f]
def get_hard_target_model_updates(target, source):
"""Return list of target model update ops.
These are hard target updates. The source weights are copied
directly to the target network.
Parameters
----------
target: keras.models.Model
The target model. Should have same architecture as source model.
| tensorflow.is_variable_initialized | 854 |
import tensorflow as tf
def general_deconv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm = True, relu_factor = 0, name="deconv2d"):
with tf.variable_scope(name):
deconv = tf.layers.conv2d_transpose(input_data, filters, kernel_size, (stride, stride), padding, activation = None)
| tensorflow.layers.conv2d_transpose | 855 |
import tensorflow as tf
W = tf.get_variable('W', [state_size, num_classes])
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
'''Because rnn_outputs is 3-D, reshape it to 2-D here;
after the matrix multiplication, reshape back to [batch_size, num_steps, num_classes]'''
logits = tf.reshape(tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), W) +b, \
shape=[batch_size, num_steps, num_classes])
predictions = tf.nn.softmax(logits)
y_as_list = tf.unstack(y, num=num_steps, axis=1)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,logits=logits)
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)
'''Train the network'''
def train_rnn(num_epochs, num_steps, state_size=4, verbose=True):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
training_losses = []
for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
training_loss = 0
| tensorflow.train.AdagradOptimizer | 856 |
from tensorflow.python.ops import data_flow_ops
labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
nrof_preprocess_threads = 4
image_size = (args.image_size, args.image_size)
eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
dtypes=[tf.string, tf.int32, tf.int32],
shapes=[(1,), (1,), (1,)],
shared_name=None, name=None)
eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
| tensorflow.python.ops.data_flow_ops.FIFOQueue | 857 |
import tensorflow as tf
self.assertAllClose(masks.numpy(), expected_masks.numpy())
def test_inputs_Distances_to_centers(self):
inputs = tf.random.uniform(
[100, 8], minval=-10, maxval=10.0, dtype=tf.float32)
centers = tf.random.uniform(
| tensorflow.random.uniform | 858 |
import tensorflow as tf
for input_file in input_files:
tf.logging.info(" %s" % input_file)
validation_input_files = []
if FLAGS.validation_input_file is None and FLAGS.validation_input_dir is None:
validation_input_files = input_files
else:
if FLAGS.validation_input_file is not None:
for input_pattern in FLAGS.validation_input_file.split(","):
validation_input_files.extend(tf.gfile.Glob(input_pattern))
if FLAGS.validation_input_dir is not None:
for filename in tf.gfile.ListDirectory(FLAGS.validation_input_dir):
validation_input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.validation_input_dir, filename)))
tf.logging.info("*** Input Validation Files ***")
for input_file in validation_input_files:
tf.logging.info(" %s" % input_file)
config = tf.ConfigProto()
if FLAGS.xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
if FLAGS.use_hvd:
| tensorflow.gfile.ListDirectory | 859 |
from tensorflow.python.summary import summary
elif callable(clip_gradients):
gradients = clip_gradients(gradients)
elif clip_gradients is not None:
raise ValueError("Unknown type %s for clip_gradients" %
type(clip_gradients))
# Add scalar summary for loss.
if "loss" in summaries:
summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
| tensorflow.python.summary.summary.scalar | 860 |
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), # pylint: disable=line-too-long
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
| tensorflow.python.training.training.MomentumOptimizer | 861 |
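A hedged usage sketch of the lookup table defined above: each entry is called with a learning rate to construct the optimizer (how the surrounding code actually resolves string names is an assumption):

```python
momentum = OPTIMIZER_CLS_NAMES["Momentum"](learning_rate=0.1)  # MomentumOptimizer(0.1, momentum=0.9)
sgd = OPTIMIZER_CLS_NAMES["SGD"](learning_rate=0.01)           # GradientDescentOptimizer(0.01)
```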
from tensorflow.core.framework.summary_pb2 import Summary
def every_n_step_end(self, current_step, outputs):
current_time = time.time()
if self._last_reported_time is not None and self._summary_writer:
added_steps = current_step - self._last_reported_step
elapsed_time = current_time - self._last_reported_time
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, current_step)
self._last_reported_step = current_step
self._last_reported_time = current_time
| tensorflow.core.framework.summary_pb2.Summary.Value | 862 |
from tensorflow.contrib.framework import deprecated_args
return streaming_mean(is_correct, weights, metrics_collections,
updates_collections, name or 'accuracy')
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_precision(predictions, labels, ignore_mask=None, weights=None,
metrics_collections=None, updates_collections=None,
name=None):
| tensorflow.contrib.framework.deprecated_args | 863 |
from tensorflow.python.ops import nn_ops
return None, self._loss, sampled_words
def calculate_encoder_features(self, encoder_states, encoder_dim):
options = self.options
input_shape = tf.shape(encoder_states)
batch_size = input_shape[0]
passage_len = input_shape[1]
with variable_scope.variable_scope("attention_decoder"):
encoder_features = tf.expand_dims(encoder_states, axis=2) # now is shape [batch_size, passage_len, 1, encoder_dim]
W_h = variable_scope.get_variable("W_h", [1, 1, encoder_dim, options.attention_vec_size])
self.W_h = W_h
encoder_features = nn_ops.conv2d(encoder_features, W_h, [1, 1, 1, 1], "SAME") # [batch_size, passage_len, 1, attention_vec_size]
encoder_features = tf.reshape(encoder_features, [batch_size, passage_len, options.attention_vec_size])
return encoder_features
def decode_mode(self, word_vocab, beam_size, state_t_1, context_t_1, coverage_t_1, word_t,
encoder_states, encoder_features, passage_word_idx, passage_mask):
options = self.options
with variable_scope.variable_scope("attention_decoder"):
v = variable_scope.get_variable("v", [options.attention_vec_size])
v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
w_c = None
| tensorflow.python.ops.nn_ops.conv2d | 864 |
from tensorflow.contrib import slim
init = tf.global_variables_initializer()
if FLAGS.pretrained_model_path is not None:
variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path, slim.get_trainable_variables(),
ignore_missing_vars=True)
| tensorflow.contrib.slim.get_trainable_variables | 865 |
from tensorflow.contrib.eager.python import tfe
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print(tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
| tensorflow.contrib.eager.python.tfe.num_gpus | 866 |
import tensorflow as tf
# Use fixed archs if specified, otherwise use placeholders'
(normal_arch, reduction_arch) = self._get_fixed_cell_archs(**knobs)
normal_arch = normal_arch if not use_dynamic_arch else ph.normal_arch
reduction_arch = reduction_arch if not use_dynamic_arch else ph.reduction_arch
# Initialize steps variable
step = self._make_var('step', (), dtype=tf.int32, trainable=False, initializer=tf.initializers.constant(0))
# For train dataset, preprocess & do inference
utils.logger.log('Building model for training...')
(train_X, train_classes, train_dataset_init_op) = \
self._preprocess(ph.train_images, ph.train_classes, is_train=True, **knobs)
| tensorflow.initializers.constant | 867 |
from tensorflow.contrib.slim.python.slim.data import dataset_data_provider
def testTFRecordDataset(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_dataset'))
height = 300
width = 280
with self.cached_session():
test_dataset = _create_tfrecord_dataset(dataset_dir)
provider = dataset_data_provider.DatasetDataProvider(test_dataset)
key, image, label = provider.get(['record_key', 'image', 'label'])
image = _resize_image(image, height, width)
with session.Session('') as sess:
with queues.QueueRunners(sess):
key, image, label = sess.run([key, image, label])
split_key = key.decode('utf-8').split(':')
self.assertEqual(2, len(split_key))
| tensorflow.contrib.slim.python.slim.data.dataset_data_provider.DatasetDataProvider | 868 |
import tensorflow as tf
# The two terms 'term1' and 'term2' which come from normalizers of the
# 1. Original policy distribution
# 2. The distribution after completing the square
sigma = tf.matrix_inverse(prec)
term1 = -0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * sigma))
if self.beta == 0:
term2 = 0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * param_eta * HaaInv))
| tensorflow.matrix_inverse | 869 |
from tensorflow.contrib.learn.python.learn import session_run_hook
self._last_step = run_context.session.run(self._global_step_tensor) + 1
request = {self._global_step_tensor: self._global_step_tensor}
monitor_fetches = []
for m in self._monitors:
monitor_requests = m.step_begin(self._last_step)
if monitor_requests:
if not isinstance(monitor_requests, list):
raise ValueError("Monitor.step_begin should return a list.")
monitor_fetches.extend(monitor_requests)
if monitor_fetches:
request["monitors"] = dict(
zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches]))
return session_run_hook.SessionRunArgs(request)
def after_run(self, run_context, run_values):
result = run_values.results[
"monitors"] if "monitors" in run_values.results else {}
for m in self._monitors:
induce_stop = m.step_end(self._last_step, result)
if induce_stop:
run_context.request_stop()
for m in self._monitors:
m.post_step(self._last_step, run_context.session)
self._last_step = run_values.results[self._global_step_tensor] + 1
| tensorflow.contrib.learn.python.learn.session_run_hook.SessionRunArgs | 870 |
from tensorflow.contrib.learn.python.learn.io import data_feeder
def input_fn():
return x.create_graph()
return input_fn, None
df = data_feeder.setup_train_data_feeder(x, None,
n_classes=None,
batch_size=batch_size)
return df.input_builder, df.get_feed_dict_fn()
| tensorflow.contrib.learn.python.learn.io.data_feeder.setup_train_data_feeder | 871 |
import tensorflow as tf
print(fc1)
# now to upscale to actual image size
deconv_shape1 = image_net["pool4"].get_shape()
W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, 278], name="W_t1")
b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
conv_t1 = utils.conv2d_transpose_strided(concat1, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")
deconv_shape2 = image_net["pool3"].get_shape()
W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")
shape = tf.shape(image)
deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 3])
W_t3 = utils.weight_variable([16, 16, 3, deconv_shape2[3].value], name="W_t3")
b_t3 = utils.bias_variable([3], name="b_t3")
conv_t3 = tf.nn.relu(utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8))
annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")
return tf.expand_dims(annotation_pred, dim=3), conv_t3
| tensorflow.add | 872 |
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
batch_size=None,
monitors=None,
max_steps=None):
"""See trainable.Trainable. Note: Labels must be integer class indices."""
# TODO(roumposg): Remove when deprecated monitors are removed.
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
self._estimator.fit(x=x,
y=y,
input_fn=input_fn,
steps=steps,
| tensorflow.contrib.learn.python.learn.monitors.replace_monitors_with_hooks | 873 |
import tensorflow as tf
msg = '`perm` must be a vector.'
if perm.shape.ndims is not None:
if perm.shape.ndims != 1:
raise ValueError(
msg[:-1] + ', saw rank: {}.'.format(perm.shape.ndims))
elif validate_args:
assertions += [tf.compat.v1.assert_rank(perm, 1, message=msg)]
perm_ = tf.get_static_value(perm)
msg = '`perm` must be a valid permutation vector.'
if perm_ is not None:
if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)):
raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_))
| tensorflow.compat.v1.assert_rank | 874 |
import tensorflow as tf
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Verify that the original names are not in the Saved file
save = tf.train.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
# Verify that the mapped names are present in the Saved file and can be
# Restored using remapped names.
with self.test_session() as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
with self.assertRaisesOpError("uninitialized value v0"):
sess.run(v0)
with self.assertRaisesOpError("uninitialized value v1"):
sess.run(v1)
save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
| tensorflow.Variable | 875 |
import tensorflow as tf
"""
# size: num_priors x num_targets
ious = iou_of(tf.expand_dims(gt_boxes, axis=0), tf.expand_dims(corner_form_priors, axis=1))
# size: num_priors
best_target_per_prior = tf.math.reduce_max(ious, axis=1)
best_target_per_prior_index = tf.math.argmax(ious, axis=1)
# size: num_targets
best_prior_per_target = tf.math.reduce_max(ious, axis=0)
best_prior_per_target_index = tf.math.argmax(ious, axis=0)
| tensorflow.math.reduce_max | 876 |
import tensorflow as tf
memory_limit = int(fraction*total_memory)
print(memory_info)
if tf.version.VERSION[0]=="2":
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
tf.config.experimental.set_virtual_device_configuration(gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)])
else:
gpu_options = tf.GPUOptions(allow_growth=allow_growth,
per_process_gpu_memory_fraction=fraction)
config = tf.ConfigProto(gpu_options=gpu_options)
session = tf.Session(config=config)
| tensorflow.config.experimental.VirtualDeviceConfiguration | 877 |
import tensorflow as tf
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(tf.histogram_summary(var.op.name + ':gradient',
grad_values))
summaries.append(tf.histogram_summary(var.op.name + ':gradient_norm',
tf.global_norm([grad_values])))
| tensorflow.histogram_summary | 878 |
import tensorflow as tf
start = time()
self.sess.run([self.pi_new_params, self.vf_new_params, self.data_iter.initializer],
feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv})
while True:
try:
summary, step, _ = self.sess.run([self.summarise, self.global_step, self.train_op])
except tf.errors.OutOfRangeError:
break
print('\rTrained in %.3fs. Global step %i' % (time() - start, step+1))
return summary
class PPO_HC(PPO):
def build_anet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
# sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
sigma = tf.clip_by_value(sigma, 0.0, 1.0)
norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
class PPO_LSTM(Base):
| tensorflow.contrib.layers.l2_regularizer | 879 |
from tensorflow.python.ops import math_ops
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
top_k_idx = math_ops.to_int64(top_k_idx)
weights = _mask_weights(ignore_mask, weights)
tp, tp_update = _streaming_sparse_true_positive_at_k(
| tensorflow.python.ops.math_ops.to_int64 | 880 |
from tensorflow.python.ops import array_ops
b_list = [b[i] for i in range(numTensors)]
b_grads = b_module.bspmm(a_indices, a_values, a_shape, grad, adjoint_a=True, adjoint_b=False)
bg_row=tf.shape(b_grads[0])[0]
bg_col=tf.shape(b_grads[0])[1]
b_grads = tf.reshape(b_grads, (numTensors * bg_row, bg_col))
if adj_b:
b_grads = [array_ops.transpose(b_g) for b_g in b_grads]
for t in range(numTensors):
rows = a_indices[t][:, 0]
cols = a_indices[t][:, 1]
parts_a = array_ops.gather(grad[t], rows if not adj_a else cols)
parts_b = array_ops.gather(b_list[t] if not adj_b else array_ops.transpose(b_list[t]), cols if not adj_a else rows)
a_values_grads.append(math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1))
| tensorflow.python.ops.array_ops.transpose | 881 |
import tensorflow as tf
with tf.Session() as sess:
self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testProblemHparamsModality(self):
problem = problem_hparams.TestProblem(input_vocab_size=2,
| tensorflow.contrib.eager.run_test_in_graph_and_eager_modes | 882 |
import tensorflow as tf
# BN when training
update = 1.0 - decay
update_mu = mu.assign_sub(update * (mu - batch_mean))
update_sigma = sigma.assign_sub(update * (sigma - batch_var))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_sigma)
mean, var = tf.cond(self.train_flag, lambda: (batch_mean, batch_var), lambda: (mu, sigma))
bn = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
tf.add_to_collection('debug_layers', bn)
return bn
| tensorflow.nn.batch_normalization | 883 |
import tensorflow as tf
"tpu_name",
None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.",
)
tf.flags.DEFINE_string(
"tpu_zone",
None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.",
)
tf.flags.DEFINE_string(
"gcp_project",
None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.",
)
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores",
8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.",
)
| tensorflow.flags.DEFINE_string | 884 |
from tensorflow.python.ops import array_ops
def get_weight_tensor(self, features):
if not self._weight_column_name:
return None
else:
return array_ops.reshape(
math_ops.to_float(features[self._weight_column_name]), shape=(-1,))
@property
def problem_type(self):
return self._problem_type
def _weighted_loss(self, loss, weight_tensor):
"""Returns cumulative weighted loss."""
unweighted_loss = array_ops.reshape(loss, shape=(-1,))
weighted_loss = math_ops.multiply(unweighted_loss,
array_ops.reshape(
weight_tensor, shape=(-1,)))
return weighted_loss
def training_loss(self, logits, target, features, name="training_loss"):
"""Returns training loss tensor for this head.
Training loss is different from the loss reported on the tensorboard as we
should respect the example weights when computing the gradient.
L = sum_{i} w_{i} * l_{i} / B
| tensorflow.python.ops.array_ops.reshape | 885 |
import tensorflow as tf
img_summary: a string tensor containing sampled input images.
"""
# Reshape to use within a convolutional neural net. Last dimension is for
# 'features' - it would be 1 for a grayscale image, 3 for an RGB image,
# 4 for RGBA, etc.
x_image = tf.reshape(x, [-1, FLAGS.img_width, FLAGS.img_height, FLAGS.img_channels])
x_image = tf.cond(train, lambda: tf.map_fn(tf.image.random_flip_left_right, x_image), lambda: x_image)
x_image = tf.cond(train, lambda: tf.map_fn(lambda x: tf.image.random_brightness(x, 0.5), x_image), lambda: x_image)
img_summary = tf.summary.image('Input_images', x_image)
# First convolutional layer - maps one image to 32 feature maps.
with tf.variable_scope('Conv_1'):
conv1 = tf.layers.conv2d(
inputs=x_image,
filters=32,
| tensorflow.image.random_brightness | 886 |
import tensorflow as tf
end_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[3]],axis = -1),1, bias = False, name = "end_pointer"), -1)
self.logits = [mask_logits(start_logits, mask = self.c_mask),
mask_logits(end_logits, mask = self.c_mask)]
logits1, logits2 = [l for l in self.logits]
outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
tf.expand_dims(tf.nn.softmax(logits2), axis=1))
outer = tf.matrix_band_part(outer, 0, config.ans_limit)
self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits1, labels=self.y1)
losses2 = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits2, labels=self.y2)
self.loss = tf.reduce_mean(losses + losses2)
if config.l2_norm is not None:
variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables)
self.loss += l2_loss
if config.decay is not None:
self.var_ema = tf.train.ExponentialMovingAverage(config.decay)
ema_op = self.var_ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
| tensorflow.nn.sparse_softmax_cross_entropy_with_logits | 887 |
import tensorflow as tf
input_partition_dims = None
num_cores_per_replica = None
if params.use_tpu:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
params.platform.tpu,
zone=params.platform.tpu_zone,
project=params.platform.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
# If the input image is transposed (from NHWC to HWCN), the partition
# dimensions also need to be transposed the same way.
def _maybe_transpose(input_partition_dims):
if input_partition_dims and params.train.transpose_input:
return [input_partition_dims[i] for i in [1, 2, 3, 0]]
else:
return input_partition_dims
| tensorflow.Session.reset | 888 |
import tensorflow as tf
flags.mark_flag_as_required("output_dir")
tf.app.run()
| tensorflow.app.run | 889 |
import tensorflow as tf
if __name__ == "__main__":
tf.autograph.set_verbosity(0)
| tensorflow.autograph.set_verbosity | 890 |
import tensorflow as tf
# ddi
ddi_dataset = dataset.batch(n_ddi_batch)
ddi_batch = ddi_dataset.make_one_shot_iterator().get_next()
# post processing
im = self.post_process(training_batch)
ddi_im = self.post_process(ddi_batch)
self.im = im
self.ddi_im = ddi_im
def data_map(self, img_path):
n_bits = config.model.data.n_bits
n_bins = 2**n_bits
rgb = tf.image.decode_png(tf.read_file(img_path), channels=3, dtype=tf.uint8)
h = config.model.data.dimensions.h
w = config.model.data.dimensions.w
c = config.model.data.dimensions.c
# rgb.set_shape([h,w,c]) # don't set because going to crop anyway
# crop for lsun 96, see realnvp and glow for specifics
rgb = tf.image.random_crop(rgb,size=[h,w,c])
# crop for patch training
crop_h = h//self.crop_factor
crop_w = w//self.crop_factor
rgb = tf.image.random_crop(rgb,size=[crop_h,crop_w,c])
| tensorflow.read_file | 891 |
from tensorflow.python.training import ftrl
metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate(
input_fn=_input_fn, steps=100)
self._assertSingleClassMetrics(metrics)
def benchmarkCustomOptimizer(self):
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_feature = feature_column.real_valued_column('feature', dimension=4)
bucketized_feature = feature_column.bucketized_column(
cont_feature, test_data.get_quantile_based_buckets(iris.data, 10))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=tempfile.mkdtemp(),
linear_feature_columns=(bucketized_feature,),
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=(cont_feature,),
dnn_hidden_units=(3, 3),
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
input_fn = test_data.iris_input_logistic_fn
metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
input_fn=input_fn, steps=100)
self._assertSingleClassMetrics(metrics)
def benchmarkMultiClass(self):
iris = base.load_iris()
cont_feature = feature_column.real_valued_column('feature', dimension=4)
bucketized_feature = feature_column.bucketized_column(
| tensorflow.python.training.ftrl.FtrlOptimizer | 892 |
import tensorflow as tf
return (loss, i + 1)
# def sample_compute(i):
# batch1 = tf.gather(batch, tf.random.shuffle(index))
# batch2 = tf.gather(batch, tf.random.shuffle(index))
# pred1 = tf.slice(batch1, [0, 0], [num_sam, 1])
# pred2 = tf.slice(batch2, [0, 0], [num_sam, 1])
# tgt1 = tf.slice(batch1, [0, 1], [num_sam, 1])
# tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1])
# loss = compute_contra_loss(pred1, pred2, tgt1, tgt2)
# print(loss)
# return loss
i = tf.constant(0)
loss = tf.constant(0.)
final_loss = tf.while_loop(lambda l, i: i < resample, sample_compute, [loss, i])[0]
# final_loss = tf.scan(sample_compute, tf.range(resample), loss)[-1]
# final_loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems= tf.range(resample), dtype=tf.float32, parallel_iterations=1)
# print('final', final_loss)
# final_loss = loss
avg_loss = tf.reduce_mean(final_loss) / divider
# p = tf.print('cur_loss', [final_loss, avg_loss])
# with tf.control_dependencies([p]):
# avg_loss = tf.identity(avg_loss)
# print(final_loss, avg_loss)
# p = tf.print('debug loss ', [final_loss, avg_loss])
# with tf.control_dependencies([p]):
# avg_loss = 1. * avg_loss
# print(avg_loss)
# exit()
| tensorflow.while_loop | 893 |
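The resampling loop above is driven by tf.while_loop. A minimal sketch of that construct on its own (it just sums 0..9 and has nothing to do with the row's loss computation):

import tensorflow as tf

def cond(total, i):
    return i < 10

def body(total, i):
    return total + tf.cast(i, tf.float32), i + 1

total, _ = tf.while_loop(cond, body, [tf.constant(0.0), tf.constant(0)])
# total evaluates to 45.0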
import tensorflow as tf
hooks = []
if FLAGS.use_hvd:
hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if hvd.rank() == -1: #if debug, set 0
CLIDebugHook = tf_debug.LocalCLIDebugHook(ui_type='readline')
CLIDebugHook.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
hooks.append(CLIDebugHook)
if FLAGS.profile and hvd.rank() == 0:
ProfilerHook = tf.train.ProfilerHook(save_steps=FLAGS.hooking_frequence, output_dir=FLAGS.output_dir, show_dataflow=True, show_memory=True)
hooks.append(ProfilerHook)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
| tensorflow.train.ProfilerHook | 894 |
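A sketch of constructing the profiler hook used above; tf.estimator.ProfilerHook is the TF 2.x location of tf.train.ProfilerHook, and the save interval and output directory below are placeholders.

import tensorflow as tf

profiler_hook = tf.estimator.ProfilerHook(
    save_steps=100,             # write a timeline every 100 steps
    output_dir='/tmp/profile',  # placeholder directory
    show_dataflow=True,
    show_memory=True)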
import tensorflow as tf
):
# data for self-attention
rep_map_dp = dropout(rep_map, keep_prob, is_train)
rep_dep_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, dep_selection)
rep_head_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, head_selection)
# mask generation
dep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_head, 1])
head_idxs = tf.tile(tf.expand_dims(head_org_idx, 2), [1, 1, sl_dep])
if direction is None:
direct_mask = tf.not_equal(head_idxs, dep_idxs) # [bs, slh, sld]
else:
if direction == 'forward':
direct_mask = tf.greater(head_idxs, dep_idxs) # [bs, slh, sld]
else:
direct_mask = tf.less(head_idxs, dep_idxs) # [bs, slh, sld]
# [bs, slh, slh]
rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2))
attn_mask = tf.logical_and(direct_mask, rep_mask_tile) # [bs, slh, sld]
# tensor tile
rep_map_tile = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1]) # bs,slh,sld,vec
with tf.variable_scope('attention'): # bs,sl,sl,vec
f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
dependent = linear(rep_dep_tensor_dp, ivec, False, scope='linear_dependent') # bs,sld,vec
dependent_etd = tf.expand_dims(dependent, 1) # bs,1,sld,vec
head = linear(rep_head_tensor_dp, ivec, False, scope='linear_head') # bs,slh,vec
| tensorflow.greater | 895 |
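The forward/backward masks above reduce to an element-wise comparison of broadcast index grids. A tiny sketch of the same idea with tf.greater (sequence length 4 is an arbitrary choice):

import tensorflow as tf

idx = tf.range(4)
# True where the head position is strictly greater than the dependent position.
forward_mask = tf.greater(tf.expand_dims(idx, 1), tf.expand_dims(idx, 0))  # [4, 4] bool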
import tensorflow as tf
# Convert input R+1 tensor into a feature dictionary of one R+1 tensor
features = {TIMESERIES_COL: inputs}
return features, labels
# Create list of files that match pattern
file_list = tf.gfile.Glob(filename)
# Create dataset from file list
dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
| tensorflow.data.TextLineDataset | 896 |
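A minimal sketch of a tf.data.TextLineDataset input pipeline in the spirit of the row above; the file pattern and the two-float-column CSV schema are assumptions.

import tensorflow as tf

def parse_line(line):
    # Two float columns assumed; record_defaults fixes dtypes and defaults.
    x, y = tf.io.decode_csv(line, record_defaults=[[0.0], [0.0]])
    return {'x': x}, y

files = tf.io.gfile.glob('data/*.csv')  # placeholder pattern
dataset = tf.data.TextLineDataset(files).map(parse_line).batch(32)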
import tensorflow as tf
else:
d_checkpoints[r] += dr
def _unsparsify(x):
if not isinstance(x, tf.IndexedSlices):
return x
assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape"
indices = x.indices
while indices.shape.ndims < x.values.shape.ndims:
indices = tf.expand_dims(indices, -1)
return tf.scatter_nd(indices, x.values, x.dense_shape)
# partial derivatives to xs (usually the params of the neural net)
d_xs_new = dv[len(checkpoints_other):]
for j in range(len(xs)):
if d_xs_new[j] is not None:
if d_xs[j] is None:
d_xs[j] = _unsparsify(d_xs_new[j])
else:
| tensorflow.scatter_nd | 897 |
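The _unsparsify helper above densifies IndexedSlices with tf.scatter_nd. A standalone sketch of that op on made-up indices and values:

import tensorflow as tf

indices = tf.constant([[1], [3]])   # rows to write into
updates = tf.constant([9.0, 10.0])  # values for those rows
dense = tf.scatter_nd(indices, updates, shape=[5])
# dense == [0., 9., 0., 10., 0.]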
import tensorflow as tf
config = tf.ConfigProto()
custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"
custom_op.parameter_map["use_off_line"].b = True # 在昇腾AI处理器执行训练
config.graph_options.rewrite_options.remapping = RewriterConfig.OFF # 关闭remap开关
if FLAGS.allow_mix_precision:
custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
if FLAGS.auto_tune:
custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")
with tf.Session(config=config) as sess:
if FLAGS.restore:
print('continue training from previous checkpoint')
ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
saver.restore(sess, ckpt)
else:
sess.run(init)
if FLAGS.pretrained_model_path is not None:
variable_restore_op(sess)
data_generator = icdar.get_batch(num_workers=FLAGS.num_readers,
input_size=FLAGS.input_size,
batch_size=FLAGS.batch_size_per_gpu * len(gpus))
start = time.time()
avg_time_per_step1 = 0
performs = []
| tensorflow.train.latest_checkpoint | 898 |
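A minimal sketch of tf.train.latest_checkpoint, the API this row is labeled with; the checkpoint directory is a placeholder, and the call returns None if no checkpoint is found there.

import tensorflow as tf

ckpt = tf.train.latest_checkpoint('checkpoints/')  # placeholder directory
if ckpt is not None:
    print('restoring from', ckpt)  # e.g. pass to saver.restore(sess, ckpt)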
import tensorflow as tf
# create localization and classification losses
losses = ssd.loss(labels, params)
tf.losses.add_loss(params['localization_loss_weight'] * losses['localization_loss'])
tf.losses.add_loss(params['classification_loss_weight'] * losses['classification_loss'])
tf.summary.scalar('regularization_loss', regularization_loss)
tf.summary.scalar('localization_loss', losses['localization_loss'])
tf.summary.scalar('classification_loss', losses['classification_loss'])
total_loss = tf.losses.get_total_loss(add_regularization_losses=True)
if mode == tf.estimator.ModeKeys.EVAL:
batch_size = features['images'].shape[0].value
assert batch_size == 1
| tensorflow.losses.get_total_loss | 899 |
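tf.losses.get_total_loss sums everything registered via tf.losses.add_loss plus any regularization losses; a graph-mode sketch using the TF1 compatibility API, with arbitrary constant loss values:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

tf.losses.add_loss(tf.constant(1.5))
tf.losses.add_loss(tf.constant(0.5))
total = tf.losses.get_total_loss(add_regularization_losses=True)
with tf.Session() as sess:
    print(sess.run(total))  # 2.0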