Columns: seed (string, lengths 25 to 2.89k), seed_api (string, lengths 14 to 102), index (int64, 0 to 14.8k)
import tensorflow as tf

def _build_net(self):
    # we use parameter sharing among agents
    with tf.variable_scope(self.name):
        # ------------------ all inputs ------------------------
        self.S = tf.placeholder(tf.float32, [None, self.num_global_s], name='S')    # input Global State
        self.s = tf.placeholder(tf.float32, [None, self.num_s], name='s1')          # input state for agent1
        self.S_ = tf.placeholder(tf.float32, [None, self.num_global_s], name='S_')  # input Next Global State
tensorflow.placeholder
12,800
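For reference, a minimal usage sketch of the seed_api above (tensorflow.placeholder), assuming TF1.x graph mode; the shapes and values are illustrative, not taken from the row:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4], name='x')  # batch of 4-dim vectors
y = x * 2.0
with tf.Session() as sess:
    # the placeholder is fed at run time via feed_dict
    print(sess.run(y, feed_dict={x: [[1., 2., 3., 4.]]}))  # [[2. 4. 6. 8.]]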
import tensorflow as tf

monotonicity_dist = monotonicity_dist or 1.0

batch_size = tf.shape(attention_weights)[0]
src_len = tf.shape(attention_weights)[2]
trg_len = tf.shape(attention_weights)[1]

src_indices = tf.tile(tf.reshape(tf.range(src_len), shape=[1, 1, src_len]), [batch_size, trg_len, 1])
trg_indices = tf.tile(tf.reshape(tf.range(trg_len), shape=[1, trg_len, 1]), [batch_size, 1, src_len])

source_length = encoder_input_length[0]
target_length = tf.to_int32(tf.reduce_sum(trg_mask, axis=1))

true_src_len = tf.reshape(source_length, shape=[batch_size, 1, 1]) - 1
true_trg_len = tf.reshape(target_length, shape=[batch_size, 1, 1]) - 1

src_mask = tf.to_float(tf.sequence_mask(source_length, maxlen=src_len))
mask = tf.matmul(tf.expand_dims(trg_mask, axis=2), tf.expand_dims(src_mask, axis=1))

monotonous = tf.sqrt(((true_trg_len * src_indices - true_src_len * trg_indices) ** 2)
                     / (true_trg_len**2 + true_src_len**2))
monotonous = tf.to_float(monotonous < monotonicity_dist)
non_monotonous = (1 - monotonous) * mask
attn_loss = tf.reduce_sum(attention_weights * tf.stop_gradient(non_monotonous)) / tf.to_float(batch_size)

if monotonicity_decay:
    decay = tf.stop_gradient(0.5 ** (tf.to_float(global_step) / monotonicity_decay))
else:
    decay = 1.0

xent_loss += monotonicity_weight * decay * attn_loss
tensorflow.expand_dims
12,801
import tensorflow as tf

      quantize the tensor (layer's weights). This contains the weights
        created in the `build` function.
      **kwargs: Additional variables which may be passed to the quantizer.

    Returns:
      quantized tensor.
    """
    if training:
      if CLUSTER_CENTROIDS in weights:
        if self.preserve_sparsity:
          weights[ORIGINAL_WEIGHTS].assign(
              tf.multiply(weights[ORIGINAL_WEIGHTS], weights[SPARSITY_MASK]))
          weights[CLUSTERING_IMPL].cluster_centroids.assign(
              weights[CLUSTERING_IMPL].cluster_centroids * weights[CENTROIDS_MASK])
        weights[CLUSTER_CENTROIDS].assign(
            weights[CLUSTERING_IMPL].cluster_centroids)
        # Insert clustering variables
tensorflow.multiply
12,802
import tensorflow as tf

loss_action = tf.reduce_mean(
    - one_hot_labels_action * tf.log(tf.clip_by_value(self.predictions_action, 1e-10, 1.0)),
    name='loss')
loss_arguments = tf.reduce_mean(
    - one_hot_labels_arguments * tf.log(tf.clip_by_value(self.predictions_arguments, 1e-10, 1.0)),
    name='loss')
self.loss = loss_action + loss_arguments

tf.scalar_summary('loss', self.loss)

with tf.name_scope('accuracy'):
    correct_prediction_action = tf.equal(
        tf.argmax(one_hot_labels_action, 1),
        tf.argmax(self.predictions_action, 1))
    self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float'))
    tf.scalar_summary('accuracy_action', self.accuracy_action)

    correct_prediction_arguments = tf.equal(
        tf.argmax(one_hot_labels_arguments, 2),
        tf.argmax(self.predictions_arguments, 2))
    self.accuracy_arguments = tf.reduce_mean(tf.cast(correct_prediction_arguments, 'float'))
    tf.scalar_summary('accuracy_arguments', self.accuracy_arguments)
tensorflow.argmax
12,803
import tensorflow as tf

      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)


def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
tensorflow.nn.softmax
12,804
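A minimal sketch of the seed_api above (tensorflow.nn.softmax); the logits are illustrative:

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
probs = tf.nn.softmax(logits, axis=-1)  # each row normalizes to sum to 1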
import tensorflow as tf

    if not self._problem_hparams:
      log_warn(_no_problem_err("loss"))
      return (tf.constant(0., dtype=tf.float32),
              tf.constant(1., dtype=tf.float32))
tensorflow.constant
12,805
import tensorflow as tf

alpha_mean = tf.get_variable('alpha_mean_layer'+str(h),
                             shape=[1, 1, n_basis, n_out],
                             initializer=tf.random_normal_initializer())
alpha_logstd = tf.get_variable('alpha_logstd_layer'+str(h),
                               shape=[1, 1, n_basis, n_out],
                               initializer=tf.random_normal_initializer())
alpha_std = tf.exp(alpha_logstd)
# Compute epsilon from {n_samples} standard Gaussian
# epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out])
epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
hyp_params = tf.get_variable('hyp_params_layer'+str(h),
                             shape=[2],
                             initializer=tf.random_normal_initializer())
l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
# Compute A_{h+1}
A = tf.tile(alpha_mean + epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
# Compute z_{h}A_{h+1}
tensorflow.random_uniform
12,806
from tensorflow.python.ops import math_ops

      x = fp_rate
      y = recall
    else:  # curve == 'PR'.
      precision = math_ops.div(tp + epsilon, tp + fp + epsilon)
      x = recall
      y = precision
tensorflow.python.ops.math_ops.div
12,807
from tensorflow.contrib import layers

        columns_to_tensors=features,
        feature_columns=feature_columns,
        weight_collections=[parent_scope],
        scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(hidden_units):
      with variable_scope.variable_scope(
          parent_scope + "/hiddenlayer_%d" % layer_id,
          values=[net],
          partitioner=hidden_layer_partitioner) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=activation_fn,
            variables_collections=[parent_scope],
            scope=scope)
        if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
          net = layers.dropout(net, keep_prob=(1.0 - dropout))
      _add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
        parent_scope + "/logits",
        values=[net],
tensorflow.contrib.layers.fully_connected
12,808
from tensorflow.python.framework import constant_op

def _train_input_fn():
  features = {"x": constant_op.constant([[2.], [1.], [1.]])}
  label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
  return features, label


def _ranking_train_input_fn():
  features = {
      "a.f1": constant_op.constant([[3.], [0.3], [1.]]),
      "a.f2": constant_op.constant([[0.1], [3.], [1.]]),
      "b.f1": constant_op.constant([[13.], [0.4], [5.]]),
      "b.f2": constant_op.constant([[1.], [3.], [0.01]]),
  }
  label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)
  return features, label


def _eval_input_fn():
  features = {"x": constant_op.constant([[1.], [2.], [2.]])}
  label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
  return features, label
tensorflow.python.framework.constant_op.constant
12,809
import tensorflow as tf

        with tf.variable_scope(name) as scope:
            with tf.variable_scope('bn'):
                self.gamma = tf.get_variable('gamma', [dims],
                                             initializer=tf.constant_initializer(1.0))
                self.beta = tf.get_variable('beta', [dims],
                                            initializer=tf.constant_initializer(0.0))
                self.moving_mean = tf.get_variable('moving_mean', [dims],
                                                   initializer=tf.constant_initializer(0.0),
                                                   trainable=False)
                self.moving_variance = tf.get_variable('moving_variance', [dims],
                                                       initializer=tf.constant_initializer(1.0),
                                                       trainable=False)
        self.scope = scope

    def __call__(self, input_var, is_training, **xargs):
        with tf.variable_scope(self.scope):
tensorflow.constant_initializer
12,810
from tensorflow.python.ops import variable_scope

      or if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(name, 'mean', [values, weights]):
    total = _create_local('total_tensor', shape=values.get_shape())
    count = _create_local('count_tensor', shape=values.get_shape())
tensorflow.python.ops.variable_scope.variable_scope
12,811
import tensorflow as tf

        by default variables is None, all variables in the graph will be saved.
        It is probably a good idea since the whole session must be later be
        restored by the ChiefSession
        """
        os.makedirs(self._checkpoint_dir, exist_ok=True)
        # variables = tf.trainable_variables()
        self._saver = tf.train.Saver(variables,
                                     max_to_keep=max_to_keep,
                                     save_relative_paths=True)

    def _save_graph(self):
        writer = tf.summary.FileWriter(logdir=self._checkpoint_dir,
                                       # graph=self.sess.graph,
tensorflow.train.Saver
12,812
from tensorflow.python.platform import gfile

      self.assertTrue(gfile.Exists(s2))
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      self.assertFalse(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(s3))

      # Create a second helper, identical to the first.
      save2 = tf.train.Saver(saver_def=save.as_saver_def())
      save2.set_last_checkpoints(save.last_checkpoints)
tensorflow.python.platform.gfile.Exists
12,813
import tensorflow as tf

# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------

# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])

print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
tensorflow.placeholder
12,814
import tensorflow as tf

        # tile x from shape (b_s * i_s) to (p_s * b_s * i_s)
        post_size = tf.shape(dropout_mask_phs[0])[0]
        x = tf.tile(tf.reshape(x, [1, tf.shape(x)[0], tf.shape(x)[1]]), [post_size, 1, 1])

        # TODO: no dropout on input
tensorflow.shape
12,815
import tensorflow as tf

    act_limit = action_space.high[0]
    if nn_type == 'mlp':
        with tf.variable_scope('pi'):
            pi = act_limit * mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation)
        with tf.variable_scope('q1'):
            q1 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
        with tf.variable_scope('q2'):
            q2 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
        with tf.variable_scope('q1', reuse=True):
            q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)

    elif nn_type == 'mlp_dropout':
        with tf.variable_scope('pi'):
            pi = act_limit * mlp_dropout(x, list(hidden_sizes) + [act_dim], activation, output_activation)
        with tf.variable_scope('q'):
            q = tf.squeeze(mlp_dropout(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1],
                                       activation, None, dropout_rate), axis=1)
        with tf.variable_scope('q', reuse=True):
            q_pi = tf.squeeze(mlp_dropout(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1],
                                          activation, None, dropout_rate), axis=1)

    elif nn_type == 'mlp_variational':
        with tf.variable_scope('pi'):
            pi_in_dim = x.shape.as_list()[1]
            pi_dropout_mask_generator = DropoutMaskGenerator(pi_in_dim, hidden_sizes,
                                                             model_prob=1.0 - dropout_rate)
            pi_dropout_mask_phs = pi_dropout_mask_generator.generate_dropout_mask_placeholders()
            pi, pi_reg = mlp_variational(x, pi_dropout_mask_phs, list(hidden_sizes) + [act_dim],
                                         activation, output_activation, dropout_rate)
            pi = act_limit * pi
        with tf.variable_scope('q1'):
tensorflow.variable_scope
12,816
import tensorflow as tf

        file_name = 'heatmap_{}_{}.jpg'.format(save_image_with_heatmap.counter, ind)
        imsave(os.path.join(config.DEBUG_DIR, file_name), img.astype(np.uint8))
    return save_image_with_heatmap.counter

def get_keypoint(image, targets, predictions, heatmap_size, height, width, category,
                 clip_at_zero=True, data_format='channels_last', name=None):
    predictions = tf.reshape(predictions, [1, -1, heatmap_size*heatmap_size])

    pred_max = tf.reduce_max(predictions, axis=-1)
    pred_indices = tf.argmax(predictions, axis=-1)
    pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), \
                     tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32)

    width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
    pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), \
                     pred_y * height / tf.cast(heatmap_size, tf.float32)

    if clip_at_zero:
        pred_x, pred_y = pred_x * tf.cast(pred_max > 0, tf.float32), pred_y * tf.cast(pred_max > 0, tf.float32)
        pred_x = pred_x * tf.cast(pred_max > 0, tf.float32) + tf.cast(pred_max <= 0, tf.float32) * (width / 2.)
        pred_y = pred_y * tf.cast(pred_max > 0, tf.float32) + tf.cast(pred_max <= 0, tf.float32) * (height / 2.)

    if config.PRED_DEBUG:
        pred_indices_ = tf.squeeze(pred_indices)
        image_ = tf.squeeze(image) * 255.
        pred_heatmap = tf.one_hot(pred_indices_, heatmap_size*heatmap_size,
                                  on_value=1., off_value=0., axis=-1, dtype=tf.float32)
        pred_heatmap = tf.reshape(pred_heatmap, [-1, heatmap_size, heatmap_size])
        if data_format == 'channels_first':
            image_ = tf.transpose(image_, perm=(1, 2, 0))
        save_image_op = tf.py_func(save_image_with_heatmap,
                                   [image_, height, width, heatmap_size,
tensorflow.cast
12,817
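A minimal sketch of the seed_api above (tensorflow.cast); the values are illustrative:

import tensorflow as tf

f = tf.constant([1.8, -2.2])
i = tf.cast(f, tf.int32)  # [1, -2]; float-to-int casts truncate toward zero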
import tensorflow as tf

      feature_emb_list.append(speaker_pair_emb)
      tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1])  # [k, c, emb]
      feature_emb_list.append(tiled_genre_emb)

    if self.config["use_features"]:
      antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets)  # [k, c]
      antecedent_distance_emb = tf.gather(
          tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]),
          antecedent_distance_buckets)  # [k, c]
      feature_emb_list.append(antecedent_distance_emb)

    feature_emb = tf.concat(feature_emb_list, 2)  # [k, c, emb]
    feature_emb = tf.nn.dropout(feature_emb, self.dropout)  # [k, c, emb]

    target_emb = tf.expand_dims(top_span_emb, 1)  # [k, 1, emb]
tensorflow.get_variable
12,818
from tensorflow.python.ops import array_ops

  def _loss(self, logits, target, weight_tensor):
    if self._n_classes < 2:
      loss_vec = math_ops.square(logits - math_ops.to_float(target))
    elif self._n_classes == 2:
      loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
                                                      math_ops.to_float(target))
    else:
      loss_vec = nn.sparse_softmax_cross_entropy_with_logits(
          logits, array_ops.reshape(target, [-1]))

    if weight_tensor is None:
      return math_ops.reduce_mean(loss_vec, name="loss")
    else:
      loss_vec = array_ops.reshape(loss_vec, shape=(-1,))
      loss_vec = math_ops.mul(
          loss_vec, array_ops.reshape(weight_tensor, shape=(-1,)))
      return math_ops.div(
tensorflow.python.ops.array_ops.reshape
12,819
import tensorflow as tf
tensorflow.math.exp
12,820
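The seed above contains only the import and does not exhibit its seed_api, tensorflow.math.exp. A minimal sketch for reference (values are illustrative):

import tensorflow as tf

x = tf.constant([0.0, 1.0, 2.0])
y = tf.math.exp(x)  # element-wise e**x: [1., 2.71828..., 7.38906...]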
import tensorflow as tf

    cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
    location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
    gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))

    predictions = {
        'classes': tf.argmax(cls_pred, axis=-1),
        'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
        'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4])
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss, which includes softmax cross entropy and L2 regularization.
    cross_entropy = tf.cond(n_positives > 0.,
                            lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred),
                            lambda: 0.)
    # cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)

    # Create a tensor named cross_entropy for logging purposes.
    tf.identity(cross_entropy, name='cross_entropy_loss')
    tf.summary.scalar('cross_entropy_loss', cross_entropy)
tensorflow.estimator.EstimatorSpec
12,821
import tensorflow as tf

    theta: TensorLike,
    phi: TensorLike,
    name: str = "spherical_harmonics_evaluate_spherical_harmonics") -> TensorLike:  # pylint: disable=line-too-long
  with tf.name_scope(name):
    degree_l = tf.convert_to_tensor(value=degree_l)
    order_m = tf.convert_to_tensor(value=order_m)
    theta = tf.convert_to_tensor(value=theta)
    phi = tf.convert_to_tensor(value=phi)

    var_type = theta.dtype
    sign_m = tf.math.sign(order_m)
    order_m = tf.abs(order_m)
    zeros = tf.zeros_like(order_m)
tensorflow.convert_to_tensor
12,822
import tensorflow as tf

            batch_size=batch_size,
            num_parallel_batches=num_cpu_threads,
            drop_remainder=True))
    return d

  return input_fn


def _decode_record(record, name_to_features):
  """Decodes a record to a TensorFlow example."""
  example = tf.parse_single_example(record, name_to_features)

  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # So cast all int64 to int32.
  for name in list(example.keys()):
    t = example[name]
    if t.dtype == tf.int64:
      t = tf.to_int32(t)
    example[name] = t
tensorflow.parse_single_example
12,823
import tensorflow as tf

    def main(self):
        with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
tensorflow.device
12,824
import tensorflow as tf

            tf.placeholder(tf.float32, [None, None, 3]),
        fields.InputDataFields.image_additional_channels:
            tf.placeholder(tf.float32, [None, None, 2]),
        fields.InputDataFields.original_image:
            tf.placeholder(tf.float32, [None, None, 3]),
    }
    with self.assertRaises(ValueError):
      _ = inputs.pad_input_data_to_static_shapes(
tensorflow.placeholder
12,825
import tensorflow as tf

  with tf.compat.v1.name_scope(name, 'histogram'):
    x = tf.reshape(tf_utils.get_values(x), [-1])
    if categorical:
      x_dtype = x.dtype
      x = x if x_dtype == tf.string else tf.strings.as_string(x)
      elements, counts = count_per_key(x)
      if x_dtype != elements.dtype:
        elements = tf.strings.to_number(elements, tf.int64)
      return counts, elements

    if boundaries is None:
      boundaries = tf.range(11, dtype=tf.float32) / 10.0
    elif isinstance(boundaries, int) or (isinstance(boundaries, tf.Tensor) and
                                         boundaries.get_shape().ndims == 0):
      min_value, max_value = _min_and_max(x, True)
      boundaries = tf.linspace(
          tf.cast(min_value, tf.float32),
          tf.cast(max_value, tf.float32),
          tf.cast(boundaries, tf.int64))

    # Shift the boundaries slightly to account for floating point errors,
    # and due to the fact that the rightmost boundary is essentially ignored.
    boundaries = tf.expand_dims(tf.cast(boundaries, tf.float32), 0) - 0.0001
tensorflow.range
12,826
import tensorflow as tf

    # If crop_size is None, we simply do global pooling.
    pool_height = tf.shape(features)[1]
tensorflow.shape
12,827
import tensorflow as tf

    # create localization and classification losses
    losses = ssd.loss(labels, params)
    tf.losses.add_loss(params['localization_loss_weight'] * losses['localization_loss'])
    tf.losses.add_loss(params['classification_loss_weight'] * losses['classification_loss'])
    tf.summary.scalar('regularization_loss', regularization_loss)
    tf.summary.scalar('localization_loss', losses['localization_loss'])
    tf.summary.scalar('classification_loss', losses['classification_loss'])
    total_loss = tf.losses.get_total_loss(add_regularization_losses=True)

    if mode == tf.estimator.ModeKeys.EVAL:
        batch_size = features['images'].shape[0].value
        assert batch_size == 1
tensorflow.losses.get_total_loss
12,828
import tensorflow as tf

        predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network.
        labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels.
        eps: a constant to set upper or lower limit for labels, smoothening factor
        name: Optional scope/name for op_scope.

    Returns:
        A tensor with the log loss.
    """
    with tf.name_scope(name):
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        predictions = tf.to_float(predictions)
        labels = tf.to_float(labels)
        losses = -tf.multiply(labels, tf.log(predictions + eps)) - tf.multiply(
            (1 - labels), tf.log(1 - predictions + eps))
        return tf.losses.compute_weighted_loss(losses, weights)


def kappa_loss(predictions, labels, y_pow=1, eps=1e-15, num_ratings=5,
               batch_size=32, name='kappa'):
    """Define a kappa loss. It's a continuous differentiable approximation of
    discrete kappa loss.

    Args:
        predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network.
        labels: 2D tensor or array, [batch_size, num_classes] ground truth labels or target labels.
        y_pow: int, to which the labels should be raised; useful if model diverges. e.g. y_pow=2
        num_ratings: numbers of rater to used, typically num_classes of the model
tensorflow.log
12,829
import tensorflow as tf

                self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')
                self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
                self.is_demo_ph = tf.placeholder(tf.float32, shape=(None, 1), name='is_demonstrations')
                self.weight_ph = tf.placeholder(tf.float32, shape=(None, 1), name='importance_weight')
                self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape,
                                                 name='actions')
                self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")

                if self.n_step:
                    self.next_observations_ph_n = self.target_policy.obs_ph
                    self.processed_next_obs_ph_n = self.target_policy.processed_obs
                    self.rewards_ph_n = tf.placeholder(tf.float32, shape=(None, 1), name='n_step_rewards')
                    self.terminals_ph_n = tf.placeholder(tf.float32, shape=(None, 1), name='n_step_terminals')

            with tf.variable_scope("model", reuse=False):
                # Create the policy
                # first return value corresponds to deterministic actions
                # policy_out corresponds to stochastic actions, used for training
                # logp_pi is the log probability of actions taken by the policy
                self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph)

                # Monitor the entropy of the policy,
tensorflow.placeholder
12,830
import tensorflow as tf

      start_features = tf.einsum("lbh,bl->bh", output, start_index)
      start_features = tf.tile(start_features[None], [seq_len, 1, 1])
tensorflow.tile
12,831
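A minimal sketch of the seed_api above (tensorflow.tile); shapes are illustrative:

import tensorflow as tf

t = tf.constant([[1, 2]])   # shape [1, 2]
tiled = tf.tile(t, [3, 2])  # shape [3, 4]: the row repeated 3x, the columns 2x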
import tensorflow as tf

        if direction is None:
            direct_mask_un = tf.not_equal(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
        else:
            if direction == 'forward':
                direct_mask_un = tf.greater(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
            else:
                direct_mask_un = tf.less(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
        # [bs, sluh, sld]
        rep_mask_tile_un = tf.logical_and(tf.expand_dims(rep_dep_mask, 1),
                                          tf.expand_dims(rep_unhead_mask, 2))
        pooling_mask = tf.logical_and(direct_mask_un, rep_mask_tile_un)  # [bs, sluh, sld]

        # data for pooling
        pooling_data = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1])  # bs,sluh,sld,hn

        # execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn]
        pooling_data = mask_for_high_rank(pooling_data, pooling_mask)  # [bs,sluh,sld,hn]
        pooling_data_sum = tf.reduce_sum(pooling_data, -2)  # [bs,sluh,hn]
        pooling_den = tf.reduce_sum(tf.cast(pooling_mask, tf.int32), -1, keep_dims=True)  # [bs,sluh]
        pooling_den = tf.where(tf.equal(pooling_den, 0), tf.ones_like(pooling_den), pooling_den)
tensorflow.logical_and
12,832
import tensorflow as tf

        return out_img

    def build_discriminator(self, image, reuse=False, name='discriminator'):
        with tf.variable_scope(name):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse is False

            def lrelu(x, alpha, name='lrelu'):
                with tf.variable_scope(name):
                    if reuse:
                        tf.get_variable_scope().reuse_variables()
                    else:
                        assert tf.get_variable_scope().reuse is False
                    return tf.nn.relu(x) - alpha * tf.nn.relu(-x)

            def instance_norm(x, name='instance_norm'):
                with tf.variable_scope(name):
                    if reuse:
                        tf.get_variable_scope().reuse_variables()
                    else:
                        assert tf.get_variable_scope().reuse is False
tensorflow.get_variable_scope
12,833
import tensorflow as tf

      Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow

      Args:
          inputs:       Tensor, k-D input ... x C  could be BC or BHWC or BDHWC
          is_training:  boolean tf.Variable, true indicates training phase
          scope:        string, variable scope
          moments_dims: a list of ints, indicating dimensions for moments calculation
          bn_decay:     float or float tensor variable, controlling moving average weight
      Return:
          normed:       batch-normalized maps
      """
      with tf.variable_scope(scope) as sc:
          num_channels = inputs.get_shape()[-1].value
          beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
                             name='beta', trainable=True)
          gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
                              name='gamma', trainable=True)
          batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
          decay = bn_decay if bn_decay is not None else 0.9
          ema = tf.train.ExponentialMovingAverage(decay=decay)
          # Operator that maintains moving averages of variables.
          ema_apply_op = tf.cond(is_training,
                                 lambda: ema.apply([batch_mean, batch_var]),
                                 lambda: tf.no_op())

          # Update moving average and return current batch's avg and var.
tensorflow.constant
12,834
import tensorflow as tf

      tf.lookup.KeyValueTensorInitializer(vocab, table_values),
      num_oov_buckets=1)

  def to_ids(example):
    sentence = tf.reshape(example['tokens'], shape=[1])
    words = tf.strings.split(sentence, sep=' ').values
    truncated_words = words[:max_seq_len]
    tokens = table.lookup(truncated_words) + 1
    tokens = tf.cond(
        tf.less(tf.size(tokens), max_seq_len),
        lambda: tf.concat([tokens, [eos]], 0),
        lambda: tokens)
    return tf.concat([[bos], tokens], 0)

  return to_ids


def batch_and_split(dataset, max_seq_len, batch_size):
  return dataset.padded_batch(
      batch_size, padded_shapes=[max_seq_len + 1]).map(
          split_input_target, num_parallel_calls=tf.data.experimental.AUTOTUNE)


def get_special_tokens(vocab_size):
tensorflow.concat
12,835
import tensorflow as tf

        with tf.variable_scope("attention"):
            inputs_ = tf.nn.relu(
                dense(d_inputs, hidden, use_bias=False, scope="inputs"))
            memory_ = tf.nn.relu(
                dense(d_memory, hidden, use_bias=False, scope="memory"))
            outputs = tf.matmul(inputs_, tf.transpose(
                memory_, [0, 2, 1])) / (hidden ** 0.5)
            mask = tf.tile(tf.expand_dims(mask, axis=1), [1, JX, 1])
            logits = tf.nn.softmax(softmax_mask(outputs, mask))
            outputs = tf.matmul(logits, memory)
            res = tf.concat([inputs, outputs], axis=2)

        with tf.variable_scope("gate"):
            dim = res.get_shape().as_list()[-1]
            d_res = dropout(res, keep_prob=keep_prob, is_train=is_train)
            gate = tf.nn.sigmoid(dense(d_res, dim, use_bias=False))
            return res * gate


def dense(inputs, hidden, use_bias=True, scope="dense"):
    with tf.variable_scope(scope):
        shape = tf.shape(inputs)
        dim = inputs.get_shape().as_list()[-1]
        out_shape = [shape[idx] for idx in range(
            len(inputs.get_shape().as_list()) - 1)] + [hidden]
tensorflow.variable_scope
12,836
import tensorflow as tf
import sys
import getch
import model_interpreter as interpreter
import network_utils as nut
import math
from tensorflow.contrib.tensorboard.plugins import projector
from Bunch import Bunch

tf.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder')
tf.app.flags.DEFINE_string('input_name', '', 'input folder')
tf.app.flags.DEFINE_string('test_path', '', 'test set folder')
tf.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration')
tf.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae)'
                           'WhatWhereAe (ww) U-netAe (u)')
tf.app.flags.DEFINE_string('postfix', '', 'Postfix for the training folder')
tf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight')
tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')
tf.app.flags.DEFINE_float('epsilon', 0.000001,
                          'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5')
tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances')
tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance')
tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective')
tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')
tensorflow.app.flags.DEFINE_string
12,837
import tensorflow as tf

    # resize y-z
    squeeze_b_x = tf.reshape(input_tensor, [-1, y_size, z_size, c_size], name='reshape_bx')
    resize_b_x = tf.compat.v1.image.resize_bilinear(squeeze_b_x, [y_size_new, z_size_new],
                                                    align_corners=align)
    resume_b_x = tf.reshape(resize_b_x, [-1, x_size, y_size_new, z_size_new, c_size], name='resume_bx')

    # Reorient
    reoriented = tf.transpose(resume_b_x, [0, 3, 2, 1, 4])

    # squeeze and 2d resize
    squeeze_b_z = tf.reshape(reoriented, [-1, y_size_new, x_size, c_size], name='reshape_bz')
    resize_b_z = tf.compat.v1.image.resize_bilinear(squeeze_b_z, [y_size_new, x_size_new],
                                                    align_corners=align)
    resume_b_z = tf.reshape(resize_b_z, [-1, z_size_new, y_size_new, x_size_new, c_size], name='resume_bz')

    output_tensor = tf.transpose(resume_b_z, [0, 3, 2, 1, 4])
    return output_tensor


def conv3d(x, kernel_size, filters, padding='SYMMETRIC', activation=None,
           initialization=None, use_bias=True):
    """
    Based on: https://github.com/gitlimlab/CycleGAN-Tensorflow/blob/master/ops.py
    For tf padding, refer to: https://www.tensorflow.org/api_docs/python/tf/pad
tensorflow.compat.v1.image.resize_bilinear
12,838
from tensorflow.python.framework import ops

  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  filter_height = int(filter_shape[0])
  filter_width = int(filter_shape[1])
  filter_in_depth = int(filter_shape[2])
  filter_out_depth = int(filter_shape[3])
  return ops.OpStats("weight_parameters", (filter_height * filter_width *
                                           filter_in_depth * filter_out_depth))


@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
tensorflow.python.framework.ops.OpStats
12,839
import tensorflow as tf

                                       use_bias=False, name='hyper_b_final_l1')
    hyper_b_final = tf.layers.dense(inputs=hyper_b_final_l1, units=1, activation=None,
                                    use_bias=False, name='hyper_b_final')

    # First layer
    w1 = tf.abs(tf.matmul(state, hyper_w_1))
    b1 = tf.matmul(state, hyper_b_1)
    w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer])  # reshape into batch of matrices
    b1_reshaped = tf.reshape(b1, [-1, 1, n_h_mixer])  # [batch, 1, n_h_mixer]
    hidden = tf.nn.elu(tf.matmul(agent_qs_reshaped, w1_reshaped) + b1_reshaped)

    # Second layer
    w_final = tf.abs(tf.matmul(state, hyper_w_final))
    w_final_reshaped = tf.reshape(w_final, [-1, n_h_mixer, 1])  # reshape into batch of matrices
    b_final_reshaped = tf.reshape(hyper_b_final, [-1, 1, 1])
tensorflow.reshape
12,840
import tensorflow as tf

                              transpose_b=True, name="2") + self.b_rec) \
                + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise) \
                * tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
        else:
            new_state = ((1 - self.alpha) * state) \
                + self.alpha * (
                    tf.matmul(
                        tf.nn.relu(state),
                        self.W_rec * self.rec_Connectivity,
                        transpose_b=True, name="1")
                    + tf.matmul(
                        rnn_in,
                        self.W_in * self.input_Connectivity,
                        transpose_b=True, name="2")
                    + self.b_rec) \
                + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise) \
                * tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)

        return new_state

    def rnn_output(self, new_state):
        if self.dale_ratio:
tensorflow.matmul
12,841
import tensorflow as tf

    with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
        train_op = tf.no_op(name='train_op')

    saver = tf.train.Saver(tf.global_variables())
    summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph())
tensorflow.global_variables
12,842
import tensorflow as tf

      for output in model_options.outputs_to_num_classes
  }

  for i, image_scale in enumerate(eval_scales):
    with tf.variable_scope(tf.get_variable_scope(), reuse=True if i else None):
      outputs_to_scales_to_logits = multi_scale_logits(
          images,
          model_options=model_options,
          image_pyramid=[image_scale],
          is_training=False,
          fine_tune_batch_norm=False)

    if add_flipped_images:
      with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        outputs_to_scales_to_logits_reversed = multi_scale_logits(
            tf.reverse_v2(images, [2]),
            model_options=model_options,
            image_pyramid=[image_scale],
            is_training=False,
            fine_tune_batch_norm=False)

    for output in sorted(outputs_to_scales_to_logits):
      scales_to_logits = outputs_to_scales_to_logits[output]
      logits = tf.image.resize_bilinear(
          scales_to_logits[_MERGED_LOGITS_SCOPE],
          tf.shape(images)[1:3],
          align_corners=True)
      outputs_to_predictions[output].append(
          tf.expand_dims(tf.nn.softmax(logits), 4))
tensorflow.reverse_v2
12,843
import tensorflow as tf

  def Decode(self, ids):
    txt = tf.strings.reduce_join(self._TokenToString(ids))
    txt = tf.strings.regex_replace(txt, BOW_STR, ' ')
    # Note that this strips spaces from the end of the input as well.
    # We assume no inputs rely on the existence of trailing whitespace.
tensorflow.strings.regex_replace
12,844
import tensorflow as tf

  def testRelPath(self):
    train_dir = "train"
    model = os.path.join(train_dir, "model-0")
    # model_checkpoint_path should have no "train" directory part.
    new_rel_path = "model-0"
    ckpt = tf.train.generate_checkpoint_state_proto(train_dir, model)
    self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)

  def testAllModelCheckpointPaths(self):
    save_dir = self._TestDir("all_models_test")
    abs_path = os.path.join(save_dir, "model-0")
    for paths in [None, [], ["model-2"]]:
      ckpt = tf.train.generate_checkpoint_state_proto(
          save_dir, abs_path, all_model_checkpoint_paths=paths)
      self.assertEqual(ckpt.model_checkpoint_path, abs_path)
      self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
      self.assertEqual(
          len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
      self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)

  def testUpdateCheckpointState(self):
    save_dir = self._TestDir("update_checkpoint_state")
    os.chdir(save_dir)
    # Make a temporary train directory.
tensorflow.train.generate_checkpoint_state_proto
12,845
import tensorflow as tf

        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
    else:
        # need broadcasting
        target_shape = []
        for axis in range(get_ndim(x)):
            if axis in reduction_axes:
                target_shape.append(1)
            else:
                target_shape.append(tf.shape(x)[axis])
        target_shape = stack(target_shape)

        broadcast_mean = tf.reshape(mean, target_shape)
        broadcast_var = tf.reshape(var, target_shape)
        broadcast_gamma = tf.reshape(gamma, target_shape)
        broadcast_beta = tf.reshape(beta, target_shape)
        normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                           broadcast_beta, broadcast_gamma, epsilon)
    return normed, mean, var


def ones(shape, dtype=None, name=None):
    """Instantiates an all-ones tensor variable and returns it.

    Parameters
    ----------
    shape: Tuple of integers, shape of returned Keras variable.
    dtype: Tensorflow dtype
    name: String, name of returned Keras variable.

    Returns
tensorflow.nn.batch_normalization
12,846
import tensorflow as tf

      entropy = loss_entropy + rl_entropy
      log_prob = loss_log_prob + log_prob
      train_op, _, _ = meta_train_op(acc, entropy, log_prob, rl_scope, params)

    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)


def rl_label_weights(name=None):
  """Returns the weight for importance."""
tensorflow.estimator.EstimatorSpec
12,847
import tensorflow as tf

    except Exception:
        pred_norm = pred_ / \
            (eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))

    hist_rater_a = tf.reduce_sum(pred_norm, 0)
    hist_rater_b = tf.reduce_sum(labels, 0)

    conf_mat = tf.matmul(tf.transpose(pred_norm), labels)

    nom = tf.reduce_sum(weights * conf_mat)
    denom = tf.reduce_sum(weights * tf.matmul(
        tf.reshape(hist_rater_a, [num_ratings, 1]),
        tf.reshape(hist_rater_b, [1, num_ratings])) / tf.to_float(batch_size))
tensorflow.transpose
12,848
import tensorflow as tf

            weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
            weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
        else:
            # Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
            wp = get_variable('Wp', [state_size, state_size])
            vp = get_variable('vp', [state_size, 1])

            pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
            pos = tf.floor(encoder_input_length * pos)
            pos = tf.reshape(pos, [-1, 1])
            pos = tf.minimum(pos, encoder_input_length - 1)

            idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
            idx = tf.reshape(idx, [-1, attn_length])

            low = pos - encoder.attn_window_size
            high = pos + encoder.attn_window_size
tensorflow.reshape
12,849
import tensorflow as tf

    compute_pdf = lambda x: pareto.prob(x)  # pylint:disable=unnecessary-lambda
    self.assertAlmostEqual(self.compute_gradients(
        compute_pdf, args=[x])[0], 0.)

  def testParetoCDFGradientZeroOutsideSupport(self):
    scale = tf.constant(1.)
    concentration = tf.constant(3.)
    # Check the gradient on the undefined portion.
    x = scale - 1
tensorflow.constant
12,850
import tensorflow as tf

      will have the same type as `x`. If `x` is integral, the output is cast to float32.
      NaNs and infinite input values are ignored.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'var'):
    return _mean_and_var(x, reduce_instance_dims, output_dtype)[1]


def _mean_and_var(x: common_types.TensorType,
                  reduce_instance_dims: bool = True,
tensorflow.compat.v1.name_scope
12,851
import tensorflow as tf

                                           # mean all elements of all pixels in all batch
                                           reduction=tf.losses.Reduction.MEAN))  # SUM, SUM_OVER_BATCH_SIZE, default mean by all elements

    mse_loss = tf.multiply(params['mse_weight'], tf.add_n(mse_loss_list), name='mse_loss')
    tf.summary.scalar('mse', mse_loss)
    tf.losses.add_loss(mse_loss)

    # bce_loss_list = []
tensorflow.summary.scalar
12,852
import tensorflow as tf

    z_projs_real, z_projs_fake, z_aug_projs_real, z_aug_projs_fake = tf.split(z_projs, 4)

    self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
        d_real=d_real, d_fake=d_fake,
        d_real_logits=d_real_logits, d_fake_logits=d_fake_logits)

    penalty_loss = penalty_lib.get_penalty_loss(
        x=images, x_fake=generated, y=y, is_training=is_training,
        discriminator=self.discriminator, architecture=self._architecture)
    self.d_loss += self._lambda * penalty_loss

    z_projs = tf.concat([z_projs_real, z_projs_fake], 0)
    z_aug_projs = tf.concat([z_aug_projs_real, z_aug_projs_fake], 0)

    sims_logits = tf.matmul(z_projs, z_aug_projs, transpose_b=True)
    logits_max = tf.reduce_max(sims_logits, 1)
    sims_logits = sims_logits - tf.reshape(logits_max, [-1, 1])
    sims_probs = tf.nn.softmax(sims_logits)

    sim_labels = tf.constant(np.arange(bs * 2, dtype=np.int32))
    sims_onehot = tf.one_hot(sim_labels, bs * 2)

    c_real_loss = - tf.reduce_mean(
        tf.reduce_sum(sims_onehot * tf.log(sims_probs + 1e-10), 1))

    self.d_loss += c_real_loss * self._weight_contrastive_loss_d
    self._tpu_summary.scalar("loss/c_real_loss", c_real_loss)
tensorflow.matmul
12,853
import tensorflow as tf

    output_bias = tf.get_variable(
        "output_bias",
        shape=[bert_config.vocab_size],
        initializer=tf.zeros_initializer(),
    )
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    label_ids = tf.reshape(label_ids, [-1])
    label_weights = tf.reshape(label_weights, [-1])

    one_hot_labels = tf.one_hot(
        label_ids, depth=bert_config.vocab_size, dtype=tf.float32
    )

    # The `positions` tensor might be zero-padded (if the sequence is too
    # short to have the maximum number of predictions). The `label_weights`
    # tensor has a value of 1.0 for every real prediction and 0.0 for the
    # padding predictions.
    per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
    numerator = tf.reduce_sum(label_weights * per_example_loss)
    denominator = tf.reduce_sum(label_weights) + 1e-5
    loss = numerator / denominator
tensorflow.one_hot
12,854
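A minimal sketch of the seed_api above (tensorflow.one_hot); labels and depth are illustrative:

import tensorflow as tf

labels = tf.constant([0, 2])
one_hot = tf.one_hot(labels, depth=3, dtype=tf.float32)  # [[1., 0., 0.], [0., 0., 1.]]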
import tensorflow as tf

        use_nesterov=True
    )
    with tf.variable_scope('src'):
        return optimizer.minimize(loss, global_step), src_learning_rate
tensorflow.variable_scope
12,855
import tensorflow as tf

    "Mean squared error loss"
    loss = tf.reduce_mean(tf.square(tf.reshape(predictions, [-1]) - tf.reshape(yy, [-1])))

    "Adding regularization"
    if lambda_l2_reg > 0:
        cell_l2 = tf.reduce_sum([tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
                                 if not ("noreg" in tf_var.name or "Bias" in tf_var.name)])
        Predict_l2 = tf.nn.l2_loss(W)  # + tf.nn.l2_loss(b)
        total_loss = tf.reduce_sum(loss + lambda_l2_reg * tf.reduce_sum(cell_l2 + Predict_l2))
    else:
        total_loss = loss

    "Define the train_step"
tensorflow.nn.l2_loss
12,856
import tensorflow as tf

      weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
      reuse=reuse):
    with tf.variable_scope(_LOGITS_SCOPE_NAME, _LOGITS_SCOPE_NAME, [features]):
      branch_logits = []
tensorflow.variable_scope
12,857
import tensorflow as tf

          output_projection=(w, b))

    targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]

    def SampledLoss(labels, inputs):
      labels = tf.reshape(labels, [-1, 1])
      return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes)

    return tf.nn.seq2seq.model_with_buckets(
        enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
        softmax_loss_function=SampledLoss)

  # Now we construct the copy model.
tensorflow.nn.seq2seq.model_with_buckets
12,858
import tensorflow as tf

def simple_block_attention(
        rep_tensor, rep_mask, block_len=5, scope=None,
        direction=None, keep_prob=1., is_train=None, wd=0., activation='elu', hn=None):
    assert direction is not None

    def scaled_tanh(x, scale=5.):
        return scale * tf.nn.tanh(1. / scale * x)

    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    ivec = hn or rep_tensor.get_shape().as_list()[2]
    input_dim = rep_tensor.get_shape().as_list()[2]
    with tf.variable_scope(scope or 'block_simple'):
        # @1. split sequence
        with tf.variable_scope('split_seq'):
            block_num = tf.cast(tf.ceil(tf.divide(tf.cast(sl, tf.float32),
                                                  tf.cast(block_len, tf.float32))), tf.int32)
            comp_len = block_num * block_len - sl

            rep_tensor_comp = tf.concat([rep_tensor, tf.zeros([bs, comp_len, input_dim], tf.float32)], 1)
            rep_mask_comp = tf.concat([rep_mask, tf.cast(tf.zeros([bs, comp_len], tf.int32), tf.bool)], 1)

            rep_tensor_split = tf.reshape(rep_tensor_comp, [bs, block_num, block_len, input_dim])  # bs,bn,bl,d
            rep_mask_split = tf.reshape(rep_mask_comp, [bs, block_num, block_len])  # bs,bn,bl

            # non-linear
            rep_map = bn_dense_layer(rep_tensor_split, ivec, True, 0., 'bn_dense_map',
                                     activation, False, wd, keep_prob, is_train)  # bs,bn,bl,vec
            rep_map_tile = tf.tile(tf.expand_dims(rep_map, 2), [1, 1, block_len, 1, 1])  # bs,bn,bl,bl,vec
tensorflow.variable_scope
12,859
import tensorflow as tf

from __future__ import print_function

import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
import time
import os

tf.logging.set_verbosity(tf.logging.INFO)

project_dn = os.path.expanduser("~/projects/apcocsm/")
# project_dn = "/home/pbos/apcocsm/"

m0_num = 5.291
argpar_num = -20.0

constraint = {}
tensorflow.logging.set_verbosity
12,860
import tensorflow as tf

                 'For large datasets it is better to prepare datasets manually!',
                 dataset_name, data_dir)
  if dataset_name.startswith('t2t_'):
    # Download and run dataset generator for T2T problem.
    data_dir = os.path.join(data_dir, dataset_name)
    tf.io.gfile.makedirs(data_dir)
    tf.io.gfile.makedirs(dl_dir)
    t2t_problems().problem(dataset_name[len('t2t_'):]).generate_data(
        data_dir, dl_dir)
  else:
    # Download and prepare TFDS dataset.
    tfds_builder = tfds.builder(dataset_name)
tensorflow.io.gfile.makedirs
12,861
import tensorflow as tf

  def output_tensor_infos(self) -> List[analyzer_nodes.TensorInfo]:
    # The output is (mean, var).
    if self._compute_variance and not self._compute_weighted:
      return [
          analyzer_nodes.TensorInfo(
              tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
      ] * 2
    else:
      return [
          analyzer_nodes.TensorInfo(
              tf.as_dtype(np.int64), self._output_shape, None),
          analyzer_nodes.TensorInfo(
              tf.as_dtype(self._output_numpy_dtype), self._output_shape, None),
          analyzer_nodes.TensorInfo(
              tf.as_dtype(self._output_numpy_dtype), self._output_shape, None),
          analyzer_nodes.TensorInfo(
              tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
      ]

  def _combine_mean_and_var_accumulators(
      self, a: _WeightedMeanAndVarAccumulator,
      b: _WeightedMeanAndVarAccumulator) -> _WeightedMeanAndVarAccumulator:
    """Combines two mean and var accumulators.

    Args:
      a: A _WeightedMeanAndVarAccumulator.
      b: A _WeightedMeanAndVarAccumulator.
tensorflow.as_dtype
12,862
import tensorflow as tf

            self,
            'kappa_%s' % layer,
            tf.get_variable(
                name='%s_kappa' % self.layer_name,
                dtype=self.dtype,
                initializer=initialization.xavier_initializer(
                    shape=bias_shape,
                    dtype=self.dtype,
                    uniform=self.normal_initializer,
                    mask=None)))
        if self.lesion_omega:
            setattr(
                self,
                'omega_%s' % layer,
                tf.constant(0.))
        else:
            setattr(
                self,
                'omega_%s' % layer,
                tf.get_variable(
                    name='%s_omega' % self.layer_name,
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=bias_shape,
                        dtype=self.dtype,
                        uniform=self.normal_initializer,
                        mask=None)))
    else:
tensorflow.constant
12,863
import tensorflow as tf

      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
tensorflow.contrib.tpu.TPUEstimator
12,864
import tensorflow as tf

      boxlist1: Nx4 floatbox
      boxlist2: Mx4

    Returns:
      a tensor with shape [N, M] representing pairwise iou scores.
    """
    intersections = pairwise_intersection(boxlist1, boxlist2)
    areas1 = area(boxlist1)
    areas2 = area(boxlist2)
    unions = (
        tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
    return tf.where(
        tf.equal(intersections, 0.0),
        tf.zeros_like(intersections),
        tf.truediv(intersections, unions))


@under_name_scope()
def pairwise_iou_batch(proposal_boxes, gt_boxes, orig_gt_counts, batch_size):
    """Computes pairwise intersection-over-union between box collections.

    Args:
      proposal_boxes: K x 5 (batch_index, x1, y1, x2, y2)
      gt_boxes: BS x MaxNumGTs x 4
      orig_gt_counts: BS

    Returns:
      list of length BS, each element is output of pairwise_iou: N x M
tensorflow.zeros_like
12,865
import tensorflow as tf

      if not self._problem_hparams:
        log_warn("Without a Problem, T2TModel.top is a passthrough.")
        return body_output

      target_modality = self._problem_hparams.target_modality
      with tf.variable_scope(target_modality.name):
        log_info("Transforming body output with %s.top", target_modality.name)
        last_only = (
            target_modality.top_is_pointwise and
            self.hparams.mode == tf.estimator.ModeKeys.PREDICT and
tensorflow.variable_scope
12,866
import tensorflow as tf

Convert back to [batch_size, num_steps, num_classes] after the matrix operations'''
logits = tf.reshape(tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), W) + b,
                    shape=[batch_size, num_steps, num_classes])
predictions = tf.nn.softmax(logits)

y_as_list = tf.unstack(y, num=num_steps, axis=1)

losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)

'''Train the network'''
tensorflow.nn.sparse_softmax_cross_entropy_with_logits
12,867
import tensorflow as tf

print(sess.run(tf.floordiv(13, 4)))
print(sess.run(tf.mod(13.2, 4)))
print(sess.run(tf.cross([1, 0, 0], [0, 1, 0])))
print(sess.run(tf.square([1, 2, 3])))


def custom_polynomial(local_tf, value):
    return local_tf.subtract(3 * local_tf.square(value), value) + 10


print((sess.run(custom_polynomial(tf, 11))))

alpha = 0.1
val = tf.constant([[2, 3], [1, 4]], dtype=tf.float32)
l1 = tf.contrib.layers.l1_regularizer(alpha)(val)
l2 = tf.contrib.layers.l2_regularizer(alpha)(val)

A = [[0.8, 0.6, 0.3], [0.1, 0.6, 0.4]]
B = [1, 1]
top_k = tf.nn.top_k(A, 2)
in_top_k = tf.nn.in_top_k(A, B, 1)

sess.run(tf.global_variables_initializer())
print(f'\nl1={sess.run(l1)} l2={sess.run(l2)}')

a = np.array([1, 2, 3], dtype=np.float32)
tensorflow.constant
12,868
import tensorflow as tf

def _SafeNegEntropy(probs, batch_size, eps=0.0001):
  """Computes negative entropy in a way that will not overflow."""
  adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
  entropy = tf.mul(probs, tf.log(adjusted_probs))
  return tf.reduce_sum(entropy) / batch_size
tensorflow.log
12,869
from tensorflow.python.ops import math_ops

  """
  predictions, labels = tensor_util.remove_squeezable_dimensions(
      predictions, labels)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  radial_diffs = math_ops.mul(predictions, labels)
  radial_diffs = math_ops.reduce_sum(radial_diffs,
                                     reduction_indices=[dim,],
                                     keep_dims=True)
  mean_distance, update_op = streaming_mean(radial_diffs, weights,
                                            None,
                                            None,
                                            name or 'mean_cosine_distance')
  mean_distance = math_ops.sub(1.0, mean_distance)
  update_op = math_ops.sub(1.0, update_op)

  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_distance)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return mean_distance, update_op
tensorflow.python.ops.math_ops.sub
12,870
import tensorflow as tf

        element = [ys, x_coori]
        xs = tf.map_fn(loop_synthesis, element,
                       dtype=tf.float32, parallel_iterations=1, back_prop=False)
tensorflow.map_fn
12,871
from tensorflow.python.training import summary_io

  def set_estimator(self, estimator):
    super(SummarySaver, self).set_estimator(estimator)
    # TODO(mdan): This line looks redundant.
    if self._summary_writer is None:
      self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)

  def every_n_step_begin(self, step):
    super(SummarySaver, self).every_n_step_begin(step)
tensorflow.python.training.summary_io.SummaryWriter
12,872
import tensorflow as tf

        use_aux_head = knobs['use_aux_head']  # Whether to use auxiliary head
        stem_ch = initial_block_ch * stem_ch_mul

        # Layers with reduction cells (otherwise, normal cells)
        reduction_layers = [L // 3, L // 3 * 2 + 1]

        # Layers with auxiliary heads
        # Aux heads speed up training of good feature representations early in the network
        # Add aux heads only if enabled and downsampling width can happen 3 times
        aux_head_layers = []
        if use_aux_head and w % (2 << 3) == 0:
            aux_head_layers.append(reduction_layers[-1] + 1)

        with tf.variable_scope('model', reuse=(not is_train)):
            # "Stem" convolution layer (layer -1)
            with tf.variable_scope('layer_stem'):
                X = self._do_conv(X, w, h, in_ch, stem_ch, filter_size=3,
                                  no_relu=True, is_train=is_train)  # 3x3 convolution
                stem = (X, w, h, stem_ch)

            # Core layers of cells
            block_ch = initial_block_ch
            aux_logits_list = []  # Stores list of logits from aux heads
            layers = [stem, stem]  # Stores previous layers. layers[i] = (<layer (i + 1)>, <width>, <height>, <channels>)
            for l in range(L + 2):
                utils.logger.log('Building layer {}...'.format(l))
tensorflow.variable_scope
12,873
import tensorflow as tf

        session.run(output)

      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   'Output shape must have a batch dimension'):
        coord.join()

  def test_output_must_have_same_batch_dimension_size_as_input(self):
    with self.test_session() as session:

      @dynamic_batching.batch_fn
      def f(_):
        return tf.constant([1, 2, 3, 4])

      output = f(tf.constant([1]))

      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(coord=coord)

      with self.assertRaises(tf.errors.CancelledError):
        session.run(output)

      with self.assertRaisesRegexp(
          tf.errors.InvalidArgumentError,
          'Output shape must have the same batch dimension as the input batch '
          'size. Expected: 1 Observed: 4'):
        coord.join()

  def test_get_inputs_cancelled(self):
    with tf.Graph().as_default():

      @dynamic_batching.batch_fn
tensorflow.train.start_queue_runners
12,874
import tensorflow as tf

def contra_step_lossV3(pred, tgt, margin=1.0):
    # Step-wise contrastive loss
    pred1, pred2 = tf.split(pred, 2, axis=0)
    tgt1, tgt2 = tf.split(tgt, 2, axis=0)
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
tensorflow.split
12,875
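A minimal sketch of the seed_api above (tensorflow.split); the tensor and split counts are illustrative:

import tensorflow as tf

t = tf.reshape(tf.range(8), [4, 2])
a, b = tf.split(t, 2, axis=0)  # two [2, 2] tensors, split along the first axis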
import tensorflow as tf

    x = tf.to_float(x)
    y = tf.to_float(y)
    z = tf.to_float(z)
    depth_f = tf.to_float(depth)
    height_f = tf.to_float(height)
    width_f = tf.to_float(width)

    # Number of disparity interpolated.
    out_depth = out_size[0]
tensorflow.to_float
12,876
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature

  def predict_proba(self, x, batch_size=None):
    """Returns prediction probabilities for given features (classification).

    Args:
      x: features.
      batch_size: Override default batch size.

    Returns:
      Numpy array of predicted probabilities.
    """
    return self._infer_model(x=x, batch_size=batch_size, proba=True)

  def _check_inputs(self, features, targets):
    if self._features_info is not None:
      if not tensor_signature.tensors_compatible(features, self._features_info):
        raise ValueError('Features are incompatible with given information. '
                         'Given features: %s, required signatures: %s.' %
                         (str(features), str(self._features_info)))
    else:
      self._features_info = tensor_signature.create_signatures(features)
    if self._targets_info is not None:
      if not tensor_signature.tensors_compatible(targets, self._targets_info):
        raise ValueError('Targets are incompatible with given information. '
                         'Given targets: %s, required signatures: %s.' %
                         (str(targets), str(self._targets_info)))
    else:
      self._targets_info = tensor_signature.create_signatures(targets)

  def _train_model(self,
tensorflow.contrib.learn.python.learn.estimators.tensor_signature.tensors_compatible
12,877
import tensorflow as tf

    x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(
        x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(
        x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
    return tf.concat([tf.reshape(x0, xs[:-1] + [1]),
                      tf.reshape(x1, xs[:-1] + [1]),
                      tf.reshape(x2, xs[:-1] + [1])], 3)
tensorflow.maximum
12,878
import tensorflow as tf

    Returns:
      resized_image: A 3-D tensor containing the resized image.
    """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

    shape = tf.shape(image)
    height = shape[0]
    width = shape[1]
    new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
    image = tf.expand_dims(image, 0)
    resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                             align_corners=False)
    resized_image = tf.squeeze(resized_image)
    resized_image.set_shape([None, None, 3])
    return resized_image


def preprocess_for_train(image, output_height,
tensorflow.expand_dims
12,879
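A minimal sketch of the seed_api above (tensorflow.expand_dims); shapes are illustrative:

import tensorflow as tf

v = tf.constant([1., 2., 3.])  # shape [3]
m = tf.expand_dims(v, axis=0)  # shape [1, 3]; axis=-1 would give shape [3, 1]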
import tensorflow as tf

            emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))
        else:
            with sok.OptimizerScope(emb_var):
                emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))

        with tf.control_dependencies([*emb_grads]):
            # in case NCCL runs concurrently via SOK and horovod
            other_grads = strategy.reduce("sum", other_grads)
            other_train_op = dense_opt.apply_gradients(zip(other_grads, other_var))

        with tf.control_dependencies([emb_train_op, other_train_op]):
            total_loss = strategy.reduce("sum", loss)
            total_loss = tf.identity(total_loss)
            return total_loss, embedding_vector

    return strategy.run(_step_fn, inputs, labels)


replica_batch_size = args.global_batch_size // args.gpu_num
dataset = utils.tf_dataset(*random_samples, batchsize=replica_batch_size,
                           to_sparse_tensor=True, repeat=1)
train_iterator = dataset.make_initializable_iterator()
iterator_init = train_iterator.initializer

inputs, labels = train_iterator.get_next()
tensorflow.identity
12,880
import tensorflow as tf

lr_schedules = {
    'warmup_cosine': warmup_cosine,
    'warmup_linear': warmup_linear,
    'warmup_constant': warmup_constant,
}


def _norm(x, g=None, b=None, e=1e-5, axis=[1]):
    u = tf.reduce_mean(x, axis=axis, keep_dims=True)
    s = tf.reduce_mean(tf.square(x - u), axis=axis, keep_dims=True)
    x = (x - u) * tf.rsqrt(s + e)
    if g is not None and b is not None:
        x = x * g + b
    return x


def norm(x, scope, axis=[-1]):
    with tf.variable_scope(scope):
        n_state = shape_list(x)[-1]
tensorflow.square
12,881
import tensorflow as tf

        if x != prev_x:
            auc += ((x - prev_x) * (y + prev_y) / 2.)
        prev_x = x
        prev_y = y

    return auc


def attention(query, facts, attention_size, mask, stag='null',
              mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])

    mask = tf.equal(mask, tf.ones_like(mask))
    hidden_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    input_size = query.get_shape().as_list()[-1]
tensorflow.concat
12,882
import tensorflow as tf

                               nf=64, rf=3, stride=1, init_scale=np.sqrt(2)))
        nh = np.prod([v.value for v in c3.get_shape()[1:]])
        h3 = tf.reshape(c3, [-1, nh])
        pre_s = tf.nn.relu(self.fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))

        # Critic
        # Define variables
        # self.tfs = tf.placeholder(tf.float32, [None, image_features], 'state')
tensorflow.reshape
12,883
import tensorflow as tf

        reinforce_weights = get_weights(samples, utils.EOS_ID, include_first_eos=True)
        reinforce_loss = sequence_loss(logits=outputs, targets=samples,
                                       weights=reinforce_weights, rewards=baseline_rewards)

        trg_mask = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
        xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=trg_mask)

        if monotonicity_weight:
            monotonicity_dist = monotonicity_dist or 1.0

            batch_size = tf.shape(attention_weights)[0]
            src_len = tf.shape(attention_weights)[2]
            trg_len = tf.shape(attention_weights)[1]

            src_indices = tf.tile(tf.reshape(tf.range(src_len), shape=[1, 1, src_len]),
                                  [batch_size, trg_len, 1])
            trg_indices = tf.tile(tf.reshape(tf.range(trg_len), shape=[1, trg_len, 1]),
                                  [batch_size, 1, src_len])

            source_length = encoder_input_length[0]
            target_length = tf.to_int32(tf.reduce_sum(trg_mask, axis=1))

            true_src_len = tf.reshape(source_length, shape=[batch_size, 1, 1]) - 1
            true_trg_len = tf.reshape(target_length, shape=[batch_size, 1, 1]) - 1
tensorflow.shape
12,884
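The seed reads every size with tf.shape because batch size and sequence lengths are only known at run time; the static/dynamic distinction in miniature:

import tensorflow as tf

attention_weights = tf.placeholder(tf.float32, [None, None, None])  # (B, trg, src)
batch_size = tf.shape(attention_weights)[0]  # dynamic: an int32 scalar tensor
trg_len = tf.shape(attention_weights)[1]
static_shape = attention_weights.get_shape().as_list()  # [None, None, None] at graph time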
import tensorflow as tf def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature( [max_predictions_per_seq], tf.int64
tensorflow.FixedLenFeature
12,885
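The name_to_features dict above is meant to be handed to tf.parse_single_example, which turns each serialized TFRecord into dense tensors of the declared shapes; a minimal sketch (max_seq_length=128 is an assumption):

import tensorflow as tf

max_seq_length = 128
name_to_features = {
    "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
    "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
}

def decode(serialized_record):
    # returns a dict of dense tensors, one per declared feature
    return tf.parse_single_example(serialized_record, name_to_features)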
import tensorflow as tf
"""Build dynamic graph"""
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cell, inputs=rnn_inputs, initial_state=init_state)

"""Add prediction layer"""
with tf.variable_scope('softmax'):
    W = tf.get_variable('W', [state_size, input_size_y])
    b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0))
rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size])
predictions = tf.matmul(rnn_outputs, W) + b
yy = tf.reshape(y, [-1, input_size_y])  # batch_size*num_steps; when you define a placeholder in TensorFlow, the shape of the input fed during the session must match the shape of the placeholder

"Mean squared error loss"
loss = tf.reduce_mean(tf.square(tf.reshape(predictions, [-1]) - tf.reshape(yy, [-1])))

"Adding regularization"
if lambda_l2_reg > 0:
    cell_l2 = tf.reduce_sum([tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables() if not ("noreg" in tf_var.name or "Bias" in tf_var.name)])
    Predict_l2 = tf.nn.l2_loss(W)  # + tf.nn.l2_loss(b)
    total_loss = tf.reduce_sum(loss + lambda_l2_reg * tf.reduce_sum(cell_l2 + Predict_l2))
tensorflow.matmul
12,886
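The prediction layer above is a plain affine map over the flattened RNN outputs; the tf.matmul projection on its own (state_size=32 and a scalar output are assumptions):

import tensorflow as tf

state_size, output_size = 32, 1
rnn_outputs = tf.placeholder(tf.float32, [None, state_size])
W = tf.get_variable('W', [state_size, output_size])
b = tf.get_variable('b', [output_size], initializer=tf.constant_initializer(0.0))
predictions = tf.matmul(rnn_outputs, W) + b  # (batch, output_size)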
import tensorflow as tf
        w = w if w is not None else self.w
        b = b if b is not None else self.b

        if self.data_format == 'NCHW':
            return tf.nn.bias_add(
                tf.nn.conv2d(input_var, w, use_cudnn_on_gpu=True, data_format='NCHW',
                             strides=self.strides, padding=self.padding),
                b, data_format='NCHW', name=name)
        else:
tensorflow.nn.conv2d
12,887
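For contrast with the NCHW branch above, the same tf.nn.conv2d + tf.nn.bias_add pairing in the default NHWC layout (filter size and channel counts are assumptions):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 3])  # NHWC input
w = tf.get_variable('w', [3, 3, 3, 16])            # kH, kW, in_ch, out_ch
b = tf.get_variable('b', [16], initializer=tf.zeros_initializer())
conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
out = tf.nn.bias_add(conv, b)                      # data_format defaults to 'NHWC'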
import tensorflow as tf encoder_output_label_, _ = encoder(x_input_l, reuse=True, supervised=True) # Generate output images with tf.variable_scope(tf.get_variable_scope()): decoder_image = decoder(manual_decoder_input, reuse=True) # Classification accuracy of encoder correct_pred = tf.equal(tf.argmax(encoder_output_label_, 1), tf.argmax(y_input, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # Autoencoder loss autoencoder_loss = tf.reduce_mean(tf.square(x_target - decoder_output)) # Gaussian Discriminator Loss
tensorflow.argmax
12,888
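The accuracy computation above is the standard argmax-agreement pattern; isolated (10 classes assumed):

import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 10])
labels = tf.placeholder(tf.float32, [None, 10])  # one-hot
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))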
import tensorflow as tf 'use_nccl', True, 'Whether to use nccl all-reduce primitives where possible') # Distributed training flags. tf.flags.DEFINE_string('job_name', '', 'One of "ps", "worker", "". Empty for local training') tf.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_string('worker_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_integer('task_index', 0, 'Index of task within the job') tf.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers') tf.flags.DEFINE_boolean('cross_replica_sync', True, '') # Summary and Save & load checkpoints. tf.flags.DEFINE_integer('summary_verbosity', 0, """Verbosity level for summary ops. Pass 0 to disable both summaries and checkpoints.""") tf.flags.DEFINE_integer('save_summaries_steps', 0,
tensorflow.flags.DEFINE_integer
12,889
import tensorflow as tf logger.info(f'load trainset-{i} {len(traindata)}') for i in range(20): if not os.path.exists(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl'): continue devdata = pickle.load(open(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl', 'rb')) add_multi_step(devdata, task_dev_sets[0]) logger.info(f'load devset-{i} {len(devdata)}') logger.info('Load the first task saver!') saver.load_state_dict(np.load(f'./{inittask}/{taskname}.task0.saver.npy', allow_pickle=True)[()]) logger.info('Update all copies! (lazymodel, normalizers_copy)') tf.get_default_session().run(sync_model_to_lazymodel) tf.get_default_session().run(copy_normalizers) logger.info('Loaded normalizers:') load_norm = tf.get_default_session().run(normalizers_parameters) logger.info(load_norm) TASK_NUM = 1 ########################## debug ######################### #for task_idx in range(TASK_NUM): # total_loss = [] # for scan in range(100): # samples = task_train_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step) # loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout) # total_loss.append(loss_i.mean()) # total_loss = np.mean(total_loss) # print ('loaded model train loss:', total_loss) #for task_idx in range(TASK_NUM): # total_loss = []
tensorflow.get_default_session
12,890
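The seed drives everything through tf.get_default_session() rather than a captured session handle; what installs that default, in miniature:

import tensorflow as tf

x = tf.constant(3.0)
with tf.Session() as sess:  # entering the block makes sess the default session
    print(tf.get_default_session().run(x))  # 3.0 -- same object as sess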
import tensorflow as tf true_src_len = tf.reshape(source_length, shape=[batch_size, 1, 1]) - 1 true_trg_len = tf.reshape(target_length, shape=[batch_size, 1, 1]) - 1 src_mask = tf.to_float(tf.sequence_mask(source_length, maxlen=src_len)) mask = tf.matmul(tf.expand_dims(trg_mask, axis=2), tf.expand_dims(src_mask, axis=1)) monotonous = tf.sqrt(((true_trg_len * src_indices - true_src_len * trg_indices) ** 2) / (true_trg_len**2 + true_src_len**2)) monotonous = tf.to_float(monotonous < monotonicity_dist) non_monotonous = (1 - monotonous) * mask attn_loss = tf.reduce_sum(attention_weights * tf.stop_gradient(non_monotonous)) / tf.to_float(batch_size) if monotonicity_decay: decay = tf.stop_gradient(0.5 ** (tf.to_float(global_step) / monotonicity_decay)) else: decay = 1.0 xent_loss += monotonicity_weight * decay * attn_loss losses = [xent_loss, reinforce_loss, baseline_loss_] return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data def reconstruction_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, encoder_input_length=None, training=True, reconstruction_weight=1.0,
tensorflow.to_float
12,891
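tf.to_float above converts boolean masks and comparisons into float tensors so they can participate in arithmetic; in isolation:

import tensorflow as tf

lengths = tf.constant([2, 3])
mask = tf.sequence_mask(lengths, maxlen=4)  # boolean, shape (2, 4)
float_mask = tf.to_float(mask)              # 1.0 where True, 0.0 where False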
import tensorflow as tf self._lr_update = tf.get_collection_ref('lr_update')[0] rnn_params = tf.get_collection_ref('rnn_params')
tensorflow.get_collection_ref
12,892
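tf.get_collection_ref returns the live list stored under a collection key, so the [0] above recovers an op stashed there earlier (often after re-importing a saved graph); a minimal round trip using the same 'lr_update' key:

import tensorflow as tf

lr = tf.Variable(0.1, trainable=False)
new_lr = tf.placeholder(tf.float32, [])
tf.add_to_collection('lr_update', tf.assign(lr, new_lr))

# later, e.g. after importing the graph elsewhere:
lr_update = tf.get_collection_ref('lr_update')[0]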
import tensorflow as tf prob_tf = tf.nn.softmax(pi.logits) # the "policy gradients" loss: its derivative is precisely the policy gradient # notice that self.ac is a placeholder that is provided externally. # adv will contain the advantages, as calculated in process_rollout pi_loss = - tf.reduce_sum(tf.reduce_sum(log_prob_tf * self.ac, [1]) * self.adv) # loss of value function vf_loss = 0.5 * tf.reduce_sum(tf.square(pi.vf - self.r)) entropy = - tf.reduce_sum(prob_tf * log_prob_tf)
tensorflow.reduce_sum
12,893
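The policy-gradient loss above uses a nested tf.reduce_sum: the inner sum with the one-hot self.ac selects log pi(a|s) per example, and the outer sum aggregates the advantage-weighted terms over the batch. A standalone sketch (4 actions and the placeholder shapes are assumptions):

import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 4])
ac = tf.placeholder(tf.float32, [None, 4])  # one-hot chosen actions
adv = tf.placeholder(tf.float32, [None])    # advantages

log_prob = tf.nn.log_softmax(logits)
pi_loss = -tf.reduce_sum(tf.reduce_sum(log_prob * ac, [1]) * adv)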
from tensorflow.core.framework.summary_pb2 import Summary self._summary_writer = SummaryWriterCache.get(output_dir) def set_estimator(self, estimator): super(StepCounter, self).set_estimator(estimator) if self._summary_writer is None: self._summary_writer = SummaryWriterCache.get(estimator.model_dir) def every_n_step_end(self, current_step, outputs): current_time = time.time() if self._last_reported_time is not None and self._summary_writer: added_steps = current_step - self._last_reported_step elapsed_time = current_time - self._last_reported_time steps_per_sec = added_steps / elapsed_time summary = Summary(value=[Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)]) self._summary_writer.add_summary(summary, current_step) self._last_reported_step = current_step self._last_reported_time = current_time class NanLossDuringTrainingError(RuntimeError): def __str__(self): return "NaN loss during training."
tensorflow.core.framework.summary_pb2.Summary.Value
12,894
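Building a Summary proto by hand, as every_n_step_end does, lets you log values computed in Python rather than in the graph; a minimal sketch (tag and log directory are assumptions):

import tensorflow as tf
from tensorflow.core.framework.summary_pb2 import Summary

writer = tf.summary.FileWriter('/tmp/logs')
summary = Summary(value=[Summary.Value(tag='steps_per_sec', simple_value=42.0)])
writer.add_summary(summary, global_step=100)
writer.flush()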
import tensorflow as tf tf.flags.DEFINE_string('data_format', 'NHWC', """Data layout to use: NHWC (TF native) or NCHW (cuDNN native).""") tf.flags.DEFINE_integer('num_intra_threads', 1, """Number of threads to use for intra-op parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_integer('num_inter_threads', 0, """Number of threads to use for inter-op parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_string('trace_file', None, """Enable TensorFlow tracing and write trace to this file.""") tf.flags.DEFINE_string('graph_file', None, """Write the model's graph definition to this file. Defaults to binary format unless filename ends in 'txt'.""") tf.flags.DEFINE_string('optimizer', 'sgd', 'Optimizer to use: momentum or sgd or rmsprop') tf.flags.DEFINE_float('learning_rate', None, """Initial learning rate for training.""") tf.flags.DEFINE_float('num_epochs_per_decay', 0, """Steps after which learning rate decays.""") tf.flags.DEFINE_float('learning_rate_decay_factor', 0.94, """Learning rate decay factor.""") tf.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""") tf.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""")
tensorflow.flags.DEFINE_string
12,895
import tensorflow as tf self._initializers = { "w": tf.contrib.layers.xavier_initializer(), } self._regularizers = { 'w': tf.contrib.layers.l2_regularizer(config.l2) } self._construct_placeholders() self._construct_weights() self._construct() tf.summary.scalar('Model/Loss', tf.get_collection(GraphKeys.LOSSES)[0]) self.summary = tf.summary.merge_all() def _construct(self): """ Construct the model; main part of it goes here """ self.v = DenseLayer(1, False, tf.nn.relu, initializers=self._initializers, regularizers=self._regularizers, name='OutputVector') self.score = tf.squeeze(self.v(self._cur_user * self._cur_item)) negative_output = tf.squeeze(self.v(self._cur_user * self._cur_item_negative)) tf.add_to_collection(GraphKeys.PREDICTION, self.score) self.loss = LossLayer()(self.score, negative_output)
tensorflow.summary.merge_all
12,896
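The tf.summary.merge_all() above bundles every summary op registered so far into a single op to run and write each step; end to end, under assumed names:

import tensorflow as tf

loss = tf.constant(0.5)
tf.summary.scalar('Model/Loss', loss)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)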
import tensorflow as tf with tf.name_scope('data'):
tensorflow.name_scope
12,897
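Since the seed is only the opening line, here is a slightly fuller sketch of what tf.name_scope does: ops created inside pick up the scope as a name prefix (the placeholders are illustrative):

import tensorflow as tf

with tf.name_scope('data'):
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    y = tf.placeholder(tf.int64, [None], name='labels')

print(x.op.name)  # data/x -- the scope prefixes the op name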
import tensorflow as tf second_comp = learning_rate elif mode == 'sin': first_factor = (learning_rate - max_lr) / 2. second_factor = tf.sin((pi * global_step) / step_size) second_comp = (learning_rate + max_lr) / 2. elif mode == 'saw': first_factor = max_lr - learning_rate second_factor = tf.mod(global_step / step_size, 1) second_comp = learning_rate return first_factor * second_factor + second_comp
tensorflow.mod
12,898
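In the 'saw' branch, tf.mod(global_step / step_size, 1) yields the fractional position within the current cycle, producing a sawtooth that ramps from learning_rate up to max_lr and resets; the branch in isolation (the constants are assumptions):

import tensorflow as tf

learning_rate, max_lr, step_size = 0.001, 0.006, 100.0
global_step = tf.placeholder(tf.float32, [])

cycle_pos = tf.mod(global_step / step_size, 1)  # in [0, 1), resets every step_size steps
lr = (max_lr - learning_rate) * cycle_pos + learning_rate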
import tensorflow as tf Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over individual gradients. The inner list is over the gradient calculation for each tower. Returns: List of pairs of (gradient, variable) where the gradient has been averaged across all towers. """ average_grads = [] for grad_and_vars in zip(*tower_grads): # Note that each grad_and_vars looks like the following: # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) grads = [] for g, _ in grad_and_vars: # Add 0 dimension to the gradients to represent the tower. expanded_g = tf.expand_dims(g, 0) # Append on a 'tower' dimension which we will average over below. grads.append(expanded_g) # Average over the 'tower' dimension. grad = tf.concat(axis=0, values=grads) grad = tf.reduce_mean(grad, 0) # Keep in mind that the Variables are redundant because they are shared # across towers. So .. we will just return the first tower's pointer to # the Variable. v = grad_and_vars[0][1] grad_and_var = (grad, v) average_grads.append(grad_and_var)
tensorflow.expand_dims
12,899
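Per gradient, the loop above stacks one slice per tower with tf.expand_dims and averages over the new axis; the same reduction with two hypothetical tower gradients:

import tensorflow as tf

g0 = tf.constant([1.0, 2.0])  # gradient from tower 0
g1 = tf.constant([3.0, 4.0])  # gradient from tower 1

stacked = tf.concat(axis=0, values=[tf.expand_dims(g, 0) for g in (g0, g1)])
avg = tf.reduce_mean(stacked, 0)  # [2.0, 3.0]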