seed: string (lengths 25–2.89k)
seed_api: string (lengths 14–102)
index: int64 (0–14.8k)
from tensorflow.python.framework import ops
      * `gradients` is empty.
  """
  loss = ops.convert_to_tensor(loss)
  contrib_framework.assert_scalar(loss)
tensorflow.python.framework.ops.convert_to_tensor
2,900
import tensorflow as tf
    tf.set_random_seed(111)
    random.seed(111)
    np.random.seed(111)
    with self.test_session() as sess:
      # We use sampled softmax so we keep output projection separate.
      w = tf.get_variable("proj_w", [24, classes])
      w_t = tf.transpose(w)
      b = tf.get_variable("proj_b", [classes])

      # Here comes a sample Seq2Seq model using GRU cells.
      def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
        """Example sequence-to-sequence model that uses GRU cells."""

        def GRUSeq2Seq(enc_inp, dec_inp):
          cell = tf.nn.rnn_cell.MultiRNNCell(
              [tf.nn.rnn_cell.GRUCell(24)] * 2, state_is_tuple=True)
          return tf.nn.seq2seq.embedding_attention_seq2seq(
tensorflow.get_variable
2,901
import tensorflow as tf
    def _normalize(self, x, mean, mean_sq, message):
        # make sure this is called with a variable scope
        shape = x.get_shape().as_list()
        assert len(shape) == 4
        self.gamma = safe_get("gamma", [shape[-1]],
                              initializer=tf.random_normal_initializer(1., 0.02))
        gamma = tf.reshape(self.gamma, [1, 1, 1, -1])
        self.beta = safe_get("beta", [shape[-1]],
                             initializer=tf.constant_initializer(0.))
        beta = tf.reshape(self.beta, [1, 1, 1, -1])
        assert self.epsilon is not None
        assert mean_sq is not None
        assert mean is not None
        std = tf.sqrt(self.epsilon + mean_sq - tf.square(mean))
        out = x - mean
        out = out / std
        out = out * gamma
        out = out + beta
        return out
tensorflow.reshape
2,902
import tensorflow as tf
        U = tf.reshape(flat_logits, [-1, num_acts, self.k])

        # Calculate w
        cut_g = tf.stop_gradient(self.g)
        cut_g = tf.expand_dims(cut_g, [1])
        gstack = tf.concat([self.prev_g, cut_g], axis=1)

        self.last_c_g = gstack[:, 1:]
        # print self.last_c_g

        gsum = tf.reduce_sum(gstack, axis=1)
        phi = tf.get_variable("phi", (self.g_dim, self.k))
tensorflow.concat
2,903
import tensorflow as tf
                sequence_max_length
            )
            tiled_representation = tf.multiply(
                tiled_representation,
                tf.cast(mask[:, :, tf.newaxis], dtype=tf.float32)
            )

            dependencies_hidden.append(tiled_representation)
tensorflow.cast
2,904
from tensorflow.python.framework import ops
      predictions, labels, weights, metrics_collections=None,
      updates_collections=None, name=None)
  false_positives, false_positives_update_op = _streaming_false_positives(
      predictions, labels, weights, metrics_collections=None,
      updates_collections=None, name=None)

  def compute_precision(name):
    return math_ops.select(
        math_ops.greater(true_positives + false_positives, 0),
        math_ops.div(true_positives, true_positives + false_positives),
        0,
        name)

  precision = compute_precision('value')
  with ops.control_dependencies([true_positives_update_op,
                                 false_positives_update_op]):
    update_op = compute_precision('update_op')

  if metrics_collections:
    ops.add_to_collections(metrics_collections, precision)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return precision, update_op


@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
tensorflow.python.framework.ops.control_dependencies
2,905
import tensorflow as tf
        reg = 0
        for state in states:
            dJr = tf.matmul(tf.nn.relu(state),
                            tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity,
                                      self.Dale_rec))
            reg += tf.reduce_sum(tf.square(dJr))
        return reg / (self.N_steps * self.N_batch)
tensorflow.abs
2,906
import tensorflow as tf
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
                                          reuse=True)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
tensorflow.argmax
2,907
import tensorflow as tf
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
tensorflow.get_default_graph
2,908
import tensorflow as tf
    log_norm = tf.reduce_logsumexp(antecedent_scores, [1])  # [k]
    return log_norm - marginalized_gold_scores  # [k]

  def bucket_distance(self, distances):
    """
    Places the given values (designed for distances) into 10 semi-logscale
    buckets: [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
    """
    logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances))/math.log(2))) + 3
    use_identity = tf.to_int32(distances <= 4)
    combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
    return tf.clip_by_value(combined_idx, 0, 9)

  def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb):
    k = util.shape(top_span_emb, 0)
    c = util.shape(top_antecedents, 1)

    feature_emb_list = []

    if self.config["use_metadata"]:
      top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents)  # [k, c]
      same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids)  # [k, c]
tensorflow.clip_by_value
2,909
import tensorflow as tf
        examples_per_sec = self.batch_size * self.num_batches / duration
        log_fn('%i\t%.1f examples/sec' % (step + 1, examples_per_sec))
        start_time = time.time()
    precision_at_1 = count_top_1 / total_eval_count
    recall_at_5 = count_top_5 / total_eval_count
    summary = tf.Summary()
    summary.value.add(tag='eval/Accuracy@1', simple_value=precision_at_1)
    summary.value.add(tag='eval/Recall@5', simple_value=recall_at_5)
    summary_writer.add_summary(summary, global_step)
    log_fn('Precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' %
tensorflow.Summary
2,910
import tensorflow as tf
    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(label_ids, predictions)
        loss = tf.metrics.mean(per_example_loss)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
tensorflow.metrics.accuracy
2,911
import tensorflow as tf
    return parser.parse_args(args)


def default_parameters():
    params = tf.contrib.training.HParams(
        input=["", ""],
        output="",
        record="",
tensorflow.contrib.training.HParams
2,912
import tensorflow as tf
            W1, W2 = weights[l]
            b = biases[l]
            H1 = tf.add(tf.matmul(H, W1), b)
            H2 = tf.matmul(H, W2)
            H = tf.tanh(tf.add(H1 * H2, H1))
        W1, W2 = weights[-1]
        b = biases[-1]
        H1 = tf.add(tf.matmul(H, W1), b)
        H2 = tf.matmul(H, W2)
        Y = tf.add(H1 * H2, H1)
        return Y

    def fwd_gradients_0(self, U, x):
        g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0]
tensorflow.matmul
2,913
import tensorflow as tf
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
tensorflow.summary.FileWriter
2,914
import tensorflow as tf
    n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
    with tf.variable_scope("input", reuse=reuse):
        stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
        update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
tensorflow.placeholder
2,915
import tensorflow as tf
        List of uninitialized tf variables.
    """
    sess = tf.get_default_session()
    if variables is None:
        variables = tf.global_variables()
    else:
        variables = list(variables)
tensorflow.global_variables
2,916
import tensorflow as tf
    image.set_shape(shape)
    return tf.cast(image, dtype)


def serving_input_receiver_fn():
    image_bytes_list = tf.placeholder(
        shape=[None],
        dtype=tf.string,
    )
tensorflow.placeholder
2,917
import tensorflow as tf
        sess = sess or tf.get_default_session()
        for st_idx in range(len(state)):
            state[st_idx] = featurize_state(state[st_idx])
        feed_dict = {self.state: state, self.target: target, self.a_his: action}
        _, loss = sess.run([self.train_op, self.loss], feed_dict)
        return loss


class ValueEstimator_MountainCarContinuous():
    def __init__(self, learning_rate=0.1, par_idx=0, scope="value_estimator"):
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope(scope + "_" + str(par_idx)):
            # state and target
            self.state = tf.placeholder(tf.float32, [None, 400], "state")
            self.target = tf.placeholder(tf.float32, [None, 1], name="target")

            # layers
            self.value_estimate = tf.layers.dense(
                self.state, 1, kernel_initializer=w_init, name='v')  # estimated value for state

            # loss and optimizer
            self.loss = tf.squared_difference(self.value_estimate, self.target)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            self.train_op = self.optimizer.minimize(
                self.loss, global_step=tf.contrib.framework.get_global_step())

    def predict(self, state, sess=None):
        sess = sess or tf.get_default_session()
tensorflow.placeholder
2,918
import tensorflow as tf
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)])
    else:
        gpu_options = tf.GPUOptions(allow_growth=allow_growth,
                                    per_process_gpu_memory_fraction=fraction)
        config = tf.ConfigProto(gpu_options=gpu_options)
        session = tf.Session(config=config)
        K.set_session(session)


def multi_gpu(model, gpus=None, cpu_merge=True, cpu_relocation=False):
tensorflow.Session
2,919
import tensorflow as tf
      if isinstance(parsed_tensors[k], tf.SparseTensor):
        if parsed_tensors[k].dtype == tf.string:
          parsed_tensors[k] = tf.sparse.to_dense(
              parsed_tensors[k], default_value='')
        else:
          parsed_tensors[k] = tf.sparse.to_dense(
              parsed_tensors[k], default_value=0)

    image = self._decode_image(parsed_tensors)
    boxes = self._decode_boxes(parsed_tensors)
    areas = self._decode_areas(parsed_tensors)
    is_crowds = tf.cond(
        tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
        lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
        lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool))  # pylint: disable=line-too-long
    if self._include_mask:
      masks = self._decode_masks(parsed_tensors)

    decoded_tensors = {
        'image': image,
        'source_id': parsed_tensors['image/source_id'],
        'height': parsed_tensors['image/height'],
        'width': parsed_tensors['image/width'],
        'groundtruth_classes': parsed_tensors['image/object/class/label'],
        'groundtruth_is_crowd': is_crowds,
        'groundtruth_area': areas,
tensorflow.cast
2,920
import tensorflow as tf
            images = images / 255  # Convert to [0, 1]
            return (images, classes)

        # Prepare train dataset
        def _preprocess_train(image, clazz):
            # Do random crop + horizontal flip for each train image
            image = tf.pad(image, [[4, 4], [4, 4], [0, 0]])
            image = tf.image.random_crop(image, (w, h, in_ch))
            image = tf.image.random_flip_left_right(image)
            if cutout_size > 0:
                image = self._do_cutout(image, w, h, cutout_size)
            return (image, clazz)

        (images, classes) = _prepare(images, classes)
tensorflow.image.random_flip_left_right
2,921
import tensorflow as tf
    def rnn_step(self, rnn_in, state):
        if self.dale_ratio:
            new_state = (1-self.alpha) * state \
                        + self.alpha * (
                            tf.matmul(
                                tf.nn.relu(state),
                                tf.matmul(
                                    tf.abs(self.W_rec) * self.rec_Connectivity,
                                    self.Dale_rec,
                                    name="in_1"),
                                transpose_b=True, name="1")
                            + tf.matmul(
tensorflow.nn.relu
2,922
import tensorflow as tf """Returns a map of variables to load from a foreign checkpoint. Args: from_detection_checkpoint: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Returns: A dict mapping variable names to variables. """ return {var.op.name: var for var in tf.global_variables()} class TrainerTest(tf.test.TestCase): def test_configure_trainer_and_train_two_steps(self): train_config_text_proto = """ optimizer { adam_optimizer { learning_rate {
tensorflow.global_variables
2,923
from tensorflow.python.framework import tensor_shape
  if start_value is None or limit_value is None or delta_value is None:
    return [tensor_shape.vector(None)]
  else:
    return [tensor_shape.vector(
        (limit_value - start_value + delta_value - 1) // delta_value)]
tensorflow.python.framework.tensor_shape.vector
2,924
import tensorflow as tf
            init_fw, init_bw = self.inits[layer]
            mask_fw, mask_bw = self.dropout_mask[layer]
            with tf.variable_scope("fw_{}".format(layer)):
                out_fw, _ = gru_fw(
                    outputs[-1] * mask_fw, initial_state=(init_fw, ))
            with tf.variable_scope("bw_{}".format(layer)):
                inputs_bw = tf.reverse_sequence(
                    outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw, ))
                out_bw = tf.reverse_sequence(
                    out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
            outputs.append(tf.concat([out_fw, out_bw], axis=2))
        if concat_layers:
            res = tf.concat(outputs[1:], axis=2)
        else:
            res = outputs[-1]
        res = tf.transpose(res, [1, 0, 2])
        return res


class native_gru:
tensorflow.concat
2,925
import tensorflow as tf
    rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl])  # bs,bn,bl,bl
    rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2)
tensorflow.logical_and
2,926
from tensorflow.contrib.boosted_trees.proto import learner_pb2
  def testFitAndEvaluateDontThrowExceptionWithCoreForRegressor(self):
    learner_config = learner_pb2.LearnerConfig()
tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig
2,927
import tensorflow as tf
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)
  inputs = conv2d_fixed_padding(
tensorflow.nn.relu
2,928
import tensorflow as tf
            self.q_mask = tf.slice(self.q_mask, [0, 0], [N, self.q_maxlen])
            self.ch = tf.slice(self.ch, [0, 0, 0], [N, self.c_maxlen, CL])
            self.qh = tf.slice(self.qh, [0, 0, 0], [N, self.q_maxlen, CL])
            self.y1 = tf.argmax(tf.slice(self.y1, [0, 0], [N, self.c_maxlen]), axis=-1)
            self.y2 = tf.argmax(tf.slice(self.y2, [0, 0], [N, self.c_maxlen]), axis=-1)
        else:
            self.c_maxlen, self.q_maxlen = config.para_limit, config.ques_limit
tensorflow.slice
2,929
import tensorflow as tf


if __name__ == "__main__":
    tf.test.main()
tensorflow.test.main
2,930
import tensorflow as tf
      l2_loss.append(tf.nn.l2_loss(v))
  l2_loss = FLAGS.dst_weight_decay * tf.add_n(l2_loss)

  enable_pretrain = tf.cast(
      tf.greater_equal(global_step, FLAGS.first_pretrain_steps), tf.float32)

  loss = src_loss * tf.stop_gradient(loss_weights) * enable_pretrain
  loss += dst_loss + l2_loss

  return tf.identity(loss), src_loss, dst_loss


def train_model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
  """Defines the model function."""
  target_num_classes = FLAGS.target_num_classes
  global_step = tf.train.get_global_step()
  src_features, src_labels = features['src'], tf.cast(labels['src'], tf.int64)
  finetune_features = features['finetune']
  target_features = features['target']

  num_classes = FLAGS.src_num_classes

  finetune_one_hot_labels = tf.one_hot(
      tf.cast(labels['finetune'], tf.int64), target_num_classes)
  target_one_hot_labels = tf.one_hot(
      tf.cast(labels['target'], tf.int64), target_num_classes)

  with tf.variable_scope('rl_controller') as rl_scope:
    # It creates a `rl_scope` which will be used for ops.
tensorflow.train.get_global_step
2,931
from tensorflow.python.framework import ops
    This function "reverses" `make_batch_of_event_sample_matrices`.

    Args:
      x: `Tensor` of shape `B_+E_+S_`.
      sample_shape: `Tensor` (1D, `int32`).
      name: `String`. The name to give this op.

    Returns:
      x: `Tensor`. Input transposed/reshaped to `S+B+E`.
    """
    with self._name_scope(name, values=[x, sample_shape]):
      x = ops.convert_to_tensor(x, name="x")
      sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
      x = distribution_util.rotate_transpose(x, shift=1)
      if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
        if self._batch_ndims_is_0 or self._event_ndims_is_0:
          b = ((min(-2, -1 - self._event_ndims_static),)
               if self._batch_ndims_is_0 else ())
          e = (-1,) if self._event_ndims_is_0 else ()
          x = array_ops.squeeze(x, squeeze_dims=b + e)
        _, batch_shape, event_shape = self.get_shape(x)
      else:
tensorflow.python.framework.ops.convert_to_tensor
2,932
import tensorflow as tf
            # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params, state_init_a, state_final_a

    def build_cnet(self, state_in, name, reuse=False, batch_size=64):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)

            lstm_c = tf.nn.rnn_cell.LSTMCell(num_units=256)
            lstm_c = tf.nn.rnn_cell.DropoutWrapper(lstm_c, output_keep_prob=self.keep_prob)
            state_init_c = lstm_c.zero_state(batch_size=batch_size, dtype=tf.float32)
            lstm_cin = tf.expand_dims(layer_c2, axis=1)
            out_c, state_final_c = tf.nn.dynamic_rnn(cell=lstm_c, inputs=lstm_cin,
                                                     initial_state=state_init_c)
            cell_out_c = tf.reshape(out_c, [-1, 256])
            vf = tf.layers.dense(cell_out_c, 1, kernel_regularizer=reg)
tensorflow.layers.dense
2,933
import tensorflow as tf
        )
    else:
        distribution = tf.contrib.distribute.MirroredStrategy(
            num_gpus=FLAGS.num_gpus
        )

    run_config = tf.estimator.RunConfig(train_distribute=distribution)

    model_fn = model_fn_builder(
        bert_config=bert_config,
        init_checkpoint=FLAGS.init_checkpoint,
tensorflow.estimator.RunConfig
2,934
import tensorflow as tf
        E_init_args=None,
        nce_W_init=tf.truncated_normal_initializer(stddev=0.03),
        nce_W_init_args=None,
        nce_b_init=tf.constant_initializer(value=0.0),
        nce_b_init_args=None,
        name='word2vec',
tensorflow.constant_initializer
2,935
import tensorflow as tf
        activation = map
    # print(activation.get_shape().as_list())
    return activation


def batch_norm_conv(x, b_train, scope):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        n_out = x.get_shape().as_list()[-1]

        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out]))
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out]))

        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(b_train,
tensorflow.constant
2,936
import tensorflow as tf
        # self.final_layer = self.conv_layer(bottom = self.deconv_5, kernal_size = 1, in_channels = 64, out_channels = 3, stride = 1, name = 'final_layer')
        self.final_layer = self.conv_bn_relu(bottom = self.deconv_5, name = 'final_layer', kernel_size = 1, output_channels = 3,
                                             initializer = tf.contrib.layers.variance_scaling_initializer(),
                                             bn = False, training = self.is_training, relu=False)
        # self.pool5 = self.avg_pool(self.block4_3, 7, 1, "pool5")

        #self.fc0 = self.fc_layer(self.pool5, 2048, 1024, "fc0")
        #self.relu1 = tf.nn.relu(self.fc0)
        #if train_mode is not None:
        #    self.relu1 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu1, self.dropout), lambda: self.relu1)
        #elif self.trainable:
        #    self.relu1 = tf.nn.dropout(self.relu1, self.dropout)

        self.y_soft = tf.nn.softmax(self.final_layer)
        self.logits = tf.reshape(self.final_layer, (-1, 3))
        print(self.logits)
        self.predicted = tf.argmax(self.final_layer, axis = 3)
        print(self.predicted.get_shape().as_list())
        # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels, logits=logits, name=None)
        # self.loss = tf.reduce_mean(cross_entropy, name = 'xcross_entropy')
        # if(last_layer_type == "sigmoid"):
        #     self.prob = tf.nn.sigmoid(self.fc1, name="prob")
        # elif(last_layer_type == "softmax"):
tensorflow.reshape
2,937
import tensorflow as tf
        session_conf = tf.ConfigProto(
            allow_soft_placement=args.allow_soft_placement,
            log_device_placement=args.log_device_placement)
        session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # Load the saved meta graph and restore variables
            saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
tensorflow.Session
2,938
import tensorflow as tf
      dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the sum. If `x` is float32 or float64, the sum will
    have the same type as `x`. If `x` is float16, the output is cast to
    float32. If `x` is integral, the output is cast to [u]int64. If `x` is
    sparse and reduce_inst_dims is False will return 0 in place where column
    has no values across batches.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'sum'):
    if reduce_instance_dims:
      x = tf.reduce_sum(input_tensor=tf_utils.get_values(x))
    elif isinstance(x, tf.SparseTensor):
      if x.dtype == tf.uint8 or x.dtype == tf.uint16:
        x = tf.cast(x, tf.int64)
      elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
        raise TypeError('Data type %r is not supported' % x.dtype)
      x = tf.sparse.reduce_sum(x, axis=0)
    elif isinstance(x, tf.RaggedTensor):
      raise NotImplementedError(
          'Elementwise sum does not support RaggedTensors.')
    else:
tensorflow.compat.v1.name_scope
2,939
import tensorflow as tf
            mask = tf.reduce_any(self.ids_placeholder > 0, axis=2)
        else:
            mask = self.ids_placeholder > 0

        sequence_lengths = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)
        batch_size = tf.shape(sequence_lengths)[0]

        # for each direction, we'll store tensors for each layer
        self.lstm_outputs = {'forward': [], 'backward': []}
tensorflow.shape
2,940
import tensorflow as tf
        initializer = get_initializer(params)
        model = model_cls(params)

        # Multi-GPU setting
        sharded_losses = parallel.parallel_model(
            model.get_training_func(initializer),
            features,
            params.device_list
        )
        loss = tf.add_n(sharded_losses) / len(sharded_losses)

        # Create global step
        global_step = tf.train.get_or_create_global_step()

        # Print parameters
        all_weights = {v.name: v for v in tf.trainable_variables()}
        total_size = 0

        for v_name in sorted(list(all_weights)):
            v = all_weights[v_name]
            tf.logging.info("%s\tshape %s", v.name[:-2].ljust(80),
                            str(v.shape).ljust(20))
            v_size = np.prod(np.array(v.shape.as_list())).tolist()  # multiply all dimension sizes
            total_size += v_size
        tf.logging.info("Total trainable variables size: %d", total_size)

        learning_rate = get_learning_rate_decay(params.learning_rate,
                                                global_step, params)
        learning_rate = tf.convert_to_tensor(learning_rate, dtype=tf.float32)
        tf.summary.scalar("learning_rate", learning_rate)
tensorflow.trainable_variables
2,941
import tensorflow as tf
      - groundtruth_instance_masks_png: a string tensor of shape [None].
    """
    parsed_tensors = tf.io.parse_single_example(
        serialized=serialized_example, features=self._keys_to_features)
tensorflow.io.parse_single_example
2,942
import tensorflow as tf
        )
        # **(embeddings_kwargs or {}),

        word_embeddings = tf.nn.embedding_lookup(
            self.embeddings,
tensorflow.nn.embedding_lookup
2,943
import tensorflow as tf
    '''
    def create_custom_regularizers(self):
        # should not be an empty list
        return [0.]
    '''

    # save in self.regularizers the regularizers of the model
    def create_regularizers_and_updates(self):
        wb_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # see keras_utils.py: activity_and_contractive_regularizers
        ac_regularizers = tf.get_collection(AC_REGULARIZATION)
        # if (not wb_regularizers) and (not ac_regularizers):
        #     wb_regularizers = [tf.constant(0.)]
        #import pdb;pdb.set_trace()
        if len(wb_regularizers) > 0:
            self.regularizers += wb_regularizers
tensorflow.get_collection
2,944
import tensorflow as tf
    # concat all images
    all_images = tf.concat([images, generated, aug_images, aug_generated], 0)
    if self.conditional:
      all_y = tf.concat([y, sampled_y, y, sampled_y], axis=0)

    # Compute discriminator output for real and fake images in one batch.
    d_all, d_all_logits, d_latents = self.discriminator(
        x=all_images, y=all_y, is_training=is_training)
    z_projs = self._latent_projections(d_latents)
    d_real, d_fake, _, _ = tf.split(d_all, 4)
    d_real_logits, d_fake_logits, _, _ = tf.split(d_all_logits, 4)
    z_projs_real, z_projs_fake, z_aug_projs_real, z_aug_projs_fake = tf.split(z_projs, 4)

    self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
        d_real=d_real, d_fake=d_fake, d_real_logits=d_real_logits,
        d_fake_logits=d_fake_logits)

    penalty_loss = penalty_lib.get_penalty_loss(
        x=images, x_fake=generated, y=y, is_training=is_training,
        discriminator=self.discriminator, architecture=self._architecture)
    self.d_loss += self._lambda * penalty_loss
tensorflow.split
2,945
import tensorflow as tf
        return out_img

    def build_discriminator(self, image, reuse=False, name='discriminator'):
        with tf.variable_scope(name):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse is False

            def lrelu(x, alpha, name='lrelu'):
                with tf.variable_scope(name):
                    if reuse:
                        tf.get_variable_scope().reuse_variables()
                    else:
                        assert tf.get_variable_scope().reuse is False
                    return tf.nn.relu(x) - alpha * tf.nn.relu(-x)

            def instance_norm(x, name='instance_norm'):
                with tf.variable_scope(name):
                    if reuse:
                        tf.get_variable_scope().reuse_variables()
tensorflow.variable_scope
2,946
import tensorflow as tf
  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)


def _random_crop(image_list, crop_height, crop_width):
  """Crops the given list of images.

  The function applies the same crop to each image in the list. This can be
  effectively applied when there are multiple image inputs of the same
  dimension such as:

    image, depths, normals = _random_crop([image, depths, normals], 120, 150)
tensorflow.slice
2,947
import tensorflow as tf
        else:
            self.loss_d_rot = self.loss_d + self.weight_rotation_loss_d * self.real_pc_rot_loss
            self.loss_g_rot = self.loss_g + self.weight_rotation_loss_g * self.gen_out_rot_loss

        # Compute gradient penalty at interpolated points
        ndims = self.real_pc.get_shape().ndims  # (1024, 3)
        alpha = tf.random_uniform(shape=[self.batch_size] + [1] * (ndims - 1), minval=0., maxval=1.)
        differences = self.generator_out - self.real_pc
        interpolates = self.real_pc + (alpha * differences)
        with tf.variable_scope('discriminator') as scope:
            gradients = tf.gradients(
                self.discriminator(interpolates, reuse=True, scope=scope, **disc_kwargs)[1],
                [interpolates])[0]

        # Reduce over all but the first dimension
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=list(range(1, ndims))))
        gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
        self.loss_d_rot += lam * gradient_penalty

        train_vars = tf.trainable_variables()

        d_params = [v for v in train_vars if v.name.startswith(name + '/discriminator/')]
tensorflow.variable_scope
2,948
import tensorflow as tf
    channels = shape[3]
    res = tf.split(axis=0, num_or_size_splits=batch_size, value=input_)
    res = [elem[0, :, :, :] for elem in res]
    res = [tf.image.random_flip_left_right(elem) for elem in res]
    res = [tf.reshape(elem, [1, height, width, channels]) for elem in res]
    res = tf.concat(axis=0, values=res)
tensorflow.image.random_flip_left_right
2,949
import tensorflow as tf "with_src_lang_tag": self._with_src_lang_tag, "trg_lang_tag_position": self._trg_lang_tag_position, } def inputs_signature(self, mode): """ Returns the input dtypes and signatures. """ dtypes = {"feature": tf.int64, "src_lang": tf.int64, "trg_lang": tf.int64} signatures = {"feature": tf.TensorShape([None, None]), "src_lang": tf.TensorShape([None, ]), "trg_lang": tf.TensorShape([None, ])} if mode == compat.ModeKeys.INFER: return dtypes, signatures dtypes["label"] = tf.int64 signatures["label"] = tf.TensorShape([None, None]) return dtypes, signatures
tensorflow.TensorShape
2,950
import tensorflow as tf


class DataOwner:
    BATCH_SIZE = 30

    def __init__(self, player_name, build_training_model):
        self.player_name = player_name
        self._build_training_model = build_training_model

    def _build_data_pipeline(self):
        def normalize(image, label):
            image = tf.cast(image, tf.float32) / 255.0
            return image, label

        dataset = tf.data.TFRecordDataset(["./data/train.tfrecord"])
        dataset = dataset.map(decode)
        dataset = dataset.map(normalize)
        dataset = dataset.repeat()
        dataset = dataset.batch(self.BATCH_SIZE)

        iterator = dataset.make_one_shot_iterator()
        return iterator.get_next()

    def compute_gradient(self):
        with tf.name_scope('data_loading'):
            x, y = self._build_data_pipeline()

        with tf.name_scope('gradient_computation'):
tensorflow.data.TFRecordDataset
2,951
import tensorflow as tf
    input_size = query.get_shape().as_list()[-1]

    # Trainable parameters
    w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
    b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    with tf.name_scope('v'):
        # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
        # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
        tmp1 = tf.tensordot(facts, w1, axes=1)
tensorflow.random_normal
2,952
import tensorflow as tf
    num_input_units = inputs.get_shape()[-1].value
    weights = _variable_with_weight_decay('weights',
                                          shape=[num_input_units, num_outputs],
                                          use_xavier=use_xavier,
                                          stddev=stddev,
                                          wd=weight_decay)
    outputs = tf.matmul(inputs, weights)
    biases = _variable_on_cpu('biases', [num_outputs],
                              tf.constant_initializer(0.0))
    outputs = tf.nn.bias_add(outputs, biases)

    if bn:
tensorflow.matmul
2,953
import tensorflow as tf
            scores=outputs[1],
            labels=outputs[2],
            method=1)
        tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img)
        loss_dict = outputs[-1]
tensorflow.summary.image
2,954
import tensorflow as tf
                    algo.optimize_policy()
                    sampler.update_goals()
        """
        with self.sess.as_default() as sess:

            # initialize uninitialized vars (only initialize vars that were not loaded)
            uninit_vars = [var for var in tf.global_variables()
                           if not sess.run(tf.is_variable_initialized(var))]
            sess.run(tf.variables_initializer(uninit_vars))

            start_time = time.time()
            for itr in range(self.start_itr, self.n_itr):
                itr_start_time = time.time()
tensorflow.is_variable_initialized
2,955
import tensorflow as tf
            last_complete = now


def main():
    if not tf.io.gfile.exists(a.output_dir):
        tf.io.gfile.makedirs(a.output_dir)

    if a.operation == "edges" and a.crop:
        try:
            if not tf.io.gfile.exists(a.crop_dir):
                tf.io.gfile.makedirs(a.crop_dir)
        except Exception as e:
            raise Exception("invalid crop_dir: {:s}".format(e))

    src_paths = []
    dst_paths = []
tensorflow.io.gfile.exists
2,956
import tensorflow as tf
        if self.local_condition:
            self._placeholders.append(tf.placeholder(tf.float32, shape=(None, hparams.num_mels, None), name='local_condition_features'))
            queue_types.append(tf.float32)
        if self.global_condition:
            self._placeholders.append(tf.placeholder(tf.int32, shape=(None, 1), name='global_condition_features'))
            queue_types.append(tf.int32)

        # Create queue for buffering data
        queue = tf.FIFOQueue(8, queue_types, name='input_queue')
        self._enqueue_op = queue.enqueue(self._placeholders)
        variables = queue.dequeue()

        self.inputs = variables[0]
        self.inputs.set_shape(self._placeholders[0].shape)
        self.targets = variables[1]
        self.targets.set_shape(self._placeholders[1].shape)
tensorflow.FIFOQueue
2,957
import tensorflow as tf
        self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval_net')
        self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')

        with tf.variable_scope('target_q'):
            self.target_q = R + self.gamma * self.q_

        with tf.variable_scope('abs_TD'):
            self.abs_td = tf.abs(self.target_q - self.q)
        self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
        with tf.variable_scope('TD_error'):
            self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))

        with tf.variable_scope('C_train'):
            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)

        with tf.variable_scope('a_grad'):
            self.a_grads = tf.gradients(self.q, a)[0]  # tensor of gradients of each sample (None, a_dim)

    def _build_net(self, s, a, scope, trainable):
        with tf.variable_scope(scope):
            init_w = tf.random_normal_initializer(0., 0.01)
tensorflow.squared_difference
2,958
from tensorflow.python.framework import ops
    # model_to_estimator) invalidates all models/layers made before the
    # conversion (even if they were not the model converted to an estimator).
    # Similarly, making a layer or a model inside a tf.compat.v1.Graph
    # invalidates all layers/models you previously made outside of the graph.
    self._originally_built_as_v1 = True

  @property
  def _saved_model_loader(self) -> saved_transform_io_v2.SavedModelLoader:
    """A `saved_transform_io_v2.SavedModelLoader`."""
    if self._saved_model_loader_value is None:
      self._saved_model_loader_value = saved_transform_io_v2.SavedModelLoader(
          self._tft_output.transform_savedmodel_dir)
      self._loaded_saved_model_graph = ops.get_default_graph()

    # TODO(b/160294509): Use tf.compat.v1 when we stop supporting TF 1.15.
    if ops.executing_eagerly_outside_functions():
      return self._saved_model_loader_value
    else:
      assert not self._exported_as_v1
      # TODO(b/149997088): Raise an exception once we no longer support using
      # the Keras layer with estimator based Trainer.
      tf.compat.v1.logging.warning('Loading a TF2 SavedModel but eager mode '
                                   'seems disabled.')
      # If exported as TF2 SavedModel but not invoked in eager mode,
      # re-initialize the saved_model_loader_value as __init__ could have been
      # called in a different graph context.
      default_graph = ops.get_default_graph()
      if (self._loaded_saved_model_graph is None or
          self._loaded_saved_model_graph is not default_graph):
        self._saved_model_loader_value = saved_transform_io_v2.SavedModelLoader(
tensorflow.python.framework.ops.executing_eagerly_outside_functions
2,959
import tensorflow as tf
    x2d, x3d = x
    with tf.GradientTape() as tape:
        pred, K, reprojected, crit_fake = model(x2d)
        crit_real = model.crit(x3d)
        crit_dis = tf.reduce_mean(tf.square(crit_real - tf.ones_like(crit_real))) + tf.reduce_mean(tf.square(crit_fake - tf.zeros_like(crit_fake)))
        crit_gen = tf.reduce_mean(tf.square(crit_fake - tf.ones_like(crit_fake)))
        rep_loss = tf.reduce_mean(tf.square(pred - x2d))
        KK = tf.matmul(K, K, transpose_b=True)
        K_trace = tf.expand_dims(tf.expand_dims(tf.trace(KK), -1), -1)
        K_loss = tf.reduce_mean(tf.abs(KK / K_trace - tf.eye(2)))
        loss_total_gen = crit_gen + rep_loss + K_loss

    gen_var = model.get_gen_vars()
    dis_var = model.dis.trainable_variables
    grads = tape.gradient([loss_total_gen, crit_dis], [gen_var, dis_var])
    return grads, [crit_dis, crit_gen, rep_loss, K_loss]


reader = datareader.DataReader(16)
tensorflow.trace
2,960
import tensorflow as tf
  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
        num_labels, use_one_hot_embeddings)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
tensorflow.shape
2,961
import tensorflow as tf
                tf.cast(tf.ones_like(label_ids, dtype=tf.int32), tf.int32),
                tf.cast(pred_label, tf.int32)
            )
            st_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
            pred_label = tf.argmax(distillation_loss["te_logits"], axis=-1, output_type=tf.int32)
            correct = tf.equal(
                tf.cast(tf.zeros_like(label_ids, dtype=tf.int32), tf.int32),
                tf.cast(pred_label, tf.int32)
            )
            te_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        except:
            te_accuracy = tf.constant(0.0)
            st_accuracy = tf.constant(0.0)

        try:
            st_accuracy = tf.reduce_mean(distillation_loss["src_f1_prob"])
            te_accuracy = tf.reduce_mean(distillation_loss["tgt_f1_prob"])
        except:
            te_accuracy = tf.constant(0.0)
            st_accuracy = tf.constant(0.0)

        return {
            "train": {
                "loss": loss,
                "logits": logits,
tensorflow.constant
2,962
import tensorflow as tf
  w = tf.get_variable(
      name="kernel",
      shape=[num_attention_heads * head_size, hidden_size],
      initializer=initializer)
  w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
  b = tf.get_variable(
      name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
  ret = tf.einsum("BFND,NDH->BFH", input_tensor, w)
  ret += b
  if activation is not None:
tensorflow.get_variable
2,963
import tensorflow as tf
  nodes_list = [nodes]
  adj_list = []
  for hop_edge_types in edge_types:
    neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
    next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
    next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
    next_values = weight.values
    next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
    next_shape = tf.cast(next_shape, tf.int64)
    next_adj = tf.SparseTensor(next_indices, next_values, next_shape)
    next_adj = tf.sparse_reorder(next_adj)
    nodes_list.append(next_nodes)
    adj_list.append(next_adj)
    nodes = next_nodes
  return nodes_list, adj_list
tensorflow.SparseTensor
2,964
import tensorflow as tf
      tf.constant([], dtype=tf.string),
      lambda state, elem: tf.concat([state, elem], axis=-1))
tensorflow.concat
2,965
import tensorflow as tf
        def sample_compute(_):
            pairs = sample_func()
            loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio)
            pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32)
            p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
tensorflow.math.count_nonzero
2,966
import tensorflow as tf
        concat1 = contrib_metrics.streaming_concat(logits)
        concat2 = contrib_metrics.streaming_concat(label_ids)

        # Compute Pearson correlation
        pearson = contrib_metrics.streaming_pearson_correlation(
            logits, label_ids, weights=is_real_example)

        # Compute MSE
        # mse = tf.metrics.mean(per_example_loss)
        mse = tf.metrics.mean_squared_error(
            label_ids, logits, weights=is_real_example)

        loss = tf.metrics.mean(
            values=per_example_loss, weights=is_real_example)

        return {"pred": concat1, "label_ids": concat2, "pearson": pearson,
                "MSE": mse, "eval_loss": loss,}
    elif task_name == "cola":
      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        """Compute Matthew's correlations for STS-B."""
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
        tp, tp_op = tf.metrics.true_positives(
tensorflow.metrics.mean
2,967
import tensorflow as tf
        double_obs_ph = double_policy.obs_ph

    with tf.variable_scope("loss", reuse=reuse):
        # set up placeholders
        act_t_ph = tf.placeholder(tf.int32, [None], name="action")
        rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
        done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
        importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")

        # q scores for actions which we know were selected in the given state.
        q_t_selected = tf.reduce_sum(step_model.q_values * tf.one_hot(act_t_ph, n_actions), axis=1)

        # compute estimate of best possible value starting from state at t + 1
        if double_q:
            q_tp1_best_using_online_net = tf.argmax(double_q_values, axis=1)
            q_tp1_best = tf.reduce_sum(target_policy.q_values * tf.one_hot(q_tp1_best_using_online_net, n_actions), axis=1)
        else:
            q_tp1_best = tf.reduce_max(target_policy.q_values, axis=1)
        q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best

        # compute RHS of bellman equation
        q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked

        # compute the error (potentially clipped)
        td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
        errors = tf_util.huber_loss(td_error)
        weighted_error = tf.reduce_mean(importance_weights_ph * errors)
tensorflow.argmax
2,968
from tensorflow.python.framework import ops
    # Since C_A is stored in a var, by how much do we need to increment that
    # var to make the var = C_AB?
    delta_comoment = (batch_comoment +
                      (prev_mean_prediction - batch_mean_prediction) *
                      (prev_mean_label - batch_mean_label) *
                      (prev_count * batch_count / update_count))
    update_comoment = state_ops.assign_add(comoment, delta_comoment)

    covariance = _safe_div(comoment, count - 1, 'covariance')
    with ops.control_dependencies([update_comoment]):
      update_op = _safe_div(comoment, count - 1, 'update_op')

  if metrics_collections:
    ops.add_to_collections(metrics_collections, covariance)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return covariance, update_op


def streaming_pearson_correlation(predictions,
                                  labels,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
tensorflow.python.framework.ops.add_to_collections
2,969
import tensorflow as tf
      per_example_loss = -tf.reduce_sum(
          tf.nn.log_softmax(logits) * one_hot_target, -1)
tensorflow.nn.log_softmax
2,970
import tensorflow as tf
sess = tf.Session()

# Create actor and critic.
actor = Actor(sess, ACTION_DIM, ACTION_BOUND, LR_A, REPLACE_ITER_A)
critic = Critic(sess, STATE_DIM, ACTION_DIM, LR_C, GAMMA, REPLACE_ITER_C, actor.a, actor.a_)
actor.add_grad_to_graph(critic.a_grads)

M = Memory(MEMORY_CAPACITY)

saver = tf.train.Saver(max_to_keep=100)

if LOAD_MODEL:
    all_ckpt = tf.train.get_checkpoint_state('./data', 'checkpoint').all_model_checkpoint_paths
    saver.restore(sess, all_ckpt[-1])
else:
    if os.path.isdir(DATA_PATH):
        shutil.rmtree(DATA_PATH)
    os.mkdir(DATA_PATH)
    sess.run(tf.global_variables_initializer())
tensorflow.train.Saver
2,971
from tensorflow.contrib.framework import deprecated_args
    ops.add_to_collections(updates_collections, update)
  return metric, update


# TODO(ptucker): Validate range of values in labels?
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_sparse_precision_at_k(predictions,
                                    labels,
                                    k,
                                    class_id=None,
tensorflow.contrib.framework.deprecated_args
2,972
import tensorflow as tf
        imsave(os.path.join(config.DEBUG_DIR, file_name), img.astype(np.uint8))
    return save_image_with_heatmap.counter


def get_keypoint(image, targets, predictions, heatmap_size, height, width, category, clip_at_zero=True, data_format='channels_last', name=None):
    predictions = tf.reshape(predictions, [1, -1, heatmap_size*heatmap_size])

    pred_max = tf.reduce_max(predictions, axis=-1)
    pred_indices = tf.argmax(predictions, axis=-1)
    pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32)

    width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
    pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), pred_y * height / tf.cast(heatmap_size, tf.float32)

    if clip_at_zero:
        pred_x, pred_y = pred_x * tf.cast(pred_max > 0, tf.float32), pred_y * tf.cast(pred_max > 0, tf.float32)
        pred_x = pred_x * tf.cast(pred_max > 0, tf.float32) + tf.cast(pred_max <= 0, tf.float32) * (width / 2.)
tensorflow.floordiv
2,973
import tensorflow as tf
            mu, var = tf.nn.moments(t, axes=[0])
            std = tf.sqrt(var + self.epsilon)
            return [tf.assign(self.g, 1 / std), tf.assign(self.b, -1. * mu / std)]

        require_init = tf.reduce_any(tf.is_nan(self.g))
        init_ops = tf.cond(require_init, _init, lambda: [self.g, self.b])

        with tf.control_dependencies(init_ops):
            w = tf.expand_dims(self.g, axis=0) * tf.nn.l2_normalize(self.v, axis=0)
            return tf.matmul(input_var, w) + self.b

    def get_variables(self):
        # TODO: self.v should be l2-normalized or not? / currently not.
        return {'v': self.v, 'b': self.b, 'g': self.g}
tensorflow.control_dependencies
2,974
import tensorflow as tf
    # Adds a set of collections.
    tf.add_to_collection("int_collection", 3)
    tf.add_to_collection("float_collection", 3.5)
    tf.add_to_collection("string_collection", "hello")
    tf.add_to_collection("variable_collection", v0)
    # Add QueueRunners.
    tf.train.add_queue_runner(qr)
    # Adds user_defined proto in three formats: string, bytes and Any.
    queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
    tf.add_to_collection("user_defined_string_collection", str(queue_runner))
    tf.add_to_collection("user_defined_bytes_collection",
                         queue_runner.SerializeToString())
tensorflow.train.add_queue_runner
2,975
import tensorflow as tf
    neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
    next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
    next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
    next_values = weight.values
    next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
    next_shape = tf.cast(next_shape, tf.int64)
    next_adj = tf.SparseTensor(next_indices, next_values, next_shape)
    next_adj = tf.sparse_reorder(next_adj)
tensorflow.size
2,976
import tensorflow as tf
def compute_error_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0):
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)

    loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small))
    if hard_ratio < 1.0:
tensorflow.where
2,977
import tensorflow as tf
    def build(self, rgb, label_num, train_mode=None, last_layer_type="softmax"):
        """
        load variable from npy to build the Resnet or Generate a new one

        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
        :param train_mode: a bool tensor, usually a placeholder: if True, dropout will be turned on
        """
        red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb)
        assert red.get_shape().as_list()[1:] == [224, 224, 1]
        assert green.get_shape().as_list()[1:] == [224, 224, 1]
        assert blue.get_shape().as_list()[1:] == [224, 224, 1]
        bgr = tf.concat(axis=3, values=[
            blue - configs['VGG_MEAN'][0],
            green - configs['VGG_MEAN'][1],
            red - configs['VGG_MEAN'][2],
        ])
        print(bgr.get_shape().as_list())
        assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
        self.bgr = bgr
        self.conv1 = self.conv_layer(self.bgr, 7, 3, 64, 2, "conv1")  # 112*112
        self.pool1 = self.max_pool(self.conv1, 3, 2, "pool1")  # 56*56 * 64
        self.block1_1 = self.res_block_3_layers(self.pool1, [64, 64, 256], "res2a", True)  # 56*56
        self.block1_2 = self.res_block_3_layers(self.block1_1, [64, 64, 256], "res2b")  # 56*56
tensorflow.concat
2,978
import tensorflow as tf
def lstm(xs, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    nsteps = len(xs)
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, x in enumerate(xs):
        c = c
        h = h
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
tensorflow.matmul
2,979
import tensorflow as tf
    samples = flatten(samples)

    domain_selection_mask = tf.concat(
        values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], axis=0)

    grl = gradient_reverse(samples)
tensorflow.zeros
2,980
import tensorflow as tf
        worker_device = "/job:worker/task:{}/cpu:0".format(task)
        with tf.device(tf.train.replica_device_setter(1, worker_device=worker_device)):
            with tf.variable_scope("global"):
                self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n)
tensorflow.variable_scope
2,981
import tensorflow as tf
    def instantiate_discriminator_growth_layer_block(self, params, block_idx):
        """Instantiates discriminator growth block layers.

        Args:
            params: dict, user passed parameters.
            block_idx: int, the current growth block's index.

        Returns:
            List of growth block layers.
        """
        with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
            # Get conv block layer properties.
            conv_block = params["discriminator_growth_conv_blocks"][block_idx]

            # Create new inner convolutional layers.
            conv_layers = [
                tf.layers.Conv2D(
                    filters=conv_block[i][3],
                    kernel_size=conv_block[i][0:2],
                    strides=conv_block[i][4:6],
tensorflow.variable_scope
2,982
import tensorflow as tf
    elif decoder.embedding_initializer == 'uniform' or (decoder.embedding_initializer is None and decoder.initializer == 'uniform'):
        initializer = tf.random_uniform_initializer(minval=-weight_scale, maxval=weight_scale)
    else:
        initializer = tf.random_normal_initializer(stddev=weight_scale)

    with tf.device('/cpu:0'):
        embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape,
                                 initializer=initializer)
tensorflow.random_normal_initializer
2,983
from tensorflow.python.ops import math_ops
    perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
    valid_array = array[:size]
    valid_array.set_shape([None] + fixed_shape)
    value = array_ops.transpose(valid_array, perm, name='concat')

    values_size = array_ops.shape(values)[axis]
    if max_size is None:
      batch_size = values_size
    else:
      batch_size = math_ops.minimum(values_size, max_size - size)

    perm = [axis] + [n for n in range(ndim) if n != axis]
    batch_values = array_ops.transpose(values, perm)[:batch_size]

    def reallocate():
      next_size = _next_array_size(new_size)
      next_shape = array_ops.pack([next_size] + fixed_shape)
      new_value = array_ops.zeros(next_shape, dtype=values.dtype)
tensorflow.python.ops.math_ops.minimum
2,984
import tensorflow as tf tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a TFRecord file.""" writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids)
tensorflow.python_io.TFRecordWriter
2,985
import tensorflow as tf
    with tf.Session(config=config) as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        if restore:
            tf.train.Saver().restore(sess, path)

        feed_dict = {
            testnum: test_num,
            trainnum: train_num,
            learnrate: lr
tensorflow.train.Saver
2,986
import tensorflow as tf
        init = tf.truncated_normal_initializer(stddev=init_stddev)
        X = tf.layers.conv2d(X, out_channels, kernel_size=filtersize,
                             strides=(stride, stride), padding="valid",
                             kernel_initializer=init)
        if norm == 'I':
            X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse, epsilon=0.001)
        elif norm == 'B':
            X = tf.layers.batch_normalization(X, reuse=reuse, training=True)
        elif norm == 'G':
tensorflow.contrib.layers.instance_norm
2,987
import tensorflow as tf
  # Setup default values.
  if not image_pyramid:
    image_pyramid = [1.0]
  # if model_options.crop_size is None and model_options.add_image_level_feature:
  #   raise ValueError(
  #       'Crop size must be specified for using image-level feature.')
  if model_options.model_variant == 'mobilenet_v2':
    if (model_options.atrous_rates is not None or
        model_options.decoder_output_stride is not None):
      # Output a warning and users should make sure if the setting is desired.
      tf.logging.warning('Our provided mobilenet_v2 checkpoint does not '
                         'include ASPP and decoder modules.')

  crop_height = (
      model_options.crop_size[0]
      if model_options.crop_size else tf.shape(images)[1])
  crop_width = (
      model_options.crop_size[1]
      if model_options.crop_size else tf.shape(images)[2])

  # Compute the height, width for the output logits.
tensorflow.logging.warning
2,988
import tensorflow as tf
    top_antecedents = tf.maximum(raw_top_antecedents, 0)  # [k, c]

    top_fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.gather(top_span_mention_scores, top_antecedents)  # [k, c]
    top_fast_antecedent_scores += tf.log(tf.to_float(top_antecedents_mask))  # [k, c]
    return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
tensorflow.to_float
2,989
import tensorflow as tf
  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.pack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)


def _random_crop(image_list, crop_height, crop_width):
tensorflow.greater_equal
2,990
import tensorflow as tf
    FLAGS.pm = FLAGS.pm.split('-')
    num_layers = len(FLAGS.pm) - 1
    print(num_layers)
    for l in range(num_layers):
        with tf.variable_scope('layer{}'.format(l)):
            with tf.variable_scope('conv'):
                if l == 0:
                    bottom = x
                    W = weight_variable([1, FLAGS.conv_kernel, FLAGS.conv_kernel, 1, FLAGS.feats_per_layer])
                else:
                    if out.get_shape()[2] < FLAGS.conv_kernel:
tensorflow.variable_scope
2,991
import tensorflow as tf
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
tensorflow.slice
2,992
import tensorflow as tf
            self.mixed_pred, mixed_end_points = self.get_pred(self.mixed_pc)
            self.mixed_loss = self.get_loss(self.mixed_pred, self.mixed_label, mixed_end_points)

        with tf.variable_scope('discriminator') as scope:
            self.real_prob, self.real_logit = self.discriminator(self.real_pc_rotated, scope=scope, **disc_kwargs)
            self.synthetic_prob, self.synthetic_logit = self.discriminator(self.gen_out_rotated, reuse=True, scope=scope, **disc_kwargs)

        # Compute WGAN losses
        self.loss_d = tf.reduce_mean(self.synthetic_logit) - tf.reduce_mean(self.real_logit)  # comparing rotated fake and real images
        self.loss_g = -tf.reduce_mean(self.synthetic_logit)

        # Add rotation loss
        if self.ms_task:
            self.g_ms_loss = tf.abs(self.gen_out_rot_loss - self.real_pc_rot_loss, name='abs')
            self.d_ms_loss = self.mixed_loss
            self.loss_d_rot = self.loss_d + self.weight_rotation_loss_d * self.d_ms_loss
            self.loss_g_rot = self.loss_g + self.weight_rotation_loss_g * self.g_ms_loss
        else:
            self.loss_d_rot = self.loss_d + self.weight_rotation_loss_d * self.real_pc_rot_loss
tensorflow.reduce_mean
2,993
import tensorflow as tf
    # Test case 1, 2.
    x = tf.placeholder(dtype=tf.int32, shape=[])
    # None would fire an exception were it actually executed.
    self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None))
    self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None),
                                             lambda: tf.shape(x)))
    x = tf.placeholder(dtype=tf.int32, shape=[1])
    # None would fire an exception were it actually executed.
    self.assertFalse(normal._is_scalar_helper(x.get_shape, lambda: None))
    self.assertFalse(normal._is_scalar_helper(lambda: tf.TensorShape(None),
                                              lambda: tf.shape(x)))

    # Test case 3.
    x = tf.placeholder(dtype=tf.int32)
    is_scalar = normal._is_scalar_helper(x.get_shape, lambda: tf.shape(x))
    self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
    self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))


if __name__ == '__main__':
  tf.test.main()
tensorflow.shape
2,994
from tensorflow.python.platform import test
    config._cluster_spec = server_lib.ClusterSpec({})
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=(sparse_feature,),
        dnn_feature_columns=(embedding_feature,),
        dnn_hidden_units=(3, 3),
        config=config)
    metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate(
        input_fn=_input_fn, steps=100)
    self._assertCommonMetrics(metrics)


if __name__ == '__main__':
  test.main()
tensorflow.python.platform.test.main
2,995
import tensorflow as tf """ Function initialize one matrix of weights and one bias vector. :type shape: tuple :type name: str :rtype: dictionary """ Winit = tf.truncated_normal(shape, mean=0, stddev=0.1) binit = tf.zeros(shape[-1]) layer = {} layer["weights"] = tf.get_variable(name + "/weights", dtype=tf.float32, initializer=Winit) layer["bias"] = tf.get_variable(name + "/bias", dtype=tf.float32, initializer=binit)
tensorflow.zeros
2,996
import tensorflow as tf
    mean, var = tf.nn.moments(inputdata, axis, keep_dims=True)

    if not use_affine:
        return tf.divide(inputdata - mean, tf.sqrt(var + epsilon), name='output')

    beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
    beta = tf.reshape(beta, new_shape)
tensorflow.sqrt
2,997
import tensorflow as tf
        for i, (grad, var) in enumerate(gradients):
            if grad is not None:
                gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)

        with tf.variable_scope("input_info", reuse=False):
            tf.summary.scalar('rewards', tf.reduce_mean(rew_t_ph))
            tf.summary.scalar('importance_weights', tf.reduce_mean(importance_weights_ph))

            if full_tensorboard_log:
                tf.summary.histogram('rewards', rew_t_ph)
                tf.summary.histogram('importance_weights', importance_weights_ph)

        if tf_util.is_image(obs_phs[0]):
tensorflow.reduce_mean
2,998
import tensorflow as tf
        train_op = None

    cls_accuracy = tf.metrics.accuracy(glabels, predictions['classes'])
    metrics = {'cls_accuracy': cls_accuracy}

    # Create a tensor named train_accuracy for logging purposes.
    tf.identity(cls_accuracy[1], name='cls_accuracy')
    tf.summary.scalar('cls_accuracy', cls_accuracy[1])

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
tensorflow.identity
2,999