Columns:
seed: string, length 25 to 2.89k characters
seed_api: string, length 14 to 102 characters
index: int64, range 0 to 14.8k
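The rows that follow each pair a seed code snippet (column seed) with the fully qualified TensorFlow API it exercises (column seed_api) and a running row number (column index). As a minimal sketch of how rows with this schema could be consumed, assuming they are exported as JSON Lines with exactly these three fields (the file name below is a placeholder, not part of the original data):

import json
from collections import Counter

ROWS_PATH = "seed_api_rows.jsonl"  # hypothetical export of the rows below

def iter_rows(path=ROWS_PATH):
    # Yield one dict per row; each dict carries the seed, seed_api and index fields.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

def api_histogram(rows):
    # Count how often each fully qualified TensorFlow API appears as seed_api.
    return Counter(r["seed_api"] for r in rows)

if __name__ == "__main__":
    for api, n in api_histogram(iter_rows()).most_common(10):
        print(f"{n:5d}  {api}")

Iterating this way makes it easy to check, for example, how often deprecated 1.x APIs such as tensorflow.placeholder, tensorflow.to_float, or tensorflow.initialize_all_variables appear among the seed snippets.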
import tensorflow as tf """ # TODO: support shape and tt_ranks as TensorShape?. # TODO: support None as a dimension. shape = np.array(shape) tt_rank = np.array(tt_rank) _validate_input_parameters(is_tensor=True, shape=shape, tt_rank=tt_rank, batch_size=batch_size) num_dims = shape.size if tt_rank.size == 1: tt_rank = tt_rank * np.ones(num_dims - 1) tt_rank = np.insert(tt_rank, 0, 1) tt_rank = np.append(tt_rank, 1) tt_rank = tt_rank.astype(int) tt_cores = [None] * num_dims with tf.name_scope(name): for i in range(num_dims): curr_core_shape = (batch_size, tt_rank[i], shape[i], tt_rank[i + 1]) tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev, dtype=dtype) return TensorTrainBatch(tt_cores, shape, tt_rank, batch_size) def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32, name='t3f_matrix_with_random_cores'): """Generate a TT-matrix of given shape with N(mean, stddev^2) cores. Args:
tensorflow.name_scope
2,800
import tensorflow as tf random_tensor = (1.0 - self._dropout_keep_prob + tf.random_uniform(tf.shape(confidence_scores)))
tensorflow.shape
2,801
import tensorflow as tf self.evaluate(pdf), self._scipy_pareto(concentration_v, scale_v).pdf(x)) def testParetoLogPdfValidateArgs(self): batch_size = 3 scale = tf.constant([2., 3., 4.]) concentration = tf.constant([2.] * batch_size) pareto = tfd.Pareto(concentration, scale, validate_args=True) with self.assertRaisesOpError("not in the support"): x = tf.placeholder_with_default(input=[2., 3., 3.], shape=[3]) log_prob = pareto.log_prob(x) self.evaluate(log_prob) with self.assertRaisesOpError("not in the support"): x = tf.placeholder_with_default(input=[2., 2., 5.], shape=[3]) log_prob = pareto.log_prob(x) self.evaluate(log_prob) with self.assertRaisesOpError("not in the support"):
tensorflow.placeholder_with_default
2,802
import tensorflow as tf ) out = tf.layers.dense( inputs=fc2,
tensorflow.layers.dense
2,803
import tensorflow as tf s_h, s_w = self.output_height, self.output_width s_h2, s_h4, s_h8, s_h16 = \ int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16) s_w2, s_w4, s_w8, s_w16 = \ int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16) output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin')) output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8]) output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3), [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1')) output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3), [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2')) output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3), [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3')) output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3), [self.batch_size, s_h, s_w, self.c_dim], name='d_h4') scope.reuse_variables() truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
tensorflow.concat
2,804
import tensorflow as tf non_monotonous = (1 - monotonous) * mask attn_loss = tf.reduce_sum(attention_weights * tf.stop_gradient(non_monotonous)) / tf.to_float(batch_size)
tensorflow.to_float
2,805
import tensorflow as tf ref=self.internals_memory[name], indices=indices, updates=internals[name] )) for name in sorted(actions): assignments.append(tf.scatter_update( ref=self.actions_memory[name], indices=indices, updates=actions[name] ))
tensorflow.scatter_update
2,806
import tensorflow as tf with tf.Session() as sess: # Load the model facenet.load_model(args.model_dir) # Get input and output tensors images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0") embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0") phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0") # Run forward pass to calculate embeddings nrof_images = len(image_list)
tensorflow.get_default_graph
2,807
import tensorflow as tf train_vars = tf.trainable_variables() d_params = [v for v in train_vars if v.name.startswith(name + '/discriminator/')] g_params = [v for v in train_vars if v.name.startswith(name + '/generator/')] rot_params = [v for v in train_vars if '/rotation/' in v.name] #slightly suspecting that this part is incorrect self.opt_d = self.optimizer(self.init_lr, beta, self.loss_d_rot, d_params) self.opt_g = self.optimizer(self.init_lr, beta, self.loss_g_rot, g_params) #used loss_g + rot_loss to update self.opt_pred = self.optimizer(lr_pred, beta, self.real_pc_rot_loss, rot_params, batch) #only use real pics to update self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=None) self.init = tf.global_variables_initializer() #Launch the session config = tf.ConfigProto(allow_soft_placement = True) config.gpu_options.allow_growth = True self.sess = tf.Session(config=config, graph=self.graph) self.sess.run(self.init)
tensorflow.global_variables
2,808
import tensorflow as tf tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs') tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every') tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization') tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ') tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information') tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode') tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size') tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ') tf.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set')
tensorflow.app.flags.DEFINE_boolean
2,809
import tensorflow as tf rdd = sc.parallelize(data_list, splits) tensor_structure = [TensorMeta(tf.as_dtype(t.dtype), shape=t.shape[1:], name="input_%s" % i) for i, t in enumerate(tensors)] else: flattened = nest.flatten(tensors) for i in range(len(flattened)): if flattened[i].dtype == np.dtype("float64"): flattened[i] = np.float32(flattened[i]) data_list = _splits(flattened) rdd = sc.parallelize(data_list, splits) rdd = rdd.map(lambda x: nest.pack_sequence_as(tensors, x)) tensor_structure = nest.pack_sequence_as(tensors, [TensorMeta(tf.as_dtype(t.dtype), shape=t.shape[1:], name="input_%s" % i) for i, t in enumerate(flattened)]) return rdd, tensor_structure def _splits(tensors): data_list = [] data_size = tensors[0].shape[0] for i in range(data_size): sample = [] for j in range(len(tensors)): sample.append(tensors[j][i])
tensorflow.as_dtype
2,810
from tensorflow.python.framework import ops train_ops = self._get_linear_training_ops( linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads, dnn_vars) train_step = control_flow_ops.group(*train_ops, name="combined_training_op") with ops.control_dependencies([train_step]): with ops.get_default_graph().colocate_with(global_step): return state_ops.assign_add(global_step, 1).op, loss def _run_metrics(self, predictions, targets, metrics, weights): result = {}
tensorflow.python.framework.ops.control_dependencies
2,811
import tensorflow as tf target_value_shape_suffix = [num_target_frames, distributional_size] features = { "inputs": observations, "epoch": tf.constant(epoch + 1), "input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32), "input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32), "targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]), "target_action": tf.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32), "target_reward": tf.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32), "target_policy": tf.zeros( obs_shape[:1] + [num_target_frames] + [action_space.n]), "target_value": tf.zeros( obs_shape[:1] + target_value_shape_suffix) } model.distributional_value_size = max(distributional_size, 1) model.use_epochs = hparams.use_epochs
tensorflow.zeros
2,812
from tensorflow.python.ops import check_ops raise ValueError("%s.ndims=%d is not 0 (scalar)" % (x.name, x.get_shape().ndims)) if x_value_static < 0: raise ValueError("%s.value=%d cannot be negative" % (x.name, x_value_static)) return x if self.validate_args: x = control_flow_ops.with_dependencies([ check_ops.assert_rank(x, 0), check_ops.assert_non_negative(x)], x) return x def _introspect_ndims(self, ndims): """Helper to establish some properties of input ndims args.""" if self._is_all_constant_helper(ndims): return (tensor_util.constant_value(ndims),
tensorflow.python.ops.check_ops.assert_rank
2,813
import tensorflow as tf :param kernel_size: :param name: :param depth_multiplier: :param padding: :param stride: :return: """ with tf.variable_scope(name_or_scope=name): in_shape = input_tensor.get_shape().as_list() in_channel = in_shape[3] padding = padding.upper() depthwise_filter_shape = [kernel_size, kernel_size] + [in_channel, depth_multiplier] w_init = tf.contrib.layers.variance_scaling_initializer()
tensorflow.variable_scope
2,814
import tensorflow as tf d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq( enc_inp, dec_inp2, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=tf.constant(True)) sess.run([tf.global_variables_initializer()]) tf.get_variable_scope().reuse_variables() d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=True) d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq( enc_inp, dec_inp2, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=True)
tensorflow.nn.seq2seq.embedding_attention_seq2seq
2,815
import tensorflow as tf r = tf.range(0, max_sequence_len, 1) range_row = tf.expand_dims(r, 0)
tensorflow.expand_dims
2,816
import tensorflow as tf run_metadata=run_metadata) if (save_vis_dir is not None and os.path.exists(save_vis_dir)): # first get the weights out with tf.variable_scope('conv5_3', reuse=True) as scope: conv5_3_weights = tf.get_variable("weights") conv5_3_weights_np, conv5_3_features, st_pool_features =\
tensorflow.variable_scope
2,817
import tensorflow as tf valid_pre = tf.reshape(valid_inf, [validnum, classnum]) valid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1)) valid_accuracy=tf.reduce_mean(tf.cast(valid_correct_prediction,tf.float32)) valid_pre = tf.argmax(valid_pre, 1) valid_true = tf.argmax(valid_labels, 1) target_names = ['class sg', 'class bm', 'class wd', 'class wt', 'class wj', 'class wo', 'class ym', 'class shq', 'class shj', 'class no', 'class yh', 'class fb'] init = tf.initialize_all_variables() config=tf.ConfigProto() config.gpu_options.allow_growth=True #init=tf.initialize_all_variables() def train(train_num=64,test_num=32,lr=1e-4,loop_count=10000,report_step=100,save_step=1000,restore=False): with tf.Session(config=config) as sess: sess.run(init) coord = tf.train.Coordinator()
tensorflow.initialize_all_variables
2,818
import tensorflow as tf input_dict[fields.InputDataFields.image], HASH_KEY: tf.cast(hash_from_source_id, tf.int32),
tensorflow.cast
2,819
import tensorflow as tf (y_p - 368 / 2) * (y_p - 368 / 2) exponent = dist_sq / 2.0 / (21**2) gaussian_map[y_p, x_p] = np.exp(-exponent) return gaussian_map.reshape((1, 368, 368, 1)) def CPM(image): image = image / 256.0 - 0.5 gmap = tf.constant(get_gaussian_map()) gmap = tf.pad(gmap, [[0, 0], [0, 1], [0, 1], [0, 0]]) pool_center = AvgPooling('mappool', gmap, 9, stride=8, padding='VALID') with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, W_init=tf.random_normal_initializer(stddev=0.01)): shared = (LinearWrap(image) .Conv2D('conv1_1', 64) .Conv2D('conv1_2', 64) .MaxPooling('pool1', 2) # 184 .Conv2D('conv2_1', 128)
tensorflow.pad
2,820
import tensorflow as tf def _train_semi_supervised(self, dataset, start_epoch, weights_from, summary_every, model_name, weights_dir): training_X, training_y, validation_X, validation_y = \ dataset.training_X, dataset.training_y, dataset.validation_X, dataset.validation_y if not os.path.exists(weights_dir): os.mkdir(weights_dir) if not os.path.exists(weights_dir + '/best_models'): os.mkdir(weights_dir + '/best_models') # Create a saver. saver = tf.train.Saver(max_to_keep=None) if self.is_summary: training_batch_summary_op = tf.merge_all_summaries(key=TRAINING_BATCH_SUMMARIES) training_epoch_summary_op = tf.merge_all_summaries(key=TRAINING_EPOCH_SUMMARIES) validation_batch_summary_op = tf.merge_all_summaries(key=VALIDATION_BATCH_SUMMARIES) validation_epoch_summary_op = tf.merge_all_summaries(key=VALIDATION_EPOCH_SUMMARIES) # Build an initialization operation to run below. init = tf.global_variables_initializer() gpu_options = tf.GPUOptions( per_process_gpu_memory_fraction=self.cnf.get('gpu_memory_fraction', 0.9)) sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)) sess.run(init) if start_epoch > 1: weights_from = "weights/model-epoch-%d.ckpt" % (start_epoch - 1) if weights_from:
tensorflow.merge_all_summaries
2,821
import tensorflow as tf optimiser = tf.train.AdamOptimizer(decayed_learning_rate, name="Adam").minimize(cross_entropy) # calculate the prediction and the accuracy accuracy, acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1), predictions=tf.argmax(y_conv, axis=1)) loss_summary = tf.summary.scalar('Loss', cross_entropy) acc_summary = tf.summary.scalar('Accuracy', accuracy) # summaries for TensorBoard visualisation validation_summary = tf.summary.merge([img_summary, acc_summary]) training_summary = tf.summary.merge([img_summary, loss_summary]) test_summary = tf.summary.merge([img_summary, acc_summary]) # saver for checkpoints saver = tf.train.Saver(tf.global_variables(), max_to_keep=1) with tf.Session() as sess: summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5) summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer())
tensorflow.summary.merge
2,822
import tensorflow as tf ValueError: if `image_tensor`'s' width or height is smaller than `min_dim`. """ image_shape = image_tensor.get_shape() image_height = static_shape.get_height(image_shape) image_width = static_shape.get_width(image_shape) if image_height is None or image_width is None: shape_assert = tf.Assert( tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim), tf.greater_equal(tf.shape(image_tensor)[2], min_dim)), ['image size must be >= {} in both height and width.'.format(min_dim)]) with tf.control_dependencies([shape_assert]): return tf.identity(image_tensor) if image_height < min_dim or image_width < min_dim: raise ValueError( 'image size must be >= %d in both height and width; image dim = %d,%d' % (min_dim, image_height, image_width)) return image_tensor def assert_shape_equal(shape_a, shape_b):
tensorflow.identity
2,823
import tensorflow as tf flattened_lm_emb = tf.reshape(lm_emb, [num_sentences * max_sentence_length * lm_emb_size, lm_num_layers]) flattened_aggregated_lm_emb = tf.matmul(flattened_lm_emb, tf.expand_dims(self.lm_weights, 1)) # [num_sentences * max_sentence_length * emb, 1] aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length, lm_emb_size]) aggregated_lm_emb *= self.lm_scaling context_emb_list.append(aggregated_lm_emb) context_emb = tf.concat(context_emb_list, 2) # [num_sentences, max_sentence_length, emb] head_emb = tf.concat(head_emb_list, 2) # [num_sentences, max_sentence_length, emb] context_emb = tf.nn.dropout(context_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] head_emb = tf.nn.dropout(head_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length] context_outputs = self.lstm_contextualize(context_emb, text_len, text_len_mask) # [num_words, emb]
tensorflow.concat
2,824
import tensorflow as tf self.demo = demo self.graph = graph if graph is not None else tf.Graph()
tensorflow.Graph
2,825
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder items_to_handlers = { 'image': tfexample_decoder.Image(), 'label': tfexample_decoder.Tensor('image/class/label'), }
tensorflow.contrib.slim.python.slim.data.tfexample_decoder.Tensor
2,826
import tensorflow as tf ) # [bs,bn,vec] block_ct_res_tile = tf.tile(tf.expand_dims(block_ct_res, 2), [1, 1, bl, 1])#[bs,bn,vec]->[bs,bn,bl,vec] with tf.variable_scope('combination'): # input:1.rep_map[bs,bn,bl,vec]; 2.self_attn_result[bs,bn,bl,vec]; 3.rnn_res_tile[bs,bn,bl,vec] rep_tensor_with_ct = tf.concat([rep_map, self_attn_result, block_ct_res_tile], -1) # [bs,bn,bl,3vec] new_context_and_gate = linear(rep_tensor_with_ct, 2 * ivec, True, 0., 'linear_new_context_and_gate', False, wd, keep_prob, is_train) # [bs,bn,bl,2vec] new_context, gate = tf.split(new_context_and_gate, 2, 3) # bs,bn,bl,vec if activation == "relu": new_context_act = tf.nn.relu(new_context) elif activation == "elu": new_context_act = tf.nn.elu(new_context) elif activation == "linear": new_context_act = tf.identity(new_context) else: raise RuntimeError
tensorflow.split
2,827
import tensorflow as tf # Location predictions. location_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * self.num_classes * self._box_code_size) location_feature_map = slim.conv2d(net, location_feature_map_depth, [1, 1], activation_fn=None, scope='refined_locations') box_encodings = ops.position_sensitive_crop_regions( location_feature_map, boxes=tf.reshape(proposal_boxes, [-1, self._box_code_size]), box_ind=get_box_indices(proposal_boxes), crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True) box_encodings = tf.squeeze(box_encodings, squeeze_dims=[1, 2]) box_encodings = tf.reshape(box_encodings, [batch_size * num_boxes, 1, self.num_classes, self._box_code_size]) # Class predictions. total_classes = self.num_classes + 1 # Account for background class. class_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * total_classes) class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1], activation_fn=None, scope='class_predictions') class_predictions_with_background = ops.position_sensitive_crop_regions( class_feature_map,
tensorflow.squeeze
2,828
import tensorflow as tf x = tf.expand_dims(tf.range(ksize, delta=1, dtype=tf.float32), axis=1) y = tf.transpose(x, [1, 0]) kernel_matrix = tf.exp(- ((x - ksize/2.) ** 2 + (y - ksize/2.) ** 2) / (2 * sigma ** 2)) #print(kernel_matrix) kernel_filter = tf.reshape(kernel_matrix, [ksize, ksize, 1, 1]) kernel_filter = tf.tile(kernel_filter, [1, 1, inputs_filters, 1]) #kernel_filter = tf.transpose(kernel_filter, [1, 0, 2, 3]) outputs = tf.nn.depthwise_conv2d(inputs, kernel_filter, strides=[1, 1, 1, 1], padding='SAME', data_format=data_format_, name='blur') if data_format_ == 'NHWC': outputs = tf.transpose(outputs, [0, 3, 1, 2]) return outputs cpn_backbone = cpn.cascaded_pyramid_net if 'seresnext50' in FLAGS.backbone: cpn_backbone = cpn.xt_cascaded_pyramid_net def keypoint_model_fn(features, labels, mode, params): targets = labels['targets']
tensorflow.transpose
2,829
import tensorflow as tf super(RestoreMovingAverageHook, self).__init__() self.model_dir = model_dir def begin(self): ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY) variables_to_restore = ema.variables_to_restore() self.load_ema = tf.contrib.framework.assign_from_checkpoint_fn( tf.train.latest_checkpoint(self.model_dir), variables_to_restore ) def after_create_session(self, sess, coord): tf.logging.info('Loading EMA weights...') self.load_ema(sess)
tensorflow.train.latest_checkpoint
2,830
import tensorflow as tf def _tensor_to_image(self, net): with tf.name_scope('to_image'): if FLAGS.new_blur: net = net[..., :self.batch_shape[-1]] net = tf.nn.relu(net) net = tf.cast(net <= 1, net.dtype) * net * 255 net = tf.cast(net, tf.uint8) return net def _image_to_tensor(self, image): with tf.name_scope('args_transform'): net = tf.cast(image, tf.float32) / 255. if FLAGS.new_blur: net = _blur_expand(net) FLAGS.blur = 0. return net def _init_optimizer(self): self.loss_total = tf.add_n(self.losses, 'loss_total') self.optimizer = self.optimizer_constructor(learning_rate=FLAGS.learning_rate)
tensorflow.name_scope
2,831
import tensorflow as tf sigmean = tf.Variable(5.28, name="sigmean", dtype=tf.float64) sigwidth = tf.Variable(0.0027, name="sigwidth", dtype=tf.float64) vdict['sigmean'] = sigmean vdict['sigwidth'] = sigwidth # RooGaussian gauss("gauss","gaussian PDF",mes,sigmean,sigwidth) ; def gaussian_pdf(x, mean, std): val = tf.div(tf.exp(-tf.pow((x - mean) / std, 2) / two), (sqrt2pi * std), name="gaussian_pdf") return val # // --- Build Argus background PDF --- # RooRealVar argpar("argpar","argus shape parameter",-20.0,-100.,-1.) ; # RooConstVar m0("m0", "resonant mass", 5.291);
tensorflow.pow
2,832
import tensorflow as tf @pytest.fixture def sqrt_diag(session_tf): return tf.convert_to_tensor(Datum.sqrt_diag_data) @pytest.fixture def K(session_tf): return tf.convert_to_tensor(Datum.K_data) @pytest.fixture def K_batch(session_tf): return tf.convert_to_tensor(Datum.K_batch_data) @pytest.fixture def sqrt(session_tf): return tf.convert_to_tensor(Datum.sqrt_data) @pytest.fixture() def I(session_tf): return tf.convert_to_tensor(Datum.I) @pytest.mark.parametrize('white', [True, False]) def test_diags(session_tf, white, mu, sqrt_diag, K): """ The covariance of q(x) can be Cholesky matrices or diagonal matrices. Here we make sure the behaviours overlap. """ # the chols are diagonal matrices, with the same entries as the diag representation. chol_from_diag = tf.stack([tf.diag(sqrt_diag[:, i]) for i in range(Datum.N)]) # N x M x M
tensorflow.convert_to_tensor
2,833
import tensorflow as tf embeddings = np.zeros((n_tokens, embed_dim), dtype=DTYPE) config = tf.ConfigProto(allow_soft_placement=True) with tf.Session(config=config) as sess:
tensorflow.ConfigProto
2,834
import tensorflow as tf Tout=[tf.float32, tf.float32]) gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
tensorflow.reshape
2,835
import tensorflow as tf token_type_ids.append(e.token_type_ids) attention_mask.append(e.attention_mask) labels.append(e.label_ids) # parse examples to dataset def _to_dataset(x, dtype=tf.int32): x = tf.ragged.constant(x, dtype=dtype) d = tf.data.Dataset.from_tensor_slices(x) d = d.map(lambda x: x) return d dataset = tf.data.Dataset.zip(
tensorflow.ragged.constant
2,836
import tensorflow as tf self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1])) self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1])) self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1])) self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients self.U0_pred = self.net_U0(self.x0_tf) # N0 x q self.U1_pred = self.net_U1(self.x1_tf) # N1 x q
tensorflow.placeholder
2,837
import tensorflow as tf ------- A tuple length of 3, (normalized_tensor, mean, variance). """ mean, var = tf.nn.moments( x, reduction_axes, shift=None, name=None, keep_dims=False) if sorted(reduction_axes) == range(ndim(x))[:-1]:
tensorflow.nn.moments
2,838
import tensorflow as tf return a elif type(a) == list: if type(a[0]) == tf.Tensor: return tf.stack(a, 0) else: return tf.constant(a, dtype) else: print(type(a)) return tf.constant(a, dtype)
tensorflow.constant
2,839
import tensorflow as tf # q network evaluation q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/q_func") # target q network evalution q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func") target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/target_q_func") # q scores for actions which we know were selected in the given state. q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1) # compute estimate of best possible value starting from state at t + 1 if double_q: q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True) q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1) q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1) else: q_tp1_best = tf.reduce_max(q_tp1, 1) q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best # compute RHS of bellman equation q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked # compute the error (potentially clipped) td_error = q_t_selected - tf.stop_gradient(q_t_selected_target) errors = U.huber_loss(td_error) weighted_error = tf.reduce_mean(importance_weights_ph * errors) # compute optimization op (potentially with gradient clipping)
tensorflow.argmax
2,840
import tensorflow as tf else self.config['eval_batch_size'] shards = {d: tf.unstack(v, num=batch_size*self.n_gpus, axis=0)
tensorflow.unstack
2,841
import tensorflow as tf with tf.variable_scope(scope, 'InceptionV1', [inputs]):
tensorflow.variable_scope
2,842
import tensorflow as tf # Compute sparse softmax cross entropy loss from logits & labels log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=classes) loss = tf.reduce_mean(log_probs) self._mark_for_monitoring('loss', loss) # Add regularization loss reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) reg_loss = reg_decay * tf.add_n(reg_losses) self._mark_for_monitoring('reg_loss', reg_loss) # Add loss from auxiliary logits aux_loss = tf.constant(0, dtype=tf.float32) for aux_logits in aux_logits_list: log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=aux_logits, labels=classes) aux_loss += aux_loss_mul * tf.reduce_mean(log_probs) total_loss = loss + reg_loss + aux_loss return total_loss def _add_global_avg_pool(self, X, in_w, in_h, in_ch): X = tf.nn.relu(X) X = tf.reduce_mean(X, (1, 2))
tensorflow.constant
2,843
import tensorflow as tf # strides = np.asarray(self.pool_strides) # strides[1:] *= len(self.ff_conv_k) # kernels = np.asarray(self.pooling_kernel) # kernels[1:] *= len(self.ff_conv_k) # return tf.layers.conv3d_transpose( # inputs=x, # strides=strides, # padding=self.padding, # filters=y_size[-1], # kernel_size=kernels, # trainable=self.train, # use_bias=use_bias, # activation=self.ff_nl) resized = tf.nn.conv3d_transpose( value=x, filter=kernel, output_shape=y_size, strides=[1] + strides + [1], padding=self.padding, name='resize_x_to_y') resized = tf.nn.bias_add( resized, bias) resized = self.ff_nl(resized) return resized elif mode == 'replicate_n_transpose':
tensorflow.nn.conv3d_transpose
2,844
from tensorflow.python.ops import array_ops Args: x: `Tensor`. name: `String`. The name to give this op. Returns: sample_shape: `Tensor` (1D, `int32`). batch_shape: `Tensor` (1D, `int32`). event_shape: `Tensor` (1D, `int32`). """ with self._name_scope(name, values=[x]): x = ops.convert_to_tensor(x, name="x") def slice_shape(start_sum, size, name): """Closure to slice out shape.""" start_sum = start_sum if start_sum else ( array_ops.zeros((), dtype=dtypes.int32, name="zero"),) if (x.get_shape().ndims is not None and self._is_all_constant_helper(size, *start_sum)): start = sum(tensor_util.constant_value(s) for s in start_sum) stop = start + tensor_util.constant_value(size) slice_ = x.get_shape()[start:stop].as_list() if all(s is not None for s in slice_): return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name) # Fall-through intended. return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,)) sample_ndims = self.get_sample_ndims(x, name=name) return (slice_shape((), sample_ndims, name="sample_shape"), slice_shape((sample_ndims,), self.batch_ndims,
tensorflow.python.ops.array_ops.zeros
2,845
import tensorflow as tf reader = tf.TFRecordReader # Features in Pascal VOC TFRecords. keys_to_features = { 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/height': tf.FixedLenFeature([1], tf.int64), 'image/width': tf.FixedLenFeature([1], tf.int64), 'image/channels': tf.FixedLenFeature([1], tf.int64), 'image/shape': tf.FixedLenFeature([3], tf.int64), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64), 'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64), 'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64), } items_to_handlers = { 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), 'shape': slim.tfexample_decoder.Tensor('image/shape'), 'object/bbox': slim.tfexample_decoder.BoundingBox( ['xmin', 'ymin', 'xmax', 'ymax'], 'image/object/bbox/'), 'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'), 'object/difficult': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'), 'object/truncated': slim.tfexample_decoder.Tensor('image/object/bbox/truncated'), } decoder = slim.tfexample_decoder.TFExampleDecoder( keys_to_features, items_to_handlers)
tensorflow.VarLenFeature
2,846
import tensorflow as tf # # c = tf.constant('haHa') # print(sess.run(c)) # # sess.close() identity_matrix = tf.diag([1.0, 3.0, 1.0]) A = tf.truncated_normal([2, 3]) B = tf.fill([2, 3], 5.0) C = tf.random_uniform([3, 2], maxval=100) D = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]])) sess = tf.Session() # sess.run(tf.global_variables_initializer())
tensorflow.diag
2,847
import tensorflow as tf initializer=tf.initializers.zeros(), shape=[num_classes, num_choices], dtype=tf.float32) dist = tfp.distributions.Categorical(logits=logits) dist_entropy = tf.reduce_sum(dist.entropy()) sample = dist.sample() sample_masks = 1. * tf.cast(sample, tf.float32) / num_choices sample_log_prob = tf.reduce_mean(dist.log_prob(sample)) return (dist_entropy, sample_masks, sample_log_prob) def get_loss_weights(name=None):
tensorflow.cast
2,848
import tensorflow as tf # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts)) if return_alphas: return output, scores return output class VecAttGRUCell(RNNCell): """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078). Args:
tensorflow.shape
2,849
import tensorflow as tf labels_adv_all = np.array([]) labels_true_all = np.array([]) labels_nor_all = np.array([]) L2_distance = np.array([]) nor_img_all = np.reshape(np.array([]), (0, image_size,image_size,num_channel)) adv_img_all = np.reshape(np.array([]), (0, image_size,image_size,num_channel)) print('Num of sample per eps is %d' % (num_sample)) #Construct carlini adversarial samples model_carlini_adv = models_carlini(hps) #Construct predictions image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size, num_channel])############MNIST and CIFAR10 are different ar here adv_image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size, num_channel])############MNIST and CIFAR10 are different ar here predict = tf.placeholder(tf.float32,shape=[hps.batch_size, 10]) logit_nor,tsne_logit_nor = model_carlini_adv.predict(image,tsne_logits=True) logit_adv,tsne_logit_adv = model_carlini_adv.predict(adv_image,tsne_logits=True) predict_nor = tf.nn.softmax(logit_nor) predict_adv = tf.nn.softmax(logit_adv) # Calculate entropy argmax_y_onehot = tf.one_hot(tf.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1) normalized_y_nonmaximal = tf.reduce_sum(predict * argmax_y_onehot, 1) entropy = tf.reduce_sum(-tf.log(predict) * predict * argmax_y_onehot,1) / normalized_y_nonmaximal + tf.log(normalized_y_nonmaximal) for k in range(1): result_dict = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_for_attack_' + f1 + '.mat')
tensorflow.placeholder
2,850
import tensorflow as tf config.allow_soft_placement = True sess_soft = tf.Session(config=config) # 4. When using the CPU, TensorFlow by default takes up most of the CPU memory. Although this is often what we want, we can allocate GPU memory more carefully. When TensorFlow keeps holding GPU memory, we can, if necessary, set the GPU memory growth option so that GPU memory allocation grows slowly up to the maximum limit config.gpu_options.allow_growth = True sess_grow = tf.Session(config=config) # 5. To hard-limit the percentage of GPU memory that TensorFlow may use, set per_process_gpu_memory_fraction in the config config.gpu_options.per_process_gpu_memory_fraction = 0.4 sess_limited = tf.Session(config=config) # 6. Sometimes we want the code to be robust enough to decide how many GPUs are appropriate to run on. TensorFlow has a built-in function to detect this. It is useful when we want the code to exploit GPU compute whenever GPU memory allows, and to assign specific operations to the GPU if tf.test.is_built_with_cuda(): pass # 7. We want to assign specific operations to the GPU. The example below performs some simple computations and assigns them to the main CPU and two secondary GPUs with tf.device('/cpu:0'): a = tf.constant([1.0, 3.0, 5.0], shape=[1,3]) b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1]) with tf.device('/gpu:0'): c = tf.matmul(a,b) c = tf.reshape(c, [-1]) with tf.device('/gpu:1'):
tensorflow.test.is_built_with_cuda
2,851
import tensorflow as tf axis: A list of integer. Axes to compute the mean. keepdims: A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is True, the reduced dimensions are retained with length 1. Returns ------- A tensor with the mean of elements of x. """ axis = _normalize_axis(axis, get_ndim(x)) if x.dtype.base_dtype == tf.bool: x = tf.cast(x, tf.float32) return tf.reduce_mean(x, axis=axis, keep_dims=keepdims) def dot(x, y): """Multiplies 2 tensors (and/or variables) and returns a *tensor*. When attempting to multiply a ND tensor with a ND tensor, it reproduces the Theano behavior. (e.g. (2, 3).(4, 3, 5) = (2, 4, 5)) Parameters ----------
tensorflow.cast
2,852
import tensorflow as tf # gradients (in case of explosion) return tf.summary.merge_all() def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss, loss): values = [ tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss", simple_value=before_loss), tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss", simple_value=after_loss), tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss", simple_value=stop_token_loss), tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss", simple_value=loss), ] if linear_loss is not None: values.append(tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss", simple_value=linear_loss))
tensorflow.Summary.Value
2,853
import tensorflow as tf return tf.reshape(tf.stack(values=h, axis=1), [-1]) def lstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] with tf.variable_scope(scope): wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = tf.matmul(x, wx) + tf.matmul(h, wh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u) c = f*c + i*u h = o*tf.tanh(c) xs[idx] = h s = tf.concat(axis=1, values=[c, h]) return xs, s def _ln(x, g, b, e=1e-5, axes=[1]): u, s = tf.nn.moments(x, axes=axes, keep_dims=True) x = (x-u)/tf.sqrt(s+e) x = x*g+b return x
tensorflow.nn.sigmoid
2,854
import tensorflow as tf # For printing layers shape self.training_end_points = self.end_points_D self.training_end_points.update(self.end_points_G) tf.summary.histogram("d", self.end_points_D['D_on_data']) tf.summary.histogram("d_", self.end_points_D['D_on_G']) tf.summary.image("G", G) d_label_smooth = self.cnf['d_label_smooth'] # 0.25 self.d_loss_real = self._sigmoid_kl_with_logits(self.end_points_D['D_on_data_logits'], 1. - d_label_smooth) class_loss_weight = 1. self.d_loss_class = class_loss_weight * tf.nn.sparse_softmax_cross_entropy_with_logits( logits=self.end_points_D['class_logits'], labels=tf.to_int64(targets)) self.test_loss = 1. - \ tf.reduce_mean(tf.to_float(tf.nn.in_top_k( self.end_points_D_val['logits'], targets, 1))) self.error_rate = 1. - \ tf.reduce_mean(tf.to_float(tf.nn.in_top_k( self.end_points_D['class_logits'], targets, 1))) if gpu_idx == 0: update = tf.assign(num_error_rate, num_error_rate + 1.) with tf.control_dependencies([update]): tc = tf.maximum(.01, 1. / num_error_rate) update = tf.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate) with tf.control_dependencies([update]): self.d_loss_class = tf.identity(self.d_loss_class) self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits( logits=self.end_points_D['D_on_G_logits'], labels=tf.zeros_like(self.end_points_D['D_on_G_logits']))
tensorflow.nn.in_top_k
2,855
import tensorflow as tf channels = tf.unstack(image, axis=-1) image = tf.stack([channels[2], channels[1], channels[0]], axis=-1) # dims for normalization width = tf.to_float(tf.shape(image)[2]) height = tf.to_float(tf.shape(image)[1]) # from [x1, y1, x2, y2, cls] to normalized [y1, x1, y1, x1] cols = tf.unstack(boxes, axis=1) boxes = tf.stack([cols[1] / height, cols[0] / width, cols[3] / height, cols[2] / width], axis=1) # add batch dimension (assume batch_size==1) #assert image.get_shape()[0] == 1 boxes = tf.expand_dims(boxes, dim=0) image = tf.image.draw_bounding_boxes(image, boxes) # draw the ground-truth boxes on the image return tf.summary.image('ground_truth', image) def _add_act_summary(self, tensor): tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor) tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction', tf.nn.zero_fraction(tensor)) def _add_score_summary(self, key, tensor): tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor) def _add_train_summary(self, var): tf.summary.histogram('TRAIN/' + var.op.name, var)
tensorflow.image.draw_bounding_boxes
2,856
import tensorflow as tf return X def conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0, dilation=1): assert padding in ["SAME", "VALID", "REFLECT", "PARTIAL"], 'valid paddings: "SAME", "VALID", "REFLECT", "PARTIAL"' if type(size) == int: size = [size, size] if init_stddev <= 0.0: init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32) else: init = tf.truncated_normal_initializer(stddev=init_stddev) if padding == "PARTIAL": with tf.variable_scope('mask'): _, h, w, _ = input.get_shape().as_list() slide_window = size[0] * size[1] mask = tf.ones(shape=[1, h, w, 1]) update_mask = tf.layers.conv2d(mask, filters=1, dilation_rate=(dilation, dilation), name='mask' + id,
tensorflow.truncated_normal_initializer
2,857
import tensorflow as tf logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss)
tensorflow.one_hot
2,858
from tensorflow.python.ops import math_ops @distribution_util.AppendDocstring( """Note: when `rate` is an integer, there are actually two modes: `rate` and `rate - 1`. In this case we return the larger, i.e., `rate`.""") def _mode(self): return math_ops.floor(self.rate) def _assert_valid_sample(self, x, check_integer=True): if not self.validate_args:
tensorflow.python.ops.math_ops.floor
2,859
import tensorflow as tf width = shape[2] channels = shape[3] res = tf.reshape(input_, [batch_size, height, 1, width, 1, channels]) res = tf.concat(
tensorflow.reshape
2,860
from tensorflow.python.framework import op_def_registry as _op_def_registry """ result = _op_def_lib.apply_op("UnpackPath", path=path, path_values=path_values, name=name) return result def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "HardRoutingFunction" input_arg {
tensorflow.python.framework.op_def_registry.register_op_list
2,861
import tensorflow as tf """ with tf.name_scope(name): inputs_shape = list(map(int, inputs.get_shape())) predictions_shape = list(map(int, predictions.get_shape())) nr_mix = int(predictions_shape[-1] / 10) logit_probs = predictions[:, :, :, :nr_mix] predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3]) means = predictions[:, :, :, :, :nr_mix] log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.) coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix]) inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix]) m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :], [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]) m3 = tf.reshape( means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] + coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :],
tensorflow.maximum
2,862
import tensorflow as tf nor_img_all = np.reshape(np.array([]), (0, image_size,image_size,num_channel)) adv_img_all = np.reshape(np.array([]), (0, image_size,image_size,num_channel)) print('Num of sample per eps is %d' % (num_sample)) #Construct carlini adversarial samples model_carlini_adv = models_carlini(hps) #Construct predictions image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size, num_channel])############MNIST and CIFAR10 are different ar here adv_image = tf.placeholder(tf.float32,shape=[hps.batch_size, image_size, image_size, num_channel])############MNIST and CIFAR10 are different ar here predict = tf.placeholder(tf.float32,shape=[hps.batch_size, 10]) logit_nor,tsne_logit_nor = model_carlini_adv.predict(image,tsne_logits=True) logit_adv,tsne_logit_adv = model_carlini_adv.predict(adv_image,tsne_logits=True) predict_nor = tf.nn.softmax(logit_nor) predict_adv = tf.nn.softmax(logit_adv) # Calculate entropy argmax_y_onehot = tf.one_hot(tf.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1) normalized_y_nonmaximal = tf.reduce_sum(predict * argmax_y_onehot, 1) entropy = tf.reduce_sum(-tf.log(predict) * predict * argmax_y_onehot,1) / normalized_y_nonmaximal + tf.log(normalized_y_nonmaximal) for k in range(1): result_dict = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_for_attack_' + f1 + '.mat')
tensorflow.placeholder
2,863
import tensorflow as tf init_scale=np.sqrt(2))) c3 = tf.nn.relu(self.conv(c2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))) nh = np.prod([v.value for v in c3.get_shape()[1:]]) h3 = tf.reshape(c3, [-1, nh]) pre_s = tf.nn.relu(self.fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) l1 = tf.layers.dense(inputs=pre_s, units=200, # number of hidden units activation=tf.nn.relu, name='l1', trainable=trainable ) mu = 2 * tf.layers.dense(inputs=l1, units=action_dim, # number of hidden units activation=tf.nn.tanh, name='mu', trainable=trainable ) sigma = tf.layers.dense(inputs=l1,
tensorflow.layers.dense
2,864
import tensorflow as tf if scale < 0.: raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) if scale == 0.: return lambda _: None def l1(weights, name='l1_regularizer'): """Applies L1 regularization to weights.""" with tf.name_scope(name): my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') return tf.multiply(my_scale, tf.reduce_sum(tf.abs(weights)), name=name) return l1 def l2_regularizer(scale, name='l2_regularizer'): """Returns a function that can be used to apply L2 regularization to weights.
tensorflow.convert_to_tensor
2,865
import tensorflow as tf num = (1 - self.alpha) * dxt + tf.tensordot(self.alpha * dxt , tf.transpose( tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity,self.Dale_rec)), axes=1) * \ tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt)) denom = dxt # sum over hidden units num = tf.reduce_sum(tf.square(num), axis=2) denom = tf.reduce_sum(tf.square(denom), axis=2) bounded = tf.where(tf.greater(denom, 1e-20), tf.div(num, 1.0 * denom), tf.ones_like(num)) nelems = tf.reduce_mean(tf.where(tf.greater(denom, 1e-20), 1.0 * tf.ones_like(num), 1.0 * tf.zeros_like(num)), axis=1) # sum mean over each batch by time steps Omega = tf.square(bounded - 1.0) Omega = tf.reduce_sum(tf.reduce_mean(Omega, axis=1)) / (1.0 * tf.reduce_sum(nelems)) out = tf.gradients(Omega, self.W_rec) out[0] = tf.Print(out[0], [out[0], self.W_rec, Omega], "omega grads") out[0] = tf.verify_tensor_all_finite(out[0], "dead omega grad") return out, test
tensorflow.greater
2,866
import tensorflow as tf @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def c4_preprocess(dataset, training, max_target_length=-1, tokenization=None, spm_path=None): """Pre-processing function for C4 dataset.""" del training def unicode_decode_chars(features, targets): targets = tf.strings.unicode_decode(features['text'], 'UTF-8') targets = tf.cast(targets, tf.int64) features['targets'] = targets features['inputs'] = targets return (features, targets) def spc_tokenize(tokenizer, features, targets): del targets tokenized_text = tokenizer.tokenize(features['text']) features['targets'] = tf.cast(tokenized_text, tf.int64)
tensorflow.strings.unicode_decode
2,867
import tensorflow as tf print("-" * 20) self.layers = [] # Temp(First) Conv Layer with tf.variable_scope("temp_conv") as scope: filter_shape = [3, embedding_size, 4, 64] W = tf.get_variable(name='W_1', shape=filter_shape, initializer=he_normal, regularizer=regularizer) paddings = [[0, 0], [1, 1], [0, 0], [0, 0]] cnn_inputs = tf.pad(cnn_inputs, paddings, "CONSTANT") #print("cnn_inputs shape:", cnn_inputs.shape) inputs = tf.nn.conv2d(cnn_inputs, W, strides=[1, 1, 1, 1], padding="VALID", name="first_conv") inputs = tf.layers.batch_normalization(inputs, axis=-1, training=self.is_training) inputs = tf.nn.relu(inputs, name="first_relu") #print("temp cnn output shape:", inputs.shape) inputs = tf.squeeze(inputs, axis=2) #print("squeeze shape", inputs.shape) #inputs = tf.nn.relu(inputs) print("Temp Conv", inputs.get_shape()) self.layers.append(inputs)
tensorflow.pad
2,868
import tensorflow as tf pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp)) pos = tf.floor(encoder_input_length * pos) pos = tf.reshape(pos, [-1, 1]) pos = tf.minimum(pos, encoder_input_length - 1) idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
tensorflow.minimum
2,869
import tensorflow as tf true_fn=lambda: (vz_keys, tf.add(vz.lookup(u), 1)), false_fn=lambda: ( tf.concat([vz_keys, tf.reshape(u, (-1, 1))], axis=0), tf.constant(1, dtype=tf.int64)) ) vz.insert(u, r) kk = tf.Variable(0, dtype=tf.int64) for i in tf.range(start=0, limit=tf.size(vx_keys), delta=1, dtype=None, name='range'): for j in tf.range(start=0, limit=tf.size(vz_keys), delta=1, dtype=None, name='range'): to_add = tf.cond( tf.greater(vz.lookup(vx_keys[i]), -1), true_fn=lambda: tf.math.multiply(vx.lookup(vx_keys[i]), vz.lookup(vz_keys[j])), false_fn=lambda: tf.constant(0, dtype=tf.int64) )
tensorflow.size
2,870
import tensorflow as tf else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold():
tensorflow.trainable_variables
2,871
from tensorflow.python.training import training as train OPTIMIZER_CLS_NAMES = { "Adagrad": train.AdagradOptimizer, "Adam": train.AdamOptimizer, "Ftrl": train.FtrlOptimizer, "Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), # pylint: disable=line-too-long "RMSProp": train.RMSPropOptimizer, "SGD": train.GradientDescentOptimizer, }
tensorflow.python.training.training.MomentumOptimizer
2,872
import tensorflow as tf labels: Labels tensor, int32 - [batch_size]. Returns: loss: Loss tensor of type float. """ labels = tf.to_int64(labels) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits, labels, name='xentropy') loss = tf.reduce_mean(cross_entropy, name='xentropy_mean') return loss def train(loss, global_step): """Train eccentricity model. Create an optimizer and apply to all trainable variables.
tensorflow.reduce_mean
2,873
import tensorflow as tf def variable(self, name, shape, initializer,regularizer=None): with tf.device('/cpu:0'):
tensorflow.device
2,874
import tensorflow as tf # tf.greater(n_negtives, 0), # tf.divide(tf.cast(n_neg_to_select, tf.float32), n_negtives), # tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)), # name='rand_select_negtive') # include both selected negtive and all positive examples final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask)) total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32)) # add mask for glabels and cls_pred here glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask)) cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
tensorflow.logical_and
2,875
import tensorflow as tf self.path = path self.tol = tol def update(self, labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs, predicted_classes, predicted_poses): """Update.""" if labeled_sdfs or labeled_classes: print(labeled_sdfs) mean_x = tf.reduce_mean(labeled_poses[1][:, 0]) mean_z = tf.reduce_mean(labeled_poses[1][:, 2]) samples_world = grid.generate( (mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5), [self.resolution, self.resolution, self.resolution]) samples_world = tf.reshape(samples_world, [-1, 3]) status = False if status: _, axs = plt.subplots(3, 3) fig_obj_count = 0 # Do the same for the ground truth and predictions num_collisions = 0 prev_intersection = 0 sdf_values = tf.zeros_like(samples_world)[:, 0:1] for classes, sdfs, poses in [(predicted_classes, predicted_sdfs,
tensorflow.reshape
2,876
import tensorflow as tf output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.gfile.GFile(output_eval_file, "w") as writer:
tensorflow.gfile.GFile
2,877
import tensorflow as tf # values: [batch_size, step_size, vocab_size] # answers: [batch_size, step_size] def _mask_and_accuracy(values, answers, loss_weights): values = tf.argmax(values,axis=2) x = tf.cast(values, dtype=tf.int32) y = tf.cast(answers, dtype=tf.int32)
tensorflow.argmax
2,878
import tensorflow as tf INCREASE_GS = GLOBAL_STEP.assign(tf.add(GLOBAL_STEP, 1)) LR_A = tf.train.exponential_decay(LR_A, GLOBAL_STEP, 10000, .97, staircase=True) LR_C = tf.train.exponential_decay(LR_C, GLOBAL_STEP, 10000, .97, staircase=True) END_POINT = (200 - 10) * (14/30) # from game env = gym.make(ENV_NAME) env.seed(1) STATE_DIM = env.observation_space.shape[0] # 24 ACTION_DIM = env.action_space.shape[0] # 4 ACTION_BOUND = env.action_space.high # [1, 1, 1, 1] # all placeholder for tf with tf.name_scope('S'): S = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s') with tf.name_scope('R'): R = tf.placeholder(tf.float32, [None, 1], name='r') with tf.name_scope('S_'): S_ = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s_') ############################### Actor #################################### class Actor(object): def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter): self.sess = sess self.a_dim = action_dim self.action_bound = action_bound self.lr = learning_rate
tensorflow.placeholder
2,879
import tensorflow as tf output = tf.nn.dropout(input_tensor, rate=dropout_prob)
tensorflow.nn.dropout
2,880
import tensorflow as tf step=global_step) tf.contrib.summary.scalar( 'rpn_score_loss', tf.reduce_mean(rpn_score_loss), step=global_step)
tensorflow.reduce_mean
2,881
import tensorflow as tf # during inference, compute the end logits based on beam search start_top_log_probs, start_top_index = tf.nn.top_k( start_log_probs, k=FLAGS.start_n_top)
tensorflow.nn.top_k
2,882
import tensorflow as tf return x - 1 minus_one(tf.identity(v0)) save = tf.train.Saver({"v0": v0}) tf.initialize_all_variables()
tensorflow.train.Saver
2,883
from tensorflow.python.framework import ops Returns: A `Tensor` with the same type as `value`. """ with ops.op_scope([value, bias], name, "BiasAddV1") as name: value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") return gen_nn_ops._bias_add_v1(value, bias, name=name) ops.RegisterShape("BiasAddV1")(common_shapes.bias_add_shape) ops.RegisterShape("BiasAddGradV1")(common_shapes.bias_add_grad_shape) def relu6(features, name=None): """Computes Rectified Linear 6: `min(max(features, 0), 6)`.
tensorflow.python.framework.ops.RegisterShape
2,884
import tensorflow as tf with tf.Session() as sess: self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20)) @tf.contrib.eager.run_test_in_graph_and_eager_modes() def testProblemHparamsModality(self): problem = problem_hparams.TestProblem(input_vocab_size=2, target_vocab_size=3) p_hparams = problem.get_hparams() self.assertIsInstance(p_hparams.modality["inputs"], modalities.SymbolModality) self.assertIsInstance(p_hparams.modality["targets"], modalities.SymbolModality) @tf.contrib.eager.run_test_in_graph_and_eager_modes() def testProblemHparamsModalityObj(self): class ModalityObjProblem(problem_module.Problem): def hparams(self, defaults, model_hparams): hp = defaults hp.modality = {"inputs": modalities.SymbolModality, "targets": modalities.SymbolModality} hp.vocab_size = {"inputs": 2, "targets": 3} problem = ModalityObjProblem(False, False) p_hparams = problem.get_hparams()
tensorflow.contrib.eager.run_test_in_graph_and_eager_modes
2,885
import tensorflow as tf tie_weight=True, bi_data=run_config.bi_data, use_tpu=run_config.use_tpu) #### Quantity to monitor monitor_dict = {} if FLAGS.use_bfloat16: tgt_mask = tf.cast(tgt_mask, tf.float32) lm_loss = tf.cast(lm_loss, tf.float32) total_loss = tf.reduce_sum(lm_loss * tgt_mask) / tf.reduce_sum(tgt_mask) monitor_dict["total_loss"] = total_loss return total_loss, new_mems, monitor_dict def get_loss(FLAGS, features, labels, mems, is_training):
tensorflow.cast
2,886
import tensorflow as tf """ def _setup_placeholders(self): if self.demo: self.c = tf.placeholder(tf.int32, [None, self.config.max_p_len], "context") self.q = tf.placeholder(tf.int32, [None, self.config.max_q_len], "question") self.ch = tf.placeholder(tf.int32, [None, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = tf.placeholder(tf.int32, [None, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = tf.placeholder(tf.int32, [None], "answer_label1") self.end_label = tf.placeholder(tf.int32, [None], "answer_label2") else: self.c = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len], "context") self.q = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len], "question") self.ch = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len, self.config.max_ch_len], "question_char")
tensorflow.placeholder
2,887
import tensorflow as tf with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 cell = tf.nn.rnn_cell.OutputProjectionWrapper( tf.nn.rnn_cell.GRUCell(2), 4) dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape)
tensorflow.nn.seq2seq.basic_rnn_seq2seq
2,888
import tensorflow as tf self._mark_for_monitoring('reg_loss', reg_loss) # Add loss from auxiliary logits aux_loss = tf.constant(0, dtype=tf.float32) for aux_logits in aux_logits_list: log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=aux_logits, labels=classes) aux_loss += aux_loss_mul * tf.reduce_mean(log_probs) total_loss = loss + reg_loss + aux_loss return total_loss def _add_global_avg_pool(self, X, in_w, in_h, in_ch): X = tf.nn.relu(X) X = tf.reduce_mean(X, (1, 2)) X = tf.reshape(X, (-1, in_ch)) # Sanity shape check return X def _count_model_parameters(self): tf_trainable_vars = tf.trainable_variables() num_params = 0 # utils.logger.log('Model parameters:') for var in tf_trainable_vars: # utils.logger.log(str(var)) num_params += np.prod([dim.value for dim in var.get_shape()]) utils.logger.log('Model has {} parameters'.format(num_params)) return num_params def _add_vars_assign_op(self, vars):
tensorflow.reshape
2,889
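For reference, a minimal sketch of tf.reshape, including the -1 wildcard used for the flatten above:

import tensorflow as tf

x = tf.range(12)                   # shape (12,)
grid = tf.reshape(x, (3, 4))       # explicit target shape
flat = tf.reshape(grid, (-1,))     # -1 lets TF infer the remaining dimension
rows = tf.reshape(grid, (-1, 4))   # e.g. flatten everything but the last axis

with tf.Session() as sess:
    print(sess.run(grid).shape, sess.run(flat).shape, sess.run(rows).shape)   # (3, 4) (12,) (3, 4)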
import tensorflow as tf

      d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
          enc_inp, dec_inp, cell, num_encoder_symbols=2,
          num_decoder_symbols=5, embedding_size=2, feed_previous=True)
      d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
          enc_inp, dec_inp2, cell, num_encoder_symbols=2,
          num_decoder_symbols=5, embedding_size=2, feed_previous=True)
      res1 = sess.run(d1)
      res2 = sess.run(d2)
      res3 = sess.run(d3)
      self.assertAllClose(res1, res2)
      self.assertAllClose(res1, res3)

  def testOne2ManyRNNSeq2Seq(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
        dec_inp_dict = {}
        dec_inp_dict["0"] = [
            tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
        dec_inp_dict["1"] = [
            tf.constant(i, tf.int32, shape=[2]) for i in range(4)]
        dec_symbols_dict = {"0": 5, "1": 6}
        cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
        outputs_dict, state_dict = tf.nn.seq2seq.one2many_rnn_seq2seq(
            enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2)
        sess.run([tf.global_variables_initializer()])
        res = sess.run(outputs_dict["0"])
        self.assertEqual(3, len(res))
        self.assertEqual((2, 5), res[0].shape)
tensorflow.constant
2,890
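For reference, a minimal sketch of tf.constant showing dtype inference, an explicit dtype, and the scalar-fill form used in the test above:

import tensorflow as tf

scalar = tf.constant(7)                              # dtype inferred as int32
ids = tf.constant([i for i in range(3)], tf.int32)   # explicit dtype, list value
filled = tf.constant(1, tf.int32, shape=[2])         # scalar broadcast into shape [2]

with tf.Session() as sess:
    print(sess.run([scalar, ids, filled]))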
import tensorflow as tf

        self.saver.restore(self.sess, path)

    def close(self):
        self.sess.close()

    def create_log_file(self, filename):
        self.log_file = filename
        f = open(self.log_file, 'w')
        f.close()


def weight_variable(shape):
    return tf.get_variable('W', shape, initializer=tf.random_normal_initializer(0., 0.02))


def bias_variable(shape):
    return tf.get_variable('b', shape, initializer=tf.constant_initializer(0.))


def keep_prob(dropout, train):
    return tf.cond(train, lambda: tf.constant(dropout), lambda: tf.constant(1.))


def softmax_ce_with_logits(logits, labels):
    return tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)


def sigmoid_ce_with_logits(logits, labels):
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)


def sigmoid_kl_with_logits(logits, targets):
    assert isinstance(targets, float)
    if targets in [0., 1.]:
tensorflow.constant_initializer
2,891
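For reference, a minimal sketch of tf.constant_initializer with tf.get_variable, mirroring the bias helper above:

import tensorflow as tf

with tf.variable_scope("layer"):
    b = tf.get_variable('b', [4], initializer=tf.constant_initializer(0.))
    W = tf.get_variable('W', [3, 4], initializer=tf.constant_initializer(0.1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b))       # [0. 0. 0. 0.]
    print(sess.run(W)[0])    # [0.1 0.1 0.1 0.1]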
import tensorflow as tf

        opt = tf.train.RMSPropOptimizer(learning_rate, FLAGS.rmsprop_decay,
                                        momentum=FLAGS.rmsprop_momentum,
                                        epsilon=FLAGS.rmsprop_epsilon)
      else:
        raise ValueError('Optimizer "%s" was not recognized', FLAGS.optimizer)
      self.variable_mgr.append_apply_gradients_ops(
          gradient_state, opt, clipped_grads, training_ops)
    train_op = tf.group(*(training_ops + update_ops + extra_nccl_ops))

    with tf.device(self.cpu_device):
      if self.task_index == 0 and FLAGS.summary_verbosity > 0:
        tf.summary.scalar('learning_rate', learning_rate)
        tf.summary.scalar('total_loss', total_loss)
        for grad, var in avg_grads:
          if grad is not None:
tensorflow.group
2,892
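For reference, a minimal sketch of tf.group bundling several update ops into a single training-style op; the variables are illustrative:

import tensorflow as tf

step = tf.Variable(0, name="step")
total = tf.Variable(0.0, name="total")
inc_step = tf.assign_add(step, 1)
add_total = tf.assign_add(total, 2.5)
train_op = tf.group(inc_step, add_total)    # runs both updates, returns no value

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run([step, total]))           # [1, 2.5]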
import tensorflow as tf

            self.embedding_W = tf.get_variable(name='lookup_W', shape=[num_quantized_chars, embedding_size],
                                               initializer=tf.contrib.layers.variance_scaling_initializer())
        else:
            self.embedding_W = tf.Variable(tf.random_uniform([num_quantized_chars, embedding_size], -1.0, 1.0),
                                           name="embedding_W")

        self.embedded_characters = tf.nn.embedding_lookup(self.embedding_W, self.input_x)
        embedded_text_expand = tf.expand_dims(self.embedded_characters, -1)
tensorflow.random_uniform
2,893
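For reference, a minimal sketch of tf.random_uniform used as an embedding initializer, in the spirit of the snippet above; the sizes are illustrative:

import tensorflow as tf

vocab_size, embedding_size = 10, 4
embedding_W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                          name="embedding_W")
noise = tf.random_uniform([2, 2], minval=0.0, maxval=5.0, seed=42)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    emb, n = sess.run([embedding_W, noise])
    print(emb.shape, n.min() >= 0.0, n.max() < 5.0)   # (10, 4) True True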
import tensorflow as tf

        if t % LOG_EVERY_N_STEPS == 0 and model_initialized:
            print("Timestep %d" % (t,))
            print("mean reward (100 episodes) %f" % mean_episode_reward)
            print("best mean reward %f" % best_mean_episode_reward)
            print("episodes %d" % len(episode_rewards))
            print("exploration %f" % exploration.value(t))
            print("learning_rate %f" % optimizer_spec.lr_schedule.value(t))
            mean_rew_summ = tf.Summary(value=[tf.Summary.Value(tag='mean_rew', simple_value=mean_episode_reward)])
            best_mean_rew_summ = tf.Summary(value=[tf.Summary.Value(tag='best_mean_rew', simple_value=best_mean_episode_reward)])
            writer.add_summary(mean_rew_summ, global_step=t)
            writer.add_summary(best_mean_rew_summ, global_step=t)
            sys.stdout.flush()


def gather_2d(vectors, indices):
    return tf.gather_nd(vectors, tf.stack([tf.range(tf.shape(vectors)[0]), indices], axis=1))
tensorflow.shape
2,894
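For reference, a minimal sketch of tf.shape reading a dynamic batch size at run time, the same pattern gather_2d relies on above:

import tensorflow as tf

vectors = tf.placeholder(tf.float32, [None, 3])     # batch size unknown when the graph is built
batch_size = tf.shape(vectors)[0]                   # resolved per feed
row_ids = tf.range(batch_size)

with tf.Session() as sess:
    feed = {vectors: [[1., 2., 3.], [4., 5., 6.]]}
    print(sess.run([batch_size, row_ids], feed_dict=feed))   # [2, array([0, 1])]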
import tensorflow as tf

import tensorflow as tf
from cotk.dataloader import MultiTurnDialog
from cotk.wordvector import WordVector, Glove
from utils import debug, try_cache
from model import HredModel


def create_model(sess, data, args, embed):
    with tf.variable_scope(args.name):
        model = HredModel(data, args, embed)
        model.print_parameters()

        latest_dir = '%s/checkpoint_latest' % args.model_dir
        best_dir = '%s/checkpoint_best' % args.model_dir

        if tf.train.get_checkpoint_state(latest_dir) and args.restore == "last":
            print("Reading model parameters from %s" % latest_dir)
            model.latest_saver.restore(sess, tf.train.latest_checkpoint(latest_dir))
tensorflow.variable_scope
2,895
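For reference, a minimal sketch of tf.variable_scope for naming and reusing variables; the toy layer is illustrative:

import tensorflow as tf

def dense(x, name):
    with tf.variable_scope(name):
        W = tf.get_variable('W', [3, 2])
        return tf.matmul(x, W)

x = tf.ones([1, 3])
y = dense(x, "model")                          # creates model/W
with tf.variable_scope("model", reuse=True):   # fetch the same weights again
    W_again = tf.get_variable('W')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(W_again.name, sess.run(y).shape)     # model/W:0 (1, 2)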
import tensorflow as tf

        predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network.
        labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels.
        eps: a constant to set upper or lower limit for labels, smoothening factor
        name: Optional scope/name for op_scope.

    Returns:
        A tensor with the log loss.
    """
    with tf.name_scope(name):
        predictions = tf.to_float(predictions)
        labels = tf.to_float(labels)
        predictions = tf.clip_by_value(predictions, eps, 1 - eps)
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        loss = -tf.reduce_mean(labels * tf.log(predictions))
        return loss
tensorflow.name_scope
2,896
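For reference, a minimal sketch of tf.name_scope grouping the ops of a loss under one prefix, as in the function above; the toy loss is illustrative:

import tensorflow as tf

def toy_log_loss(predictions, labels, eps=1e-7, name='log_loss'):
    with tf.name_scope(name):                     # ops below are named log_loss/...
        p = tf.clip_by_value(tf.to_float(predictions), eps, 1 - eps)
        return -tf.reduce_mean(tf.to_float(labels) * tf.log(p))

loss = toy_log_loss([[0.9, 0.1]], [[1.0, 0.0]])
print(loss.op.name)                               # starts with 'log_loss/'

with tf.Session() as sess:
    print(sess.run(loss))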
import tensorflow as tf

  try:
    config = load_config(logdir)
  except RuntimeError:
    print('Failed to load existing config.')
  except IOError:
    config = save_config(config, logdir)
  trainer = trainer_.Trainer(logdir, config=config)
  cleanups = []
  try:
    with tf.variable_scope('graph', use_resource=True):
      data = get_batch(datasets, trainer.phase, trainer.reset)
      score, summary, cleanups = model_fn(data, trainer, config)
      message = 'Graph contains {} trainable variables.'
      tf.logging.info(message.format(tools.count_weights()))
      if config.train_steps:
        trainer.add_phase(
            'train', config.train_steps, score, summary,
            batch_size=config.batch_shape[0],
tensorflow.variable_scope
2,897
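For reference, a minimal sketch of tf.variable_scope with use_resource=True, the scope option used above; the variable is illustrative:

import tensorflow as tf

with tf.variable_scope('graph', use_resource=True):
    step = tf.get_variable('step', [], dtype=tf.int64,
                           initializer=tf.zeros_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(step.name, sess.run(step))    # graph/step:0 0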
import tensorflow as tf

            logger.info('Sync warmup policy and vfn and model')
            tf.get_default_session().run([sync_warmup_policy, sync_warmup_vfn, sync_warmup_model])
            for p in warmup_policy.parameters():
                p.invalidate()
tensorflow.get_default_session
2,898
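For reference, a minimal sketch of tf.get_default_session, which returns the session installed by the enclosing with-block, the same mechanism the sync call above relies on:

import tensorflow as tf

total = tf.constant(2) + tf.constant(3)

def run_somewhere_deep_in_the_code():
    sess = tf.get_default_session()     # no session handle needs to be passed around
    return sess.run(total)

with tf.Session() as outer:             # entering the block makes `outer` the default
    assert tf.get_default_session() is outer
    print(run_somewhere_deep_in_the_code())   # 5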
import tensorflow as tf

        fvar = (
            tf.matrix_diag(tf.tile((eKff - tf.trace(Li_eKuffu_Lit))[:, None], [1, num_func])) +
            tf.matrix_diag(tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov)) +
            # tf.matrix_diag(tf.trace(tf.matmul(Li_eKuffu_Lit, cov))) +
            tf.einsum("ig,nij,jh->ngh", q_mu, Li_eKuffu_Lit, q_mu) -
            # tf.matmul(q_mu, tf.matmul(Li_eKuffu_Lit, q_mu), transpose_a=True) -
            fmean[:, :, None] * fmean[:, None, :] +
            e_related_to_mean
        )
    else:
        fvar = (
            (eKff - tf.trace(Li_eKuffu_Lit))[:, None] +
            tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) +
            tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) -
            fmean ** 2 +
            tf.matrix_diag_part(e_related_to_mean)
        )

    return fmean, fvar


# ---------------------------------------------------------------
########################## HELPERS ##############################
# ---------------------------------------------------------------
tensorflow.einsum
2,899
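For reference, a minimal sketch of tf.einsum for plain and batched contractions, in the spirit of the "nij,dji->nd"-style expressions above; the operands are illustrative:

import tensorflow as tf

A = tf.random_normal([2, 3])
B = tf.random_normal([3, 5])
AB = tf.einsum('ij,jk->ik', A, B)               # ordinary matmul

X = tf.random_normal([4, 2, 3])
Y = tf.random_normal([4, 3, 5])
XY = tf.einsum('nij,njk->nik', X, Y)            # batched matmul over the leading axis

with tf.Session() as sess:
    ab, xy = sess.run([AB, XY])
    print(ab.shape, xy.shape)                   # (2, 5) (4, 2, 5)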