Columns:
seed: string (lengths 25 to 2.89k)
seed_api: string (lengths 14 to 102)
index: int64 (0 to 14.8k)
import tensorflow as tf
            conv = tf.nn.conv2d(bottom, kernel, [1, stride, stride, 1], padding='SAME')
            biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0))
            conv_layer = tf.nn.bias_add(conv, biases)
            if bn:
                conv_layer = self.batch_norm_layer('batch_norm_layer', conv_layer, training)
            if relu:
                conv_layer = tf.nn.relu(conv_layer, name=scope.name)
        print('Conv layer {0} -> {1}'.format(bottom.get_shape().as_list(), conv_layer.get_shape().as_list()))
        return conv_layer

    def batch_norm_layer(self, name, input_tensor, training):
        with tf.variable_scope(name) as scope:
tensorflow.nn.relu
13,900
import tensorflow as tf
        l1 = tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2) + self.b2
        l2 = tf.nn.relu(l2)
        l3 = tf.matmul(l2, self.w3) + self.b3
        l3 = tf.nn.relu(l3)
        out = tf.matmul(l3, self.w4) + self.b4
        return out

    def softmax_loss(self, predicts, labels):
        predicts = tf.nn.softmax(predicts)
        labels = tf.one_hot(labels, classnum)
        loss = -tf.reduce_sum(labels * tf.log(predicts))
        return loss

    def optimer(self, loss, lr=0.001):
        train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)
        return train_step

path = r'C:\JC\test\train_model.ckpt'
image, label = getinputs(r'C:\JC\tfrecord\64_shuffle/train.tfrecords')
test_image, test_label = getinputs(r'C:\JC\tfrecord\64_shuffle/test.tfrecords')
valid_image, valid_label = getinputs(r'C:\JC\tfrecord\64_shuffle\validation.tfrecords')
batch_image, batch_label = get_batch(image, label, trainnum, 0)
work = trainwork()
inf = work.inference(batch_image)
loss = work.softmax_loss(inf, batch_label)
opti = work.optimer(loss, learnrate)
test_image_batch, test_label_batch = get_test_batch(test_image, test_label, testnum)
tensorflow.train.GradientDescentOptimizer
13,901
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

    # final convolutional layer
    # removed GOAL_SIZE
    conv4 = Conv2D(padding="valid", filters=RNN_SIZE - loc_layer_size,
                   kernel_size=[2, 2], strides=1, data_format='channels_last',
                   kernel_initializer=w_init, activation=None)(pool3)

    # FC layers
tensorflow.keras.layers.Conv2D
13,902
import tensorflow as tf
    label_id = label_map[example.label]
    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

    feature = InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        label_id=label_id,
        is_real_example=True)
    return feature
tensorflow.logging.info
13,903
from tensorflow.python.framework import ops as _ops
    return result

_ops.RegisterShape("ResourceCreateOp")(None)

_resource_initialized_op_outputs = ["initialized"]
tensorflow.python.framework.ops.RegisterShape
13,904
import tensorflow as tf
    num_topk = config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')] // 2
    gather_col = tf.nn.top_k(temp_loss, k=num_topk, sorted=True)[1]
    gather_row = tf.reshape(tf.tile(tf.reshape(tf.range(cur_batch_size), [-1, 1]), [1, num_topk]), [-1, 1])
tensorflow.nn.top_k
13,905
import tensorflow as tf
    if bias:
        biases = variable_on_cpu("biases", [dim_out], tf.constant_initializer(0.))
tensorflow.constant_initializer
13,906
import tensorflow as tf
        return forward_fn(inputs, is_train=True, data_format=self.data_format)

    def forward_eval(self, inputs):
        """Forward computation at evaluation."""
        return forward_fn(inputs, is_train=False, data_format=self.data_format)

    def calc_loss(self, labels, outputs, trainable_vars):
        """Calculate loss (and some extra evaluation metrics)."""
        loss = tf.losses.softmax_cross_entropy(labels, outputs)
        loss_filter = lambda var: 'batch_normalization' not in var.name
        loss += FLAGS.loss_w_dcy \
            * tf.add_n([tf.nn.l2_loss(var) for var in trainable_vars if loss_filter(var)])
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(outputs, axis=1)), tf.float32))
        metrics = {'accuracy': accuracy}
        return loss, metrics

    def setup_lrn_rate(self, global_step):
        """Setup the learning rate (and number of training iterations)."""
        nb_epochs = 250
        idxs_epoch = [100, 150, 200]
        decay_rates = [1.0, 0.1, 0.01, 0.001]
tensorflow.nn.l2_loss
13,907
import tensorflow as tf
    elif norm == 'B':
        X = tf.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name)
tensorflow.layers.batch_normalization
13,908
from tensorflow.contrib import layers as contrib_layers
      "epsilon": batch_norm_epsilon,
      "updates_collections": batch_norm_updates_collections,
  }
  if is_training is not None:
    batch_norm_params["is_training"] = is_training

  # Set weight_decay for weights in Conv and DepthSepConv layers.
  weights_init = tf.keras.initializers.glorot_normal()
  regularizer = contrib_layers.l2_regularizer(weight_decay)
  if regularize_depthwise:
    depthwise_regularizer = regularizer
  else:
    depthwise_regularizer = None
  with slim.arg_scope(
      [slim.conv2d, slim.separable_conv2d],
      weights_initializer=weights_init,
tensorflow.contrib.layers.l2_regularizer
13,909
from tensorflow.python.ops import rnn_cell
  seq_length = config["seq_length"]
  with ops.Graph().as_default(), ops.device("/device:GPU:0"):
    inputs = seq_length * [
        array_ops.zeros([batch_size, num_units], dtypes.float32)
    ]
    initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
    cell = rnn_cell.LSTMCell(
        num_units=num_units, initializer=initializer, state_is_tuple=True)
    multi_cell = rnn_cell.MultiRNNCell(
        [cell() for _ in range(num_layers)])
    outputs, final_state = core_rnn.static_rnn(
        multi_cell, inputs, dtype=dtypes.float32)
    trainable_variables = ops.get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES)
tensorflow.python.ops.rnn_cell.LSTMCell
13,910
import tensorflow as tf
                        stride=1,
                        reuse=reuse):
        with slim.arg_scope([batch_norm], **batch_norm_params):
            with tf.variable_scope(DECODER_SCOPE, DECODER_SCOPE, [features]):
                decoder_features = features
                decoder_stage = 0
tensorflow.variable_scope
13,911
import tensorflow as tf
    """Select a subset of features from the example dict."""
    feature_list = feature_list or ['inputs', 'targets']
    return {f: example[f] for f in feature_list if f in example}

def _eager_dataset_iterator(dataset):
    for item in dataset:
        flat = tf.nest.flatten(item)
        flat = [el.numpy() for el in flat]
        yield tf.nest.pack_sequence_as(item, flat)

def _train_and_eval_dataset_v1(problem_name, data_dir, train_shuffle_files,
                               eval_shuffle_files):
tensorflow.nest.flatten
13,912
import tensorflow as tf
        vp = get_variable('vp', [state_size, 1])
        pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
        pos = tf.floor(encoder_input_length * pos)
        pos = tf.reshape(pos, [-1, 1])
        pos = tf.minimum(pos, encoder_input_length - 1)

        idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
        idx = tf.reshape(idx, [-1, attn_length])

        low = pos - encoder.attn_window_size
        high = pos + encoder.attn_window_size

        mlow = tf.to_float(idx < low)
tensorflow.stack
13,913
import tensorflow as tf
            dtype=DTYPE)
        b = tf.get_variable(
            "b_cnn_%s" % i, [num], dtype=DTYPE,
            initializer=tf.constant_initializer(0.0))

        conv = tf.nn.conv2d(
            inp, w, strides=[1, 1, 1, 1], padding="VALID") + b
        # now max pool
        conv = tf.nn.max_pool(
            conv, [1, 1, max_chars - width + 1, 1], [1, 1, 1, 1], 'VALID')

        # activation
        conv = activation(conv)
        conv = tf.squeeze(conv, squeeze_dims=[2])
        convolutions.append(conv)
tensorflow.nn.max_pool
13,914
import tensorflow as tf
        # Episodes index
        self.episode_count = tf.get_variable(
            name='episode-count', dtype=util.tf_dtype('int'), initializer=0, trainable=False
        )

    def tf_store(self, states, internals, actions, terminal, reward):
        # Memory indices to overwrite.
        num_instances = tf.shape(input=terminal)[0]
        with tf.control_dependencies([tf.assert_less_equal(num_instances, self.capacity)]):
            indices = tf.range(self.memory_index, self.memory_index + num_instances) % self.capacity

        # Remove episode indices.
        num_episodes = tf.count_nonzero(
            input_tensor=tf.gather(params=self.terminal_memory, indices=indices),
            axis=0, dtype=util.tf_dtype('int')
        )
        num_episodes = tf.minimum(x=num_episodes, y=self.episode_count)
tensorflow.shape
13,915
import tensorflow as tf
                                  use_bias=False, name='hyper_b_final')

    # First layer
    w1 = tf.abs(tf.matmul(state, hyper_w_1))
    b1 = tf.matmul(state, hyper_b_1)
    w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer])  # reshape into batch of matrices
    b1_reshaped = tf.reshape(b1, [-1, 1, n_h_mixer])  # [batch, 1, n_h_mixer]
    hidden = tf.nn.elu(tf.matmul(agent_qs_reshaped, w1_reshaped) + b1_reshaped)

    # Second layer
    w_final = tf.abs(tf.matmul(state, hyper_w_final))
    w_final_reshaped = tf.reshape(w_final, [-1, n_h_mixer, 1])  # reshape into batch of matrices
    b_final_reshaped = tf.reshape(hyper_b_final, [-1, 1, 1])  # [batch, 1, 1]

    y = tf.matmul(hidden, w_final_reshaped) + b_final_reshaped
    q_tot = tf.reshape(y, [-1, 1])

    return q_tot

class QMix():
tensorflow.reshape
13,916
import tensorflow as tf

def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""
    name_to_features = {
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "label_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "is_real_example": tf.FixedLenFeature([1], tf.int64),
    }

    def _decode_record(record, name_to_features):
        """Decodes a record to a TensorFlow example."""
        example = tf.parse_single_example(record, name_to_features)

        # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
        # So cast all int64 to int32.
        for name in list(example.keys()):
            t = example[name]
            if t.dtype == tf.int64:
                t = tf.to_int32(t)
            example[name] = t

        return example

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]
tensorflow.parse_single_example
13,917
import tensorflow as tf
            tf.train.start_queue_runners()

            self.run_op_benchmark(
                name='batching_many_small',
                sess=session,
                op_or_tensor=op_to_benchmark,
                burn_iters=10,
                min_iters=50)

    def benchmark_batching_large(self):
        with tf.Session() as session:
            @dynamic_batching.batch_fn
            def f(a, b):
                return a + b

            outputs = []
            for _ in xrange(1000):
                outputs.append(f(tf.ones([1, 100000]), tf.ones([1, 100000])))
            op_to_benchmark = tf.group(*outputs)
tensorflow.Session
13,918
import tensorflow as tf
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
tensorflow.nn.sigmoid
13,919
import tensorflow as tf

TFRECORD_PATH = '../tfrecord/member.tfrecord'

def main():
    data_set = tf.data.TFRecordDataset(TFRECORD_PATH)
    data_set = data_set.map(parse_function)
    data_set = data_set.shuffle(buffer_size=9)
    data_set = data_set.batch(3)
tensorflow.data.TFRecordDataset
13,920
import tensorflow as tf
            self._assert_all_finite(grads[1].eval())

    def test_prob_and_grad_gives_finite_results_for_common_events(self):
        with self.test_session():
            mu = tf.Variable(0.0, name="mu")
            sigma = tf.Variable(1.0, name="sigma")
            qdist = distributions.QuantizedDistribution(
                base_dist_cls=distributions.Normal,
                mu=mu,
                sigma=sigma)
tensorflow.Variable
13,921
import tensorflow as tf
            with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
                # Calculate the loss for one tower of the CIFAR model. This function
                # constructs the entire CIFAR model but shares the variables across
                # all towers.
                loss = tower_loss(scope)

                # Reuse variables for the next tower.
                tf.get_variable_scope().reuse_variables()

                # Retain the summaries from the final tower.
                summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)

                # Calculate the gradients for the batch of data on this CIFAR tower.
                grads = opt.compute_gradients(loss)

                # Keep track of the gradients across all towers.
                tower_grads.append(grads)

    # We must calculate the mean of each gradient. Note that this is the
    # synchronization point across all towers.
tensorflow.get_collection
13,922
import tensorflow as tf
    # calculate accuracy
    correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
    print("done...")
tensorflow.cast
13,923
import tensorflow as tf

def truncate_example(x):
    for key, max_len in len_map.items():
        x_len = tf.shape(x[key])[0]
        if x_len > max_len:
            x[key] = x[key][:max_len, ...]
tensorflow.shape
13,924
import tensorflow as tf

def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    if not FLAGS.do_train and not FLAGS.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

    tf.gfile.MakeDirs(FLAGS.output_dir)

    input_files = []
    for input_pattern in FLAGS.input_file.split(","):
        input_files.extend(tf.gfile.Glob(input_pattern))

    tf.logging.info("*** Input Files ***")
    for input_file in input_files:
        tf.logging.info("  %s" % input_file)

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project
        )

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    if FLAGS.use_tpu:
        run_config = tf.contrib.tpu.RunConfig(
            cluster=tpu_cluster_resolver,
tensorflow.logging.info
13,925
import tensorflow as tf
        return fc

    def get_conv_filter(self, name):
        return tf.constant(self.data_dict[name][0], name="filter")

    def get_bias(self, name):
        return tf.constant(self.data_dict[name][1], name="biases")
tensorflow.constant
13,926
import tensorflow as tf
import gym
import os
import shutil

np.random.seed(1)
tf.set_random_seed(1)

MAX_EPISODES = 2000
LR_A = 0.0005  # learning rate for actor
LR_C = 0.0005  # learning rate for critic
tensorflow.set_random_seed
13,927
import tensorflow as tf
        self.EPS_LEN = 100000
        # GPU setup
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        config = tf.ConfigProto(allow_soft_placement=True,
                                log_device_placement=False,
                                device_count={'GPU': gpu})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        # Placeholders
        self.sess = tf.Session(config=config)
        self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
        self.a_bound = (env.action_space.high - env.action_space.low) / 2
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
        self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
        self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
        self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
        # Dataset with experience replay
        self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state,
                                                           'actions': self.actions,
                                                           'rewards': self.rewards,
                                                           'advantage': self.advantage})
        self.dataset = self.dataset.shuffle(buffer_size=10000)
        self.dataset = self.dataset.batch(self.MINIBATCH)
        self.dataset = self.dataset.cache()
        self.dataset = self.dataset.repeat(self.EPOCHS)
        self.data_iter = self.dataset.make_initializable_iterator()
tensorflow.placeholder
13,928
import tensorflow as tf
    'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
    'lr_decay_factors', '1, 0.6, 0.1',
    'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
    'checkpoint_path', './model/resnet50',  # None,
    'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
    'checkpoint_model_scope', '',
tensorflow.app.flags.DEFINE_string
13,929
import tensorflow as tf
                          tf.transpose(
                              tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec)),
                          axes=1) * \
               tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt))

        denom = dxt
tensorflow.greater
13,930
import tensorflow as tf
        self.top_layer = bn
        return bn

def loss_function(logits, labels):
    # global cross_entropy # HACK TESTING
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
tensorflow.nn.sparse_softmax_cross_entropy_with_logits
13,931
import tensorflow as tf
tf.app.flags.DEFINE_integer('loss_scale', 1024, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
tensorflow.app.flags.DEFINE_boolean
13,932
import tensorflow as tf

def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs):
    with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
        if context is not None and encoder.use_context:
            state = tf.concat([state, context], axis=1)

        e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs)

        mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32)
        e *= mask

        if encoder.attn_norm_fun == 'none':
            weights = e
        elif encoder.attn_norm_fun == 'sigmoid':
            weights = tf.nn.sigmoid(e)
        elif encoder.attn_norm_fun == 'max':
            weights = tf.one_hot(tf.argmax(e, -1), depth=tf.shape(e)[1])
        else:
            e -= tf.reduce_max(e, axis=1, keep_dims=True)
            T = encoder.attn_temperature or 1.0
            exp = tf.exp(e / T) * mask
            weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)

        weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1)

        return weighted_average, weights

def no_attention(state, hidden_states, *args, **kwargs):
    batch_size = tf.shape(state)[0]
    weighted_average = tf.zeros(shape=tf.stack([batch_size, 0]))
tensorflow.argmax
13,933
import tensorflow as tf
        X = self.conv('DZ1', X, 512, 1, 1)
        X = tf.nn.leaky_relu(X, 0.2)
        X = self.conv('DZ2', X, 512, 1, 1)
        X = tf.nn.leaky_relu(X, 0.2)
        X = self.conv('DZ3', X, 512, 1, 1)
        X = tf.nn.leaky_relu(X, 0.2)
        X = self.conv('DZ4', X, 512, 1, 1)
        X = tf.nn.leaky_relu(X, 0.2)

        X = discrim_conv('d_out', X, 1, 1, norm=False, nonlin=False, init_stddev=0.02)
        print('D out:', X.get_shape().as_list())
        return X
tensorflow.nn.leaky_relu
13,934
import tensorflow as tf
    for index in non_static_indexes:
        shape[index] = dyn_shape[index]
    return shape

def reshape_to_matrix(input_tensor):
    """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
    ndims = input_tensor.shape.ndims
    if ndims < 2:
        raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                         (input_tensor.shape))
    if ndims == 2:
        return input_tensor

    width = input_tensor.shape[-1]
    output_tensor = tf.reshape(input_tensor, [-1, width])
    return output_tensor

def reshape_from_matrix(output_tensor, orig_shape_list):
    """Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
    if len(orig_shape_list) == 2:
        return output_tensor

    output_shape = get_shape_list(output_tensor)

    orig_dims = orig_shape_list[0:-1]
    width = output_shape[-1]

    return tf.reshape(output_tensor, orig_dims + [width])
tensorflow.reshape
13,935
import tensorflow as tf
    else:
        return tf.constant(axes[0])
tensorflow.constant
13,936
from tensorflow.python.ops import array_ops
    if self.num_label_columns == 1:
        logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
tensorflow.python.ops.array_ops.zeros_like
13,937
from tensorflow.python.ops import array_ops
  kepsilon = 1e-7  # to account for floating point imprecisions
  thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                for i in range(num_thresholds - 2)]
  thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]

  (tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op,
   fp_update_op) = _tp_fn_tn_fp(predictions, labels, thresholds, weights)
  assert array_ops.squeeze(fp).get_shape().as_list()[0] == num_thresholds

  def compute_sensitivity_at_specificity(name):
    specificities = math_ops.div(tn, tn + fp + kepsilon)
    tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
    tf_index = math_ops.cast(tf_index, dtypes.int32)
tensorflow.python.ops.array_ops.squeeze
13,938
import tensorflow as tf
    # X: [n_batch_train, 2, n_ctx, 2] -> [n_batch_train*2, n_ctx, 2]
    X = tf.reshape(X, [-1, n_ctx, 2])
    M = tf.reshape(M, [-1, n_ctx])

    h = embed(X, we)  # h = [-1, n_ctx, emb]
    for layer in range(n_layer):
        h = block(h, 'h%d' % layer, train=train, scale=True)
    # h = [-1, n_ctx, emb], lm_h = [-1, emb]
    lm_h = tf.reshape(h[:, :-1], [-1, n_embd])
    lm_logits = tf.matmul(lm_h, we, transpose_b=True)
    lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1]))
    lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1] - 1])
    lm_losses = tf.reduce_sum(lm_losses * M[:, 1:], 1) / tf.reduce_sum(M[:, 1:], 1)

    clf_h = tf.reshape(h, [-1, n_embd])
    pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32)
    clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32) * n_ctx + pool_idx)
    clf_h = tf.reshape(clf_h, [-1, 2, n_embd])
    if train and clf_pdrop > 0:
        shape = shape_list(clf_h)
        shape[1] = 1
        clf_h = tf.nn.dropout(clf_h, 1 - clf_pdrop, shape)
    clf_h = tf.reshape(clf_h, [-1, n_embd])
    clf_logits = clf(clf_h, 1, train=train)
    clf_logits = tf.reshape(clf_logits, [-1, 2])
tensorflow.reduce_sum
13,939
import tensorflow as tf
        })
        return model_outputs

    if params['use_bfloat16']:
        with tf.contrib.tpu.bfloat16_scope():
            model_outputs = _model_outputs()

            def cast_outputs_to_float(d):
                for k, v in sorted(six.iteritems(d)):
                    if isinstance(v, dict):
                        cast_outputs_to_float(v)
                    else:
                        d[k] = tf.cast(v, tf.float32)

            cast_outputs_to_float(model_outputs)
    else:
        model_outputs = _model_outputs()

    # First check if it is in PREDICT mode.
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {}
        predictions['detections'] = model_outputs['detections']
        predictions['image_info'] = features['image_info']
        if params['include_mask']:
tensorflow.cast
13,940
from tensorflow.python.framework import ops
      `int16`, `int8`, or `complex64`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.op_scope([value, bias], name, "BiasAddV1") as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add_v1(value, bias, name=name)

ops.RegisterShape("BiasAddV1")(common_shapes.bias_add_shape)
tensorflow.python.framework.ops.op_scope
13,941
import tensorflow as tf
    def test_maximum_batch_size(self):
        with self.test_session() as session:
            @dynamic_batching.batch_fn_with_options(maximum_batch_size=2)
            def f(a, b):
                batch_size = tf.shape(a)[0]
                return a + b, tf.tile([batch_size], [batch_size])

            outputs = [
                f(tf.constant([1]), tf.constant([2])),
                f(tf.constant([1]), tf.constant([2])),
tensorflow.tile
13,942
import tensorflow as tf
    hyper_b_1 = tf.get_variable('hyper_b_1', [state_dim, n_h_mixer])

    hyper_b_final_l1 = tf.layers.dense(inputs=state, units=n_h_mixer, activation=tf.nn.relu,
                                       use_bias=False, name='hyper_b_final_l1')
    hyper_b_final = tf.layers.dense(inputs=hyper_b_final_l1, units=1, activation=None,
                                    use_bias=False, name='hyper_b_final')

    # First layer
    w1 = tf.abs(tf.matmul(state, hyper_w_1))
    b1 = tf.matmul(state, hyper_b_1)
    w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer])  # reshape into batch of matrices
    b1_reshaped = tf.reshape(b1, [-1, 1, n_h_mixer])  # [batch, 1, n_h_mixer]
    hidden = tf.nn.elu(tf.matmul(agent_qs_reshaped, w1_reshaped) + b1_reshaped)

    # Second layer
    w_final = tf.abs(tf.matmul(state, hyper_w_final))
    w_final_reshaped = tf.reshape(w_final, [-1, n_h_mixer, 1])  # reshape into batch of matrices
tensorflow.matmul
13,943
import tensorflow as tf
        cell_bw = GetCell()
        rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
                                               dtype=tf.float32,
tensorflow.nn.bidirectional_rnn
13,944
import tensorflow as tf

args = parser.parse_args()

def model():
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth')
    with tf.variable_scope('layer1'):
        w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer())
        b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0))
        h1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    with tf.variable_scope('layer2'):
        w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer())
        b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0))
        h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)
    with tf.variable_scope('layer3'):
        w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer())
        b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0))
        y = tf.matmul(h2, w3) + b3
    # losses
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y))
    # optimizer
    optimizer = tf.train.GradientDescentOptimizer(args.lr)
    # define one-step train ops
    train_op = optimizer.minimize(cross_entropy)
    return x, y, gt, train_op

if __name__ == "__main__":
    max_train_step = args.max_train_step
    batch_size = args.batch_size
tensorflow.random_normal_initializer
13,945
from tensorflow.python.platform import gfile
    return labels_one_hot

def extract_labels(filename, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting', filename)
    with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' %
                             (magic, filename))
        num_items = _read32(bytestream)
tensorflow.python.platform.gfile.Open
13,946
import tensorflow as tf
    # Construct predictions
    image = tf.placeholder(tf.float32, shape=[hps.batch_size, image_size, image_size, num_channel])  ############ MNIST and CIFAR10 are different here
    adv_image = tf.placeholder(tf.float32, shape=[hps.batch_size, image_size, image_size, num_channel])  ############ MNIST and CIFAR10 are different here
    predict = tf.placeholder(tf.float32, shape=[hps.batch_size, 10])
    predict_nor, tsne_logit_nor = models(hps, image, FLAGS.RCE_train, logits=False, tsne_logits=True)
    predict_adv, tsne_logit_adv = models(hps, adv_image, FLAGS.RCE_train, logits=False, tsne_logits=True)

    # Calculate entropy
    argmax_y_onehot = tf.one_hot(tf.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1)
    normalized_y_nonmaximal = tf.reduce_sum(predict * argmax_y_onehot, 1)
    entropy = tf.reduce_sum(-tf.log(predict) * predict * argmax_y_onehot, 1) / normalized_y_nonmaximal + tf.log(
        normalized_y_nonmaximal)

    for k in range(10):
        adv_image_craft = adv_craft_func(hps, image, FLAGS.attack_method, eps=0.02 * k + 0.02, RCE_train=FLAGS.RCE_train)
        # adv_image_craft = adv_craft_func(hps, image, FLAGS.attack_method, eps=0.04, RCE_train=FLAGS.RCE_train)
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, ckpt_state.model_checkpoint_path)

        for i in six.moves.range(FLAGS.eval_batch_count):
            time_start = time.time()
            (nor_img, true_label) = sess.run([images, labels])
tensorflow.log
13,947
from tensorflow.python.ops import init_ops
        bias_ones = self._bias_initializer
        if self._bias_initializer is None:
            bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
        with vs.variable_scope("gates"):  # Reset gate and update gate.
tensorflow.python.ops.init_ops.constant_initializer
13,948
import tensorflow as tf "output_weights", [hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias",[], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.reduce_sum(tf.multiply(output_layer,output_weights),-1) logits = tf.add(logits, output_bias) probabilities=tf.sigmoid(logits) # labels=tf.constant(labels,dtype=tf.int32) per_example_loss=tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits,reduction=Reduction.NONE) per_example_loss=tf.reduce_sum(per_example_loss,axis=-1) loss = tf.reduce_mean(per_example_loss,name='train_loss') return (loss, per_example_loss, logits, probabilities)
tensorflow.add
13,949
import tensorflow as tf
    predictions_dict = {
        BOX_ENCODINGS: box_encodings,
        CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background
    }

    if self._predict_instance_masks:
        with slim.arg_scope(self._conv_hyperparams):
            upsampled_features = tf.image.resize_bilinear(
                image_features,
                [self._mask_height, self._mask_width],
                align_corners=True)
            upsampled_features = slim.conv2d(
                upsampled_features,
                num_outputs=self._mask_prediction_conv_depth,
                kernel_size=[2, 2])
tensorflow.image.resize_bilinear
13,950
import tensorflow as tf
    Lit_q_mu = tf.matrix_triangular_solve(Luu, q_mu, adjoint=True)
    e_mean_Kuf = expectation(pXnew, mean_function, (kern, feat))  # N x D x M
    # einsum isn't able to infer the rank of e_mean_Kuf, hence we explicitly set the rank of the tensor:
    e_mean_Kuf = tf.reshape(e_mean_Kuf, [num_data, num_func, num_ind])
    e_fmean_mean = tf.einsum("nqm,mz->nqz", e_mean_Kuf, Lit_q_mu)  # N x D x D
    e_related_to_mean = e_fmean_mean + tf.matrix_transpose(e_fmean_mean) + e_mean_mean

    if full_output_cov:
        fvar = (
            tf.matrix_diag(tf.tile((eKff - tf.trace(Li_eKuffu_Lit))[:, None], [1, num_func])) +
tensorflow.matrix_transpose
13,951
import tensorflow as tf
                num_decoder_symbols=5, embedding_size=2)
            sess.run([tf.global_variables_initializer()])
            res = sess.run(dec)
            self.assertEqual(3, len(res))
            self.assertEqual((2, 5), res[0].shape)

            res = sess.run([mem])
            self.assertEqual((2, 4), res[0].shape)

            # Test externally provided output projection.
            w = tf.get_variable("proj_w", [2, 5])
            b = tf.get_variable("proj_b", [5])
            with tf.variable_scope("proj_seq2seq"):
                dec, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
                    enc_inp, dec_inp, cell, num_encoder_symbols=2,
                    num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
            sess.run([tf.global_variables_initializer()])
            res = sess.run(dec)
            self.assertEqual(3, len(res))
            self.assertEqual((2, 2), res[0].shape)

            # Test that previous-feeding model ignores inputs after the first.
            dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
            with tf.variable_scope("other"):
                d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
                    enc_inp, dec_inp2, cell, num_encoder_symbols=2,
tensorflow.nn.seq2seq.embedding_attention_seq2seq
13,952
import tensorflow as tf
            self.optimizer_func = tf.train.AdagradOptimizer
        if self.hparams.grad_strategy == 'sgd':
            self.optimizer_func = tf.train.GradientDescentOptimizer

        self.separate_gradient_update()

        tf.summary.scalar('Gradient Norm', self.norm, collections=['train'])
        tf.summary.scalar('Learning Rate', self.ranker_learning_rate, collections=['train'])
        tf.summary.scalar('Final Loss', tf.reduce_mean(self.loss), collections=['train'])

        clipped_labels = tf.clip_by_value(reshaped_train_labels, clip_value_min=0, clip_value_max=1)
        pad_removed_train_output = self.remove_padding_for_metric_eval(self.docid_inputs, train_output)
        for metric in self.exp_settings['metrics']:
            for topn in self.exp_settings['metrics_topn']:
tensorflow.summary.scalar
13,953
import tensorflow as tf
            input=activation,
            input_size=dialogue_state_action_template_size,
            output_size=num_actions_arguments * actions_arguments_vocabulary_length,
            name='linear_projection_3_predictions_arguments'
        )
        self.predictions_arguments = softmax_2d(
            input=projection,
            n_classifiers=num_actions_arguments,
            n_classes=actions_arguments_vocabulary_length,
            name="softmax_2d_predictions_arguments")

        if FLAGS.print_variables:
            for v in tf.trainable_variables():
                print(v.name)

        with tf.name_scope('loss'):
            one_hot_labels_action = dense_to_one_hot(actions_template, action_templates_vocabulary_length)
            one_hot_labels_arguments = dense_to_one_hot(actions_arguments, actions_arguments_vocabulary_length)

            loss_action = tf.reduce_mean(
                - one_hot_labels_action * tf.log(tf.clip_by_value(self.predictions_action, 1e-10, 1.0)),
                name='loss'
            )

            loss_arguments = tf.reduce_mean(
tensorflow.trainable_variables
13,954
import tensorflow as tf
        b_rec_initializer = tf.constant_initializer(0.0)
        b_out_initializer = tf.constant_initializer(0.0)
tensorflow.constant_initializer
13,955
import tensorflow as tf
            x = tf.constant([b"hello", b"hi"], tf.string)
            y, = tf.py_func(read_fixed_length_numpy_strings, [], [tf.string])
            z, = tf.py_func(read_and_return_strings, [x, y], [tf.string])
            self.assertListEqual(list(z.eval()), [b"hello there", b"hi there"])
tensorflow.py_func
13,956
import tensorflow as tf
        feed_previous=feed_previous)

def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
    cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
    return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
        enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
        feed_previous=feed_previous)

def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
    cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
    return tf.nn.seq2seq.embedding_attention_seq2seq(
        enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols,
        embedding_size=2, feed_previous=feed_previous)

def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
    cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
    return tf.nn.seq2seq.embedding_attention_seq2seq(
        enc_inp, dec_inp, cell, num_encoder_symbols,
tensorflow.nn.rnn_cell.BasicLSTMCell
13,957
import tensorflow as tf
    }

    def _decode_record(record, name_to_features):
        """Decodes a record to a TensorFlow example."""
        example = tf.parse_single_example(record, name_to_features)

        # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
        # So cast all int64 to int32.
tensorflow.parse_single_example
13,958
from tensorflow.python.training import moving_averages
        return mean, variance, second_moment

    def _build_update_ops_variance(self, mean, variance, is_training):
        """Builds the moving average update ops when using moving variance.

        Args:
            mean: The mean value to update with.
            variance: The variance value to update with.
            is_training: Boolean Tensor to indicate if we're currently in
                training mode.
        """
        def build_update_ops():
            """Builds the exponential moving average update ops."""
            update_mean_op = moving_averages.assign_moving_average(
                variable=self._moving_mean,
                value=mean,
                decay=self._decay_rate,
                name="update_moving_mean").op
            update_variance_op = moving_averages.assign_moving_average(
                variable=self._moving_variance,
                value=variance,
                decay=self._decay_rate,
                name="update_moving_variance").op
            return update_mean_op, update_variance_op

        def build_no_ops():
tensorflow.python.training.moving_averages.assign_moving_average
13,959
import tensorflow as tf
        else:
            direct_mask = tf.greater(sl_col, sl_row)  # bl,bl
        direct_mask_tile = tf.tile(
            tf.expand_dims(tf.expand_dims(direct_mask, 0), 0), [bs, bn, 1, 1])  # bs,bn,bl,bl
        rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1])  # bs,bn,bl,bl
        rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl])  # bs,bn,bl,bl
        rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2)
        attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask')  # bs,bn,bl,bl

        # attention
        f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
        dependent_head = linear(
            rep_map, 2 * ivec, False, 0., 'linear_dependent_head', False, wd, keep_prob, is_train)  # bs,bn,bl,2vec
        dependent, head = tf.split(dependent_head, 2, 3)
        dependent_etd = tf.expand_dims(dependent, 2)  # bs,bn,1,bl,vec
        head_etd = tf.expand_dims(head, 3)  # bs,bn,bl,1,vec
        logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0)  # bs,bn,bl,bl,vec
        logits_masked = exp_mask_for_high_rank(logits, attn_mask)
        attn_score = tf.nn.softmax(logits_masked, 3)  # bs,bn,bl,bl,vec
        attn_score = mask_for_high_rank(attn_score, attn_mask)  # bs,bn,bl,bl,vec
        self_attn_result = tf.reduce_sum(attn_score * rep_map_tile, 3)  # bs,bn,bl,vec

    with tf.variable_scope('source2token_self_attn'):
        inter_block_logits = bn_dense_layer(self_attn_result, ivec, True, 0.,
                                            'bn_dense_map', 'linear', False, wd, keep_prob, is_train)  # bs,bn,bl,vec
        inter_block_logits_masked = exp_mask_for_high_rank(inter_block_logits, rep_mask_split)  # bs,bn,bl,vec
tensorflow.split
13,960
import tensorflow as tf
        W_fc1 = self.weight_variable('W_fc1', [1600, 512])
        b_fc1 = self.bias_variable('b_fc1', [512])

        W_fc2 = self.weight_variable('W_fc2', [512, self.ACTIONS])
        b_fc2 = self.bias_variable('b_fc2', [self.ACTIONS])

        s = tf.placeholder("float", [None, 80, 80, 4])  # input layer: the input is an 80x80 4-channel image
        h_conv1 = self.conv2d('h_conv1', s, W_conv1, 4, b_conv1)  # build the first convolutional layer; output is conv1
        h_pool1 = self.max_pool_2x2('h_pool1', h_conv1)
        h_conv2 = self.conv2d('h_conv2', h_pool1, W_conv2, 2, b_conv2)
tensorflow.placeholder
13,961
from tensorflow.python.ops import control_flow_ops
    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)

        vstar = self.get_slot(var, "vstar")
        gold = self.get_slot(var, "gold")

        # Update 'ref' by subtracting 'value'.
        var_update = state_ops.assign_sub(var, lr_t * (grad + gold + mu_t * (var - vstar)))
        # Create an op that groups multiple operations.
        # When this op finishes, all ops in input have finished.
        return control_flow_ops.group(*[var_update, ])

    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)

        vstar = self.get_slot(var, "vstar")
        gold = self.get_slot(var, "gold")  # gold is not sparse

        v_diff = state_ops.assign(vstar, mu_t * (var - vstar), use_locking=self._use_locking)
tensorflow.python.ops.control_flow_ops.group
13,962
from tensorflow.contrib.framework import deprecated_args
  _, top_k_idx = nn.top_k(predictions, k)
  return _streaming_sparse_precision_at_k(
      top_k_idx=top_k_idx,
      labels=labels,
      k=k,
      class_id=class_id,
      ignore_mask=ignore_mask,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=scope)

# TODO(ptucker): Validate range of values in labels?
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_sparse_precision_at_top_k(top_k_predictions,
                                        labels,
                                        class_id=None,
                                        ignore_mask=None,
                                        weights=None,
                                        metrics_collections=None,
                                        updates_collections=None,
                                        name=None):
  """Computes precision@k of top-k predictions with respect to sparse labels.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is in the top-k highest
  `predictions`, and computing the fraction of them for which `class_id` is
tensorflow.contrib.framework.deprecated_args
13,963
import tensorflow as tf
        return tf.nn.relu(layer)

    with tf.variable_scope('norm_layer_%s%d' % (prefix, id)) as vs:
tensorflow.variable_scope
13,964
import tensorflow as tf
    hist_rater_b = tf.reduce_sum(labels, 0)

    conf_mat = tf.matmul(tf.transpose(pred_norm), labels)

    nom = tf.reduce_sum(weights * conf_mat)
    denom = tf.reduce_sum(weights * tf.matmul(
        tf.reshape(hist_rater_a, [num_ratings, 1]),
        tf.reshape(hist_rater_b, [1, num_ratings])) / tf.to_float(batch_size))

    try:
        return -(1 - nom / denom)
    except Exception:
        return -(1 - nom / (denom + eps))
tensorflow.to_float
13,965
import tensorflow as tf
        beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
        beta = tf.reshape(beta, new_shape)

        gamma = tf.get_variable('gamma', [ch], initializer=tf.constant_initializer(1.0))
        gamma = tf.reshape(gamma, new_shape)

        return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)

    @staticmethod
tensorflow.reshape
13,966
import tensorflow as tf
        >>> samples = m.compute_posterior_samples(X, Y, test_points, 2)
        >>> samples.dtype
        dtype('float32')

        """
        mu, var = self.build_posterior_mean_var(X, Y, test_points, True)
        jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06
        L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter)
        V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples]
        V = tf.random_normal(V_shape, dtype=L.dtype)
        samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
        return tf.transpose(samples)
tensorflow.shape
13,967
import tensorflow as tf
    Li_eKuffu = tf.matrix_triangular_solve(Luu_tiled, eKuffu, lower=True)
    Li_eKuffu_Lit = tf.matrix_triangular_solve(Luu_tiled, tf.matrix_transpose(Li_eKuffu), lower=True)  # N x M x M
    cov = tf.matmul(q_sqrt_r, q_sqrt_r, transpose_b=True)  # D x M x M

    if mean_function is None or isinstance(mean_function, mean_functions.Zero):
        e_related_to_mean = tf.zeros((num_data, num_func, num_func), dtype=settings.float_type)
    else:
        # Update mean: \mu(x) + m(x)
        fmean = fmean + expectation(pXnew, mean_function)
tensorflow.zeros
13,968
import tensorflow as tf
tensorflow.flags.DEFINE_integer
13,969
import tensorflow as tf
    'run_on_cloud', False,
    'Whether we will train on cloud.')
tf.app.flags.DEFINE_boolean(
    'seq_train', False,
    'Whether we will train a sequence model.')
tf.app.flags.DEFINE_string(
    'model_to_train', 'blouse, dress, outwear, skirt, trousers',  # 'all, blouse, dress, outwear, skirt, trousers', 'skirt, dress, outwear, trousers',
    'The sub-model to train (comma-separated list).')

FLAGS = tf.app.flags.FLAGS
# --model_scope=blouse --checkpoint_path=./logs/all --data_format=channels_last --batch_size=1

def input_pipeline(is_training=True, model_scope=FLAGS.model_scope, num_epochs=FLAGS.epochs_per_eval):
    if 'all' in model_scope:
        lnorm_table = tf.contrib.lookup.HashTable(
            tf.contrib.lookup.KeyValueTensorInitializer(
                tf.constant(config.global_norm_key, dtype=tf.int64),
                tf.constant(config.global_norm_lvalues, dtype=tf.int64)), 0)
        rnorm_table = tf.contrib.lookup.HashTable(
            tf.contrib.lookup.KeyValueTensorInitializer(
                tf.constant(config.global_norm_key, dtype=tf.int64),
                tf.constant(config.global_norm_rvalues, dtype=tf.int64)), 1)
    else:
        lnorm_table = tf.contrib.lookup.HashTable(
            tf.contrib.lookup.KeyValueTensorInitializer(
                tf.constant(config.local_norm_key, dtype=tf.int64),
                tf.constant(config.local_norm_lvalues, dtype=tf.int64)), 0)
        rnorm_table = tf.contrib.lookup.HashTable(
            tf.contrib.lookup.KeyValueTensorInitializer(
                tf.constant(config.local_norm_key, dtype=tf.int64),
                tf.constant(config.local_norm_rvalues, dtype=tf.int64)), 1)

    preprocessing_fn = lambda org_image, classid, shape, key_x, key_y, key_v: preprocessing.preprocess_image(
        org_image, classid, shape, FLAGS.train_image_size, FLAGS.train_image_size,
        key_x, key_y, key_v, (lnorm_table, rnorm_table),
        is_training=is_training,
        data_format=('NCHW' if FLAGS.data_format == 'channels_first' else 'NHWC'),
        category=(model_scope if 'all' not in model_scope else '*'),
        bbox_border=FLAGS.bbox_border, heatmap_sigma=FLAGS.heatmap_sigma, heatmap_size=FLAGS.heatmap_size)

    images, shape, classid, targets, key_v, isvalid, norm_value = dataset.slim_get_split(
        FLAGS.data_dir, preprocessing_fn,
        (FLAGS.xt_batch_size if 'seresnext50' in FLAGS.backbone else FLAGS.batch_size),
        FLAGS.num_readers, FLAGS.num_preprocessing_threads,
        num_epochs=num_epochs, is_training=is_training,
        file_pattern=FLAGS.dataset_name,
        category=(model_scope if 'all' not in model_scope else '*'), reader=None)
tensorflow.constant
13,970
import tensorflow as tf
        features_proj = tf.matmul(features_flat, w)
        features_proj = tf.reshape(features_proj, [-1, self.L, self.D])
        return features_proj

    def _attention_layer(self, features, features_proj, h, reuse=False):
        with tf.variable_scope('attention_layer', reuse=reuse):
            w = tf.get_variable('w', [self.H, self.D], initializer=self.weight_initializer)
            b = tf.get_variable('b', [self.D], initializer=self.const_initializer)
            w_att = tf.get_variable('w_att', [self.D, 1], initializer=self.weight_initializer)

            h_att = tf.nn.relu(features_proj + tf.expand_dims(tf.matmul(h, w), 1) + b)  # (N, L, D)
            out_att = tf.reshape(tf.matmul(tf.reshape(h_att, [-1, self.D]), w_att), [-1, self.L])  # (N, L)
            alpha = tf.nn.softmax(out_att)
            context = tf.reduce_sum(features * tf.expand_dims(alpha, 2), 1, name='context')  # (N, D)
            return context, alpha

    def _selector(self, context, h, reuse=False):
        with tf.variable_scope('selector', reuse=reuse):
            w = tf.get_variable('w', [self.H, 1], initializer=self.weight_initializer)
            b = tf.get_variable('b', [1], initializer=self.const_initializer)
tensorflow.matmul
13,971
import tensorflow as tf
    expected_rank_dict = {}
    if isinstance(expected_rank, six.integer_types):
        expected_rank_dict[expected_rank] = True
    else:
        for x in expected_rank:
            expected_rank_dict[x] = True

    actual_rank = tensor.shape.ndims
    if actual_rank not in expected_rank_dict:
        scope_name = tf.get_variable_scope().name
        raise ValueError(
            "For the tensor `%s` in scope `%s`, the actual rank "
            "`%d` (shape = %s) is not equal to the expected rank `%s`" %
            (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
tensorflow.get_variable_scope
13,972
import tensorflow as tf
            self.assertEqual(10, v0.eval())

        # Restore a different "v1" from shard 1 of the saved files.
        with tf.Session(
                target="",
                config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
            with sess.graph.device("/cpu:0"):
                v1 = tf.Variable(222)
            save = tf.train.Saver({"v1": v1}, sharded=True)
            tf.initialize_all_variables().run()
            self.assertEqual(222, v1.eval())
            save.restore(sess, save_path + "-00001-of-00002")
            self.assertEqual(20, v1.eval())
tensorflow.Variable
13,973
import tensorflow as tf
    )
    with tf.variable_scope("loss"):
        if is_training:
            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
        output_layer = tf.reshape(output_layer, [-1, hidden_size])
        logits = tf.matmul(output_layer, output_weight, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, 11])
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        # labels = tf.cast(labels, dtype=tf.float32)
        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
tensorflow.nn.bias_add
13,974
import tensorflow as tf
                kk = tf.Variable(0, dtype=tf.int64)
                for i in tf.range(start=0, limit=tf.size(vx_keys), delta=1, dtype=None, name='range'):
                    for j in tf.range(start=0, limit=tf.size(vz_keys), delta=1, dtype=None, name='range'):
                        to_add = tf.cond(
                            tf.greater(vz.lookup(vx_keys[i]), -1),
                            true_fn=lambda: tf.math.multiply(vx.lookup(vx_keys[i]), vz.lookup(vz_keys[j])),
                            false_fn=lambda: tf.constant(0, dtype=tf.int64)
                        )
                        kk = tf.math.add(kk, to_add)
                kernel[l][m] = kk
        return tf.convert_to_tensor(kernel, dtype=tf.int64)

    def dim(self):
        return self._dim
tensorflow.math.add
13,975
import tensorflow as tf
        row_blocks.append(tf.pad(
            tensor=matrix,
            paddings=tf.concat(
                [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),
                 [(row_before_length, row_after_length)]],
                axis=0)))
    blocked = tf.concat(row_blocks, -2)
    blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
    return blocked
tensorflow.concat
13,976
import tensorflow as tf
    scales = tf.convert_to_tensor(scales, dtype=tf.float32)
    ratios = tf.convert_to_tensor(ratios, dtype=tf.float32)
    offset = tf.convert_to_tensor(offset, dtype=tf.float32)

    scales_grid, ratios_grid = tf.meshgrid(scales, ratios)
    scales_grid = tf.reshape(scales_grid, [-1, 1])
    ratios_grid = tf.reshape(ratios_grid, [-1, 1])

    ratio_sqrts = tf.sqrt(ratios_grid)
    heights = scales_grid / ratio_sqrts * base_size[1]
    widths = scales_grid * ratio_sqrts * base_size[0]

    x_centers = tf.cast(tf.range(features_width), tf.float32)
    x_centers = x_centers * stride[1]
    y_centers = tf.cast(tf.range(features_height), tf.float32)
    y_centers = y_centers * stride[0]
    # x_centers = x_centers + offset[1]
tensorflow.sqrt
13,977
import tensorflow as tf
    with tf.variable_scope(args.name):
        model = HredModel(data, args, embed)
        model.print_parameters()

    latest_dir = '%s/checkpoint_latest' % args.model_dir
    best_dir = '%s/checkpoint_best' % args.model_dir
    if tf.train.get_checkpoint_state(latest_dir) and args.restore == "last":
        print("Reading model parameters from %s" % latest_dir)
        model.latest_saver.restore(sess, tf.train.latest_checkpoint(latest_dir))
    else:
        if tf.train.get_checkpoint_state(best_dir) and args.restore == "best":
            print('Reading model parameters from %s' % best_dir)
            model.best_saver.restore(sess, tf.train.latest_checkpoint(best_dir))
        else:
            print("Created model with fresh parameters.")
tensorflow.train.latest_checkpoint
13,978
import tensorflow as tf
                lang1_resfile.write(source)
                lang1_resfile.write("\n")
                lang2_resfile.write(target)
                lang2_resfile.write("\n")
    else:
        lang1_filename, lang2_filename = dataset[1]
        lang1_filepath = os.path.join(tmp_dir, lang1_filename)
        lang2_filepath = os.path.join(tmp_dir, lang2_filename)
        is_sgm = (
            lang1_filename.endswith("sgm") and lang2_filename.endswith("sgm"))

        if not (tf.gfile.Exists(lang1_filepath) and tf.gfile.Exists(lang2_filepath)):
            # For .tar.gz and .tgz files, we read compressed.
            mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
            with tarfile.open(compressed_filepath, mode) as corpus_tar:
                corpus_tar.extractall(tmp_dir)
        if lang1_filepath.endswith(".gz"):
            new_filepath = lang1_filepath.strip(".gz")
            generator_utils.gunzip_file(lang1_filepath, new_filepath)
            lang1_filepath = new_filepath
        if lang2_filepath.endswith(".gz"):
tensorflow.gfile.Exists
13,979
import tensorflow as tf
        self.assertFalse(task.teacher.params.is_eval)
        self.assertIsNotNone(task.teacher.params.input)
        self.assertFalse(task.student.params.is_eval)
        self.assertIsNotNone(task.student.params.input)

        metrics = task.FPropDefaultTheta()
        self.assertItemsEqual(['loss', 'num_samples_in_batch'],
                              list(metrics.keys()))

        task.BProp()
        # Expected side effects of BProp().
        self.assertIsNotNone(task.train_op)
        self.assertIsNotNone(task.total_examples)

        with self.session() as sess:
            tf.global_variables_initializer().run()

            variables = {}
            values_before_training = {}
            values_after_training = {}
            for child in ('teacher', 'student'):
                variables[child] = {
                    k: v for k, v in getattr(task, child).vars.FlattenItems()
                }
                values_before_training[child] = sess.run(variables[child])

            # Train for a few steps.
tensorflow.global_variables_initializer
13,980
import tensorflow as tf
flags.DEFINE_integer("iterations_per_loop", 1,
                     "How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
tensorflow.flags.DEFINE_string
13,981
import tensorflow as tf

def cw_sampling(X, y=None):
    def phi_sampling(s, D):
        return tf.pow(1.0 + 4.0 * s / (2.0 * D - 3), -0.5)

    D = tf.cast(tf.shape(X)[1], tf.float32)
    N = tf.cast(tf.shape(X)[0], tf.float32)
    D_int = tf.cast(D, tf.int32)
    N_int = tf.cast(N, tf.int32)
    if y is None:
        y = silverman_rule_of_thumb(N)
tensorflow.shape
13,982
import tensorflow as tf

def _global_keep_prob(keep_prob):
    keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)
    keep_prob = tf.cond(_phase, lambda: keep_prob, lambda: keep_prob * 0.0 + 1.0)
    return keep_prob
tensorflow.cond
13,983
import tensorflow as tf
                values = tf.math.sign(tf.nn.relu(interpolated + self.tol))
                inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution])
                inter = tf.transpose(tf.reduce_max(inter, axis=a))
                im = axs[fig_obj_count, 1].matshow(inter.numpy())
                plt.colorbar(im, ax=axs[fig_obj_count, 1])

                values = sdf_values
                inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution])
                inter = tf.transpose(tf.reduce_max(inter, axis=a))
                im = axs[fig_obj_count, 2].matshow(inter.numpy())
                plt.colorbar(im, ax=axs[fig_obj_count, 2])

                fig_obj_count += 1

        intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
        union = tf.reduce_sum(tf.math.sign(sdf_values))
        iou = intersection / union

        self.collisions.append(num_collisions)
        self.intersections.append(intersection)
        self.ious.append(iou)
        return num_collisions, intersection, iou
tensorflow.reduce_max
13,984
import tensorflow as tf
        pad_w1 = tf.mod(-w + bsize[2], bstrides[2])
        return tf.cond(
            tf.logical_or(tf.greater(pad_h1, 0), tf.greater(pad_w1, 0)),
            lambda: tf.pad(x, [[0, 0], [0, pad_h1], [0, pad_w1], [0, 0]]),
            lambda: x)
    else:
        return x
tensorflow.pad
13,985
import tensorflow as tf
            seed: set random state.

        Returns:
            L2DeepSurv Class.
        """
        # Prepare data
        self.train_data = {}
        self.train_data['X'], self.train_data['E'], \
            self.train_data['T'], self.train_data['failures'], \
            self.train_data['atrisk'], self.train_data['ties'] = utils.parse_data(X, label)

        # New Graph
        G = tf.Graph()
        with G.as_default():
            # Data input
            X = tf.placeholder(tf.float32, [None, input_node], name='x-Input')
            y_ = tf.placeholder(tf.float32, [None, output_node], name='label-Input')
            # hidden layers
            self.nnweights = []  # collect weights of network
            prev_node = input_node
            prev_x = X
            for i in range(len(hidden_layers_node)):
                layer_name = 'layer' + str(i + 1)
                with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
                    weights = tf.get_variable('weights', [prev_node, hidden_layers_node[i]],
                                              initializer=tf.truncated_normal_initializer(stddev=0.1))
                    self.nnweights.append(weights)
tensorflow.placeholder
13,986
import tensorflow as tf
    Raises
    ------
    ValueError
        If input tensor is not 2D.
    """
    if weight_init is None:
        num_features = tensor.get_shape()[-1].value
        weight_init = tf.truncated_normal([num_features, size], stddev=0.01)
    if bias_init is None:
        bias_init = tf.zeros([size])

    with tf.name_scope(name, 'fully_connected', [tensor]):
        w = tf.Variable(weight_init, name='w', dtype=tf.float32)
        b = tf.Variable(bias_init, name='b', dtype=tf.float32)
tensorflow.truncated_normal
13,987
import tensorflow as tf
    enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
    attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
tensorflow.reshape
13,988
import tensorflow as tf
    train_dataset_reader = dataset.BatchDatset(train_records, image_options)
    validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)

    sess = tf.Session()

    print("Setting up Saver...")
    saver = tf.train.Saver()

    # create two summary writers to show training loss and validation loss in the same graph
    # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir
    train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)
    validation_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/validation')

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")

    if FLAGS.mode == "train":
        for itr in xrange(MAX_ITERATION):
            train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)
            z_ = np.random.uniform(low=-1.0, high=1.0, size=(FLAGS.batch_size, 4, 4, 128))
            # print(train_images)
            feed_dict = {image: train_images, annotation: train_annotations,
                         keep_probability: 0.85, z: z_}
tensorflow.global_variables_initializer
13,989
import tensorflow as tf
        return bias

    def conv_bn_relu(self, bottom, name, kernel_size, output_channels, initializer,
                     stride=1, bn=False, training=False, relu=True):
        input_channels = bottom.get_shape().as_list()[-1]
        with tf.variable_scope(name) as scope:
            kernel = self.variable('weights',
                                   [kernel_size, kernel_size, input_channels, output_channels],
                                   initializer,
                                   regularizer=tf.contrib.layers.l2_regularizer(0.0005))
            conv = tf.nn.conv2d(bottom, kernel, [1, stride, stride, 1], padding='SAME')
            biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0))
            conv_layer = tf.nn.bias_add(conv, biases)
tensorflow.variable_scope
13,990
import tensorflow as tf
        _, axs = plt.subplots(labeled_translations.shape[0], 5)
        fig_obj_count = 0
        for class_id in range(self.max_num_classes):
            # Do the same for the ground truth and predictions
            sdf_values = tf.zeros_like(samples_world)[:, 0:1]
            for mtype, (classes, sdfs, poses) in enumerate([
                    (labeled_classes, labeled_sdfs, labeled_poses),
                    (predicted_classes, predicted_sdfs, predicted_poses)]):
                for i in range(classes.shape[0]):
                    if class_id == classes[i]:
                        sdf = tf.expand_dims(sdfs[i], -1)
                        sdf = sdf * -1.0  # inside positive, outside zero
                        samples_object = centernet_utils.transform_pointcloud(
                            tf.reshape(samples_world, [1, 1, -1, 3]),
                            tf.reshape(poses[2][i], [1, 1, 3]),
                            tf.reshape(poses[0][i], [1, 1, 3, 3]),
                            tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
                        samples_object = \
                            (samples_object * (29.0 / 32.0) / 2.0 + 0.5) * 32.0 - 0.5
                        samples = tf.squeeze(samples_object)
tensorflow.expand_dims
13,991
import tensorflow as tf
        indicator = tf.less(range_tiled, lengths_tiled + 1)  # i.e. where seq len is less than index
        trim = np.ones(indicator.get_shape())
        trim[:, 0] = 0  # ignore start symbol
        indicator = tf.logical_and(indicator, trim.astype(bool))
        self.indicator = indicator

        sz = [batch_size, max_sequence_len]
        self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))
        #-------------------------------#

        self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')

        hidden_size = model_params['model_hidden_size']
tensorflow.zeros
13,992
import tensorflow as tf
    def benchmarkEagerLinearRegression(self):
        num_epochs = 10
        num_batches = 200
        batch_size = 64
        dataset = linear_regression.synthetic_dataset(
            w=tf.random_uniform([3, 1]),
            b=tf.random_uniform([1]),
            noise_level=0.01,
            batch_size=batch_size,
            num_batches=num_batches)
        burn_in_dataset = dataset.take(10)
tensorflow.random_uniform
13,993
import tensorflow as tf
    Returns:
        float Tensor of shape [batch_size, seq_length, embedding_size].
    """
    # This function assumes that the input is of shape [batch_size, seq_length,
    # num_inputs].
    #
    # If the input is a 2D tensor of shape [batch_size, seq_length], we
    # reshape to [batch_size, seq_length, 1].
    if input_ids.shape.ndims == 2:
        input_ids = tf.expand_dims(input_ids, axis=[-1])

    embedding_table = tf.get_variable(
        name=word_embedding_name,
        shape=[vocab_size, embedding_size],
        initializer=create_initializer(initializer_range))

    if use_one_hot_embeddings:
        flat_input_ids = tf.reshape(input_ids, [-1])
        one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
tensorflow.expand_dims
13,994
import tensorflow as tf
        use_tpu=FLAGS.use_tpu,
        bsz=FLAGS.predict_batch_size)

    checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
    result = estimator.predict(
        input_fn=predict_input_fn,
        checkpoint_path=checkpoint_path)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    output_submit_file = os.path.join(FLAGS.output_dir, "submit_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as pred_writer, \
            tf.gfile.GFile(output_submit_file, "w") as sub_writer:
        sub_writer.write("index" + "\t" + "prediction\n")
        num_written_lines = 0
        tf.logging.info("***** Predict results *****")
        for (i, (example, prediction)) in \
                enumerate(zip(predict_examples, result)):
            probabilities = prediction["probabilities"]
            if i >= num_actual_predict_examples:
                break
tensorflow.gfile.GFile
13,995
import tensorflow as tf
    # dimension. These Tensors are implicitly concatenated to
    # [params['batch_size']].
    global_step_t = tf.reshape(global_step, [1])
    total_loss_t = tf.reshape(total_loss, [1])
    total_rpn_loss_t = tf.reshape(total_rpn_loss, [1])
    rpn_score_loss_t = tf.reshape(rpn_score_loss, [1])
    rpn_box_loss_t = tf.reshape(rpn_box_loss, [1])
    total_fast_rcnn_loss_t = tf.reshape(total_fast_rcnn_loss, [1])
    fast_rcnn_class_loss_t = tf.reshape(fast_rcnn_class_loss, [1])
    fast_rcnn_box_loss_t = tf.reshape(fast_rcnn_box_loss, [1])
    mask_loss_t = tf.reshape(mask_loss, [1])
    learning_rate_t = tf.reshape(learning_rate, [1])
    host_call = (host_call_fn,
                 [global_step_t, total_loss_t, total_rpn_loss_t,
tensorflow.reshape
13,996
import tensorflow as tf
            drop_remainder=True))
        return d

    return input_fn

def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
        t = example[name]
        if t.dtype == tf.int64:
            t = tf.to_int32(t)
        example[name] = t

    return example

def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    if FLAGS.use_hvd:
        hvd.init()
        if FLAGS.reduce_log and (hvd.rank() != 0):
            tf.logging.set_verbosity(tf.logging.ERROR)
tensorflow.to_int32
13,997
import tensorflow as tf
    # Capture *.tgz and *.tar.gz too.
    mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
    with tarfile.open(compressed_filepath, mode) as corpus_tar:
        corpus_tar.extractall(tmp_dir)

    filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
    for tsv_filename in filenames:
        if tsv_filename.endswith(".gz"):
            new_filename = tsv_filename.strip(".gz")
            generator_utils.gunzip_file(tsv_filename, new_filename)
            tsv_filename = new_filename
        with tf.gfile.Open(tsv_filename) as tsv_file:
            for line in tsv_file:
                if line and "\t" in line:
                    parts = line.split("\t")
                    source, target = parts[src_column], parts[trg_column]
                    source, target = source.strip(), target.strip()
                    clean_pairs = [(source, target)]
                    if "tsv" in datatypes_to_clean:
                        clean_pairs = cleaner_en_xx.clean_en_xx_pairs(clean_pairs)
                    for source, target in clean_pairs:
tensorflow.gfile.Open
13,998
from tensorflow.python.framework import ops
  last = input_shape[-1].value
  if last is not None and k is not None and last < k:
    raise ValueError("input.shape %s must have last dimension >= k = %d" %
                     (input_shape, k))
  output_shape = input_shape[:-1].concatenate([k])
  return [output_shape, output_shape]

@ops.RegisterShape("BatchNormWithGlobalNormalization")
def _BatchNormShape(op):
  """Shape function for BatchNormWithGlobalNormalization op."""
  input_shape = op.inputs[0].get_shape().with_rank(4)
  mean_shape = op.inputs[1].get_shape().with_rank(1)
  var_shape = op.inputs[2].get_shape().with_rank(1)
  beta_shape = op.inputs[3].get_shape().with_rank(1)
  gamma_shape = op.inputs[4].get_shape().with_rank(1)
tensorflow.python.framework.ops.RegisterShape
13,999