Dataset schema (column, type, min, max):
  seed      string (length 25 - 2.89k)
  seed_api  string (length 14 - 102)
  index     int64  (0 - 14.8k)
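Each row below pairs a TensorFlow snippet (seed) with the fully-qualified API it exercises (seed_api) and a running row index. As a minimal sketch of how a table with this schema could be loaded and inspected, assuming it is distributed as a Hugging Face dataset (the repository id below is a placeholder, not the actual name):

    # Hypothetical usage sketch; "org/tf-api-seeds" is a placeholder repository id.
    from datasets import load_dataset

    ds = load_dataset("org/tf-api-seeds", split="train")  # columns: seed, seed_api, index
    row = ds[0]
    print(row["seed_api"])    # e.g. "tensorflow.nn.dropout"
    print(row["index"])       # integer row index
    print(row["seed"][:200])  # first 200 characters of the code snippet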
import tensorflow as tf
relu6 = tf.nn.relu(conv6, name="relu6")
if FLAGS.debug: utils.add_activation_summary(relu6)
relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
b7 = utils.bias_variable([4096], name="b7")
conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
relu7 = tf.nn.relu(conv7, name="relu7")
if FLAGS.debug: utils.add_activation_summary(relu7)
relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)
W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
# annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")
# now to upscale to actual image size
deconv_shape1 = image_net["pool4"].get_shape()
W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
tensorflow.nn.dropout
14,000
import tensorflow as tf
                       file. Defaults to binary format unless filename ends in 'txt'.""")
tf.flags.DEFINE_string('optimizer', 'sgd',
                       'Optimizer to use: momentum or sgd or rmsprop')
tf.flags.DEFINE_float('learning_rate', None,
                      """Initial learning rate for training.""")
tf.flags.DEFINE_float('num_epochs_per_decay', 0,
                      """Steps after which learning rate decays.""")
tf.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
                      """Learning rate decay factor.""")
tf.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""")
tf.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""")
tf.flags.DEFINE_float('rmsprop_momentum', 0.9, """Momentum in RMSProp.""")
tf.flags.DEFINE_float('rmsprop_epsilon', 1.0, """Epsilon term for RMSProp.""")
tf.flags.DEFINE_float('gradient_clip', None,
                      """Gradient clipping magnitude. Disabled by default.""")
tf.flags.DEFINE_float('weight_decay', 0.00004,
                      """Weight decay factor for training.""")
# Performance tuning flags.
tf.flags.DEFINE_boolean('winograd_nonfused', True,
                        """Enable/disable using the Winograd non-fused algorithms.""")
tf.flags.DEFINE_boolean('sync_on_finish', False,
tensorflow.flags.DEFINE_float
14,001
from tensorflow.python.ops import math_ops
check_ops.assert_type(values, dtypes.bool)
count = _create_local('count', shape=[])
values = math_ops.to_float(values)
if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)
value_tensor = array_ops.identity(count)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
if metrics_collections:
tensorflow.python.ops.math_ops.mul
14,002
import tensorflow as tf
    out_layer = VariationalDense(n_in=layer_sizes[-2], n_out=layer_sizes[-1], dropout_mask_ph=dropout_mask_phs[-1], model_prob=1.0-dropout_rate, model_lam=3e-4, activation=output_activation, name="Out")
    x = out_layer(x)
    regularization += out_layer.regularization
    return x, regularization

def mlp_dropout(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None, dropout_rate=0):
    for h in hidden_sizes[:-1]:
        x = tf.layers.dense(x, units=h, activation=activation)
        x = tf.layers.dropout(x, rate=dropout_rate, training=True)
    x = tf.layers.dropout(x, rate=dropout_rate, training=True)
    return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)

def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
    for h in hidden_sizes[:-1]:
        x = tf.layers.dense(x, units=h, activation=activation)
    return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)

def get_vars(scope):
    return [x for x in tf.global_variables() if scope in x.name]

def count_vars(scope):
    v = get_vars(scope)
    return sum([np.prod(var.shape.as_list()) for var in v])
tensorflow.layers.dropout
14,003
import tensorflow as tf
                use_spectral_norm, is_training, None, conv_dims=2)
with tf.variable_scope("g"):
    g = convolution(
        inputs,
        num_outputs // 8,
tensorflow.variable_scope
14,004
import tensorflow as tf
logits = generate(output, input_, context)
pos = tf.expand_dims(pos, axis=1)
state = tf.concat([state, context, pos, new_weights], axis=1)
return state, logits
tensorflow.concat
14,005
import tensorflow as tf
    A scalar tensor containing the weight decay cost.

  Raises:
    NotImplementedError: If an unsupported penalty type is requested.
  """
  variables = []
  # exclude bias variables
  for v in tf.trainable_variables():
    if v.get_shape().ndims == 2:
      variables.append(v)
  with tf.name_scope('weight_decay'):
    if penalty_type == 'l1':
      cost = tf.add_n([tf.reduce_sum(tf.abs(v)) for v in variables])
    elif penalty_type == 'l2':
      cost = tf.add_n([tf.nn.l2_loss(v) for v in variables])
    else:
      raise NotImplementedError('Unsupported penalty_type %s' % penalty_type)
    cost *= penalty
    #tf.scalar_summary('Weight Decay Cost', cost)
  return cost

def multitask_logits(features, num_tasks, num_classes=2, weight_init=None, bias_init=None, dropout_prob=None, name=None):
tensorflow.nn.l2_loss
14,006
from tensorflow.python.ops import variable_scope
  @contextlib.contextmanager
  def as_default(self):
    yield

def create_eager_var_store():
  if context.in_eager_mode():
    return variable_scope.EagerVariableStore()
  else:
    return DummyVariableStore()

def scheduled_sampling(hparams, problem_hparams, dp, sharded_logits, losses, sharded_features, transformed_features, model):
  """Scheduled sampling."""
tensorflow.python.ops.variable_scope.EagerVariableStore
14,007
import tensorflow as tf
    self.conv3 = self._residual_block('conv3_2', self.conv3, 128)
    _debug(self.conv3)
with tf.variable_scope('conv4_x'):
    self.conv4 = self._residual_block('conv4_1', self.conv3, 256, pool_first=True, strides=2)
    _debug(self.conv4)
tensorflow.variable_scope
14,008
import tensorflow as tf
    labels=label,
    initializer=xlnet_model.get_initializer(),
    scope="regression_{}".format(FLAGS.task_name.lower()),
    return_logits=True)
total_loss = tf.reduce_mean(per_example_loss)
return total_loss, per_example_loss, logits
tensorflow.reduce_mean
14,009
import tensorflow as tf
state = state[:,-cell_output_size:]
projection_input = [state, context]
if decoder.use_previous_word:
    projection_input.insert(1, input_)  # for back-compatibility
output_ = tf.concat(projection_input, axis=1)
if decoder.pred_deep_layer:
    deep_layer_size = decoder.pred_deep_layer_size or decoder.embedding_size
    if decoder.layer_norm:
        output_ = dense(output_, deep_layer_size, use_bias=False, name='deep_output')
        output_ = tf.contrib.layers.layer_norm(output_, activation_fn=tf.nn.tanh, scope='output_layer_norm')
    else:
        output_ = dense(output_, deep_layer_size, activation=tf.tanh, use_bias=True, name='deep_output')
    if decoder.use_dropout:
        size = tf.shape(output_)[1]
        noise_shape = [1, size] if decoder.pervasive_dropout else None
        output_ = tf.nn.dropout(output_, keep_prob=decoder.deep_layer_keep_prob, noise_shape=noise_shape)
else:
    if decoder.pred_maxout_layer:
        maxout_size = decoder.maxout_size or cell_output_size
        output_ = dense(output_, maxout_size, use_bias=True, name='maxout')
tensorflow.contrib.layers.layer_norm
14,010
import tensorflow as tf
except AttributeError:
    deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
tensorflow.nn.deconv2d
14,011
import tensorflow as tf
config.gpu_options.per_process_gpu_memory_fraction = 0.5
# Placeholders
self.sess = tf.Session(config=config)
self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
self.a_bound = (env.action_space.high - env.action_space.low) / 2
self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
self.rewards = tf.placeholder(tf.float32, [None, 1], 'rewards')
self.keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
# Dateset with experiennce replay
self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state,
                                                   'actions': self.actions,
tensorflow.placeholder
14,012
import tensorflow as tf
fname = os.path.join(tf.resource_loader.get_data_files_path(),
                     'samples/configs/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
                              'data/pet_label_map.pbtxt')
data_path = os.path.join(tf.resource_loader.get_data_files_path(),
                         'test_data/pets_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
tensorflow.resource_loader.get_data_files_path
14,013
from tensorflow.python.framework import tensor_shape
tensor_dtype = inputs[0].dtype
if shape is not None:
    shape = tensor_shape.as_shape(shape)
else:
    shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
        shape = shape.merge_with(input_tensor.get_shape())
tensorflow.python.framework.tensor_shape.unknown_shape
14,014
from tensorflow.python.training import server_lib
def _setupCluster(self):

    def get_open_port():
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        except IOError:
            s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        s.bind(("", 0))
        port = s.getsockname()[1]
        s.close()
        return port

    port1 = get_open_port()
    port2 = get_open_port()
    cs = server_lib.ClusterSpec({
        "worker": ["localhost:%s" % port1],
        "ps": ["localhost:%s" % port2]
    })
    worker = server_lib.Server(cs, job_name="worker", start=True)
    ps = server_lib.Server(cs, job_name="ps", start=True)
    return worker, ps

@contextlib.contextmanager
def _maybeWithDevice(self, device):
    if device is not None:
        with ops.device(device):
tensorflow.python.training.server_lib.ClusterSpec
14,015
import tensorflow as tf
if w_init is None:
    w_init = tf.contrib.layers.variance_scaling_initializer()
tensorflow.contrib.layers.variance_scaling_initializer
14,016
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
        'image/class/label': parsing_ops.FixedLenFeature(
            shape=[1], dtype=dtypes.int64, default_value=array_ops.zeros([1], dtype=dtypes.int64))
    }
    items_to_handlers = {
        'image': tfexample_decoder.Image(),
        'label': tfexample_decoder.Tensor('image/class/label'),
    }
    decoder = tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
    return dataset.Dataset(
        data_sources=data_sources,
        reader=io_ops.TFRecordReader,
        decoder=decoder,
        num_samples=100,
        items_to_descriptions=None)

class DatasetDataProviderTest(test.TestCase):
tensorflow.contrib.slim.python.slim.data.tfexample_decoder.TFExampleDecoder
14,017
import tensorflow as tf
encoded = tokenizer.texts_to_sequences([text])[0]
word2idx = tokenizer.word_index
idx2word = tokenizer.index_word
BATCH_SIZE = 256
embedding_dim = 100
units = 512
vocab_size = len(tokenizer.word_index) + 1
model = MyModel(vocab_size, embedding_dim, units, BATCH_SIZE)
optimizer = tf.optimizers.Adam()
checkpoint_dir = "./models/new_out"
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()

@app.route("/summary", methods=["POST"])
@cross_origin(headers=['Content-Type'])
def summary():
    res = requests.post("https://turkcemetinozetleme.teaddict.net/ozetle/api/new",
                        data={"contextOfText": request.data.decode()},
                        headers={"content-type": "application/x-www-form-urlencoded; charset=UTF-8;"})
    print(res.text)
    response = app.response_class(
tensorflow.train.Checkpoint
14,018
import tensorflow as tf
with tf.Graph().as_default():
tensorflow.Graph
14,019
import tensorflow as tf
    # Place holder for features and captions
    self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
    self.captions = tf.placeholder(tf.int32, [None, self.T + 1])

def _get_initial_lstm(self, features):
tensorflow.placeholder
14,020
import tensorflow as tf
for idx, x in enumerate(xs):
    c = c
    h = h
    z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
    i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
    i = tf.nn.sigmoid(i)
    f = tf.nn.sigmoid(f)
    o = tf.nn.sigmoid(o)
    u = tf.tanh(u)
    c = f*c + i*u
tensorflow.nn.sigmoid
14,021
import tensorflow as tf
neg_log_lhoods = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets)
if target_weight_strategy == 'rect':
    avg_neg_log_lhood = tf.reduce_mean(neg_log_lhoods)
else:
    neg_log_lhoods = tf.multiply(neg_log_lhoods, target_weights)
    # be careful to have at least one weight be nonzero
    # should we be taking the mean elem-wise by batch? i think this is a big bug
    avg_neg_log_lhood = tf.reduce_sum(neg_log_lhoods) / tf.reduce_sum(target_weights)
    neg_log_lhoods_inspect = tf.reshape(neg_log_lhoods, [batch_size, rnn_nunroll])

# Train op
if mode == 'train':
    lr = tf.Variable(0.0, trainable=False)
    self._lr = lr
    self._lr_summary = tf.summary.scalar('learning_rate', self._lr)
    tvars = tf.trainable_variables()
    grads = tf.gradients(avg_neg_log_lhood, tvars)
    if grad_clip > 0.0:
        grads, _ = tf.clip_by_global_norm(grads, grad_clip)
    if opt == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(lr)
    else:
        raise NotImplementedError()
    train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=tf.contrib.framework.get_or_create_global_step())
tensorflow.summary.scalar
14,022
import tensorflow as tf
logits_max = tf.reduce_max(sims_logits, 1)
sims_logits = sims_logits - tf.reshape(logits_max, [-1, 1])
sims_probs = tf.nn.softmax(sims_logits)
sim_labels = tf.constant(np.arange(bs * 2, dtype=np.int32))
sims_onehot = tf.one_hot(sim_labels, bs * 2)
c_real_loss = - tf.reduce_mean(
    tf.reduce_sum(sims_onehot * tf.log(sims_probs + 1e-10), 1))
tensorflow.one_hot
14,023
import tensorflow as tf
        will be used.

    Returns
    -------
    list(tf.Variable)
        List of uninitialized tf variables.
    """
    sess = tf.get_default_session()
    if variables is None:
        variables = tf.global_variables()
    else:
        variables = list(variables)
    if len(variables) == 0:
tensorflow.get_default_session
14,024
import tensorflow as tf
    self.char_mat = tf.concat([self.char_pad_unk_mat, self.pretrained_char_mat], axis=0)
else:
    self.word_mat = tf.get_variable(
        'word_embeddings',
        shape=[self.vocab.word_size(), self.vocab.word_embed_dim],
        initializer=tf.constant_initializer(self.vocab.word_embeddings),
        trainable=True
    )
    self.char_mat = tf.get_variable(
        'char_embeddings',
        shape=[self.vocab.char_size(), self.vocab.char_embed_dim],
        initializer=tf.constant_initializer(self.vocab.char_embeddings),
        trainable=True
    )
self.ch_len = tf.reshape(tf.reduce_sum(
    tf.cast(tf.cast(self.ch, tf.bool), tf.int32), axis=2), [-1])
self.qh_len = tf.reshape(tf.reduce_sum(
    tf.cast(tf.cast(self.qh, tf.bool), tf.int32), axis=2), [-1])
N, PL, QL, CL, d, dc, nh = self._params()
if self.config.fix_pretrained_vector:
    dc = self.char_mat.get_shape()[-1]
tensorflow.constant_initializer
14,025
import tensorflow as tf
if policy == 'linear':
    hid_size = num_hid_layers = 0
    use_bias = False
elif policy == 'simple-nn':
    hid_size = [16]
    num_hid_layers = 1
    use_bias = True
elif policy == 'nn':
    hid_size = [100, 50, 25]
    num_hid_layers = 3
    use_bias = True
if policy_init == 'xavier':
    policy_initializer = tf.contrib.layers.xavier_initializer()
elif policy_init == 'zeros':
    policy_initializer = U.normc_initializer(0.0)
elif policy_init == 'small-weights':
    policy_initializer = U.normc_initializer(0.1)
else:
    raise Exception('Unrecognized policy initializer.')
if policy == 'linear' or policy == 'nn' or policy == 'simple-nn':
    def make_policy(name, ob_space, ac_space):
        return MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                         hid_size=hid_size, num_hid_layers=num_hid_layers,
                         gaussian_fixed_var=True, use_bias=use_bias, use_critic=False,
                         hidden_W_init=policy_initializer, output_W_init=policy_initializer)
elif policy == 'cnn':
tensorflow.contrib.layers.xavier_initializer
14,026
import tensorflow as tf
# Compute sparse softmax cross entropy loss from logits & labels
log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=classes)
loss = tf.reduce_mean(log_probs)
self._mark_for_monitoring('loss', loss)
# Add regularization loss
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_loss = reg_decay * tf.add_n(reg_losses)
self._mark_for_monitoring('reg_loss', reg_loss)
# Add loss from auxiliary logits
aux_loss = tf.constant(0, dtype=tf.float32)
for aux_logits in aux_logits_list:
    log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=aux_logits, labels=classes)
tensorflow.add_n
14,027
import tensorflow as tf
w = tf.get_variable("proj_w", [24, classes])
w_t = tf.transpose(w)
tensorflow.transpose
14,028
import tensorflow as tf
with tf.variable_scope(name):
    # change the channel to the caffe format
    # 18 channels [, 18, none, none]: scores; the first 9 are foreground scores, the last 9 are background scores
    # second pass: [1, 2, none, none]
    to_caffe = tf.transpose(bottom, [0, 3, 1, 2])
    # then force it to have channel 2
    # [1, 2, none, none]: separate the foreground and background scores of the 9 anchors
    # second pass: [1, 18, none, none]
    reshaped = tf.reshape(to_caffe, tf.concat(axis=0, values=[[self._batch_size], [num_dim, -1], [input_shape[2]]]))
    # then swap the channel back
    # [1, none, none, 2]; the first none should be (rows * 9)
    # second pass: [1, none, none, 18]
    to_tf = tf.transpose(reshaped, [0, 2, 3, 1])
    return to_tf

def _softmax_layer(self, bottom, name):
    if name == 'rpn_cls_prob_reshape':
        input_shape = tf.shape(bottom)
        # Use of -1 in tf.reshape(): -1 means the size is unknown and is inferred by Python from the original array and the other dimensions
        # Each row holds the foreground/background scores of one anchor: first the first anchor type at every location, then the second, ...
        bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]])
        reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)
        return tf.reshape(reshaped_score, input_shape)  # [1, none, none, 2]
tensorflow.transpose
14,029
import tensorflow as tf
protos for object detection.
"""

import tensorflow as tf

class TfExampleDecoder(object):
  """Tensorflow Example proto decoder."""

  def __init__(self, include_mask=False):
    self._include_mask = include_mask
    self._keys_to_features = {
        'image/encoded': tf.io.FixedLenFeature((), tf.string),
        'image/source_id': tf.io.FixedLenFeature((), tf.string),
        'image/height': tf.io.FixedLenFeature((), tf.int64),
        'image/width': tf.io.FixedLenFeature((), tf.int64),
        'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
        'image/object/class/label':
tensorflow.io.FixedLenFeature
14,030
import tensorflow as tf
                                                      inputs_list[i][2], inputs_list[i][3]],
                                                 Tout=[tf.float32, tf.float32])
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5])
# Unnecessary, if you have already sorted when making tfrecord and no data augmentation.
gtboxes_and_label_q = tf.py_func(func=re_order,
                                 inp=[tf.reshape(gtboxes_and_label_q, [-1, 9]), True],
                                 Tout=[tf.float32])
gtboxes_and_label_q = tf.reshape(gtboxes_and_label_q, [cfgs.BATCH_SIZE, -1, 9])
img = inputs_list[i][0]
img_shape = inputs_list[i][-2:]
h_crop = tf.reduce_max(img_shape[0])
w_crop = tf.reduce_max(img_shape[1])
img = tf.image.crop_to_bounding_box(image=img,
                                    offset_height=0,
                                    offset_width=0,
tensorflow.reshape
14,031
import tensorflow as tf
test_dir = self._TestDir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
    # Creates a graph.
    v0 = tf.Variable(10.0, name="v0")
    var = tf.Variable(tf.constant(0, dtype=tf.int64))
    count_up_to = var.count_up_to(3)
    input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue")
    qr = tf.train.QueueRunner(input_queue, [count_up_to])
    tf.initialize_all_variables()
    # Creates a saver.
    save = tf.train.Saver({"v0": v0})
    # Adds a set of collections.
    tf.add_to_collection("int_collection", 3)
    tf.add_to_collection("float_collection", 3.5)
    tf.add_to_collection("string_collection", "hello")
    tf.add_to_collection("variable_collection", v0)
    # Add QueueRunners.
    tf.train.add_queue_runner(qr)
    # Adds user_defined proto in three formats: string, bytes and Any.
    queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
    tf.add_to_collection("user_defined_string_collection", str(queue_runner))
    tf.add_to_collection("user_defined_bytes_collection", queue_runner.SerializeToString())
    any_buf = Any()
    any_buf.Pack(queue_runner)
    tf.add_to_collection("user_defined_any_collection", any_buf)
    # Generates MetaGraphDef.
    meta_graph_def = save.export_meta_graph(filename)
tensorflow.add_to_collection
14,032
import tensorflow as tf
output_bias = tf.get_variable(
    "output_bias", [], initializer=tf.zeros_initializer())

with tf.variable_scope("loss"):
    if is_training:
        # I.e., 0.1 dropout
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
    logits = tf.reduce_sum(tf.multiply(output_layer, output_weights), -1)
    logits = tf.add(logits, output_bias)
    probabilities = tf.sigmoid(logits)
    # labels = tf.constant(labels, dtype=tf.int32)
    per_example_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits, reduction=Reduction.NONE)
    per_example_loss = tf.reduce_sum(per_example_loss, axis=-1)
    loss = tf.reduce_mean(per_example_loss, name='train_loss')
    return (loss, per_example_loss, logits, probabilities)

def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
tensorflow.losses.sigmoid_cross_entropy
14,033
import tensorflow as tf
                   [slim.model_variable, slim.variable],
                   device='/device:CPU:0'):
    with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,
                         slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                        weights_regularizer=weights_regularizer,
                        biases_regularizer=biases_regularizer,
                        biases_initializer=tf.constant_initializer(0.0)):
        gtboxes_and_label_h, gtboxes_and_label_q = tf.py_func(self.get_gtboxes_and_label,
                                                              inp=[inputs_list[i][1],
                                                                   inputs_list[i][2],
                                                                   inputs_list[i][3]],
                                                              Tout=[tf.float32, tf.float32])
        gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5])
        # Unnecessary, if you have already sorted when making tfrecord and no data augmentation.
        gtboxes_and_label_q = tf.py_func(func=re_order,
tensorflow.py_func
14,034
import tensorflow as tf """Takes in an array of states and calculates predictions. Get the cross-entropy for each example in the vector self._xent. Args: in_size: size of the hidden state vectors mats: list of hidden state vectors """ pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size]) pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size]) # Make a prediction on every word. def GetWordPred(o_): logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias) return tf.nn.softmax(logits) #self.preds_by_word1 = tf.pack([GetWordPred(o_) for o_ in mats]) #self.preds_by_word = tf.reshape(self.preds_by_word1, self.y.get_shape()) #self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_word)
tensorflow.get_variable
14,035
import tensorflow as tf
      dual_variable: The underlying variable itself.
    """
    # We disable partitioning while constructing dual variables because they will
    # be updated with assign, which is not available for partitioned variables.
    partitioner = tf.get_variable_scope().partitioner
    try:
        tf.get_variable_scope().set_partitioner(None)
        dual_variable = tf.contrib.framework.model_variable(
            name=name,
            shape=shape,
            dtype=dtype,
            initializer=initializer,
            collections=collections,
            trainable=trainable)
tensorflow.contrib.framework.model_variable
14,036
import tensorflow as tf
valid_image_batch, valid_label_batch = get_valid_batch(valid_image, valid_label, validnum)
valid_inf = work.valid_inference(valid_image_batch)
valid_labels = tf.one_hot(valid_label_batch, classnum)
#train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
valid_pre = tf.reshape(valid_inf, [validnum, classnum])
tensorflow.one_hot
14,037
from tensorflow.contrib.eager.python.examples.spinn import data
        f.write("\n")
    return fake_train_file

def testInferSpinnWorks(self):
    """Test inference with the spinn model."""
    snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
    self._create_test_data(snli_1_0_dir)
    vocab = data.load_vocabulary(self._temp_data_dir)
    word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
    config = _test_spinn_config(
        data.WORD_VECTOR_LEN, 4,
        logdir=os.path.join(self._temp_data_dir, "logdir"),
        inference_sentences=("( foo ( bar . ) )", "( bar ( foo . ) )"))
    logits = spinn.train_or_infer_spinn(
        embed, word2index, None, None, None, config)
    self.assertEqual(tf.float32, logits.dtype)
    self.assertEqual((3,), logits.shape)
tensorflow.contrib.eager.python.examples.spinn.data.load_word_vectors
14,038
from tensorflow.python.client import timeline
pred_boxes = pred_boxes[inv_index, :]
if cfg.TEST.DEBUG_TIMELINE:
    trace = timeline.Timeline(step_stats=run_metadata.step_stats)
    trace_file = open(str(int(time.time() * 1000)) + '-test-timeline.ctf.json', 'w')
    trace_file.write(trace.generate_chrome_trace_format(show_memory=False))
tensorflow.python.client.timeline.Timeline
14,039
import tensorflow as tf
    self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])

def _check_grad_angle_combined(self, grads, grads_true):
    """Verify that the reconstructed gradients has correct direction.

    Due to numerical imprecision, the magnitude may be slightly different.
    Yet according to the paper, the angle should be roughly the same.

    Args:
      grads: list of gradients from reconstruction
      grads_true: list of true gradients
    """

    def _combine(gs):
        return [tf.reshape(g, [-1]) for g in gs]

    g1_all = tf.concat(_combine(grads), axis=0)
    g2_all = tf.concat(_combine(grads_true), axis=0)
    self.assertEqual(len(g1_all.shape), 1)
    self.assertEqual(len(g2_all.shape), 1)
    degree = blocks_test.compute_degree(g1_all, g2_all)
    self.assertLessEqual(degree, 1e0)

def test_compute_gradients(self):
    """Test `compute_gradients` function."""
    _, saved_hidden = self.model(self.x)  # Initialize model
tensorflow.reshape
14,040
import tensorflow as tf
def serving_input_fn():
    label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
    input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids')
    input_mask = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_mask')
    segment_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')
    input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
        'label_ids': label_ids,
tensorflow.placeholder
14,041
import tensorflow as tf
    dtype=tf.string,
)
images = tf.map_fn(
    _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
tensorflow.map_fn
14,042
import tensorflow as tf
with tf.variable_scope('project_features'):
    w = tf.get_variable('w', [self.D, self.D], initializer=self.weight_initializer)
tensorflow.get_variable
14,043
import tensorflow as tf
    output = tf.matmul(scores, facts)  # [B, 1, H]
    # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
    scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
    output = facts * tf.expand_dims(scores, -1)
    output = tf.reshape(output, tf.shape(facts))
return output

def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
    if isinstance(facts, tuple):
tensorflow.shape
14,044
import tensorflow as tf
    mult_result = tf.matmul(vector_batch_as_matricies, matrix)
    squeezed_result = tf.squeeze(mult_result, [1])
    return squeezed_result

def euclidean_loss_layer(a, b, multiplier=100.0, use_l1=False, eps=0.01):
    """ Math: out = (action - mlp_out)'*precision*(action-mlp_out) = (u-uhat)'*A*(u-uhat)"""
    multiplier = tf.constant(multiplier, dtype='float')  #for bc  #10000
    uP = a*multiplier - b*multiplier
    if use_l1:
        return tf.reduce_mean(eps*tf.square(uP) + tf.abs(uP))
    return tf.reduce_mean(tf.square(uP))

def conv2d(img, w, b, strides=[1, 1, 1, 1], is_dilated=False):
    if is_dilated:
        layer = tf.nn.atrous_conv2d(img, w, rate=2, padding='SAME') + b
    else:
        layer = tf.nn.conv2d(img, w, strides=strides, padding='SAME') + b
    return layer
tensorflow.abs
14,045
import tensorflow.contrib as contrib
if cross_stitch_enabled:
    with tf.variable_scope("cross_stitch_2"):
        stitch2_1, stitch2_2 = apply_cross_stitch(fc2_1, fc2_2)
else:
    stitch2_1, stitch2_2 = fc2_1, fc2_2
dropout2_1 = contrib.layers.dropout(stitch2_1, keep_prob=keep_prob, is_training=is_training,
                                    scope="dropout2_1")
dropout2_2 = contrib.layers.dropout(stitch2_2, keep_prob=keep_prob, is_training=is_training,
                                    scope="dropout2_2")
fc3_1 = contrib.layers.fully_connected(dropout2_1, 32, scope="fc3_1")
fc3_2 = contrib.layers.fully_connected(dropout2_2, 32, scope="fc3_2")
if cross_stitch_enabled:
    with tf.variable_scope("cross_stitch_3"):
        stitch3_1, stitch3_2 = apply_cross_stitch(fc3_1, fc3_2)
else:
    stitch3_1, stitch3_2 = fc3_1, fc3_2
dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training,
                                    scope="dropout3_1")
tensorflow.contrib.layers.fully_connected
14,046
import tensorflow as tf
tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
tensorflow.flags.DEFINE_string
14,047
import tensorflow as tf
    with self.test_session(use_gpu=use_gpu):
        p = tf.Variable(x)
        assign = tf.assign(p, y)
        p.initializer.run()
        new_value = assign.eval()
        return p.eval(), new_value

def _initAssignAddFetch(self, x, y, use_gpu=False):
    """Initialize a param to init, and compute param += y."""
    with self.test_session(use_gpu=use_gpu):
        p = tf.Variable(x)
        add = tf.assign_add(p, y)
        p.initializer.run()
        new_value = add.eval()
        return p.eval(), new_value

def _initAssignSubFetch(self, x, y, use_gpu=False):
    """Initialize a param to init, and compute param -= y."""
    with self.test_session(use_gpu=use_gpu):
        p = tf.Variable(x)
tensorflow.Variable
14,048
import tensorflow as tf
output = model.build_server_graph(FLAGS, input_image)
output = (output + 1.) * 127.5
output = tf.reverse(output, [-1])
output = tf.saturate_cast(output, tf.uint8)
# load pretrained model
vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
assign_ops = []
for var in vars_list:
    vname = var.name
    from_name = vname
    var_value = tf.contrib.framework.load_variable(checkpoint_dir, from_name)
    assign_ops.append(tf.assign(var, var_value))
sess.run(assign_ops)
result = sess.run(output)
tf.reset_default_graph()
return result[0][:, :, ::-1]
tensorflow.reset_default_graph
14,049
import tensorflow as tf
import tensorflow as tf
import numpy as np

class Model(object):
    def __init__(self, vars):
        self.saver = tf.train.Saver(vars)

    def session(self, sess):
        if sess is not None:
            self.sess = sess
        else:
            config_proto = tf.ConfigProto()
            config_proto.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config_proto)

    def initialize(self):
        self.sess.run(tf.global_variables_initializer())

    def save(self, path):
        self.saver.save(self.sess, path)

    def restore(self, path):
        self.saver.restore(self.sess, path)

    def close(self):
        self.sess.close()
tensorflow.Session
14,050
import tensorflow as tf
    return inputs, feat

# image_size = 32, img_channels = 3, class_num = 10 in cifar10
x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])
label = tf.placeholder(tf.float32, shape=[None,])
one_hot_labels = tf.one_hot(indices=tf.cast(label, tf.int32), depth=class_num)
training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
tensorflow.cast
14,051
import tensorflow as tf
# Compute examination loss
self.relevance_weights = self.get_normalized_weights(self.logits_to_prob(train_output))
self.exam_loss = self.loss_func(self.propensity, reshaped_train_labels, self.relevance_weights)
rw_list = tf.unstack(self.relevance_weights, axis=1)  # Compute propensity weights
for i in range(len(rw_list)):
    tf.summary.scalar('Relevance weights %d' % i, tf.reduce_mean(rw_list[i]), collections=['train'])
tf.summary.scalar('Exam Loss', tf.reduce_mean(self.exam_loss), collections=['train'])

# Gradients and SGD update operation for training the model.
self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss

# Select optimizer
self.optimizer_func = tf.train.AdagradOptimizer
tensorflow.reduce_mean
14,052
import tensorflow as tf
      Operation.
    """
    with tf.name_scope('environment/simulate'):
        if action.dtype in (tf.float16, tf.float32, tf.float64):
            action = tf.check_numerics(action, 'action')
        observ_dtype = self._parse_dtype(self._batch_env.observation_space)
        observ, reward, done = tf.py_func(
            lambda a: self._batch_env.step(a)[:3], [action],
tensorflow.check_numerics
14,053
import tensorflow as tf
def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = []
        for g, _ in grad_and_vars:
            expanded_g = tf.expand_dims(g, 0)
            grads.append(expanded_g)
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)
        v = grad_and_vars[0][1]
tensorflow.expand_dims
14,054
import tensorflow as tf
import tensorflow as tf
import numpy as np
import math

# weights initializers
he_normal = tf.contrib.keras.initializers.he_normal()
#he_normal = tf.contrib.layers.variance_scaling_initializer()
regularizer = tf.contrib.layers.l2_regularizer(1e-4)

def Convolutional_Block(inputs, shortcut, num_filters, name, is_training):
tensorflow.contrib.keras.initializers.he_normal
14,055
import tensorflow as tf
# Set default vals
decoded = tf.where(tf.equal(decoded, 0), def_val, decoded)
tensorflow.equal
14,056
import tensorflow as tf
D_int = tf.cast(D, tf.int32)
N_int = tf.cast(N, tf.int32)
if y is None:
    y = silverman_rule_of_thumb(N)

YDistr = tf.contrib.distributions.MultivariateNormalDiag(loc=tf.zeros(D_int, tf.float32),
                                                         scale_diag=tf.ones(D_int, tf.float32))
Y = YDistr.sample(N_int)
T = 1.0/(2.0*N*tf.sqrt(m.pi*y))

A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)
A = tf.reduce_sum(phi_sampling(A0/(4*y), D))

B0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(Y, 0), tf.expand_dims(Y, 1)), axis=2)
B = tf.reduce_sum(phi_sampling(B0/(4*y), D))

C0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(Y, 1)), axis=2)
C = tf.reduce_sum(phi_sampling(C0/(4*y), D))

return T*(A + B - 2*C)
tensorflow.expand_dims
14,057
import tensorflow as tf
    name: (Optional) A name for this operation.
    output_dtype: (Optional) If not None, casts the output tensor to this type.

  Returns:
    A `Tensor` containing the mean. If `x` is floating point, the mean will have
    the same type as `x`. If `x` is integral, the output is cast to float32.
    NaNs and infinite input values are ignored.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'mean'):
    return _mean_and_var(x, reduce_instance_dims, output_dtype)[0]

@common.log_api_use(common.ANALYZER_COLLECTION)
def var(x: common_types.TensorType,
        reduce_instance_dims: bool = True,
        name: Optional[str] = None,
        output_dtype: Optional[tf.DType] = None) -> tf.Tensor:
  """Computes the variance of the values of a `Tensor` over the whole dataset.
tensorflow.compat.v1.name_scope
14,058
import tensorflow as tf
def _to_dict(self, dataset: tf.data.Dataset, to_dict=True, **kwargs) -> tf.data.Dataset:
    num_parallel_calls = kwargs.get("num_parallel_calls", utils.AUTOTUNE)
    if not to_dict:
        dataset = dataset.map(
            lambda a, b, c, y: ((a, b, c), y),
            num_parallel_calls=num_parallel_calls,
        )
        return dataset
    dataset = dataset.map(
        lambda a, b, c, y: ({self.input_ids: a, self.token_type_ids: b, self.attention_mask: c}, {self.labels: y}),
        num_parallel_calls=num_parallel_calls,
    ).prefetch(kwargs.get("buffer_size", utils.AUTOTUNE))
    return dataset

def _fixed_padding(self, dataset: tf.data.Dataset, pad_id=0, max_sequence_length=512, **kwargs) -> tf.data.Dataset:
    maxlen = tf.constant(max_sequence_length, dtype=tf.int32)
    pad_id = tf.constant(pad_id, dtype=tf.int32)
    # fmt: off
    padded_shapes = kwargs.get("padded_shapes", ([maxlen, ], [maxlen, ], [maxlen, ], [maxlen, ]))
    padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id))
    # fmt: on
    dataset = utils.batching_and_padding(dataset, padded_shapes, padding_values, **kwargs)
    return dataset

def _batch_padding(self, dataset: tf.data.Dataset, pad_id=0, **kwargs) -> tf.data.Dataset:
    pad_id = tf.constant(pad_id, dtype=tf.int32)
    # fmt: off
    padded_shapes = kwargs.get("padded_shapes", ([None, ], [None, ], [None, ], [None, ]))
    padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id))
    # fmt: on
tensorflow.constant
14,059
import tensorflow as tf
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
    (assignment_map, initialized_variable_names
     ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
    if use_tpu:
        def tpu_scaffold():
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
            return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
    else:
        init_op = tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        scaffold_fn = tf.train.Scaffold(init_op=init_op)
tensorflow.train.init_from_checkpoint
14,060
import tensorflow as tf
def _get_generator_inputs(num_images_per_class, num_classes, noise_dims):
    # Since we want a grid of numbers for the conditional generator, manually
    # construct the desired class labels.
    num_images_generated = num_images_per_class * num_classes
    noise = tf.random_normal([num_images_generated, noise_dims])
    labels = [lbl for lbl in range(num_classes)
              for _ in range(num_images_per_class)]
    one_hot_labels = tf.one_hot(tf.constant(labels), num_classes)
    return noise, one_hot_labels

if __name__ == '__main__':
    app.run(main)
tensorflow.constant
14,061
import tensorflow as tf
)

tf.flags.DEFINE_string(
    "tpu_zone",
tensorflow.flags.DEFINE_string
14,062
import tensorflow as tf
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
tensorflow.placeholder
14,063
import tensorflow as tf
    return (loss, per_example_loss, log_probs)

def gather_indexes(sequence_tensor, positions):
    """Gathers the vectors at the specific positions over a minibatch."""
    sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
    batch_size = sequence_shape[0]
    seq_length = sequence_shape[1]
    width = sequence_shape[2]

    flat_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]
    )
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width])
    output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
    return output_tensor

def input_fn_builder(
    input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4
):
tensorflow.range
14,064
import tensorflow as tf
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import efficientdet_keras

def create_mask(pred_mask):
    pred_mask = tf.argmax(pred_mask, axis=-1)
    pred_mask = pred_mask[..., tf.newaxis]
    return pred_mask[0]

dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)

def normalize(input_image, input_mask):
    input_image = tf.cast(input_image, tf.float32) / 255.0
    input_mask -= 1
    return input_image, input_mask

def load_image_train(datapoint):
    """Load images for training."""
    input_image = tf.image.resize(datapoint['image'], (512, 512))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
    if tf.random.uniform(()) > 0.5:
        input_image = tf.image.flip_left_right(input_image)
        input_mask = tf.image.flip_left_right(input_mask)
tensorflow.cast
14,065
import tensorflow as tf
with open(options_file, 'r') as fin:
    options = json.load(fin)
max_word_length = options['char_cnn']['max_characters_per_token']

vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)
batcher = Batcher(vocab_file, max_word_length)

ids_placeholder = tf.placeholder('int32', shape=(None, None, max_word_length))
model = BidirectionalLanguageModel(options_file, weight_file)
ops = model(ids_placeholder)

config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    sentence_id = 0
    with open(dataset_file, 'r') as fin, h5py.File(outfile, 'w') as fout:
        for line in fin:
            sentence = line.strip().split()
            char_ids = batcher.batch_sentences([sentence])
            embeddings = sess.run(
                ops['lm_embeddings'], feed_dict={ids_placeholder: char_ids}
            )
            ds = fout.create_dataset(
                '{}'.format(sentence_id),
tensorflow.ConfigProto
14,066
import tensorflow as tf
if isinstance(facts, tuple):
    # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
    facts = tf.concat(facts, 2)
if time_major:
    # (T,B,D) => (B,T,D)
    facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
tensorflow.ones_like
14,067
from tensorflow.contrib.losses.python.losses import loss_ops
def __init__(self, label_name, weight_column_name):

    def loss_fn(logits, target):
        check_shape_op = control_flow_ops.Assert(
            math_ops.less_equal(array_ops.rank(target), 2),
            ["target's shape should be either [batch_size, 1] or [batch_size]"])
        with ops.control_dependencies([check_shape_op]):
            target = array_ops.reshape(
                target, shape=[array_ops.shape(target)[0], 1])
        return loss_ops.hinge_loss(logits, target)

    super(_BinarySvmTargetColumn, self).__init__(
        loss_fn=loss_fn,
        n_classes=2,
        label_name=label_name,
        weight_column_name=weight_column_name)

def logits_to_predictions(self, logits, proba=False):
tensorflow.contrib.losses.python.losses.loss_ops.hinge_loss
14,068
import tensorflow as tf
tf.logging.info("  Num examples = %d", len(train_examples))
tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
tf.logging.info("  Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
tensorflow.logging.info
14,069
import tensorflow as tf
def make_metric_fn(metric_fn):
    def wrapped_metric_fn(logits, labels):
        num, den = metric_fn(logits, labels, weights_fn=weights_fn)
        return tf.metrics.mean(num, den)
    return wrapped_metric_fn
tensorflow.metrics.mean
14,070
import tensorflow as tf
    :return: ([TensorFlow Tensor]) the target Q-retrace
    """
    rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), n_envs, n_steps, True)  # list of len steps, shape [n_envs]
    reward_seq = batch_to_seq(rewards, n_envs, n_steps, True)  # list of len steps, shape [n_envs]
tensorflow.minimum
14,071
import tensorflow as tf
    n_row, n_col, n_channel = x.shape
    n_patch = n_row*n_col // (patch_size**2)
    patches = tf.image.extract_patches(tf.expand_dims(x, 0), sizes=window, strides=window, rates=[1, 1, 1, 1], padding='VALID')
    patches = tf.reshape(patches, [n_patch, patch_size, patch_size, n_channel])
    patches = tf.random.shuffle(patches)
    rows = tf.split(patches, n_col//patch_size, axis=0)
    rows = [tf.concat(tf.unstack(x), axis=1) for x in rows]
    x_aug = tf.concat(rows, axis=0)
    x_aug = tf.convert_to_tensor(x_aug)
    return tf.concat([x, x_aug], axis=2)

def gaussian_blur(self, x):
    # create random gaussian blur filter
    mean = 0
    std = tf.random.uniform(shape=[], minval=5, maxval=10, dtype=tf.float32)  # std [5-10]
    size = tf.random.uniform(shape=[], minval=3, maxval=7, dtype=tf.int32)  # size [7-15]
    self.kernel = self.gaussian_kernel(size, mean, std)
    self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
    self.paddings = tf.convert_to_tensor([[size, size], [size, size], [0, 0]])
    x_aug = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x, self.paddings, 'SYMMETRIC'), 0),
                                   self.kernel, self.pointwise_filter, strides=[1, 1, 1, 1], padding='VALID')
tensorflow.concat
14,072
from tensorflow.python.client import session
    [image] = provider.get(['image'])
    [label] = provider.get(['label'])
    image = _resize_image(image, height, width)

    with session.Session('') as sess:
        with queues.QueueRunners(sess):
            image, label = sess.run([image, label])
            self.assertListEqual([height, width, 3], list(image.shape))
tensorflow.python.client.session.Session
14,073
import tensorflow as tf
def build(self, rgb):
    """
    load variable from npy to build the VGG

    :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
    """
    start_time = time.time()
    log('Building VGG19. Started at: %ds' % start_time)

    rgb_scaled = rgb * 255.0

    # Convert RGB to BGR
    red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
    assert red.get_shape().as_list()[1:] == [224, 224, 1]
    assert green.get_shape().as_list()[1:] == [224, 224, 1]
    assert blue.get_shape().as_list()[1:] == [224, 224, 1]
    bgr = tf.concat(axis=3, values=[
        blue - VGG_MEAN[0],
        green - VGG_MEAN[1],
        red - VGG_MEAN[2],
    ])
    assert bgr.get_shape().as_list()[1:] == [224, 224, 3]

    self.conv1_1 = self.conv_layer(bgr, "conv1_1")
tensorflow.split
14,074
from tensorflow.python.client import session
        hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
    cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
    with session.Session():
        values = cross_dense.eval()
tensorflow.python.client.session.Session
14,075
import tensorflow as tf
    self.updates = tf.group(denoise_updates, ranker_updates)

def DenoisingNet(self, list_size, forward_only=False, scope=None):
    with tf.variable_scope(scope or "denoising_model"):
        # If we are in testing, do not compute propensity
        if forward_only:
            return tf.ones_like(self.output)  #, tf.ones_like(self.output)
tensorflow.variable_scope
14,076
import tensorflow as tf
    return images, sparse_labels

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
tensorflow.Variable
14,077
import tensorflow as tf
                                           name="batch_norm_ss")
    mean, variance = tf.nn.normalize_moments(counts,
                                             shifted_sum_x,
                                             shifted_sum_x2,
                                             shift,
                                             name="normalize_moments")
    second_moment = variance + tf.square(mean)
    return mean, variance, second_moment

def build_moving_stats():
    return (
        tf.identity(self._moving_mean),
        tf.identity(self._moving_variance),
        tf.identity(self._moving_second_moment),
    )

mean, variance, second_moment = utils.smart_cond(
    use_batch_stats,
    build_batch_stats,
    build_moving_stats,
)
return mean, variance, second_moment

def _build_update_ops_variance(self, mean, variance, is_training):
    """Builds the moving average update ops when using moving variance.
tensorflow.identity
14,078
import tensorflow as tf
)
masked_lm_predictions = tf.argmax(
    masked_lm_log_probs, axis=-1, output_type=tf.int32
)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
tensorflow.reshape
14,079
import tensorflow as tf
num = (1 - self.alpha) * dxt + tf.tensordot(self.alpha * dxt,
                                            tf.transpose(tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec)),
                                            axes=1) * \
      tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt))
denom = dxt

# sum over hidden units
num = tf.reduce_sum(tf.square(num), axis=2)
denom = tf.reduce_sum(tf.square(denom), axis=2)

bounded = tf.where(tf.greater(denom, 1e-20), tf.div(num, 1.0 * denom), tf.ones_like(num))
nelems = tf.reduce_mean(tf.where(tf.greater(denom, 1e-20), 1.0 * tf.ones_like(num), 1.0 * tf.zeros_like(num)), axis=1)

# sum mean over each batch by time steps
Omega = tf.square(bounded - 1.0)
Omega = tf.reduce_sum(tf.reduce_mean(Omega, axis=1)) / (1.0 * tf.reduce_sum(nelems))

out = tf.gradients(Omega, self.W_rec)
out[0] = tf.Print(out[0], [out[0], self.W_rec, Omega], "omega grads")
out[0] = tf.verify_tensor_all_finite(out[0], "dead omega grad")
tensorflow.greater
14,080
from tensorflow.python.platform import tf_logging as logging
def every_n_step_begin(self, step):
    super(NanLoss, self).every_n_step_begin(step)
    return [self._loss_tensor]

def every_n_step_end(self, step, outputs):
    super(NanLoss, self).every_n_step_end(step, outputs)
    if np.isnan(_extract_output(outputs, self._loss_tensor)):
        failure_message = "Model diverged with loss = NaN."
        if self._fail_on_nan_loss:
            logging.error(failure_message)
            raise NanLossDuringTrainingError
        else:
            logging.warning(failure_message)
            # We don't raise an error but we return "should stop" so we stop, but
            # without an exception.
            return True

class RunHookAdapterForMonitors(session_run_hook.SessionRunHook):
tensorflow.python.platform.tf_logging.error
14,081
import tensorflow as tf
      A tensor of shape [T, B] that contains the loss per example, per time step.
    """
    with tf.name_scope("cross_entropy_sequence_loss"):
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets)
        loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
        losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])
    return losses

def dice_loss(predictions, targets, weights=1., name='dice_loss'):
    with tf.name_scope(name):
        # predictions = tf.to_float(predictions)
        targets = tf.to_float(targets)
        intersection = 2 * tf.reduce_sum(predictions * targets) + weights
        union = weights + tf.reduce_sum(predictions) + tf.reduce_sum(targets)
        loss = -(intersection / (union))
    return loss

def precision_recall_auc_loss(labels,
                              logits,
                              precision_range=(0.0, 1.0),
                              num_anchors=20,
                              weights=1.0,
                              dual_rate_factor=0.1,
                              label_priors=None,
                              surrogate_type='xent',
                              lambdas_initializer=tf.constant_initializer(1.0),
                              reuse=None,
tensorflow.reduce_sum
14,082
import tensorflow as tf
for i in range(len(pw_list)):
    tf.summary.scalar('Inverse Propensity weights %d' % i, tf.reduce_mean(pw_list[i]), collections=['train'])
tensorflow.reduce_mean
14,083
import tensorflow as tf
    tf.assign(target, (1 - self.tau) * target + self.tau * source)
    for target, source in zip(target_params, source_params)
]
# Initializing target to match source variables
target_init_op = [
    tf.assign(target, source)
    for target, source in zip(target_params, source_params)
]
# Control flow is used because sess.run otherwise evaluates in nondeterministic order
tensorflow.assign
14,084
import tensorflow as tf
sess.run(tf.global_variables_initializer())
print(f'\nl1={sess.run(l1)} l2={sess.run(l2)}')

a = np.array([1, 2, 3], dtype=np.float32)
tf_v = tf.Variable(5, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
print(f'a * tf_v = {sess.run(a * tf_v)}')

weights = tf.constant([[1.0, -2], [-3, 4]]);
regular_l1 = tf.contrib.layers.l1_regularizer(0.5)(weights)
regular_l2 = tf.contrib.layers.l2_regularizer(0.5)(weights)
print(f'\nregular_l1={sess.run(regular_l1)} regular_l2={sess.run(regular_l2)}')
tensorflow.global_variables_initializer
14,085
import tensorflow as tf
with tf.variable_scope(name):
    if reuse:
        tf.get_variable_scope().reuse_variables()
    else:
        assert tf.get_variable_scope().reuse is False
    epsilon = 1e-5
    mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
    scale = tf.get_variable('scale', [x.get_shape()[-1]],
                            initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
    offset = tf.get_variable('offset', [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
    out = scale*tf.div(x-mean, tf.sqrt(var+epsilon)) + offset
    return out
tensorflow.nn.moments
14,086
import tensorflow as tf
def image_preprocess(self, image):
    with tf.name_scope('image_preprocess'):
        if image.dtype.base_dtype != tf.float32:
            image = tf.cast(image, tf.float32)
        mean = [0.485, 0.456, 0.406]  # rgb
        std = [0.229, 0.224, 0.225]
        if self.image_bgr:
tensorflow.cast
14,087
from tensorflow.python.ops import logging_ops
    Returns:
      Numpy array of predicted probabilities.
    """
    return self._infer_model(x=x, input_fn=input_fn, batch_size=batch_size)

def _get_train_ops(self, features, targets):
    """See base class."""
    global_step = variables.get_global_step()
    assert global_step
    loss = self._loss(
        self._logits(features), targets, self._get_weight_tensor(features))
    logging_ops.scalar_summary("loss", loss)

    linear_vars = self._get_linear_vars()
    dnn_vars = self._get_dnn_vars()
    grads = gradients.gradients(loss, dnn_vars + linear_vars)
    dnn_grads = grads[0:len(dnn_vars)]
    linear_grads = grads[len(dnn_vars):]

    train_ops = self._get_linear_training_ops(
        linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads, dnn_vars)
tensorflow.python.ops.logging_ops.scalar_summary
14,088
import tensorflow as tf
scale = tf.constant([[2., 4., 5.]] * batch_size)
scale_v = [2., 4., 5.]
concentration = tf.constant([[1.]] * batch_size)
concentration_v = 1.
tensorflow.constant
14,089
import tensorflow as tf
    return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data

def reconstruction_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous,
                                   encoder_input_length=None, training=True, reconstruction_weight=1.0,
                                   reconstruction_attn_weight=0.05, **kwargs):
    encoders = encoders[:1]

    if encoder_input_length is None:
        weights = get_weights(encoder_inputs[0], utils.EOS_ID, include_first_eos=True)
        encoder_input_length = [tf.to_int32(tf.reduce_sum(weights, axis=1))]

    attention_states, encoder_state, encoder_input_length = multi_encoder(
        encoder_input_length=encoder_input_length, encoders=encoders, encoder_inputs=encoder_inputs,
        training=training)

    outputs, attention_weights, states, _, samples, beam_fun, initial_data = attention_decoder(
        attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous,
        decoder_inputs=targets[0][:, :-1], encoder_input_length=encoder_input_length,
        decoder=decoders[0], training=training, encoders=encoders
    )
tensorflow.reduce_sum
14,090
import tensorflow as tf
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv

class batch_norm(object):
    def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name

    def __call__(self, x):
        return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None,
                                            epsilon=self.epsilon, scale=True, is_training=tftrain,
                                            scope=self.name)

def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    shape = input_.get_shape().as_list()

    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
tensorflow.contrib.layers.batch_norm
14,091
import tensorflow as tf
    return y

def check_shape(ts, shapes):
    i = 0
    for (t, shape) in zip(ts, shapes):
        assert t.get_shape().as_list() == shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape)
        i += 1

def avg_norm(t):
    return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))

def gradient_add(g1, g2, param):
    print([g1, g2, param.name])
    assert (not (g1 is None and g2 is None)), param.name
    if g1 is None:
        return g2
    elif g2 is None:
        return g1
tensorflow.square
14,092
import tensorflow as tf
def _deserialize(self, serialized_data, batch_size):
    """Convert serialized TFRecords into tensors.

    Args:
      serialized_data: A tensor containing serialized records.
      batch_size: The data arrives pre-batched, so batch size is needed to deserialize the data.
    """
    feature_map = _TRAIN_FEATURE_MAP if self._is_training else _EVAL_FEATURE_MAP
    features = tf.parse_single_example(serialized_data, feature_map)

    users = tf.reshape(tf.decode_raw(
        features[movielens.USER_COLUMN], rconst.USER_DTYPE), (batch_size,))
    items = tf.reshape(tf.decode_raw(
        features[movielens.ITEM_COLUMN], rconst.ITEM_DTYPE), (batch_size,))

    def decode_binary(data_bytes):
        # tf.decode_raw does not support bool as a decode type. As a result it is
        # necessary to decode to int8 (7 of the bits will be ignored) and then
        # cast to bool.
        return tf.reshape(tf.cast(tf.decode_raw(data_bytes, tf.int8), tf.bool), (batch_size,))

    if self._is_training:
        mask_start_index = tf.decode_raw(
            features[rconst.MASK_START_INDEX], tf.int32)[0]
        valid_point_mask = tf.less(tf.range(batch_size), mask_start_index)
tensorflow.decode_raw
14,093
import tensorflow as tf
p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
            lambda: tf.print('csrt acc ', [pct]),
tensorflow.print
14,094
import tensorflow as tf
tf.app.flags.DEFINE_string(
    'dataset_name', '{}_????', 'The pattern of the dataset name to load.')
tf.app.flags.DEFINE_string(
    'model_dir', './logs_sext_cpn/',
    'The parent directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
    'log_every_n_steps', 10,
    'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
    'save_summary_steps', 100,
    'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
    'save_checkpoints_secs', 3600,
    'The frequency with which the model is saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
    'train_image_size', 384,
    'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
    'heatmap_size', 96,
    'The size of the output heatmap of the model.')
tf.app.flags.DEFINE_string(
tensorflow.app.flags.DEFINE_integer
14,095
import tensorflow as tf
    h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)

    w_c = tf.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer)
    b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer)
    c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c)
    return c, h

def _word_embedding(self, inputs, reuse=False):
    with tf.variable_scope('word_embedding', reuse=reuse):
        w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
        x = tf.nn.embedding_lookup(w, inputs, name='word_vector')  # (N, T, M) or (N, M)
        return x

def _project_features(self, features):
    with tf.variable_scope('project_features'):
        w = tf.get_variable('w', [self.D, self.D], initializer=self.weight_initializer)
        features_flat = tf.reshape(features, [-1, self.D])
        features_proj = tf.matmul(features_flat, w)
        features_proj = tf.reshape(features_proj, [-1, self.L, self.D])
        return features_proj
tensorflow.nn.embedding_lookup
14,096
import tensorflow as tf
ch_emb = tf.reshape(ch_emb, [N, PL, ch_emb.shape[-1]])
qh_emb = tf.reshape(qh_emb, [N, QL, ch_emb.shape[-1]])
c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout)
q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout)
tensorflow.nn.embedding_lookup
14,097
import tensorflow as tf
else:
    start_logits = tf.squeeze(
        conv(tf.concat([self.enc[1], self.enc[2]], axis=-1), 1, bias=False, name="start_pointer"), -1)
    end_logits = tf.squeeze(
        conv(tf.concat([self.enc[1], self.enc[3]], axis=-1), 1, bias=False, name="end_pointer"), -1)
    self.logits = [mask_logits(start_logits, mask=tf.reshape(self.c_mask, [N, -1])),
                   mask_logits(end_logits, mask=tf.reshape(self.c_mask, [N, -1]))]

    self.logits1, self.logits2 = [l for l in self.logits]

    outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.logits1), axis=2),
                      tf.expand_dims(tf.nn.softmax(self.logits2), axis=1))
    outer = tf.matrix_band_part(outer, 0, self.max_a_len)
    self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
    self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)

def _compute_loss(self):

    def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2):
        logits = tf.nn.sigmoid(logits)
        zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
tensorflow.nn.softmax
14,098
import tensorflow as tf
input_tensor = gather_indexes(input_tensor, positions)

with tf.variable_scope("cls/predictions"):
    # We apply one more non-linear transformation before the output layer.
    # This matrix is not used after pre-training.
    with tf.variable_scope("transform"):
        input_tensor = tf.layers.dense(
            input_tensor,
            units=bert_config.hidden_size,
            activation=modeling.get_activation(bert_config.hidden_act),
tensorflow.variable_scope
14,099