Dataset columns:
seed: string, lengths 25 to 2.89k
seed_api: string, lengths 14 to 102
index: int64, 0 to 14.8k
import tensorflow as tf

for embedding_name, path_to_meta in zip(embedding_names, paths_to_meta):
    # Initialize config
    embedding = config.embeddings.add()
    # Specify the embedding variable and the metadata
    embedding.tensor_name = embedding_name
    embedding.metadata_path = path_to_meta

# Project the embeddings to space dimensions for visualization
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)

def add_train_stats(model, hparams):
    with tf.variable_scope("stats") as scope:
        for i in range(hparams.tacotron_num_gpus):
            tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
            tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings
10,500
import tensorflow as tf conv(tf.concat([self.enc[1], self.enc[2]], axis=-1), 1, bias=False, name="start_pointer"), -1) end_logits = tf.squeeze( conv(tf.concat([self.enc[1], self.enc[3]], axis=-1), 1, bias=False, name="end_pointer"), -1) self.logits = [mask_logits(start_logits, mask=tf.reshape(self.c_mask, [N, -1])), mask_logits(end_logits, mask=tf.reshape(self.c_mask, [N, -1]))] self.logits1, self.logits2 = [l for l in self.logits] outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.logits1), axis=2), tf.expand_dims(tf.nn.softmax(self.logits2), axis=1)) outer = tf.matrix_band_part(outer, 0, self.max_a_len) self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1) self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1) def _compute_loss(self): def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2): logits = tf.nn.sigmoid(logits) zeros = array_ops.zeros_like(logits, dtype=logits.dtype) pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros) neg_p_sub = array_ops.where(labels > zeros, zeros, logits) cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(logits, 1e-8, 1.0)) \ - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - logits, 1e-8, 1.0))
tensorflow.matrix_band_part
10,501
import tensorflow as tf

end_label = tf.one_hot(self.end_label, tf.shape(self.logits2)[1], axis=1)
if self.config.loss_type == 'cross_entropy':
    start_loss = tf.nn.softmax_cross_entropy_with_logits(
        logits=self.logits1, labels=start_label)
    end_loss = tf.nn.softmax_cross_entropy_with_logits(
        logits=self.logits2, labels=end_label)
    self.loss = tf.reduce_mean(start_loss + end_loss)
else:
    start_loss = focal_loss(tf.nn.softmax(self.logits1, -1), start_label)
tensorflow.nn.softmax_cross_entropy_with_logits
10,502
import tensorflow as tf

'''Because rnn_outputs is 3-D, it has to be reshaped to 2-D here;
after the matrix multiplication it is reshaped back to [batch_size, num_steps, num_classes]'''
logits = tf.reshape(tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), W) + b, \
                    shape=[batch_size, num_steps, num_classes])
predictions = tf.nn.softmax(logits)

y_as_list = tf.unstack(y, num=num_steps, axis=1)

losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
tensorflow.nn.softmax
10,503
from tensorflow.python.ops import math_ops class_id: Class for which we want binary metrics. weights: `Tensor` whose shape is broadcastable to the the first [D1, ... DN] dimensions of `predictions_idx` and `labels`. Returns: A [D1, ... DN] `Tensor` of false positive counts. """ with ops.name_scope(None, 'false_positives', (predictions_idx, labels)): labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id) fp = set_ops.set_size(set_ops.set_difference( predictions_idx, labels, aminusb=True)) fp = math_ops.to_double(fp) if weights is not None: weights = math_ops.to_double(weights) fp = math_ops.mul(fp, weights) return fp def _streaming_sparse_false_positive_at_k(predictions_idx, labels, k=None, class_id=None, weights=None, name=None): """Calculates weighted per step false positives for precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only.
tensorflow.python.ops.math_ops.to_double
10,504
import tensorflow as tf

rand = tf.random.uniform([x_shape[0]])
epoch = tf.where(rand < 0.1, tf.zeros_like(epoch), epoch)
tensorflow.zeros_like
10,505
import tensorflow as tf Returns: A transformed tensor (tf.float32). """ def _repeat(x, n_repeats): with tf.variable_scope('_repeat'): rep = tf.transpose( tf.expand_dims(tf.ones(shape=tf.stack([ n_repeats, ])), 1), [1, 0]) rep = tf.to_int32(rep) x = tf.matmul(tf.reshape(x, (-1, 1)), rep) return tf.reshape(x, [-1]) def _interpolate(im, x, y, z, out_size):
tensorflow.stack
10,506
import tensorflow as tf self.assertEqual(save_path + "-?????-of-00002", val) meta_graph_filename = save._MetaGraphFilename(val) self.assertEqual(save_path + ".meta", meta_graph_filename) # Restore a different "v0" from shard 0 of the saved files. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(111, name="v0") save = tf.train.Saver({"v0": v0}, sharded=True) tf.initialize_all_variables().run() self.assertEqual(111, v0.eval()) save.restore(sess, save_path + "-00000-of-00002")
tensorflow.ConfigProto
10,507
import tensorflow as tf else: X = self.t_conv(name + '_deconf', X, filter, f_size, 1, (not norm) and use_bias, "VALID", stddev) if norm == 'I': X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse) elif norm == 'B': X = tf.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name) elif norm == 'G': X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse) if dropout > 0.0: X = tf.layers.dropout(X, dropout, training=is_train) if slope < 1.0: X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X) return X F = 3 norm = self.args.norm # print('norm', norm) # print('skip cons', self.args.skip_connections) # print('VNET In:', I.get_shape().as_list()) if adaption_net: # print('ada scope T/R', is_train, reuse_ada)
tensorflow.nn.relu
10,508
import tensorflow as tf from datetime import datetime from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report slim = tf.contrib.slim global first first = True classnum=12 testnum = tf.placeholder(tf.int32) trainnum = tf.placeholder(tf.int32) validnum = tf.placeholder(tf.int32) learnrate = tf.placeholder(tf.float32) def getinputs(path): filename_queue=tf.train.string_input_producer([path]) reader=tf.TFRecordReader() _,serialized_example=reader.read(filename_queue) features=tf.parse_single_example(serialized_example, features={ 'label':tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string), })
tensorflow.placeholder
10,509
import tensorflow as tf

    if inverse:
        scales = tf.math.exp(log_sigmas)
        log_x = tf.math.log(x)
        ldj = log_x
tensorflow.math.log
10,510
import tensorflow as tf

logger.info('Load the first task saver!')
saver.load_state_dict(np.load(f'./{inittask}/{taskname}.task0.saver.npy', allow_pickle=True)[()])
logger.info('Update all copies! (lazymodel, normalizers_copy)')
tf.get_default_session().run(sync_model_to_lazymodel)
tf.get_default_session().run(copy_normalizers)
logger.info('Loaded normalizers:')
load_norm = tf.get_default_session().run(normalizers_parameters)
logger.info(load_norm)
TASK_NUM = 1
tensorflow.get_default_session
10,511
import tensorflow as tf

def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)
tensorflow.logging.set_verbosity
10,512
import tensorflow as tf

# Benchmark related
def device_and_data_format():
    return ("/gpu:0", "channels_first") if tf.test.is_gpu_available() else ("/cpu:0", "channels_last")

def random_batch(batch_size, config):
    shape = (batch_size,) + config.input_shape
    images = tf.random_uniform(shape)
    labels = tf.random_uniform(
        [batch_size], minval=0, maxval=config.n_classes, dtype=tf.int32)
    return images, labels

class MockIterator(object):

    def __init__(self, tensors):
        self._tensors = [tf.identity(x) for x in tensors]
tensorflow.random_uniform
10,513
import tensorflow as tf def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( tf.contrib.data.map_and_batch(
tensorflow.data.TFRecordDataset
10,514
import tensorflow as tf

    action = tf.check_numerics(action, 'action')
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    observ, reward, done = tf.py_func(
        lambda a: self._batch_env.step(a)[:3], [action],
        [observ_dtype, tf.float32, tf.bool], name='step')
    observ = tf.check_numerics(observ, 'observ')
    reward = tf.check_numerics(reward, 'reward')
    return tf.group(
        self._observ.assign(observ),
        self._action.assign(action),
tensorflow.check_numerics
10,515
import tensorflow as tf

vdata = np.load(args.data_location)

tf.reset_default_graph()

idim = (36, 64)
tensorflow.reset_default_graph
10,516
import tensorflow as tf

def __init__(self, batch, hidden, keep_prob=1.0, is_train=None, scope="ptr_net"):
    self.gru = tf.contrib.rnn.GRUCell(hidden)
    self.batch = batch
    self.scope = scope
    self.keep_prob = keep_prob
    self.is_train = is_train
    self.dropout_mask = dropout(tf.ones(
        [batch, hidden], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train)

def __call__(self, init, match, d, mask):
    with tf.variable_scope(self.scope):
        d_match = dropout(match, keep_prob=self.keep_prob, is_train=self.is_train)
        inp, logits1 = pointer(d_match, init * self.dropout_mask, d, mask)
        d_inp = dropout(inp, keep_prob=self.keep_prob, is_train=self.is_train)
        _, state = self.gru(d_inp, init)
        tf.get_variable_scope().reuse_variables()
        _, logits2 = pointer(d_match, state * self.dropout_mask, d, mask)
        return logits1, logits2
tensorflow.variable_scope
10,517
import tensorflow as tf image_list: a list of image tensors of the same dimension but possibly varying channel. crop_height: the height of the image following the crop. crop_width: the width of the image following the crop. Returns: the list of cropped images. """ outputs = [] for image in image_list: image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] offset_height = (image_height - crop_height) / 2 offset_width = (image_width - crop_width) / 2 outputs.append(_crop(image, offset_height, offset_width, crop_height, crop_width)) return outputs def _smallest_size_at_least(height, width, smallest_side):
tensorflow.shape
10,518
import tensorflow as tf

        return tf.reduce_mean(loss)

    loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample), dtype=tf.float32,
                     parallel_iterations=32)
    final_loss = tf.reduce_mean(loss)
    return final_loss
tensorflow.reduce_mean
10,519
import tensorflow as tf

coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
tensorflow.train.start_queue_runners
10,520
import tensorflow as tf

self.s = tf.placeholder(tf.float32, [None, self.num_s], name='s1')  # input state for agent1
self.S_ = tf.placeholder(tf.float32, [None, self.num_global_s], name='S_')  # input Next Global State
self.s_ = tf.placeholder(tf.float32, [None, self.num_s], name='s1_')  # input next state for agent1
self.R = tf.placeholder(tf.float32, [None, ], name='R')  # input Reward
self.a = tf.placeholder(tf.float32, [None, self.num_a], name='a')  # input Action onehot for agent1
self.done = tf.placeholder(tf.float32, [None, ], name='done')  # input Done info ???
self.q_m_ = tf.placeholder(tf.float32, [None, ], name='q_value_next_max')
tensorflow.placeholder
10,521
import tensorflow as tf

def dense(inputs, hidden, use_bias=True, scope="dense"):
    with tf.variable_scope(scope):
        shape = tf.shape(inputs)
        dim = inputs.get_shape().as_list()[-1]
        out_shape = [shape[idx] for idx in range(
            len(inputs.get_shape().as_list()) - 1)] + [hidden]
        flat_inputs = tf.reshape(inputs, [-1, dim])
        W = tf.get_variable("W", [dim, hidden])
        res = tf.matmul(flat_inputs, W)
        if use_bias:
            b = tf.get_variable(
                "b", [hidden], initializer=tf.constant_initializer(0.))
            res = tf.nn.bias_add(res, b)
tensorflow.reshape
10,522
import tensorflow as tf Computes new shape with the smallest side equal to `smallest_side` while preserving the original aspect ratio. Args: height: an int32 scalar tensor indicating the current height. width: an int32 scalar tensor indicating the current width. smallest_side: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: new_height: an int32 scalar tensor indicating the new height. new_width: and int32 scalar tensor indicating the new width. """ smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) height = tf.to_float(height) width = tf.to_float(width) smallest_side = tf.to_float(smallest_side) scale = tf.cond(tf.greater(height, width), lambda: smallest_side / width, lambda: smallest_side / height) new_height = tf.to_int32(height * scale) new_width = tf.to_int32(width * scale) return new_height, new_width
tensorflow.convert_to_tensor
10,523
import tensorflow as tf
tensorflow.where
10,524
import tensorflow as tf

z_s_flat = tf.reshape(z_s, [-1])
y_s_flat = tf.reshape(y_s, [-1])
tensorflow.reshape
10,525
import tensorflow as tf

one = tf.Variable(1.0)
twos = tf.Variable([2.0, 2.0, 2.0])
init = tf.initialize_all_variables()
save = tf.train.Saver(tf.all_variables())
tensorflow.initialize_all_variables
10,526
import tensorflow as tf return True return False def _restore_vars(self, sess): """ :param sess: :return: boolean for successful or not """ if not self._restore_optimistic: if self.restore_ckpt_file is None: logger.warn( Color.yellow('No checkpoint file for restore vars, checkpoint file is None', bold=True)) return False self._restore_saver = tf.train.Saver(self._var_list, name='tk_restore') self._restore_saver.restore(sess, self.restore_ckpt_file) return True else: return self._optimistic_restore_model(sess) def _optimistic_restore_model(self, sess): """ restore weights of same names with model. :param sess: :return: """ if self.restore_ckpt_file is None:
tensorflow.train.Saver
10,527
import tensorflow as tf # Exports to meta_graph meta_graph_def = slice_saver.export_meta_graph(filename) with tf.Graph().as_default(): # Restores from MetaGraphDef. new_saver = tf.train.import_meta_graph(filename) # Generates a new MetaGraphDef. new_meta_graph_def = new_saver.export_meta_graph() # It should be the same as the original. self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
tensorflow.train.import_meta_graph
10,528
import tensorflow as tf with tf.variable_scope(scope): deconv_weight = tf.Variable( tf.random_normal([filter_h, filter_w, num_channels_out, num_channels_in], stddev=0.1, dtype=tf.float32)) deconv_bias = tf.Variable(tf.zeros([num_channels_out], dtype=tf.float32)) map = tf.nn.conv2d_transpose(input_data, deconv_weight, output_dims, strides=[1, stride_h, stride_w, 1], padding=padding) map = tf.nn.bias_add(map, deconv_bias) activation = non_linear_fn(map) # print(scope, 'out', activation.get_shape().as_list()) return activation def self_attention(x, channels, act_func=tf.nn.relu, scope='attention'):
tensorflow.nn.bias_add
10,529
import tensorflow as tf

        return None

    def build_generator(self, image, reuse=False, name='generator'):
        with tf.variable_scope(name):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse is False
tensorflow.variable_scope
10,530
import tensorflow as tf

                                   initializer=tf.constant_initializer(0),
                                   trainable=False)

    self.dropout = tf.placeholder_with_default(0.0, (), name="dropout")
tensorflow.placeholder_with_default
10,531
import tensorflow as tf

    v = self.get_slot(var, "v")
    v_t = v.assign(beta2_t * v + (1. - beta2_t) * tf.square(grad))

    m = self.get_slot(var, "m")
    m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad)

    v_t_hat = tf.div(v_t, 1. - beta2_t)
    m_t_hat = tf.div(m_t, 1. - beta1_t)

    g_t = tf.div(m_t_hat, tf.sqrt(v_t_hat) + eps)
    g_t_1 = self.get_slot(var, "g")
    g_t = g_t_1.assign(g_t)
tensorflow.div
10,532
import tensorflow as tf vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1)) vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1)) x_t = tf.gather(x, l) x_t_len = tf.strings.length(x_t) x_t = tf.string_split([x_t], delimiter='').values z_t = tf.gather(y, m) z_t_len = tf.strings.length(z_t) z_t = tf.string_split([z_t], delimiter='').values for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'): u = tf.string_join(x_t[i:i + self._p], '') vx_keys, r = tf.cond( tf.greater(vx.lookup(u), -1), true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)), false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0),
tensorflow.strings.length
10,533
import tensorflow as tf params_size_t = self._cell.params_size() self._rnn_params = tf.get_variable( "lstm_params", initializer=tf.random_uniform( [params_size_t], -config.init_scale, config.init_scale), validate_shape=False) c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32) h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32) self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training) outputs = tf.transpose(outputs, [1, 0, 2]) outputs = tf.reshape(outputs, [-1, config.hidden_size]) return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) def _get_lstm_cell(self, config, is_training): if config.rnn_mode == BASIC: return tf.contrib.rnn.BasicLSTMCell( config.hidden_size, forget_bias=0.0, state_is_tuple=True, reuse=not is_training) if config.rnn_mode == BLOCK: return tf.contrib.rnn.LSTMBlockCell( config.hidden_size, forget_bias=0.0) raise ValueError("rnn_mode %s not supported" % config.rnn_mode) def _build_rnn_graph_lstm(self, inputs, config, is_training): """Build the inference graph using canonical LSTM cells."""
tensorflow.contrib.rnn.LSTMStateTuple
10,534
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten # self.glimpses = tf.concat([glimpse1,glimpse2,glimpse3],axis=-1) # Block 1 conv1a = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8], strides=4, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(self.inputs) conv1b = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv1a) conv1c = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv1b) pool1 = MaxPool2D(pool_size=[2,2])(conv1c) # Block 2 conv2a = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool1) conv2b = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv2a) conv2c = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv2b) pool2 = MaxPool2D(pool_size=[2,2])(conv2c) # Block 3 conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool2) conv3b = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3a) conv3c = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3b) pool3 = MaxPool2D(pool_size=[2,2])(conv3c) # final convolutional layer #removed GOAL_SIZE conv4 = Conv2D(padding="valid", filters=RNN_SIZE-loc_layer_size, kernel_size=[2, 2], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=None)(pool3) # FC layers
tensorflow.keras.layers.MaxPool2D
10,535
import tensorflow as tf l = (LinearWrap(img) .Conv2D('conv0', NF, nl=LeakyReLU) .Conv2D('conv1', NF * 2) .Conv2D('conv2', NF * 4) .Conv2D('conv3', NF * 8, stride=1) .Conv2D('conv4', 1, stride=1, nl=tf.identity, use_bias=True)()) return l def _build_graph(self, inputs): A, B = inputs with tf.name_scope('preprocess'): A = tf.transpose(A / 128.0 - 1.0, [0, 3, 1, 2]) B = tf.transpose(B / 128.0 - 1.0, [0, 3, 1, 2]) def viz3(name, a, b, c): with tf.name_scope(name): im = tf.concat([a, b, c], axis=3) im = tf.transpose(im, [0, 2, 3, 1]) im = (im + 1.0) * 128 im = tf.clip_by_value(im, 0, 255)
tensorflow.name_scope
10,536
import tensorflow as tf

        if common_layers.is_on_tpu():
            _remove_summaries()  # summaries not currently working on TPU
            return tf.contrib.tpu.TPUEstimatorSpec(
                tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
        else:
            return tf.estimator.EstimatorSpec(
                tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)

    def estimator_spec_eval(self, features, logits, labels, loss, losses_dict):
        """Construct EstimatorSpec for EVAL mode."""
        hparams = self.hparams
tensorflow.estimator.EstimatorSpec
10,537
import tensorflow as tf

self.y = tf.compat.v1.placeholder(tf.int32, shape=(None, self.config.data["num_categories"]), name="y")  # ex. (50000, 10)
self.train = tf.compat.v1.placeholder(tf.bool)
tensorflow.compat.v1.placeholder
10,538
import tensorflow as tf flags.DEFINE_integer("num_gpus", 2, "Total number of GPUs to use.") flags.DEFINE_bool("multi_worker", True, "Multi-worker training.") # My additional flags tf.app.flags.DEFINE_boolean("use_original_ckpt", True, "use original ckpt") flags.DEFINE_integer("task_index", 0, "task_index") flags.DEFINE_string( "worker", "localhost:3000,localhost:3001", "specify workers in the cluster"
tensorflow.app.flags.DEFINE_boolean
10,539
import tensorflow as tf locs, scales = tf.map_fn(loop_hyper_deocder, zs, dtype=(tf.float32, tf.float32), parallel_iterations=1, back_prop=False) lower_bound = 1e-9# TODO scales = tf.maximum(scales, lower_bound) print("Hyper Decoder") ys = conditional_entropy_model.decompress(y_strings, locs, scales, y_min_v, y_max_v, y_shape) print("Entropy Decoder") def loop_synthesis(element): y = tf.expand_dims(element[0], 0) x_coori = tf.expand_dims(element[1], 0) x_coori= tf.cast(x_coori,tf.float32) x = synthesis_transform(x_coori,y) return tf.squeeze(x, [0]) element=[ys,x_coori] xs = tf.map_fn(loop_synthesis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False) print("Synthesis Transform") return xs ###################################### write & read binary files. ######################################
tensorflow.cast
10,540
import tensorflow as tf

            tf.transpose(self.x, [1, 0, 2]),
            initializer=state,
            parallel_iterations=1)

        rnn_outputs = \
            tf.scan(
                self.output_step_scan,
                rnn_states,
                initializer=tf.zeros([self.N_batch, self.N_out]),
                parallel_iterations=1)

        return tf.transpose(rnn_outputs, [1, 0, 2]), tf.unstack(rnn_states)

    # fix spectral radius of recurrent matrix
    def initial_W(self):
tensorflow.zeros
10,541
import tensorflow as tf

        ha = tf.matmul(varphis, param_eta * tf.matmul(Kt, prec) + Wsa) + wa

        # hss(s): eta * (\varphi(s)^T * K^T * \Sigma^{-1} * K * \varphi(s))
        varphisKt = tf.matmul(varphis, Kt)
        hss = param_eta * tf.reduce_sum(tf.matmul(varphisKt, prec) * varphisKt, axis=1)

        Haa = param_eta * prec + Waa
        # Haa = 0.5 * (Haa + TT.transpose(Haa))
tensorflow.matmul
10,542
import tensorflow as tf

        regressor = XLNetRegressor(hparams=hparams)
        logits = regressor(inputs)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_ = sess.run(logits)
            self.assertEqual(logits_.shape, (batch_size, max_time))
tensorflow.global_variables_initializer
10,543
import tensorflow as tf

# How to set placements on multiple devices.
# Here, assume we have three devices CPU:0, GPU:0, and GPU:1
if tf.test.is_built_with_cuda():
    with tf.device('/cpu:0'):
        a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
        b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])

    with tf.device('/gpu:1'):
        c = tf.matmul(a, b)
        c = tf.reshape(c, [-1])

    with tf.device('/gpu:2'):
        d = tf.matmul(b, a)
        flat_d = tf.reshape(d, [-1])

    combined = tf.mul(c, flat_d)
    print(sess.run(combined))
tensorflow.matmul
10,544
import tensorflow as tf

        if tf_output1_dtype == tf.string:
            cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
        else:
            cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")

        out0 = tf.identity(cast0, "TENSOR_OUTPUT0")
        out1 = tf.identity(cast1, "TENSOR_OUTPUT1")

    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "savedmodel_nobatch" if max_batch == 0 else "savedmodel", input_dtype,
tensorflow.identity
10,545
import tensorflow as tf

    def inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2) + self.b2
        l2 = tf.nn.relu(l2)
        l3 = tf.matmul(l2, self.w3) + self.b3
        l3 = tf.nn.relu(l3)
        out = tf.matmul(l3, self.w4) + self.b4
        return out
tensorflow.nn.relu
10,546
from tensorflow.python.framework import ops

    update_op = size.assign(new_size)

    if metrics_collections:
        ops.add_to_collections(metrics_collections, value)

    if updates_collections:
        ops.add_to_collections(updates_collections, update_op)

    return value, update_op

# pylint: enable=invalid-slice-index
tensorflow.python.framework.ops.add_to_collections
10,547
import tensorflow as tf # load pre-trained data from file # glove = np.load(params['glove'])['embeddings'] # np.array # training the embedding during training glove = np.zeros( (self.params["embedding_vocabulary_size"], self.params["embedding_dim"]), dtype=np.float32, ) # Add OOV word embedding embedding_array = np.vstack([glove, [[0.0] * self.params["embedding_dim"]]]) embedding_variable = tf.Variable( embedding_array, dtype=tf.float32, trainable=True ) # embedding_variable = tf.get_variable( # 'embedding_variable', # shape=(self.params["embedding_vocabulary_size"] + 1, self.params["embedding_dim"]), # dtype=tf.float32, # initializer=tf.contrib.layers.xavier_initializer(), # regularizer=tf.contrib.layers.l2_regularizer(self.params["regularizer_rate"]), # trainable=True # )
tensorflow.Variable
10,548
import tensorflow as tf

            self.conv4 = tf.layers.conv2d(self.pool3,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same', activation=tf.nn.relu)
            self.drop3 = tf.layers.dropout(self.conv4,
                                           self.config.cifar10_cnn["keep_prob"],
                                           training=self.train)

            # b. Flatten input data
            self.flatten = tf.reshape(self.drop3,
                                      [-1, self.config.cifar10_cnn["fc1_nb_units"]])
tensorflow.layers.dropout
10,549
import tensorflow as tf with client_graph.as_default(): decoded_x = stage.decode(encoded_x, decode_params, shape=shape) with self.session(graph=client_graph): decoded_x = self.evaluate(decoded_x) return TestData(x, encoded_x, decoded_x, state, state_update_tensors, updated_state) def _non_adaptive_one_to_many_encode_decode(): """Implementation of the method for `EncodingStageInterface`.""" server_graph = tf.Graph() with server_graph.as_default(): x = input_fn() shape = py_utils.static_or_dynamic_shape(x) encode_params, decode_params = stage.get_params() encoded_x = stage.encode(x, encode_params) # Get all values out of TensorFlow as Python constants. This is a trivial # example of communication happening outside of TensorFlow. with self.session(graph=server_graph):
tensorflow.Graph
10,550
import tensorflow as tf batch_size = 8 inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)] out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)] weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)] with tf.variable_scope("root"): _, losses = SampleGRUSeq2Seq(inp, out, weights) updates = [] params = tf.global_variables() optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5) for i in range(len(buckets)): full_grads = tf.gradients(losses[i], params) grads, _ = tf.clip_by_global_norm(full_grads, 30.0) update = optimizer.apply_gradients(zip(grads, params)) updates.append(update) sess.run([tf.global_variables_initializer()]) steps = 6 for _ in range(steps): bucket = random.choice(np.arange(len(buckets))) length = buckets[bucket][0] i = [np.array([np.random.randint(9) + 1 for _ in range(batch_size)], dtype=np.int32) for _ in range(length)] # 0 is our "GO" symbol here. o = [np.array([0] * batch_size, dtype=np.int32)] + i feed = {} for i1, i2, o1, o2 in zip(inp[:length], i[:length], out[:length], o[:length]): feed[i1.name] = i2 feed[o1.name] = o2
tensorflow.global_variables_initializer
10,551
import tensorflow as tf # Test case 1, 2. x = tf.placeholder_with_default(input=1, shape=[]) # None would fire an exception were it actually executed. self.assertTrue(normal._is_scalar_helper(x.shape, lambda: None)) self.assertTrue( normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) x = tf.placeholder_with_default(input=[1], shape=[1]) # None would fire an exception were it actually executed. self.assertFalse(normal._is_scalar_helper(x.shape, lambda: None)) self.assertFalse( normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) # There's no notion of partially known shapes in eager mode, so exit # early. if tf.executing_eagerly(): return # Test case 3. x = tf.placeholder_with_default(input=1, shape=None) is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x)) self.assertTrue(self.evaluate(is_scalar))
tensorflow.TensorShape
10,552
import tensorflow as tf self.in_window = tf.less( batch_shaped(band_features["closest_time_diff"]), band_time_diff ) # Before and after flux. before_flux = batch_win_shaped(band_features["before_flux"]) after_flux = batch_win_shaped(band_features["after_flux"]) before_time = batch_win_shaped(band_features["before_time"]) after_time = batch_win_shaped(band_features["after_time"]) self.dtime = batch_2win_shaped( tf.concat([before_time, after_time], axis=1) - tile_to_2win(closest_time), ) self.dflux = batch_2win_shaped( tf.concat([before_flux, after_flux], axis=1) - tile_to_2win(closest_flux), ) # Masking tensor. left_mask = _left_mask( batch_shaped( band_features["before_padding"]), window_size)
tensorflow.concat
10,553
import tensorflow as tf ------ ValueError If input tensor is not 2D. """ if weight_init is None: num_features = tensor.get_shape()[-1].value weight_init = tf.truncated_normal([num_features, size], stddev=0.01) if bias_init is None: bias_init = tf.zeros([size]) with tf.name_scope(name, 'fully_connected', [tensor]): w = tf.Variable(weight_init, name='w', dtype=tf.float32) b = tf.Variable(bias_init, name='b', dtype=tf.float32) return tf.nn.xw_plus_b(tensor, w, b) def weight_decay(penalty_type, penalty): """Add weight decay. Args: model: TensorflowGraph. Returns: A scalar tensor containing the weight decay cost.
tensorflow.Variable
10,554
import tensorflow as tf

    lengths = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
    mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1])
    weights = tf.to_float(mask) / lengths
    weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
    return weighted_average, weights

def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs):
    weights = tf.one_hot(encoder_input_length - 1, tf.shape(hidden_states)[1])
    weights = tf.to_float(weights)

    weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
    return weighted_average, weights

def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None,
                    context=None, **kwargs):
    batch_size = tf.shape(state)[0]
    attn_length = tf.shape(hidden_states)[1]
tensorflow.to_float
10,555
import tensorflow as tf 'The frequency with which summaries are saved, in seconds.') tf.app.flags.DEFINE_integer( 'save_checkpoints_secs', 3600, 'The frequency with which the model is saved, in seconds.') # model related configuration tf.app.flags.DEFINE_integer( 'train_image_size', 384, 'The size of the input image for the model to use.') tf.app.flags.DEFINE_integer( 'heatmap_size', 96, 'The size of the output heatmap of the model.') tf.app.flags.DEFINE_string( 'backbone', 'seresnext50',#or seresnext50 seresnet50 'The backbone network to use for feature pyramid.') tf.app.flags.DEFINE_float( 'heatmap_sigma', 1., 'The sigma of Gaussian which generate the target heatmap.') tf.app.flags.DEFINE_float( 'bbox_border', 25., 'The nearest distance of the crop border to al keypoints.') tf.app.flags.DEFINE_integer( 'train_epochs', 50, 'The number of epochs to use for training.') tf.app.flags.DEFINE_integer( 'epochs_per_eval', 20, 'The number of training epochs to run between evaluations.') tf.app.flags.DEFINE_integer( 'batch_size', 10,
tensorflow.app.flags.DEFINE_float
10,556
from tensorflow.python.framework import random_seed def _infer_model(self, x, batch_size=None, axis=None, proba=False): # Converts inputs into tf.DataFrame / tf.Series. batch_size = -1 if batch_size is None else batch_size input_fn, feed_fn = _get_predict_input_fn(x, batch_size) checkpoint_path = saver.latest_checkpoint(self._model_dir) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) contrib_framework.create_global_step(g) features, _ = input_fn() feed_dict = feed_fn() if feed_fn is not None else None predictions = self._get_predict_ops(features) if not isinstance(predictions, dict): predictions = {'predictions': predictions}
tensorflow.python.framework.random_seed.set_random_seed
10,557
from tensorflow.python.framework import ops training_op = control_flow_ops.group(*all_grads) self._BenchmarkOp(training_op, "cudnn_lstm %s %s" % (config_name, self._GetConfigDesc(config))) def benchmarkTfRNNLSTMTraining(self): test_configs = self._GetTestConfig() for config_name, config in test_configs.items(): num_layers = config["num_layers"] num_units = config["num_units"] batch_size = config["batch_size"] seq_length = config["seq_length"] with ops.Graph().as_default(), ops.device("/device:GPU:0"): inputs = seq_length * [ array_ops.zeros([batch_size, num_units], dtypes.float32) ] initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127) cell = rnn_cell.LSTMCell( num_units=num_units, initializer=initializer, state_is_tuple=True) multi_cell = rnn_cell.MultiRNNCell( [cell() for _ in range(num_layers)]) outputs, final_state = core_rnn.static_rnn( multi_cell, inputs, dtype=dtypes.float32)
tensorflow.python.framework.ops.Graph
10,558
import tensorflow as tf

    if use_pool:
        l = tf.nn.max_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
tensorflow.nn.max_pool
10,559
import tensorflow as tf "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
tensorflow.flags.DEFINE_string
10,560
import tensorflow as tf name='%s_omega' % self.layer_name, dtype=self.dtype, initializer=initialization.xavier_initializer( shape=bias_shape, dtype=self.dtype, uniform=self.normal_initializer, mask=None))) else: setattr( self, 'kappa_%s' % layer, tf.constant(1.)) setattr( self, 'omega_%s' % layer, tf.constant(1.)) if self.adapation: setattr( self, 'eta_%s' % layer, tf.get_variable( name='%s_eta' % self.layer_name,
tensorflow.constant
10,561
import tensorflow as tf idx_z0_y0_x1 = base_z0_y0 + x1_clip idx_z0_y1_x0 = base_z0_y1 + x0_clip idx_z0_y1_x1 = base_z0_y1 + x1_clip idx_z1_y0_x0 = base_z1_y0 + x0_clip idx_z1_y0_x1 = base_z1_y0 + x1_clip idx_z1_y1_x0 = base_z1_y1 + x0_clip idx_z1_y1_x1 = base_z1_y1 + x1_clip # Use indices to lookup pixels in the flat image and restore # channels dim im_flat = tf.reshape(im, tf.stack([-1, channels])) im_flat = tf.to_float(im_flat) i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0) i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1) i_z0_y1_x0 = tf.gather(im_flat, idx_z0_y1_x0) i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1) i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0) i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1) i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0) i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1) # Finally calculate interpolated values. x0_f = tf.to_float(x0) x1_f = tf.to_float(x1) y0_f = tf.to_float(y0) y1_f = tf.to_float(y1) z0_f = tf.to_float(z0) z1_f = tf.to_float(z1) # Check the out-of-boundary case. x0_valid = tf.to_float(
tensorflow.gather
10,562
import tensorflow as tf mse_loss_list.append(0.5 * tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind], weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(pred_ind), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements temp_loss = tf.reduce_mean(tf.reshape(tf.losses.mean_squared_error(targets_list[-1], pred_outputs[-1], weights=1.0, loss_collection=None, reduction=tf.losses.Reduction.NONE), [cur_batch_size, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], -1]), axis=-1) num_topk = config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')] // 2 gather_col = tf.nn.top_k(temp_loss, k=num_topk, sorted=True)[1] gather_row = tf.reshape(tf.tile(tf.reshape(tf.range(cur_batch_size), [-1, 1]), [1, num_topk]), [-1, 1]) gather_indcies = tf.stop_gradient(tf.stack([gather_row, tf.reshape(gather_col, [-1, 1])], axis=-1)) select_targets = tf.gather_nd(targets_list[-1], gather_indcies) select_heatmap = tf.gather_nd(pred_outputs[-1], gather_indcies) mse_loss_list.append(tf.losses.mean_squared_error(select_targets, select_heatmap, weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(len(pred_outputs) - 1), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN)) else: for pred_ind in list(range(len(pred_outputs))): mse_loss_list.append(tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind], weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(pred_ind), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch
tensorflow.gather_nd
10,563
import tensorflow as tf grads_pruned += [(grad[0] * self.masks[idx_mask], grad[1])] return grads_pruned def __save_model(self): """Save the current model.""" save_path = self.saver_train.save(self.sess_train, FLAGS.ws_save_path, self.global_step) tf.logging.info('model saved to ' + save_path) def __restore_model(self, is_train): """Restore a model from the latest checkpoint files. Args: * is_train: whether to restore a model for training """
tensorflow.logging.info
10,564
import tensorflow as tf features={ 'label':tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string), }) image=tf.decode_raw(features['img_raw'],tf.uint8) label=tf.cast(features['label'],tf.int32) image=tf.reshape(image,[4096,1]) return image,label def get_batch(image,label,batch_size,crop_size): #print(image.shape) #print(label.shape) images,labels=tf.train.shuffle_batch([image,label], batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=200) return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size]) def get_test_batch(image,label,batch_size): images,labels=tf.train.batch([image,label],batch_size=batch_size) return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size]) def get_valid_batch(image,label,batch_size): images,labels=tf.train.batch([image,label],batch_size=batch_size) return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
tensorflow.train.shuffle_batch
10,565
import tensorflow as tf tf.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec, name="in_1"), transpose_b=True, name="1") + tf.matmul( rnn_in, tf.abs(self.W_in) * self.input_Connectivity, transpose_b=True, name="2") + self.b_rec) \ + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise)\ * tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
tensorflow.abs
10,566
import tensorflow as tf

        self.data_format = data_format
        if( data_format =='NCHW' ):
            self.strides = [1, 1, d_h, d_w]
        else:
            self.strides = [1, d_h, d_w, 1]

    def __call__(self,input_var,name=None,**xargs):
        shapes = tf.shape(input_var)
        if( self.data_format == 'NCHW' ):
            shapes = tf.stack([shapes[0],tf.shape(self.b)[0],shapes[2]*self.strides[2],shapes[3]*self.strides[3]])
        else:
            shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]])

        return tf.nn.bias_add(
            tf.nn.conv2d_transpose(input_var,self.w,output_shape=shapes,
                                   data_format=self.data_format,
                                   strides=self.strides,padding='SAME'),
            self.b,data_format=self.data_format,name=name)

    def get_variables(self):
        return {'w':self.w,'b':self.b}

class WeightNormTransposedConv2d(object):
    def __init__(self,name,input_dim,out_dim,
tensorflow.shape
10,567
import tensorflow as tf

    if mode == 'eval':
        for checkpoint in _get_next_checkpoint():
            tf.logging.info('Starting to evaluate.')
            try:
tensorflow.logging.info
10,568
import tensorflow as tf class MaxToKeepTest(tf.test.TestCase): def testNonSharded(self): save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_non_sharded") try: gfile.DeleteRecursively(save_dir) except OSError: pass # Ignore gfile.MakeDirs(save_dir) with self.test_session() as sess: v = tf.Variable(10.0, name="v") save = tf.train.Saver({"v": v}, max_to_keep=2) tf.initialize_all_variables().run() self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertTrue(gfile.Exists(s1)) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertTrue(gfile.Exists(s1))
tensorflow.Variable
10,569
import tensorflow as tf final_loss = tf.reduce_sum(loss) return final_loss, cstr_pct def contra_traj_lossV8(pred, tgt, horizon=12): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0) horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0) pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1]) tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = tf.cast(tgt_dif > 0, tf.bool) tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = tf.where(geq, pred_dif, -pred_dif) loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
tensorflow.reshape
10,570
import tensorflow as tf

        _debug(self.conv5)
        if self.test_classification:
            with tf.variable_scope('logits'):
                print('Building unit: logits')
                self.score = tf.reduce_mean(self.conv5, axis=[1, 2])
tensorflow.variable_scope
10,571
from tensorflow.python.ops import clip_ops global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`. This provides a quicker adaptation of the mean for the first steps. report_summary: If `True`, will add histogram summaries of the `max_norm`. epsilon: Small value chosen to avoid zero variance. name: The name for this operation is used to scope operations and summaries. Returns: A function for applying gradient clipping. """ def gradient_clipping(grads_and_vars): """Internal function for adaptive clipping.""" grads, variables = zip(*grads_and_vars) norm = clip_ops.global_norm(grads) max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name) # reports the max gradient norm for debugging if report_summary: summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm) # factor will be 1. if norm is smaller than max_norm factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm), math_ops.exp(log_mean) / norm) if static_max_norm is not None:
tensorflow.python.ops.clip_ops.global_norm
10,572
import tensorflow as tf

    with tf.variable_scope(scope):
        d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)
        s0 = tf.nn.tanh(dense(d_memory, hidden, scope="s0"))
        s = dense(s0, 1, use_bias=False, scope="s")
        s1 = softmax_mask(tf.squeeze(s, [2]), mask)
        a = tf.expand_dims(tf.nn.softmax(s1), axis=2)
        res = tf.reduce_sum(a * memory, axis=1)
        return res
tensorflow.squeeze
10,573
import tensorflow as tf

    def build_cnet(self, state_in, name, reuse=False, batch_size=64):
        reg = tf.contrib.layers.l2_regularizer(1e-3)

        with tf.variable_scope(name, reuse=reuse):
            layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
tensorflow.variable_scope
10,574
import tensorflow as tf self.config = config self._activation_fn = tf.nn.relu self._embedding_initializers = { 'embeddings': tf.truncated_normal_initializer(stddev=0.01), } self._embedding_regularizers = {} self._initializers = { "w": tf.contrib.layers.xavier_initializer(), } self._regularizers = { 'w': tf.contrib.layers.l2_regularizer(config.l2) } self._construct_placeholders() self._construct_weights() self._construct() tf.summary.scalar('Model/Loss', tf.get_collection(GraphKeys.LOSSES)[0]) self.summary = tf.summary.merge_all() def _construct(self): """ Construct the model; main part of it goes here """
tensorflow.contrib.layers.l2_regularizer
10,575
import tensorflow as tf

    x1 = x0 + 1
    y0 = tf.to_int32(tf.floor(y))
    y1 = y0 + 1
    z0 = tf.to_int32(tf.floor(z))
    z1 = z0 + 1

    x0_clip = tf.clip_by_value(x0, zero, max_x)
    x1_clip = tf.clip_by_value(x1, zero, max_x)
    y0_clip = tf.clip_by_value(y0, zero, max_y)
    y1_clip = tf.clip_by_value(y1, zero, max_y)
    z0_clip = tf.clip_by_value(z0, zero, max_z)
    z1_clip = tf.clip_by_value(z1, zero, max_z)

    dim3 = width
    dim2 = width * height
    dim1 = width * height * depth

    base = _repeat(
        tf.range(num_batch) * dim1,
        out_depth * out_height * out_width)
    base_z0_y0 = base + z0_clip * dim2 + y0_clip * dim3
    base_z0_y1 = base + z0_clip * dim2 + y1_clip * dim3
    base_z1_y0 = base + z1_clip * dim2 + y0_clip * dim3
tensorflow.clip_by_value
10,576
from tensorflow.python.framework import ops

def _calc_bias_add_flops(graph, node):
    """Calculates the computing needed for BiasAdd."""
    input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
    input_shape.assert_is_fully_defined()
    input_count = np.prod(input_shape.as_list())
    return ops.OpStats("flops", input_count)

@ops.RegisterStatistics("BiasAdd", "weight_parameters")
def _calc_bias_add_weight_params(graph, node):
tensorflow.python.framework.ops.OpStats
10,577
import tensorflow as tf

        output = tf.matmul(scores, facts)  # [B, 1, H]
        # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
    else:
        scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
        output = facts * tf.expand_dims(scores, -1)
        output = tf.reshape(output, tf.shape(facts))
    return output
tensorflow.expand_dims
10,578
import tensorflow as tf

        input_use_counts = [0] * len(cell_inputs + blocks)
        for (idx1, _, idx2, _) in cell_arch:
            input_use_counts[idx1] += 1
            input_use_counts[idx2] += 1

        # Concat only unused blocks
        with tf.variable_scope('combine'):
            block_use_counts = input_use_counts[len(cell_inputs):]
            out_blocks = [block for (block, use_count) in zip(blocks, block_use_counts) if use_count == 0]
            comb_ch = len(out_blocks) * block_ch
            X = tf.concat(out_blocks, axis=3)
tensorflow.variable_scope
10,579
import tensorflow as tf class MetaGraphTest(tf.test.TestCase): def _TestDir(self, test_name): test_dir = os.path.join(self.get_temp_dir(), test_name) if os.path.exists(test_dir): shutil.rmtree(test_dir) gfile.MakeDirs(test_dir) return test_dir def testAddCollectionDef(self): test_dir = self._TestDir("good_collection") filename = os.path.join(test_dir, "metafile") with self.test_session(): # Creates a graph. v0 = tf.Variable(10.0, name="v0") var = tf.Variable(tf.constant(0, dtype=tf.int64)) count_up_to = var.count_up_to(3) input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue") qr = tf.train.QueueRunner(input_queue, [count_up_to]) tf.initialize_all_variables() # Creates a saver. save = tf.train.Saver({"v0": v0}) # Adds a set of collections. tf.add_to_collection("int_collection", 3) tf.add_to_collection("float_collection", 3.5) tf.add_to_collection("string_collection", "hello") tf.add_to_collection("variable_collection", v0) # Add QueueRunners. tf.train.add_queue_runner(qr) # Adds user_defined proto in three formats: string, bytes and Any.
tensorflow.constant
10,580
import tensorflow as tf update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) q_values = q_func(observations_ph.get(), num_actions, scope="q_func") deterministic_actions = tf.argmax(q_values, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=output_actions,
tensorflow.stack
10,581
import tensorflow as tf

    y0_f = tf.to_float(y0)
    y1_f = tf.to_float(y1)
    z0_f = tf.to_float(z0)
    z1_f = tf.to_float(z1)

    # Check the out-of-boundary case.
    x0_valid = tf.to_float(
        tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))
    x1_valid = tf.to_float(
        tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0))
    y0_valid = tf.to_float(
        tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0))
    y1_valid = tf.to_float(
        tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))
    z0_valid = tf.to_float(
        tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))
    z1_valid = tf.to_float(
        tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0))

    w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) * (z1_f - z) *
                                 x1_valid * y1_valid * z1_valid), 1)
    w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) * (z1_f - z) *
                                 x0_valid * y1_valid * z1_valid), 1)
tensorflow.less_equal
10,582
import tensorflow as tf

    # Individual components of the loss that will need summaries.
    clone_loss = None
    regularization_loss = None

    # Compute and aggregate losses on the clone device.
    with tf.device(clone.device):
        all_losses = []
        clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
        if clone_losses:
tensorflow.device
10,583
import tensorflow as tf

        # +np.diag(np.ones(self.N_rec)*(1-self.alpha)))))
        # add diagonal matrix 1-alpha to account for persistence tau
        return (1.1/rho) * W  # - .9*np.diag(np.ones(self.N_rec)*(1-self.alpha)) #correct for tau

    # vanishing gradient regularization, Omega, as in Pascanu
    # NOTE: this is RELU specific
    def dOmega_dWrec(self):

        # states in shape timesteps, batch, n_rec
        states = self.states
        dxt_list = tf.gradients(self.error, states)

        # dxt_list[0] = tf.Print(dxt_list[0], [dxt_list[0]], "dxt 0: ")

        test = tf.gradients(states[0], states[-1])

        dxt = tf.stack(dxt_list)
        xt = tf.stack(states)

        num = (1 - self.alpha) * dxt + tf.tensordot(self.alpha * dxt,
tensorflow.gradients
10,584
import tensorflow as tf np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNEisadv_' + f1, is_adv_all) return None def tSNE_visual_carliniLi(hps, num_batch): # Construct graph images, labels = input_name.build_input( FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200 Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False) Res.build_graph() saver = tf.train.Saver() # Open session and restore checkpoint sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) tf.train.start_queue_runners(sess) sess.run(tf.global_variables_initializer()) ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) saver.restore(sess, ckpt_state.model_checkpoint_path) model_carlini = models_carlini(hps) if FLAGS.attack_method == 'carliniLi': attack_carlini = attacks.carliniLi.CarliniLi(sess, model_carlini, largest_const=10 ** -3) elif FLAGS.attack_method == 'carliniL2': attack_carlini = attacks.carliniL2.CarliniL2(sess, model_carlini, batch_size=10, max_iterations=1000, confidence=0,binary_search_steps=3) adv_image = tf.placeholder(tf.float32, shape=[hps.batch_size, image_size, image_size, num_channel]) _, logits_nor = model_carlini.predict(images, tsne_logits=True) _, logits_adv = model_carlini.predict(adv_image, tsne_logits=True) dim_logits = logits_nor.shape[1]
tensorflow.train.get_checkpoint_state
10,585
import tensorflow as tf

        Builds the computation graph with Tensorflow
        """
        start_t = time.time()
        self._setup_placeholders()
        self._embed()
        self._encode()
        self._fuse()
        self._decode()
        self._compute_loss()
        self._create_train_op()
        self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))
        param_num = total_params(tf.trainable_variables())
        self.logger.info('There are {} parameters in the model'.format(param_num))

    """
    :description: Placeholders
    """
    def _setup_placeholders(self):
        if self.demo:
            self.c = tf.placeholder(tf.int32, [None, self.config.max_p_len], "context")
tensorflow.trainable_variables
10,586
import tensorflow as tf with tf.contrib.summary.create_file_writer( logdir=model_dir, filename_suffix=".host_call").as_default(): with tf.contrib.summary.always_record_summaries(): for i, name in enumerate(metric_names): if reduce_fn is None: scalar = args[i][0] else: scalar = reduce_fn(args[i]) with tf.contrib.summary.record_summaries_every_n_global_steps( 100, global_step=step): tf.contrib.summary.scalar(prefix + name, scalar, step=step) return tf.contrib.summary.all_summary_ops() global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1]) other_tensors = [tf.reshape(monitor_dict[key], [1]) for key in metric_names] return host_call_fn, [global_step_tensor] + other_tensors def two_stream_loss(FLAGS, features, labels, mems, is_training): """Pretraining loss with two-stream attention Transformer-XL.""" #### Unpack input mem_name = "mems" mems = mems.get(mem_name, None) inp_k = tf.transpose(features["input_k"], [1, 0]) inp_q = tf.transpose(features["input_q"], [1, 0])
tensorflow.reshape
10,587
import tensorflow as tf

Random Network Distillation
"""
def random_net_distill(x_ph, a_ph, hidden_sizes=(400,300), activation=tf.nn.relu,
                       output_activation=tf.tanh, action_space=None, dropout_rate=0.1):
    act_dim = a_ph.shape.as_list()[-1]
    act_limit = action_space.high[0]

    with tf.variable_scope('rnd_targ_act'):
        rnd_targ_act = act_limit * mlp(x_ph, list(hidden_sizes) + [act_dim], activation, output_activation)

    with tf.variable_scope('rnd_pred_act'):
        # rnd_pred_act = act_limit * mlp(x_ph, list(hidden_sizes) + [act_dim], activation, output_activation)
        rnd_pred_act_in_dim = x_ph.shape.as_list()[1]
        rnd_pred_act_dropout_mask_generator = DropoutMaskGenerator(rnd_pred_act_in_dim,
tensorflow.variable_scope
10,588
import tensorflow as tf "lookup_table", dtype=tf.float32, shape=[len(vocab), size_layers], initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01), ) lookup_table = tf.concat((tf.zeros(shape=[1, size_layers]), lookup_table[1:, :]), 0) forward = tf.nn.embedding_lookup(lookup_table, self.X) self.Y = tf.placeholder(tf.float32, (None, None, n_mels * resampled)) self.decoder_inputs = tf.concat((tf.zeros_like(self.Y[:, :1, :]), self.Y[:, :-1, :]), 1) self.decoder_inputs = self.decoder_inputs[:, :, -n_mels:] self.Z = tf.placeholder(tf.float32, (None, None, fourier_window_size // 2 + 1)) batch_size = tf.shape(self.X)[0] seq_lens = tf.count_nonzero(tf.reduce_sum(self.decoder_inputs, -1), 1, dtype=tf.int32) + 1 def cells(reuse=False): return tf.contrib.rnn.DropoutWrapper( tf.nn.rnn_cell.LSTMCell( size_layers, initializer=tf.orthogonal_initializer(), reuse=reuse ),
tensorflow.placeholder
10,589
import tensorflow as tf hparams = imagetransformer_latent_tiny() hparams.mode = tf.estimator.ModeKeys.TRAIN block_dim = int(hparams.hidden_size // hparams.num_blocks) block_v_size = 2**(hparams.bottleneck_bits / (hparams.num_residuals * hparams.num_blocks)) block_v_size = int(block_v_size) means = tf.get_variable( name="means", shape=[hparams.num_residuals, hparams.num_blocks, block_v_size, block_dim], initializer=tf.uniform_unit_scaling_initializer()) hparams.bottleneck = functools.partial( discretization.discrete_bottleneck, hidden_size=hparams.hidden_size, z_size=hparams.bottleneck_bits, filter_size=hparams.filter_size, startup_steps=hparams.startup_steps, bottleneck_kind=hparams.bottleneck_kind, num_blocks=hparams.num_blocks, num_residuals=hparams.num_residuals, reshape_method=hparams.reshape_method, beta=hparams.vq_beta,
tensorflow.uniform_unit_scaling_initializer
10,590
from tensorflow.python.ops import control_flow_ops def _create_slots(self, var_list): # Create slots for the first and second moments. for v in var_list: self._zeros_slot(v, "g", self._name) def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype) g_t = grad g_t_1 = self.get_slot(var, "g") g_t = g_t_1.assign(g_t) var_update = state_ops.assign_sub(var, 2. * lr_t * g_t - lr_t * g_t_1) # Adam would be lr_t * g_t return control_flow_ops.group(*[var_update, g_t]) def _apply_sparse(self, grad, var): raise NotImplementedError("Sparse gradient updates are not supported.") class OptimisticAdamOptimizer(optimizer.Optimizer): def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, use_locking=False, name="Adamirror"): super(OptimisticAdamOptimizer, self).__init__(use_locking, name) self._lr = learning_rate self._beta1 = beta1
tensorflow.python.ops.control_flow_ops.group
10,591
import tensorflow as tf m = model # Do initialization of all variables sess.run(tf.global_variables_initializer()) # Load datasets with defaults sess.run([m.train_dataset_init_op, m.pred_dataset_init_op], feed_dict={ m.ph.train_images: np.zeros((1, w, h, in_ch)), m.ph.train_classes: np.zeros((1,)), m.ph.pred_images: np.zeros((1, w, h, in_ch)), m.ph.pred_classes: np.zeros((1,)) }) def _make_session(self): config = tf.ConfigProto(allow_soft_placement=True) config.gpu_options.allow_growth = True sess = tf.Session(config=config) return sess def _feed_dataset_to_model(self, images, classes=None, is_train=False): m = self._model utils.logger.log('Feeding dataset to model...') # Mock classes if required classes = classes or [0 for _ in range(len(images))] if is_train: self._sess.run(m.train_dataset_init_op, feed_dict={
tensorflow.ConfigProto
10,592
import tensorflow as tf

        self.w4 = tf.get_variable('w4', [512, classnum], initializer=tf.contrib.layers.xavier_initializer_conv2d())
        self.b1 = tf.get_variable('b1', [2048], initializer=tf.constant_initializer(0.0))
        self.b2 = tf.get_variable('b2', [3072], initializer=tf.constant_initializer(0.0))
        self.b3 = tf.get_variable('b3', [512], initializer=tf.constant_initializer(0.0))
        self.b4 = tf.get_variable('b4', [classnum], initializer=tf.constant_initializer(0.0))

    def inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2) + self.b2
        l2 = tf.nn.relu(l2)
        l3 = tf.matmul(l2, self.w3) + self.b3
        l3 = tf.nn.relu(l3)
tensorflow.cast
10,593
import tensorflow as tf

    optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,
                                            do_function_inlining=True)
    graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
    config = tf.ConfigProto(allow_soft_placement=True,
                            graph_options=graph_options)

    if params.device_list:
tensorflow.ConfigProto
10,594
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import array_ops

import numpy as np

def safe_get(name, *args, **kwargs):
    """ Same as tf.get_variable, except flips on reuse_variables automatically """
    try:
        return tf.get_variable(name, *args, **kwargs)
    except ValueError:
        tf.get_variable_scope().reuse_variables()
        return tf.get_variable(name, *args, **kwargs)

def init_weights(shape, name=None):
    shape = tuple(shape)
    weights = np.random.normal(scale=0.01, size=shape).astype('f')
    return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32)

def init_bias(shape, name=None):
    return safe_get(name, initializer=tf.zeros(shape, dtype=tf.float32))
tensorflow.get_variable
10,595
import tensorflow as tf

    def general_conv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02,
                       activation_function = "relu", padding = "VALID", do_norm=True, relu_factor = 0, name="conv2d"):
        with tf.variable_scope(name):
            conv = tf.layers.conv2d(input_data, filters, kernel_size, stride, padding, activation=None)

            if do_norm:
                conv = tf.layers.batch_normalization(conv, momentum=0.9)

            if activation_function == "relu":
                conv = tf.nn.relu(conv, name='relu')

            if activation_function == "leakyrelu":
tensorflow.layers.batch_normalization
10,596
import tensorflow as tf

    # Build a new graph with different initialization.
    v0 = tf.Variable(-1.0)
tensorflow.Variable
10,597
import tensorflow as tf

        else:  # concrete steps for building the worker's two networks
            with tf.variable_scope(scope):  # the scope passed in here is the worker's name
                self.global_step = globalAC.global_step
                self.obs_space = N_S
                self.act_space = N_A
                self.k = 16
                self.g_dim = 256
                self.c = 10
                self.vf_hidden_size = 128  # for value function network
                self.alpha = 0.5  # for build loss
                self.batch_processor = FeudalBatchProcessor(self.c)
                self.build_model()  # build feudal policy model

            with tf.name_scope('local_grad'):
                grads = tf.gradients(self.loss, self.var_list)
                grads, _ = tf.clip_by_global_norm(grads, 40)

            with tf.name_scope('sync'):  # synchronization between the worker and the global net
                with tf.name_scope('pull'):  # fetch the global parameters and copy them into the local net
                    self.pull_params_op = tf.group(*[v1.assign(v2)
                                                     for v1, v2 in zip(self.var_list, globalAC.var_list)])
                with tf.name_scope('push'):  # push the parameters to the global net
                    self.update_params_op = OPT.apply_gradients(zip(grads, globalAC.var_list))
                    # what is pushed are the gradients (grads) of the local net's actor and critic
                    # parameters; their computation is defined above
                    # apply_gradients is a built-in function of tf.train.Optimizer that applies the
                    # computed gradients to the global parameters
            self.inc_step = self.global_step.assign_add(tf.shape(self.obs)[0])
            self.train_op = tf.group(self.update_params_op, self.inc_step)
            # GLOBALE_STEP += tf.shape(self.obs)[0]
tensorflow.gradients
10,598
import tensorflow as tf

        config = tf.ConfigProto(log_device_placement=False)
        if self.on_gpu:
            config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.sess.run(init)

        checkpoint_file = self.model_dir
tensorflow.Session
10,599