Columns: seed (string, lengths 25-2.89k), seed_api (string, lengths 14-102), index (int64, 0-14.8k)
import tensorflow as tf

def testRNNDecoder(self):
  with self.test_session() as sess:
    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      inp = [tf.constant(0.5, shape=[2, 2])] * 2
      _, enc_state = tf.nn.rnn(
          tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
      dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
      cell = tf.nn.rnn_cell.OutputProjectionWrapper(
          tf.nn.rnn_cell.GRUCell(2), 4)
      dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
      sess.run([tf.global_variables_initializer()])
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 4), res[0].shape)
      res = sess.run([mem])
tensorflow.nn.rnn_cell.GRUCell
12,600
import tensorflow as tf

  Returns:
    counts: The histogram, as counts per bin.
    boundaries: A `Tensor` used to build the histogram representing boundaries.
  """
  with tf.compat.v1.name_scope(name, 'histogram'):
    x = tf.reshape(tf_utils.get_values(x), [-1])
    if categorical:
      x_dtype = x.dtype
      x = x if x_dtype == tf.string else tf.strings.as_string(x)
      elements, counts = count_per_key(x)
      if x_dtype != elements.dtype:
        elements = tf.strings.to_number(elements, tf.int64)
      return counts, elements
    if boundaries is None:
      boundaries = tf.range(11, dtype=tf.float32) / 10.0
    elif isinstance(boundaries, int) or (isinstance(boundaries, tf.Tensor) and
                                         boundaries.get_shape().ndims == 0):
      min_value, max_value = _min_and_max(x, True)
      boundaries = tf.linspace(
          tf.cast(min_value, tf.float32), tf.cast(max_value, tf.float32),
          tf.cast(boundaries, tf.int64))
tensorflow.strings.to_number
12,601
import tensorflow as tf

    output_dtype: (Optional) If not None, casts the output tensor to this type.
    name: (Optional) A name for this operation.

  Returns:
    The tuple (hl, hr) containing two `Tensor` instances with the hl and hr
    parameters. If `x` is floating point, each parameter will have the same
    type as `x`. If `x` is integral, the output is cast to float32.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'tukey_h_params'):
    return _tukey_parameters(x, reduce_instance_dims, output_dtype)[2:]


def _tukey_parameters(
    x: common_types.TensorType,
    reduce_instance_dims: bool = True,
    output_dtype: Optional[tf.DType] = None
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
  """Efficient computation of L-moments."""
  if output_dtype is None:
tensorflow.compat.v1.name_scope
12,602
import tensorflow as tf

  Preprocessing for downsampled_imagenet 32x32 and 64x64 generation from
  http://arxiv.org/abs/1601.06759 (page 8).
  """
  del training

  def flatten_image(features):
    img = features['image']
    flat = tf.cast(tf.reshape(img, [-1]), tf.int64)
    new_features = {'image': flat}
    return new_features

  return dataset.map(flatten_image)
tensorflow.reshape
12,603
import tensorflow as tf

    if self.has_inputs:
      data_fields["inputs"] = tf.VarLenFeature(tf.int64)

    if self.packed_length:
      if self.has_inputs:
        data_fields["inputs_segmentation"] = tf.VarLenFeature(tf.int64)
        data_fields["inputs_position"] = tf.VarLenFeature(tf.int64)
      data_fields["targets_segmentation"] = tf.VarLenFeature(tf.int64)
      data_fields["targets_position"] = tf.VarLenFeature(tf.int64)

    data_items_to_decoders = None
    return (data_fields, data_items_to_decoders)

  def eval_metrics(self):
tensorflow.VarLenFeature
12,604
import tensorflow as tf

def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
    with tf.variable_scope(scope):
        nin = x.get_shape()[1].value
        w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
        return tf.matmul(x, w) + b

def batch_to_seq(h, nbatch, nsteps, flat=False):
    if flat:
        h = tf.reshape(h, [nbatch, nsteps])
    else:
tensorflow.matmul
12,605
import tensorflow as tf

                               if 'batch_normalization' not in v.name])
    total_loss = tf.identity(loss, name='total_loss')

    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]
        learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                                    [int(_) for _ in params['decay_boundaries']],
                                                    lr_values)
        truncated_learning_rate = tf.maximum(learning_rate,
                                             tf.constant(params['end_learning_rate'],
                                                         dtype=learning_rate.dtype))
        # Create a tensor named learning_rate for logging purposes.
        tf.identity(truncated_learning_rate, name='learning_rate')
        tf.summary.scalar('learning_rate', truncated_learning_rate)
tensorflow.cast
12,606
import tensorflow as tf

    dtype = initial_learning_rate.dtype
    maximal_learning_rate = tf.cast(self.maximal_learning_rate, dtype)
tensorflow.cast
12,607
from tensorflow.python.platform import gfile

    except OSError:
      pass  # Ignore
    gfile.MakeDirs(save_dir)
tensorflow.python.platform.gfile.MakeDirs
12,608
import tensorflow as tf

        stitch3_1, stitch3_2 = fc3_1, fc3_2

    dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training,
                                        scope="dropout3_1")
    dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob, is_training=is_training,
                                        scope="dropout3_2")

    output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1, activation_fn=None, scope="output_1")
    output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2, activation_fn=None, scope="output_2")

    with tf.variable_scope("loss"):
        loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1))
        loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2))
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses)

    with tf.variable_scope("evaluation"):
        accuracy_1 = tf.reduce_mean(tf.cast(tf.equal(
            tf.argmax(output_1, axis=-1),
            tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1")
        accuracy_2 = tf.reduce_mean(tf.cast(tf.equal(
            tf.argmax(output_2, axis=-1),
tensorflow.nn.softmax_cross_entropy_with_logits
12,609
import tensorflow as tf

        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
tensorflow.gfile.GFile
12,610
import tensorflow as tf

        self.weight_initializer = tf.contrib.layers.xavier_initializer()
        self.const_initializer = tf.constant_initializer(0.0)
        self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)

        # Place holder for features and captions
        self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
        self.captions = tf.placeholder(tf.int32, [None, self.T + 1])

    def _get_initial_lstm(self, features):
        with tf.variable_scope('initial_lstm'):
            features_mean = tf.reduce_mean(features, 1)

            w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer)
            b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer)
            h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)

            w_c = tf.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer)
            b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer)
            c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c)
            return c, h

    def _word_embedding(self, inputs, reuse=False):
        with tf.variable_scope('word_embedding', reuse=reuse):
            w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
            x = tf.nn.embedding_lookup(w, inputs, name='word_vector')  # (N, T, M) or (N, M)
tensorflow.get_variable
12,611
import tensorflow as tf

        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out]))
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out]))
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(b_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
        return normed
tensorflow.identity
12,612
import tensorflow as tf

    v = tf.Variable(np.int64(15), name="v")
    save = tf.train.Saver({"v": v}, restore_sequentially=True)
tensorflow.train.Saver
12,613
import tensorflow as tf print("Entropy Encode (Hyper)") y_strings, y_min_v, y_max_v = conditional_entropy_model.compress(ys, locs, scales) y_shape = tf.shape(ys)[:] print("Entropy Encode")
tensorflow.shape
12,614
import tensorflow as tf

           anchor_match_negative_indicator_matrix,
           margin=margin,
           use_semi_hard=use_semi_hard,
           anchor_positive_mining_distances=anchor_positive_mining_distances,
           anchor_match_mining_distance_matrix=(
               anchor_match_mining_distance_matrix)))

  negative_distances = tf.boolean_mask(
      negative_distances,
      mask=negative_distances < negative_distances.dtype.max)
  negative_mining_distances = tf.boolean_mask(
      negative_mining_distances,
      mask=negative_distances < negative_distances.dtype.max)

  active_triplet_ratio = (
      tf.cast(num_active_triplets, dtype=tf.float32) / num_total_triplets)
  active_mining_triplet_ratio = (
      tf.cast(num_active_mining_triplets, dtype=tf.float32) /
      num_total_triplets)
tensorflow.boolean_mask
12,615
import tensorflow as tf

        return scale * tf.nn.tanh(1. / scale * x)

    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    ivec = hn or rep_tensor.get_shape().as_list()[2]
tensorflow.shape
12,616
import tensorflow as tf

            # Loss Functions
            self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, shape=[-1])))
            # H(x) = -Sum[p(x)*log(p(x))]
            self.entropy = - 0.01 * tf.reduce_sum(self.policy * tf.log(tf.clip_by_value(self.policy, 1e-10, 1.0)))
            self.policy_loss = - 0.2 * tf.reduce_sum(
                tf.log(tf.clip_by_value(self.policy[:, 0], 1e-15, 1.0)) * self.advantages +
                tf.log(tf.clip_by_value(self.policy[:, 1], 1e-15, 1.0)) * self.advantages)

            # For Normal RL Part
            self.loss = self.value_loss + self.policy_loss - self.entropy
            # removed self.blocking_loss, valid_loss, discrete_policy_loss
            # + 0.5*self.mypos_loss + 0.5*self.goalpos_loss

            # For Imitation Learning Part
tensorflow.clip_by_value
12,617
import tensorflow as tf

def softmax(target, axis, name=None):
    max_axis = tf.reduce_max(target, axis, keep_dims=True)
    target_exp = tf.exp(target - max_axis)
tensorflow.reduce_max
12,618
import tensorflow as tf

tf.app.flags.DEFINE_integer('feats_per_layer', 32, 'Number of feature channels at each layer')
tf.app.flags.DEFINE_boolean('total_pool', True, 'If true, pool all feature maps to 1x1 size in final layer')
tf.app.flags.DEFINE_integer('pool_stride', '1', 'If 2, we get progressive pooling - with overlap pooling, AlexNet style')

TRAIN_FILE = 'train_{}.tfrecords'.format(records.tfrecord_name())
VALIDATION_FILE = 'validation_{}.tfrecords'.format(records.tfrecord_name())
TEST_FILE = 'test_{}.tfrecords'.format(records.tfrecord_name())

def NUM_CLASSES():
  return 10 if FLAGS.parity == 'none' else 5

def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since both keys are required.
      features={
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label': tf.FixedLenFeature([], tf.int64),
      })

  if FLAGS.contrast_norm == 'areafactor':
    image = tf.decode_raw(features['image_raw'], tf.float32)
  else:
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.cast(image, tf.float32) * (1. / 255)
tensorflow.TFRecordReader
12,619
import tensorflow as tf

            except Exception:  # If initializer is a constant, do not specify shape.
                b = tf.get_variable(name='b', initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args)

            self.outputs = act(tf.matmul(self.inputs, W) + b)
        else:
            self.outputs = act(tf.matmul(self.inputs, W))

        self.all_layers.append(self.outputs)
        if b_init is not None:
tensorflow.matmul
12,620
from tensorflow.python.ops import init_ops

    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]

      with ops.Graph().as_default(), ops.device("/device:GPU:0"):
        inputs = seq_length * [
            array_ops.zeros([batch_size, num_units], dtypes.float32)
        ]
        initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)

        cell = rnn_cell.LSTMCell(
            num_units=num_units, initializer=initializer, state_is_tuple=True)
        multi_cell = rnn_cell.MultiRNNCell(
            [cell() for _ in range(num_layers)])
        outputs, final_state = core_rnn.static_rnn(
            multi_cell, inputs, dtype=dtypes.float32)
        trainable_variables = ops.get_collection(
            ops.GraphKeys.TRAINABLE_VARIABLES)
        gradients = gradients_impl.gradients([outputs, final_state],
tensorflow.python.ops.init_ops.random_uniform_initializer
12,621
import tensorflow as tf

        inter_block_soft = tf.nn.softmax(inter_block_logits_masked, 2)  # bs,bn,bl,vec
        inter_block_attn_output = tf.reduce_sum(self_attn_result * inter_block_soft, 2)  # bs,bn,vec

    with tf.variable_scope('self_attn_inter_block'):
        inter_block_attn_output_mask = tf.cast(tf.ones([bs, bn], tf.int32), tf.bool)
        block_ct_res = directional_attention_with_dense(
            inter_block_attn_output, inter_block_attn_output_mask, direction, 'disa',
            keep_prob, is_train, wd, activation
        )  # [bs,bn,vec]
        block_ct_res_tile = tf.tile(tf.expand_dims(block_ct_res, 2), [1, 1, bl, 1])  # [bs,bn,vec]->[bs,bn,bl,vec]

    with tf.variable_scope('combination'):
        # input: 1.rep_map[bs,bn,bl,vec]; 2.self_attn_result[bs,bn,bl,vec]; 3.rnn_res_tile[bs,bn,bl,vec]
        rep_tensor_with_ct = tf.concat([rep_map, self_attn_result, block_ct_res_tile], -1)  # [bs,bn,bl,3vec]
        new_context_and_gate = linear(rep_tensor_with_ct, 2 * ivec, True, 0.,
                                      'linear_new_context_and_gate', False, wd,
                                      keep_prob, is_train)  # [bs,bn,bl,2vec]
        new_context, gate = tf.split(new_context_and_gate, 2, 3)  # bs,bn,bl,vec
        if activation == "relu":
            new_context_act = tf.nn.relu(new_context)
        elif activation == "elu":
            new_context_act = tf.nn.elu(new_context)
        elif activation == "linear":
            new_context_act = tf.identity(new_context)
tensorflow.variable_scope
12,622
import tensorflow as tf

    tf.logging.info(
        "Best trial info: Step: %s, Best Value Step: %s, "
        "Best Value: %s", global_step, best_metric_global_step, metric_value)
    return global_step, best_metric_global_step, metric_value

  def _remove_checkpoint(checkpoint_path):
    for ext in ["meta", "data-00000-of-00001", "index"]:
      src_ckpt = checkpoint_path + ".{}".format(ext)
      tf.logging.info("removing {}".format(src_ckpt))
      tf.gfile.Remove(src_ckpt)

  def _find_valid_cands(curr_step):
    filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
    candidates = []
    for filename in filenames:
      if filename.endswith(".index"):
        ckpt_name = filename[:-6]
        idx = ckpt_name.split("-")[-1]
        if int(idx) > curr_step:
          candidates.append(filename)
    return candidates

  output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
tensorflow.gfile.ListDirectory
12,623
import tensorflow as tf

  Returns:
    A `Tensor` of shape `(num_elements)` containing pseudorandom values.
  """

  def next_num(num):
    # This creates a cycle of length 136.
    return tf.mod((num * 13), 137)

  num = tf.reshape(tf.mod(seed, 136) + 1, (1,))
  result = num
  for _ in range(num_elements - 1):
    num = next_num(num)
    result = tf.concat([result, num], 0)
  return tf.to_float(result)


@encoding_stage.tf_style_encoding_stage
class PlusRandomNumEncodingStage(encoding_stage.EncodingStageInterface):
  """[Example] encoding stage, adding random values given a random seed.

  This is an example implementation of an `EncodingStageInterface` that
  depends on a shared random seed. The seed `Tensor` should be created in the
  `get_params` method, and the same values should eventually be passed to both
  `encode` and `decode` methods, making sure a randomized transform is
  invertible.
  """
tensorflow.to_float
12,624
import tensorflow as tf

        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 3])
        W_t3 = utils.weight_variable([16, 16, 3, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([3], name="b_t3")
        conv_t3 = tf.nn.relu(utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8))

        annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")

    return tf.expand_dims(annotation_pred, dim=3), conv_t3


def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
tensorflow.expand_dims
12,625
import tensorflow as tf

        training_chief_hooks=[restore_hook, saver_hook])

  def estimator_spec_eval(
      self, features, logits, labels, loss, restore_hook, use_tpu):
    """Construct EstimatorSpec for EVAL mode."""
    hparams = self.hparams
    problem = hparams.problem
    if logits.get_shape().ndims == 3:
      logits = tf.expand_dims(tf.expand_dims(logits, 2), 3)
    eval_metrics_fns = metrics.create_evaluation_metrics([problem], hparams)

    if use_tpu:
      def metric_fn(tf_logits, labels):
        with tf.device("cpu:0"), mtf.utils.outside_all_rewrites():
          eval_metrics = {}
          for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
            if metric_name.split("/")[-1] not in t2t_model.TPU_METRIC_BLACKLIST:
              eval_metrics[metric_name] = metric_fn(
                  tf_logits, None, tf.identity(labels))
          return eval_metrics
      return tpu_estimator.TPUEstimatorSpec(
          tf.estimator.ModeKeys.EVAL,
          evaluation_hooks=[restore_hook],
          loss=loss,
          eval_metrics=(metric_fn, [logits, labels]))
    else:
tensorflow.device
12,626
import tensorflow as tf

    facts = tf.concat(facts, 2)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])

    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
tensorflow.ones_like
12,627
import tensorflow as tf

        z_t_len = tf.strings.length(z_t)
        z_t = tf.string_split([z_t], delimiter='').values

        for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'):
            u = tf.string_join(x_t[i:i + self._p], '')
            vx_keys, r = tf.cond(
                tf.greater(vx.lookup(u), -1),
                true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)),
                false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0),
                                  tf.constant(1, dtype=tf.int64, name='constant'))
            )
            vx.insert(u, r)

        for i in tf.range(start=0, limit=z_t_len - self._p + 1, delta=1, dtype=None, name='range'):
            u = tf.string_join(z_t[i:i + self._p], '')
            vz_keys, r = tf.cond(
tensorflow.reshape
12,628
import tensorflow as tf

    num_units = inputs.get_shape()[-1]

    with tf.variable_scope(scope):
        H = tf.layers.dense(inputs, units=num_units, activation=tf.nn.relu, name="dense1")
        T = tf.layers.dense(
tensorflow.layers.dense
12,629
import tensorflow as tf

        checkpoint_path = os.path.join(self.config["log_dir"], "model.max.ckpt")
        print("Restoring from {}".format(checkpoint_path))
        session.run(tf.global_variables_initializer())
        saver.restore(session, checkpoint_path)
tensorflow.global_variables_initializer
12,630
import tensorflow as tf

    y_conv, img_summary = deepnn(x, train)

    # Define your loss function - softmax_cross_entropy
    with tf.variable_scope('x_entropy'):
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))

    # Define your AdamOptimiser, using FLAGS.learning_rate to minimise the loss function
    decayed_learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, tf.Variable(0, trainable=False), 1000, 0.8)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimiser = tf.train.AdamOptimizer(decayed_learning_rate, name="Adam").minimize(cross_entropy)

    # calculate the prediction and the accuracy
    accuracy, acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1), predictions=tf.argmax(y_conv, axis=1))

    loss_summary = tf.summary.scalar('Loss', cross_entropy)
    acc_summary = tf.summary.scalar('Accuracy', accuracy)

    # summaries for TensorBoard visualisation
    validation_summary = tf.summary.merge([img_summary, acc_summary])
    training_summary = tf.summary.merge([img_summary, loss_summary])
    test_summary = tf.summary.merge([img_summary, acc_summary])

    # saver for checkpoints
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
tensorflow.argmax
12,631
import tensorflow as tf

        q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        scope=tf.get_variable_scope().name + "/model")

        # target q network evaluation
        with tf.variable_scope("target_q_func", reuse=False):
            target_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=False, layers=layers)
        target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                               scope=tf.get_variable_scope().name + "/target_q_func")

        # compute estimate of best possible value starting from state at t + 1
        double_q_values = None
        double_obs_ph = target_policy.obs_ph
        if double_q:
tensorflow.get_variable_scope
12,632
import tensorflow as tf

    in_top_k = tf.nn.in_top_k(A, B, 1)

    sess.run(tf.global_variables_initializer())
    print(f'\nl1={sess.run(l1)} l2={sess.run(l2)}')

    a = np.array([1, 2, 3], dtype=np.float32)
    tf_v = tf.Variable(5, dtype=tf.float32)
    sess.run(tf.global_variables_initializer())
    print(f'a * tf_v = {sess.run(a * tf_v)}')

    weights = tf.constant([[1.0, -2], [-3, 4]])
    regular_l1 = tf.contrib.layers.l1_regularizer(0.5)(weights)
    regular_l2 = tf.contrib.layers.l2_regularizer(0.5)(weights)
    print(f'\nregular_l1={sess.run(regular_l1)} regular_l2={sess.run(regular_l2)}')

    val_val = sess.run(val)
    print('\nval=' + str(val_val))
    print(f'\nargmax_0={val_val.argmax(0)} argmax_1={val_val.argmax(1)}')
    print('\ntf.argmax(val, 0)=' + str(sess.run(tf.argmax(val, 0))))
    print('tf.argmax(val, 1)=' + str(sess.run(tf.argmax(val, 1))))

    values, indices = sess.run(top_k)
    print(f'\ntop_k: values={values}\nindices={indices}')
    print(f'in_top_k = {sess.run(in_top_k)}')

    sess.close()
tensorflow.contrib.layers.l2_regularizer
12,633
from tensorflow.python.framework import ops

  x = ops.convert_to_tensor(x, name="x")
  return gen_math_ops._tanh(x, name=name)


ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.RegisterShape
12,634
import tensorflow as tf

with tf.device('/gpu:0'):
    c = tf.matmul(a, b)
    c = tf.reshape(c, [-1])
tensorflow.reshape
12,635
import tensorflow as tf

        offset_height=0,
        offset_width=0,
        target_height=tf.cast(h_crop, tf.int32),
        target_width=tf.cast(w_crop, tf.int32))
tensorflow.cast
12,636
import tensorflow as tf

    new_x_shape = x_shape[:-2] + [np.prod(x_shape[-2:])]
    return tf.reshape(x, new_x_shape)

def split_heads(x, n, k=False):  # [-1,n_ctx,head,head_emb]
    if k:
        return tf.transpose(split_states(x, n), [0, 2, 3, 1])
    else:
        return tf.transpose(split_states(x, n), [0, 2, 1, 3])

def merge_heads(x):  # [-1,head,n_ctx,emb]
    return merge_states(tf.transpose(x, [0, 2, 1, 3]))

def conv1d(x, scope, nf, rf, w_init=tf.random_normal_initializer(stddev=0.02),
           b_init=tf.constant_initializer(0), pad='VALID', train=False):
    with tf.variable_scope(scope):  # x = [-1,n_ctx,512]
        nx = shape_list(x)[-1]  # rf = 1, nx=emb, nf=3*emb
        w = tf.get_variable("w", [rf, nx, nf], initializer=w_init)
        b = tf.get_variable("b", [nf], initializer=b_init)
        if rf == 1:  # faster 1x1 conv
            c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf])) + b,
                           shape_list(x)[:-1] + [nf])
        else:  # was used to train LM
            c = tf.nn.conv1d(x, w, stride=1, padding=pad) + b
        return c

def attn(x, scope, n_state, n_head, train=False, scale=False):
tensorflow.constant_initializer
12,637
import tensorflow as tf

          [
              tf.multinomial(
                  -dist[:, i, :], num_samples=self.hparams.num_samples)
              for i in range(self.hparams.num_blocks)
          ],
          axis=1)
      nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size)
      nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)
    else:
      if self.hparams.random_top_k > 1:
        _, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)
        nearest_idx = tf.gather(
tensorflow.one_hot
12,638
import tensorflow as tf scope="dropout3_1") dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob, is_training=is_training, scope="dropout3_2") output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1, activation_fn=None, scope="output_1") output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2, activation_fn=None, scope="output_2") with tf.variable_scope("loss"): loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1)) loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2)) reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses) with tf.variable_scope("evaluation"): accuracy_1 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_1, axis=-1), tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1") accuracy_2 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_2, axis=-1), tf.argmax(y_2, axis=-1)), tf.float32), name="accuracy_2")
tensorflow.get_collection
12,639
import tensorflow as tf

                 nbkg * argus_pdf_phalf_WN(mes, m0, argpar, mes_low, mes_high),
                 name="sum_pdf")
    return tf.div(add, nsig + nbkg, name="sum_pdf_normalized")

# generate the data in RooFit and import it
# run this in ROOT:
# data.write("roofit_demo_random_data_values.dat");
# to read it back in:
# RooDataSet *data;
# data->RooDataSet.read("roofit_demo_random_data_values.dat", RooArgList(mes))
data_raw = np.loadtxt(project_dn + "roofit_demo_random_data_values.dat", dtype=np.float64)
data = tf.constant(data_raw, name='event_data', dtype=tf.float64)

# // --- Perform extended ML fit of composite PDF to toy data ---
# sum.fitTo(*data,"Extended") ;

# convert to tf constants, otherwise you'll get complaints about float32s...
constraint_tf = {}
for key in constraint.keys():
    low = constraint[key][0]
    high = constraint[key][1]
    constraint_tf[key] = (tf.constant(low, dtype=tf.float64),
                          tf.constant(high, dtype=tf.float64))
tensorflow.constant
12,640
import tensorflow as tf

const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9]))
const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim]))

sess.run(const_var.initializer)
sess.run(const_fill_var.initializer)
print(sess.run(const_var))
print(sess.run(const_fill_var))

linear_var = tf.Variable(tf.linspace(start=0.0, stop=1.0, num=3))  # Generates [0.0, 0.5, 1.0], includes the end
sequence_var = tf.Variable(tf.range(start=6, limit=15, delta=3))  # Generates [6, 9, 12], doesn't include the end

sess.run(linear_var.initializer)
sess.run(sequence_var.initializer)
print(sess.run(linear_var))
print(sess.run(sequence_var))

rnorm_var = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)
runif_var = tf.random_uniform([row_dim, col_dim], minval=0, maxval=4)
tensorflow.range
12,641
from tensorflow.python.framework import ops

    tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  with ops.op_scope([x], name, "Cast") as name:
    if isinstance(x, ops.SparseTensor):
      values_cast = cast(x.values, dtype, name=name)
      return ops.SparseTensor(x.indices, values_cast, x.shape)
    else:
      # TODO(touts): Handle what Josh said.
      #
      # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.base_dtype == dtype:
        return x
      return gen_math_ops.cast(x, dtype, name=name)
tensorflow.python.framework.ops.op_scope
12,642
import tensorflow as tf

    k = tf.to_int32(tf.floor(tf.to_float(tf.shape(context_outputs)[0]) * self.config["top_span_ratio"]))
    top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
                                               tf.expand_dims(candidate_starts, 0),
                                               tf.expand_dims(candidate_ends, 0),
                                               tf.expand_dims(k, 0),
                                               util.shape(context_outputs, 0),
                                               True)  # [1, k]
    top_span_indices.set_shape([1, None])
    top_span_indices = tf.squeeze(top_span_indices, 0)  # [k]

    top_span_starts = tf.gather(candidate_starts, top_span_indices)  # [k]
    top_span_ends = tf.gather(candidate_ends, top_span_indices)  # [k]
    top_span_emb = tf.gather(candidate_span_emb, top_span_indices)  # [k, emb]
    top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices)  # [k]
    top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices)  # [k]
    top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices)  # [k]
    top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts)  # [k]

    c = tf.minimum(self.config["max_top_antecedents"], k)

    if self.config["coarse_to_fine"]:
tensorflow.gather
12,643
import tensorflow as tf

        cur_weights = tf.slice(weights_tensors[tf.cast(rand_horizon, tf.int32)], [0, 0], [epi_len, new_w])
        # cur_weights = tf.slice(weights_tensors, [tf.cast(rand_horizon, tf.int32), 0, 0], [1, epi_len, new_w])
        horizon_pred = tf.matmul(pred, cur_weights)
        horizon_tgt = tf.matmul(tgt, cur_weights)
    return horizon_pred, horizon_tgt
tensorflow.matmul
12,644
import tensorflow as tf

        x = tf.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # (batch_size, num_nodes, input_size, order)
        x = tf.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])

        weights = tf.get_variable(
            'weights', [input_size * num_matrices, output_size],
            dtype=dtype,
            initializer=tf.contrib.layers.xavier_initializer())
        x = tf.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)

        biases = tf.get_variable("biases", [output_size],
                                 dtype=dtype,
                                 initializer=tf.constant_initializer(bias_start, dtype=dtype))
tensorflow.matmul
12,645
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

        # FC layers for goal_pos input
        # goal_layer1 = Dense(units=GOAL_SIZE)(goal_pos)
        # goal_layer2 = Dense(units=GOAL_SIZE)(goal_layer1)

        # FC layers to find next location
        loc_layer1 = Dense(units=loc_layer_size)(prev_loc)
        loc_layer2 = Dense(units=loc_layer_size)(loc_layer1)

        # Concatenation of above layers, followed by FC layer
        concat = tf.concat([flat1b, loc_layer2], 1)  # goal_layer2
        h1 = Dense(units=RNN_SIZE)(concat)
        h2 = Dense(units=RNN_SIZE)(h1)
        self.h3 = tf.nn.relu(h2 + concat)

        # Recurrent network for temporal dependencies
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE, state_is_tuple=True)
        c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
        h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
        state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
        state_in = (c_in, h_in)
tensorflow.keras.layers.Dense
12,646
from tensorflow.python.framework import op_def_library as _op_def_library

  op_list = _op_def_pb2.OpList()
  _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
  _op_def_registry.register_op_list(op_list)
  op_def_lib = _op_def_library.OpDefLibrary()
  op_def_lib.add_op_list(op_list)
  return op_def_lib
tensorflow.python.framework.op_def_library.OpDefLibrary
12,647
import tensorflow as tf
import convert_to_records as records
import numpy as np

FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.')
tf.app.flags.DEFINE_string('pm', '66661', 'pooling scheme across scales. Each number specifies the number of scales remaining at each layer. The first number has to be the same as used in --num_scales.')
tf.app.flags.DEFINE_integer('conv_kernel', 5, 'Size of convolutional kernel')
tf.app.flags.DEFINE_integer('pool_kernel', 3, 'Size of spatial pooling kernel')
tf.app.flags.DEFINE_integer('feats_per_layer', 32, 'Number of feature channels at each layer')
tf.app.flags.DEFINE_boolean('total_pool', True, 'If true, pool all feature maps to 1x1 size in final layer')
tf.app.flags.DEFINE_integer('pool_stride', '1', 'If 2, we get progressive pooling - with overlap pooling, AlexNet style')

TRAIN_FILE = 'train_{}.tfrecords'.format(records.tfrecord_name())
VALIDATION_FILE = 'validation_{}.tfrecords'.format(records.tfrecord_name())
tensorflow.app.flags.DEFINE_integer
12,648
import tensorflow as tf

    return tf.nn.relu(x, name=name)


def get_variable(name, shape, dtype, initializer, trainable=True, regularizer=None):
    with tf.device('/cpu:0'):
        var = tf.get_variable(name, shape=shape, dtype=dtype, initializer=initializer,
                              regularizer=regularizer, trainable=trainable,
                              collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.GLOBAL_VARIABLES])
    return var


def conv(inp, name, size, out_channels, strides=[1, 1, 1, 1],
         dilation=None, padding='SAME', apply_relu=True, alpha=0.0, bias=True,
         initializer=tf.contrib.layers.xavier_initializer_conv2d()):
    batch_size = inp.get_shape().as_list()[0]
    res1 = inp.get_shape().as_list()[1]
    res2 = inp.get_shape().as_list()[1]
    in_channels = inp.get_shape().as_list()[3]

    with tf.variable_scope(name):
        W = get_variable("W", shape=[size, size, in_channels, out_channels], dtype=tf.float32,
                         initializer=initializer, regularizer=tf.nn.l2_loss)
        b = get_variable("b", shape=[1, 1, 1, out_channels], dtype=tf.float32,
                         initializer=tf.zeros_initializer(), trainable=bias)
tensorflow.contrib.layers.xavier_initializer_conv2d
12,649
import tensorflow as tf

      new_saver.restore(sess, saver0_ckpt)

      # Adds loss and train.
      labels = tf.constant(0, tf.int32, shape=[100], name="labels")
      batch_size = tf.size(labels)
      labels = tf.expand_dims(labels, 1)
      indices = tf.expand_dims(tf.range(0, batch_size), 1)
tensorflow.size
12,650
import tensorflow as tf

        z_t = tf.gather(y, m)
        z_t_len = tf.strings.length(z_t)
        z_t = tf.string_split([z_t], delimiter='').values

        for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'):
            u = tf.string_join(x_t[i:i + self._p], '')
            vx_keys, r = tf.cond(
                tf.greater(vx.lookup(u), -1),
                true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)),
                false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0),
                                  tf.constant(1, dtype=tf.int64, name='constant'))
            )
            vx.insert(u, r)

        for i in tf.range(start=0, limit=z_t_len - self._p + 1, delta=1, dtype=None, name='range'):
            u = tf.string_join(z_t[i:i + self._p], '')
            vz_keys, r = tf.cond(
                tf.greater(vz.lookup(u), -1),
                true_fn=lambda: (vz_keys, tf.add(vz.lookup(u), 1)),
                false_fn=lambda: (tf.concat([vz_keys, tf.reshape(u, (-1, 1))], axis=0),
                                  tf.constant(1, dtype=tf.int64))
tensorflow.constant
12,651
import tensorflow as tf """ Parameters ------------ net : :class:`Layer` Previous layer with output shape of (batch, width, r). """ # with tf.name_scope(name): # self.outputs = self._apply_activation(self._PS(self.inputs, r=scale)) outputs = self.act(self._PS(inputs, r=self.scale)) return outputs @private_method def _PS(self, I, r): X = tf.transpose(I, [2, 1, 0]) # (r, w, b) X = tf.batch_to_space_nd(X, [r], [[0, 0]]) # (1, r*w, b) X = tf.transpose(X, [2, 1, 0]) return X
tensorflow.transpose
12,652
import tensorflow as tf

          rnn_params,
          base_variable_scope='Model/RNN')
      tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)

    self._cost = tf.get_collection_ref(self.with_prefix(self._name, 'cost'))[0]
tensorflow.add_to_collection
12,653
import tensorflow as tf print(f"Obs space: {ob_space}") print(f"policy.obs_ph: {policy.obs_ph}") print(f"policy.processed_obs: {policy.processed_obs}") print(f"Obs_phs space: {obs_phs}") #assert 5 == 1 ####################### for var in tf.all_variables(): print(var) batch_size = tf.shape(policy.obs_ph)[0] n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) _act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) def act(obs, stochastic=True, update_eps=-1): return _act(obs, stochastic, update_eps) return act, obs_phs def build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, param_noise_filter_func=None):
tensorflow.cond
12,654
import tensorflow as tf

      prediction) and values storing Tensors representing predictions (argmax
      over channels). Each prediction has size [batch, height, width].
  """
  outputs_to_predictions = {
      output: []
      for output in model_options.outputs_to_num_classes
  }

  for i, image_scale in enumerate(eval_scales):
    with tf.variable_scope(tf.get_variable_scope(), reuse=True if i else None):
      outputs_to_scales_to_logits = multi_scale_logits(
          images,
          model_options=model_options,
          image_pyramid=[image_scale],
          is_training=False,
          fine_tune_batch_norm=False)

    if add_flipped_images:
tensorflow.get_variable_scope
12,655
from tensorflow.python.framework import ops

  for (i, x) in enumerate(args):
    x = ops.convert_to_tensor(x)
tensorflow.python.framework.ops.convert_to_tensor
12,656
import tensorflow as tf

            # initializer=tf.truncated_normal_initializer(),
            # initializer=tf.keras.initializers.lecun_normal(),
            dtype=tf.float32)
        self.position_emb = tf.reshape(self.position_emb, [-1, 2 * self.config.hidden_size])

        shape = tf.shape(output)
        output = tf.reshape(output, [-1, 2 * self.config.hidden_size])

        atten_hidden = tf.tanh(
            tf.add(
                tf.matmul(self.position_emb, W),
                tf.matmul(output, U)))
        alpha = tf.nn.softmax(
            tf.reshape(tf.matmul(atten_hidden, V), [-1, shape[1], 1]), axis=1)
        output = tf.reshape(output, [-1, shape[1], 2 * self.config.hidden_size])
        C = tf.multiply(alpha, output)
        return tf.concat([output, C], axis=-1)

    def _train_epoch(self, train_batches, dropout):
        """
        :param train_batches:
        :param dropout:
        :return:
        """
        total_num, total_loss = 0, 0
        log_every_n_batch, n_batch_loss = 1000, 0
        for bitx, batch in enumerate(train_batches, 1):
            feed_dict = {self.c: batch['passage_token_ids'],
                         self.q: batch['question_token_ids'],
tensorflow.multiply
12,657
import tensorflow as tf

            self.real_pc_rotated = self.rotate_n_angles(self.real_pc, self.rot_label_pl)
            self.real_pc_pred, real_pc_end_points = self.get_pred(self.real_pc_rotated)
            self.real_pc_rot_loss = self.get_loss(self.real_pc_pred, self.rot_label_pl, real_pc_end_points)

        with tf.variable_scope('generator'):
            self.generator_out = self.generator(self.noise, self.n_output, **gen_kwargs)
            self.gen_out_rotated = self.rotate_n_angles(self.generator_out, self.rot_label_pl)
            self.gen_out_pred, gen_out_end_points = self.get_pred(self.gen_out_rotated)
            self.gen_out_rot_loss = self.get_loss(self.gen_out_pred, self.rot_label_pl, gen_out_end_points)

        # classification loss
        # need to fix
        if self.ms_task:
            with tf.variable_scope('mixed'):
                # add fake pc as a rotation class
                num_to_add = int(max(self.batch_size / self.num_angles, 1))
                idx = tf.range(0, self.batch_size, 1)
                idx = tf.random_shuffle(idx)[0:num_to_add]
                self.fake_to_add = tf.gather(self.generator_out, idx)
                self.mixed_pc = tf.concat([self.real_pc_rotated, self.fake_to_add], 0)
                self.mixed_label = tf.concat([self.rot_label_pl,
                                              tf.constant(self.num_angles, shape=(num_to_add,))], axis=0)
                mixed_idx = tf.range(0, self.mixed_label.get_shape().as_list()[0], 1)
                mixed_idx = tf.random_shuffle(mixed_idx)[0:self.batch_size]
                self.mixed_pc = tf.gather(self.mixed_pc, mixed_idx)
                self.mixed_label = tf.gather(self.mixed_label, mixed_idx)
                self.mixed_pred, mixed_end_points = self.get_pred(self.mixed_pc)
                self.mixed_loss = self.get_loss(self.mixed_pred, self.mixed_label, mixed_end_points)

        with tf.variable_scope('discriminator') as scope:
            self.real_prob, self.real_logit = self.discriminator(self.real_pc_rotated, scope=scope, **disc_kwargs)
            self.synthetic_prob, self.synthetic_logit = self.discriminator(self.gen_out_rotated, reuse=True,
                                                                           scope=scope, **disc_kwargs)
tensorflow.random_shuffle
12,658
import tensorflow as tf

        save_summary_steps=10)

    # TPUEstimator
    estimator = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        config=config,
tensorflow.contrib.tpu.TPUEstimator
12,659
import tensorflow as tf

    return multi_gpu_model(model, gpus=gpus, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)


def force_cpu():
    '''Force CPU on a GPU system
    '''
    import keras.backend as K
    import tensorflow as tf

    config = tf.ConfigProto(device_count={'GPU': 0})
    session = tf.Session(config=config)
    K.set_session(session)
tensorflow.ConfigProto
12,660
import tensorflow as tf

        self.mu = self.mu * action_bound[1]
        self.sigma = self.sigma + 1e-4

        # get action from distribution
        self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
        self.action = tf.squeeze(self.normal_dist.sample(1), axis=0)
        self.action = tf.clip_by_value(self.action, action_bound[0], action_bound[1])

        # Loss and train op
        self.loss = -self.normal_dist.log_prob(self.a_his) * self.target
        # Add cross entropy cost to encourage exploration
        self.loss -= entropy_beta * self.normal_dist.entropy()

        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.grads_and_vars = self.optimizer.compute_gradients(self.loss)

        self.grads = []
        self.vars = []
        for i in range(len(self.grads_and_vars)):
            self.grads.append(self.grads_and_vars[i][0])
            self.vars.append(self.grads_and_vars[i][1])
        self.grads = self.grads[-1 * NUM_VARS:]
        self.vars = self.vars[-1 * NUM_VARS:]

        self.train_op = self.optimizer.apply_gradients(
            self.grads_and_vars, global_step=tf.contrib.framework.get_global_step())
tensorflow.train.AdamOptimizer
12,661
import tensorflow as tf """ grads = tf.gradients(
tensorflow.gradients
12,662
import tensorflow as tf

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
tensorflow.contrib.tpu.TPUConfig
12,663
import tensorflow as tf

    y = output
    if add_bias:
        bias = tf.Variable([0.0])
        y = output + bias
    sig_y = tf.clip_by_value(tf.sigmoid(y), 0.001, 0.999)  # avoid NaNs
    loss = -tf.reduce_sum(target * tf.log(sig_y) + (1 - target) * tf.log(1 - sig_y))
    return sig_y, loss


def ranking_margin_objective(output, margin=1.0):
tensorflow.log
12,664
import tensorflow as tf

        idx = np.random.randint(0, self.tree.capacity, size=n, dtype=np.int)
        return np.vstack(self.tree.data[idx])

    def update(self, idx, error):
        p = self._get_priority(error)
        self.tree.update(idx, p)

    def _get_priority(self, error):
        error += self.epsilon  # avoid 0
        clipped_error = np.clip(error, 0, self.abs_err_upper)
        return np.power(clipped_error, self.alpha)


sess = tf.Session()

# Create actor and critic.
actor = Actor(sess, ACTION_DIM, ACTION_BOUND, LR_A, REPLACE_ITER_A)
critic = Critic(sess, STATE_DIM, ACTION_DIM, LR_C, GAMMA, REPLACE_ITER_C, actor.a, actor.a_)
actor.add_grad_to_graph(critic.a_grads)

M = Memory(MEMORY_CAPACITY)

saver = tf.train.Saver(max_to_keep=100)

if LOAD_MODEL:
    all_ckpt = tf.train.get_checkpoint_state('./data', 'checkpoint').all_model_checkpoint_paths
tensorflow.Session
12,665
import tensorflow as tf

def create_config_proto():
  config = tf.ConfigProto()
tensorflow.ConfigProto
12,666
import tensorflow as tf

N_loops = 100
timings = []

tf.logging.set_verbosity(tf.logging.ERROR)
tensorflow.logging.set_verbosity
12,667
import tensorflow as tf

        If `keepdims` is `False`, the rank of the tensor is reduced by 1.
        If `keepdims` is `True`, the reduced dimension is retained with length 1.

    Returns
    -------
    A tensor with maximum values of `x`.
    """
    axis = _normalize_axis(axis, get_ndim(x))
    return tf.reduce_max(x, axis=axis, keep_dims=keepdims)


def l2_normalize(x, axis):
    """Normalizes a tensor wrt the L2 norm alongside the specified axis.

    Parameters
    ----------
    x: input tensor.
tensorflow.reduce_max
12,668
import tensorflow as tf

        out = tf.matmul(l3, self.w4) + self.b4
        return out

    def valid_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2) + self.b2
        l2 = tf.nn.relu(l2)
        l3 = tf.matmul(l2, self.w3) + self.b3
        l3 = tf.nn.relu(l3)
        out = tf.matmul(l3, self.w4) + self.b4
        return out
tensorflow.matmul
12,669
import tensorflow as tf

        batch_size = tf.shape(observations_ph.get())[0]
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))

        act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
                         outputs=[output_actions, update_eps_expr, eps],
                         givens={update_eps_ph: -1.0, stochastic_ph: True},
                         updates=[update_eps_expr])
        return act
tensorflow.cond
12,670
import tensorflow as tf

        e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs)
        mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32)
        e *= mask

        if encoder.attn_norm_fun == 'none':
            weights = e
        elif encoder.attn_norm_fun == 'sigmoid':
            weights = tf.nn.sigmoid(e)
        elif encoder.attn_norm_fun == 'max':
            weights = tf.one_hot(tf.argmax(e, -1), depth=tf.shape(e)[1])
        else:
            e -= tf.reduce_max(e, axis=1, keep_dims=True)
            T = encoder.attn_temperature or 1.0
            exp = tf.exp(e / T) * mask
            weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)
tensorflow.nn.sigmoid
12,671
import tensorflow as tf

    masks = tf.stack([mask0, mask1, mask2, mask3, mask4, mask5])
    scores = tf.constant([[0.05, 1.0, 0.2],
tensorflow.constant
12,672
import tensorflow as tf

def train_model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
  """Defines the model function."""
  target_num_classes = FLAGS.target_num_classes
  global_step = tf.train.get_global_step()

  src_features, src_labels = features['src'], tf.cast(labels['src'], tf.int64)
  finetune_features = features['finetune']
  target_features = features['target']

  num_classes = FLAGS.src_num_classes

  finetune_one_hot_labels = tf.one_hot(
      tf.cast(labels['finetune'], tf.int64), target_num_classes)
  target_one_hot_labels = tf.one_hot(
      tf.cast(labels['target'], tf.int64), target_num_classes)

  with tf.variable_scope('rl_controller') as rl_scope:
    # It creates a `rl_scope` which will be used for ops.
    pass
  rl_entropy, label_weights, log_prob = rl_label_weights(rl_scope)
  loss_entropy, loss_weights, loss_log_prob = get_loss_weights(rl_scope)

  def gather_init_weights():
    inst_weights = tf.stop_gradient(tf.gather(label_weights, src_labels))
    return inst_weights
tensorflow.cast
12,673
import tensorflow as tf

    with self.tempWorkingDir(tempdir):
      # Save training snapshots to a relative path.
      traindir = "train/"
      os.mkdir(traindir)

      filename = "snapshot"
      filepath = os.path.join(traindir, filename)

      with self.test_session() as sess:
        # Build a simple graph.
        v0 = tf.Variable(0.0)
        inc = v0.assign_add(1.0)

        save = tf.train.Saver({"v0": v0})

        # Record a short training history.
        tf.initialize_all_variables().run()
        save.save(sess, filepath, global_step=0)
        inc.eval()
        save.save(sess, filepath, global_step=1)
        inc.eval()
        save.save(sess, filepath, global_step=2)

      with self.test_session() as sess:
        # Build a new graph with different initialization.
        v0 = tf.Variable(-1.0)
tensorflow.train.Saver
12,674
import tensorflow as tf

      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
tensorflow.metrics.accuracy
12,675
import tensorflow as tf

    trainable: boolean defining if the variable is for training

  Returns:
    Variable Tensor
  """
  var = tf.get_variable(
      name, shape, initializer=initializer, trainable=trainable)
  return var
tensorflow.get_variable
12,676
import tensorflow as tf

  def example_reading_spec(self):
    data_fields = {"targets": tf.VarLenFeature(tf.int64)}
    if self.has_inputs:
      data_fields["inputs"] = tf.VarLenFeature(tf.int64)

    if self.packed_length:
      if self.has_inputs:
        data_fields["inputs_segmentation"] = tf.VarLenFeature(tf.int64)
        data_fields["inputs_position"] = tf.VarLenFeature(tf.int64)
      data_fields["targets_segmentation"] = tf.VarLenFeature(tf.int64)
      data_fields["targets_position"] = tf.VarLenFeature(tf.int64)

    data_items_to_decoders = None
    return (data_fields, data_items_to_decoders)

  def eval_metrics(self):
    return [
        metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
        metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY,
        metrics.Metrics.APPROX_BLEU, metrics.Metrics.ROUGE_2_F,
        metrics.Metrics.ROUGE_L_F
tensorflow.VarLenFeature
12,677
import tensorflow as tf

    return [self.ENCODED_VALUES_KEY]

  @property
  def commutes_with_sum(self):
    """See base class."""
    return False

  @property
  def decode_needs_input_shape(self):
    """See base class."""
    return False

  def get_params(self):
    """See base class."""
    params = {self.ADD_PARAM_KEY: tf.constant(1.0)}
    return params, params

  def encode(self, x, encode_params):
    """See base class."""
    return {self.ENCODED_VALUES_KEY: x + encode_params[self.ADD_PARAM_KEY]}

  def decode(self,
             encoded_tensors,
             decode_params,
             num_summands=None,
             shape=None):
    """See base class."""
    del num_summands  # Unused.
tensorflow.constant
12,678
import tensorflow as tf

            grads = tf.gradients(self.loss, self.var_list)
            grads, _ = tf.clip_by_global_norm(grads, 40)

        with tf.name_scope('sync'):  # synchronization between the worker and the global net
            with tf.name_scope('pull'):
                # fetch the global parameters and copy them into the local net
                self.pull_params_op = tf.group(*[v1.assign(v2)
                                                 for v1, v2 in zip(self.var_list, globalAC.var_list)])
            with tf.name_scope('push'):
                # push the parameters up to the global net
                self.update_params_op = OPT.apply_gradients(zip(grads, globalAC.var_list))
                # what gets pushed are the gradients `grads` of the local net's actor and critic
                # parameters, computed above; apply_gradients is a built-in tf.train.Optimizer
                # method that applies the computed gradients to the global parameters
        self.inc_step = self.global_step.assign_add(tf.shape(self.obs)[0])
        self.train_op = tf.group(self.update_params_op, self.inc_step)
        # GLOBAL_STEP += tf.shape(self.obs)[0]
tensorflow.name_scope
12,679
import tensorflow as tf

    return tf.where(tf.greater(sign_order, 0), positive, negative)


def evaluate_spherical_harmonics(
    degree_l: TensorLike,
    order_m: TensorLike,
    theta: TensorLike,
    phi: TensorLike,
    name: str = "spherical_harmonics_evaluate_spherical_harmonics") -> TensorLike:  # pylint: disable=line-too-long
  with tf.name_scope(name):
    degree_l = tf.convert_to_tensor(value=degree_l)
    order_m = tf.convert_to_tensor(value=order_m)
    theta = tf.convert_to_tensor(value=theta)
    phi = tf.convert_to_tensor(value=phi)

    var_type = theta.dtype
    sign_m = tf.math.sign(order_m)
    order_m = tf.abs(order_m)
    zeros = tf.zeros_like(order_m)
tensorflow.name_scope
12,680
import tensorflow as tf

      util.export_state_tuples(self._initial_state, self._initial_state_name)
      util.export_state_tuples(self._final_state, self._final_state_name)

  def import_ops(self):
    """Imports ops from collections."""
    if self._is_training:
      self._train_op = tf.get_collection_ref("train_op")[0]
      self._lr = tf.get_collection_ref("lr")[0]
      self._new_lr = tf.get_collection_ref("new_lr")[0]
      self._lr_update = tf.get_collection_ref("lr_update")[0]
      rnn_params = tf.get_collection_ref("rnn_params")
      if self._cell and rnn_params:
        params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
            self._cell,
tensorflow.get_collection_ref
12,681
import tensorflow as tf

        n_patch = n_row * n_col // (patch_size ** 2)
        patches = tf.image.extract_patches(tf.expand_dims(x, 0), sizes=window, strides=window,
                                           rates=[1, 1, 1, 1], padding='VALID')
        patches = tf.reshape(patches, [n_patch, patch_size, patch_size, n_channel])
        patches = tf.random.shuffle(patches)
        rows = tf.split(patches, n_col // patch_size, axis=0)
        rows = [tf.concat(tf.unstack(x), axis=1) for x in rows]
        x_aug = tf.concat(rows, axis=0)
        x_aug = tf.convert_to_tensor(x_aug)
        return tf.concat([x, x_aug], axis=2)

    def gaussian_blur(self, x):
        # create random gaussian blur filter
        mean = 0
        std = tf.random.uniform(shape=[], minval=5, maxval=10, dtype=tf.float32)  # std [5-10]
        size = tf.random.uniform(shape=[], minval=3, maxval=7, dtype=tf.int32)  # size [7-15]
        self.kernel = self.gaussian_kernel(size, mean, std)
        self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
        self.paddings = tf.convert_to_tensor([[size, size], [size, size], [0, 0]])
        x_aug = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x, self.paddings, 'SYMMETRIC'), 0),
                                       self.kernel, self.pointwise_filter,
                                       strides=[1, 1, 1, 1], padding='VALID')
        x_aug = tf.squeeze(x_aug)
        return tf.concat([x, x_aug], axis=2)

    def high_low_pass(self, x):
        x_low = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x, self.paddings, 'SYMMETRIC'), 0),
                                       self.kernel, self.pointwise_filter,
                                       strides=[1, 1, 1, 1], padding='VALID')
        x_low = tf.squeeze(x_low)
        x_high = x - x_low
tensorflow.random.uniform
12,682
import tensorflow as tf

      probability of each label learned by the loss, if not provided.
    true_positives_lower_bound: Lower bound on the number of true positives
      given `labels` and `logits`. This is the same lower bound which is used
      in the loss expression to be optimized.
    false_positives_upper_bound: Upper bound on the number of false positives
      given `labels` and `logits`. This is the same upper bound which is used
      in the loss expression to be optimized.

  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  with tf.variable_scope(scope,
                         'recall_at_precision',
                         [logits, labels, label_priors],
                         reuse=reuse):
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
    num_labels = losses_utils.get_num_labels(logits)

    # Convert other inputs to tensors and standardize dtypes.
    target_precision = losses_utils.convert_and_cast(target_precision, 'target_precision', logits.dtype)
    dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor', logits.dtype)
tensorflow.variable_scope
12,683
import tensorflow as tf

        self.q_m_ = tf.placeholder(tf.float32, [None, ], name='q_value_next_max')
        self.q_target = tf.placeholder(tf.float32, [None, ], name='q_tot_target')
tensorflow.placeholder
12,684
import tensorflow as tf tf.summary.scalar("loss", weighted_error) if full_tensorboard_log: tf.summary.histogram("td_error", td_error) # update_target_fn will be called periodically to copy Q network to target Q network
tensorflow.summary.histogram
12,685
import tensorflow as tf

        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5

        # Placeholders
        self.sess = tf.Session(config=config)
        self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
        self.a_bound = (env.action_space.high - env.action_space.low) / 2
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
        self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
        self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
        self.rewards = tf.placeholder(tf.float32, [None, 1], 'rewards')
        self.keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')

        # Dataset with experience replay
        self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state,
                                                           'actions': self.actions,
                                                           'rewards': self.rewards,
                                                           'advantage': self.advantage})
        self.dataset = self.dataset.batch(self.MINIBATCH, drop_remainder=True)
        self.data_iter = self.dataset.make_initializable_iterator()
tensorflow.placeholder
12,686
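The initializable iterator built above must be re-initialized with a fresh feed of the four placeholders before each pass over the collected experience. A sketch of the driving loop, assumed to live in the same class (the zero-filled rollout arrays stand in for real data):

import numpy as np
import tensorflow as tf

batch = self.data_iter.get_next()
self.sess.run(self.data_iter.initializer, feed_dict={
    self.state: np.zeros((256, self.s_dim[0]), np.float32),
    self.actions: np.zeros((256, self.a_dim), np.float32),
    self.rewards: np.zeros((256, 1), np.float32),
    self.advantage: np.zeros((256, 1), np.float32)})
while True:
    try:
        minibatch = self.sess.run(batch)  # dict with 'state', 'actions', 'rewards', 'advantage'
    except tf.errors.OutOfRangeError:
        break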
import tensorflow as tf else: y_shape.append(s) y_shape = tuple(y_shape) y_permute_dim = list(range(get_ndim(y))) y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim xt = tf.reshape(x, [-1, x_shape[-1]]) yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1]) return tf.reshape( tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:]) out = tf.matmul(x, y)
tensorflow.reshape
12,687
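A shape walk-through of the reshape/matmul fallback above, in the simplest case where y is 2-D so the transpose is a no-op (x: (2, 3, 4), y: (4, 5), contracting over the size-4 axis):

import tensorflow as tf

x = tf.ones([2, 3, 4])
y = tf.ones([4, 5])
xt = tf.reshape(x, [-1, 4])                    # (6, 4): fold all leading axes into one
out = tf.reshape(tf.matmul(xt, y), [2, 3, 5])  # matmul gives (6, 5); restore to (2, 3, 5)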
import tensorflow as tf
          predictions, label_ids, weights=is_real_example)
      tn, tn_op = tf.metrics.true_negatives(
          predictions, label_ids, weights=is_real_example)
      fp, fp_op = tf.metrics.false_positives(
          predictions, label_ids, weights=is_real_example)
      fn, fn_op = tf.metrics.false_negatives(
          predictions, label_ids, weights=is_real_example)

      # Compute the Matthews correlation coefficient
      mcc = tf.div_no_nan(
          tp * tn - fp * fn,
          tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5))

      # Compute accuracy
      accuracy = tf.metrics.accuracy(
          labels=label_ids, predictions=predictions, weights=is_real_example)

      loss = tf.metrics.mean(
          values=per_example_loss, weights=is_real_example)

      return {"matthew_corr": (mcc, tf.group(tp_op, tn_op, fp_op, fn_op)),
              "eval_accuracy": accuracy, "eval_loss": loss}

    eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example])
    output_spec = contrib_tpu.TPUEstimatorSpec(
tensorflow.metrics.accuracy
12,688
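As a sanity check of the Matthews correlation expression above, on made-up confusion counts:

tp, tn, fp, fn = 40.0, 45.0, 5.0, 10.0
mcc = (tp * tn - fp * fn) / ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
# (40*45 - 5*10) / sqrt(45 * 50 * 50 * 55) = 1750 / 2487.47... ≈ 0.704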
import tensorflow as tf
tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible')
tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information')
tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate')
tensorflow.app.flags.DEFINE_integer
12,689
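Once defined, the flags are read through the global `FLAGS` object and can be overridden on the command line; a minimal TF 1.x usage sketch:

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

def main(_):
    print(FLAGS.batch_size)  # 128 unless overridden, e.g. --batch_size=64

if __name__ == '__main__':
    tf.app.run()  # parses argv into FLAGS, then calls main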
import tensorflow as tf values = [ tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss", simple_value=before_loss), tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss", simple_value=after_loss), tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss", simple_value=stop_token_loss), tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss", simple_value=loss), ] if linear_loss is not None: values.append(tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss", simple_value=linear_loss)) test_summary = tf.Summary(value=values) summary_writer.add_summary(test_summary, step)
tensorflow.Summary.Value
12,690
import tensorflow as tf return v if init: x = tf.matmul(x, tf.nn.l2_normalize(V.initialized_value(), 0)) init_scale = .01 m_init, v_init = tf.nn.moments(x, [0]) scale_init = init_scale / tf.sqrt(v_init + 1e-10) with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]): x = tf.reshape(scale_init, [1, num_units]) * (x - tf.reshape(m_init, [1, num_units])) else: V = maybe_avg(V) g = maybe_avg(g) b = maybe_avg(b) x = tf.matmul(x, V) scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
tensorflow.reshape
12,691
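In equation form, the init branch above performs the data-dependent initialization of weight normalization: with m_init and v_init the per-unit batch moments of x after the normalized matmul,

scale_init = init_scale / sqrt(v_init + 1e-10)
g <- g * scale_init
b <- b - m_init * scale_init

so the returned scale_init * (x - m_init) has per-unit mean 0 and standard deviation init_scale (here .01) on the initialization batch.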
import tensorflow as tf test_dir = self._TestDir("graph_extension") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") with self.test_session(graph=tf.Graph()) as sess: # Creates an inference graph. # Hidden 1 images = tf.constant(1.2, tf.float32, shape=[100, 28]) with tf.name_scope("hidden1"): weights = tf.Variable( tf.truncated_normal([28, 128], stddev=1.0 / math.sqrt(float(28))), name="weights") biases = tf.Variable(tf.zeros([128]), name="biases") hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases) # Hidden 2 with tf.name_scope("hidden2"): weights = tf.Variable( tf.truncated_normal([128, 32], stddev=1.0 / math.sqrt(float(128))), name="weights") biases = tf.Variable(tf.zeros([32]), name="biases") hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
tensorflow.zeros
12,692
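The test is truncated here; graph-extension tests of this kind typically continue by saving a checkpoint and exporting the MetaGraph to the paths prepared above. The continuation below is an assumption based on the standard TF 1.x Saver API, not the source's exact code:

# Hypothetical continuation (standard Saver API).
saver0 = tf.train.Saver()
sess.run(tf.global_variables_initializer())
saver0.save(sess, saver0_ckpt)        # writes variables to saver0.ckpt
saver0.export_meta_graph(filename)    # writes the MetaGraphDef for later import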
import tensorflow as tf def self_attention(facts, ATTENTION_SIZE, mask, stag='null'): if len(facts.get_shape().as_list()) == 2: facts = tf.expand_dims(facts, 1) def cond(batch, output, i): return tf.less(i, tf.shape(batch)[1]) def body(batch, output, i): self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :], ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag, mode='LIST') self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1) output = output.write(i, self_attention_tmp) return batch, output, i + 1 output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True, element_shape=(facts[:, 0, :].get_shape())) _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
tensorflow.reduce_sum
12,693
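The while_loop/TensorArray pattern above generalizes beyond attention: at step i, compute something over the prefix [0, i] and collect the result. A stripped-down, runnable TF 1.x sketch with a plain prefix sum standing in for `din_fcn_attention`:

import tensorflow as tf

facts = tf.random.normal([2, 5, 3])                      # [batch, time, dim]
ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True,
                    element_shape=facts[:, 0, :].get_shape())

def cond(batch, output, i):
    return tf.less(i, tf.shape(batch)[1])

def body(batch, output, i):
    prefix_sum = tf.reduce_sum(batch[:, :i + 1, :], 1)   # stand-in for the attention step
    return batch, output.write(i, prefix_sum), i + 1

_, out_ta, _ = tf.while_loop(cond, body, [facts, ta, 0])
result = tf.transpose(out_ta.stack(), [1, 0, 2])         # [time, batch, dim] -> [batch, time, dim]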
import tensorflow as tf
    tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)  # debug
    loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                          labels=tf.squeeze(annotation, axis=[3]),
                                                                          name="entropy")))
    # loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
    #                                                                       labels=annotation,
    #                                                                       name="entropy")))
    loss_summary = tf.summary.scalar("entropy", loss)

    trainable_var = tf.trainable_variables()
    if FLAGS.debug:
        for var in trainable_var:
            utils.add_to_regularization_and_summary(var)
    train_op = train(loss, trainable_var)

    print("Setting up summary op...")
    summary_op = tf.summary.merge_all()

    print("Setting up image reader...")
tensorflow.summary.scalar
12,694
import tensorflow as tf Args: batch: A batch of images and labels. Returns: The same batch where cutout has been applied to the images. """ length, replace = FLAGS.cutout_length, 0.0 images, labels = batch['image'], batch['label'] num_channels = tf.shape(images)[3] image_height, image_width = tf.shape(images)[1], tf.shape(images)[2] cutout_center_height = tf.random.uniform( shape=[], minval=0, maxval=image_height, dtype=tf.int32) cutout_center_width = tf.random.uniform( shape=[], minval=0, maxval=image_width, dtype=tf.int32)
tensorflow.shape
12,695
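The snippet cuts off after sampling the cutout center. A sketch of how the mask is commonly completed from here, following the widely used pad-a-zero-block recipe (the standard recipe, not necessarily the source's exact continuation; note it applies one shared cutout location to the whole batch):

pad_size = length // 2
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
                image_width - (left_pad + right_pad)]
mask = tf.pad(tf.zeros(cutout_shape, dtype=images.dtype),
              [[lower_pad, upper_pad], [left_pad, right_pad]],
              constant_values=1)
mask = mask[tf.newaxis, :, :, tf.newaxis]  # [1, H, W, 1], broadcasts over batch and channels
images = images * mask                     # replace == 0.0, so masking is a plain multiply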
import tensorflow as tf X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse) if dropout > 0.0: X = tf.layers.dropout(X, dropout, training=is_train) if slope < 1.0: X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X)
tensorflow.layers.dropout
12,696
import tensorflow as tf
import numpy as np  # needed for the np.random initializer below
    # Define the independent and dependent variables
    x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')
    ## Writing the targets in matrix form greatly speeds up the computation
    y = tf.placeholder(tf.float64, shape=[None, 1], name="y")
    # Define the parameter estimate and the prediction
    betaPred = tf.Variable(np.random.random([dimension, 1]))
    yPred = tf.matmul(x, betaPred, name="y_pred")
    # Define the loss function
    loss = tf.reduce_mean(tf.square(yPred - y))
    model = {"loss_function": loss,
             "independent_variable": x,
             "dependent_variable": y,
             "prediction": yPred,
             "model_params": betaPred}
    return model
tensorflow.matmul
12,697
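A minimal sketch of training the returned `model` dict (TF 1.x; the optimizer, step count, and synthetic data are illustrative, and `dimension` is assumed to match the value used to build the model):

import numpy as np
import tensorflow as tf

train_op = tf.train.GradientDescentOptimizer(0.01).minimize(model["loss_function"])
X = np.random.random([100, dimension])
Y = X.dot(np.ones([dimension, 1]))          # synthetic targets with true beta = 1

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train_op, feed_dict={model["independent_variable"]: X,
                                      model["dependent_variable"]: Y})
    print(sess.run(model["model_params"]))  # should approach a column of ones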
import tensorflow as tf with tf.variable_scope("decoder_symbols_seq2seq"): dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_symbols=5, num_decoder_symbols=3, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 3), res[0].shape)
tensorflow.global_variables_initializer
12,698
import tensorflow as tf
        size = tf.random.uniform(shape=[],minval=3,maxval=7,dtype=tf.int32) # size in [3, 7), i.e. kernel widths 7 to 13

        self.kernel = self.gaussian_kernel(size,mean,std)
        self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
        self.paddings = tf.convert_to_tensor([[size,size],[size,size],[0,0]])
        x_aug = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')
        x_aug = tf.squeeze(x_aug)

        return tf.concat([x, x_aug],axis=2)

    def high_low_pass(self,x):
        x_low = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')
        x_low = tf.squeeze(x_low)
        x_high = x - x_low
        return tf.concat([x, x_high, x_low],axis=2)

    def no_op(self,x):
        return x
tensorflow.pad
12,699
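Finally, a tiny demonstration of the 'SYMMETRIC' padding mode used around both convolutions above: edge values are mirrored into the border, which avoids the darkened edges a zero pad would produce after blurring.

import tensorflow as tf

t = tf.constant([[1, 2],
                 [3, 4]])
padded = tf.pad(t, [[1, 1], [1, 1]], 'SYMMETRIC')
# [[1, 1, 2, 2],
#  [1, 1, 2, 2],
#  [3, 3, 4, 4],
#  [3, 3, 4, 4]]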