Columns: seed (string, lengths 25 to 2.89k) | seed_api (string, lengths 14 to 102) | index (int64, 0 to 14.8k)
import tensorflow as tf

self.Y_hat = training_decoder_output.rnn_output
out_decoder2 = tf.reshape(self.Y_hat, [tf.shape(self.Y_hat)[0], -1, n_mels])
dec = conv1d_banks(out_decoder2, K=decoder_num_banks, is_training=self.training)
dec = tf.layers.max_pooling1d(dec, pool_size=2, strides=1, padding="same")
dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-1", padding="SAME")
dec = tf.nn.relu(tf.layers.batch_normalization(dec, training=self.training))
dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-2", padding="SAME")
dec = tf.layers.batch_normalization(dec, training=self.training)
dec = tf.layers.dense(dec, embed_size // 2)
tensorflow.layers.conv1d
10,800
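For reference, a minimal self-contained sketch of the tf.layers.conv1d call featured in this record; the input shape and names are illustrative, not from the dataset.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 100, 80])  # batch x time x channels
y = tf.layers.conv1d(x, filters=128, kernel_size=3, padding="same", name="conv1d_demo")
print(y.shape)  # (?, 100, 128)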
import tensorflow as tf

w = tf.get_variable('W', filter_shape, initializer=w_init)
tensorflow.get_variable
10,801
import tensorflow as tf

                             strides=(2, 2), padding="SAME")
        x = common_layers.layer_norm(x)
    else:
        x = common_layers.double_discriminator(x)
        x = tf.expand_dims(tf.expand_dims(x, axis=1), axis=1)
    x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck"))
    d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
tensorflow.expand_dims
10,802
import tensorflow as tf

sync_warmup_policy = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in
                                zip(warmup_policy.parameters(), policy.parameters())])
# batched noises
noise = OUNoise(env.action_space, theta=FLAGS.OUNoise.theta, sigma=FLAGS.OUNoise.sigma,
                shape=(1, dim_action))
vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)
warmup_vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)
sync_warmup_vfn = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in
                             zip(warmup_vfn.parameters(), vfn.parameters())])
model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
lazy_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
warmup_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
sync_warmup_model = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in
                               zip(warmup_model.parameters(), model.parameters())])
shadow_models = [DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
                 for n in range(FLAGS.warmup.n_shadow_models)]
sync_model_from_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in
                                       zip(model.parameters(), lazy_model.parameters())])
sync_model_to_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in
                                     zip(lazy_model.parameters(), model.parameters())])
virt_env = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs,
                      opt_model=FLAGS.slbo.opt_model)
virt_runner = Runner(virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
virt_env_copy = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task),
                           nsample // FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model)
virt_runner_copy = Runner(virt_env_copy, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
extra_runners = {}
for sam in [1000, 2000, 4000, 8000, 10000, 16000]:
    extra_runners[f'train{sam}'] = Runner(
        VirtualEnv(model, make_env(FLAGS.env.id, task_config=task),
                   sam // FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model),
        **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
    extra_runners[f'collect{sam}'] = make_real_runner(sam // FLAGS.plan.max_steps, task_config=task)
warmup_virt_env = VirtualEnv(warmup_model, make_env(FLAGS.env.id, task_config=task),
                             FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model)
warmup_virt_runner = Runner(warmup_virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
tensorflow.assign
10,803
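A hedged sketch of the parameter-sync pattern in the record above: a tf.group of tf.assign ops copies one variable list onto another in a single session call (the variables here are illustrative).

import tensorflow as tf

src = [tf.Variable([1.0]), tf.Variable([2.0])]
dst = [tf.Variable([0.0]), tf.Variable([0.0])]
sync = tf.group(*[tf.assign(d, s) for d, s in zip(dst, src)])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(sync)
    print(sess.run(dst))  # dst now holds src's values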
import tensorflow as tf

    return average_grads

def binary_mask(shape, p=0.7):
    samples = tf.random_uniform(shape, minval=0.0, maxval=1.0)
    mask = tf.less_equal(samples, p)
    return tf.cast(mask, tf.float32)

def weighted_arithmetic_mean(w, x):
tensorflow.less_equal
10,804
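A standalone sketch of the binary_mask helper in the record above: tf.less_equal turns uniform noise into a keep/drop mask with keep probability p (the shape and p used here are illustrative).

import tensorflow as tf

samples = tf.random_uniform([2, 3], minval=0.0, maxval=1.0)
mask = tf.cast(tf.less_equal(samples, 0.7), tf.float32)
with tf.Session() as sess:
    print(sess.run(mask))  # e.g. [[1. 0. 1.] [1. 1. 0.]]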
import tensorflow as tf

def _add_identity_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train):
    stride = 2 if is_reduction else 1
    with tf.variable_scope('identity_op'):
        # If stride > 1, calibrate, else, just return itself
        if stride > 1:
            X = self._calibrate(X, w, h, ch, w // stride, h // stride, ch, is_train=is_train)
        X = tf.reshape(X, (-1, w // stride, h // stride, ch))  # Sanity shape check
    return X

def _add_max_pool_3x3_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train):
    filter_size = 3
    stride = 2 if is_reduction else 1
    with tf.variable_scope('max_pool_3x3_op'):
tensorflow.reshape
10,805
import tensorflow as tf

        feature_emb_list.append(antecedent_distance_emb)

    feature_emb = tf.concat(feature_emb_list, 2)  # [k, c, emb]
    feature_emb = tf.nn.dropout(feature_emb, self.dropout)  # [k, c, emb]
    target_emb = tf.expand_dims(top_span_emb, 1)  # [k, 1, emb]
    similarity_emb = top_antecedent_emb * target_emb  # [k, c, emb]
    target_emb = tf.tile(target_emb, [1, c, 1])  # [k, c, emb]
    pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2)  # [k, c, emb]

    with tf.variable_scope("slow_antecedent_scores"):
        slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"],
                                           self.config["ffnn_size"], 1, self.dropout)  # [k, c, 1]
    slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2)  # [k, c]
    return slow_antecedent_scores  # [k, c]

def get_fast_antecedent_scores(self, top_span_emb):
    with tf.variable_scope("src_projection"):
        source_top_span_emb = tf.nn.dropout(
            util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout)  # [k, emb]
    target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout)  # [k, emb]
    return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True)  # [k, k]
tensorflow.variable_scope
10,806
import tensorflow as tf

    scope: str or VariableScope
        optional scope for variable_scope.
    reuse: bool or None
        whether or not the variables should be reused. To be able to reuse the
        scope must be given.

    Returns
    -------
    act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given observation.
        See the top of the file for details.
    """
    with tf.variable_scope(scope, reuse=reuse):
        observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
        stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
        update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")

        eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))

        q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
        deterministic_actions = tf.argmax(q_values, axis=1)

        batch_size = tf.shape(observations_ph.get())[0]
tensorflow.variable_scope
10,807
import tensorflow as tf w = tf.get_variable("w", [nx, ny], initializer=w_init) b = tf.get_variable("b", [ny], initializer=b_init) return tf.matmul(x, w)+b
tensorflow.matmul
10,808
import tensorflow as tf

        name: name of the variable
        shape: list of ints
        initializer: initializer for Variable

    Returns:
        Variable Tensor
    """
    with tf.device('/cpu:0'):
        dtype = tf.float16 if use_fp16 else tf.float32
        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
    return var
tensorflow.device
10,809
import tensorflow as tf

    batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)

with tf.Session() as sess:
    with tf.device("/cpu:0"):
        indices = tf.Variable(indices)
        values = tf.Variable(values)
        shape = tf.Variable(shape)
        st = tf.SparseTensor(indices, values, shape)
        st_handles = add_many_sparse_to_tensors_map(st)
tensorflow.device
10,810
import tensorflow as tf

    tf.saved_model.ASSETS_DIRECTORY,
    sanitized_vocab_filename(filename=vocab_filename))
files = tf.io.gfile.glob(prefix) + tf.io.gfile.glob(
    '{}.tfrecord.gz'.format(prefix))
tensorflow.io.gfile.glob
10,811
import tensorflow as tf

if cfgs.ANGLE_RANGE == 180:
    gtboxes_and_label_r_ = tf.py_func(coordinate_present_convert,
                                      inp=[gtboxes_and_label_r, -1],
                                      Tout=tf.float32)
    gtboxes_and_label_r_ = tf.reshape(gtboxes_and_label_r_, [-1, 6])

    gt_encode_label = tf.py_func(angle_label_encode,
                                 inp=[gtboxes_and_label_r_[:, -2], cfgs.ANGLE_RANGE,
                                      cfgs.OMEGA, cfgs.ANGLE_MODE],
                                 Tout=tf.float32)
else:
    gt_encode_label = tf.py_func(angle_label_encode,
                                 inp=[gtboxes_and_label_r[:, -2], cfgs.ANGLE_RANGE,
                                      cfgs.OMEGA, cfgs.ANGLE_MODE],
                                 Tout=tf.float32)

img = inputs_list[i][0]
img_shape = inputs_list[i][-2:]
img = tf.image.crop_to_bounding_box(image=img,
                                    offset_height=0,
                                    offset_width=0,
                                    target_height=tf.cast(img_shape[0], tf.int32),
                                    target_width=tf.cast(img_shape[1], tf.int32))
tensorflow.py_func
10,812
import tensorflow as tf

# 5. To hard-cap the percentage of GPU memory TensorFlow may use, set
# per_process_gpu_memory_fraction on the session config
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)

# 6. Sometimes we want code robust enough to decide how many GPUs are appropriate.
# TensorFlow has a built-in function to detect this. It is useful when we want the
# code to exploit GPU compute when GPU memory allows and assign specific ops to the GPU
if tf.test.is_built_with_cuda():
    pass

# 7. We want to assign specific ops to GPUs. The example below does some simple
# computations and assigns them to the main CPU and two auxiliary GPUs
with tf.device('/cpu:0'):
    a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
    b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])

with tf.device('/gpu:0'):
    c = tf.matmul(a, b)
    c = tf.reshape(c, [-1])

with tf.device('/gpu:1'):
    d = tf.matmul(b, a)
    flat_d = tf.reshape(d, [-1])

combined = tf.multiply(c, flat_d)
print(sess.run(combined))
tensorflow.matmul
10,813
import tensorflow as tf

x, prediction, output_class = self.buildModel(lstm_layer, is_dynamic_rnn)
new_sess = tf.compat.v1.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
tensorflow.train.Saver
10,814
import tensorflow as tf

eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
filed_based_convert_examples_to_features(
    eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

tf.logging.info("***** Running evaluation *****")
tf.logging.info("  Num examples = %d", len(eval_examples))
tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

eval_steps = None
if FLAGS.use_tpu:
tensorflow.logging.info
10,815
from tensorflow.contrib.layers.python.layers import utils

def build_no_ops():
    return (tf.no_op(), tf.no_op())

# Only make the ops if we know that `is_training=True`, or the value of
# `is_training` is unknown.
is_training_const = utils.constant_value(is_training)
if is_training_const is None or is_training_const:
    update_mean_op, update_second_moment_op = utils.smart_cond(
        is_training,
        build_update_ops,
        build_no_ops,
tensorflow.contrib.layers.python.layers.utils.constant_value
10,816
import tensorflow as tf

                   masked_lm_weights, next_sentence_example_loss,
                   next_sentence_log_probs, next_sentence_labels):
    """Computes the loss and accuracy of the model."""
    masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                     [-1, masked_lm_log_probs.shape[-1]])
    masked_lm_predictions = tf.argmax(
tensorflow.reshape
10,817
import tensorflow as tf

if not white:
    q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True)
    Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1])  # remove line once issue 216 is fixed
    q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True)

Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True)  # M x N
fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True)

eKff = expectation(pXnew, kern)  # N (psi0)
eKuffu = expectation(pXnew, (kern, feat), (kern, feat))  # N x M x M (psi2)
Luu_tiled = tf.tile(Luu[None, :, :], [num_data, 1, 1])  # remove this line, once issue 216 is fixed
Li_eKuffu = tf.matrix_triangular_solve(Luu_tiled, eKuffu, lower=True)
Li_eKuffu_Lit = tf.matrix_triangular_solve(Luu_tiled, tf.matrix_transpose(Li_eKuffu), lower=True)  # N x M x M
cov = tf.matmul(q_sqrt_r, q_sqrt_r, transpose_b=True)  # D x M x M

if mean_function is None or isinstance(mean_function, mean_functions.Zero):
    e_related_to_mean = tf.zeros((num_data, num_func, num_func), dtype=settings.float_type)
else:
    # Update mean: \mu(x) + m(x)
    fmean = fmean + expectation(pXnew, mean_function)

    # Calculate: m(x) m(x)^T + m(x) \mu(x)^T + \mu(x) m(x)^T,
    # where m(x) is the mean_function and \mu(x) is fmean
    e_mean_mean = expectation(pXnew, mean_function, mean_function)  # N x D x D
    Lit_q_mu = tf.matrix_triangular_solve(Luu, q_mu, adjoint=True)
    e_mean_Kuf = expectation(pXnew, mean_function, (kern, feat))  # N x D x M
tensorflow.matmul
10,818
import tensorflow as tf

    fvar = tf.tile(fvar[None, :], [num_func, 1])  # R x N

# another backsubstitution in the unwhitened case
if not white:
    A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False)

# construct the conditional mean
fmean = tf.matmul(A, f, transpose_a=True)

if q_sqrt is not None:
    if q_sqrt.get_shape().ndims == 2:
        LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2)  # R x M x N
    elif q_sqrt.get_shape().ndims == 3:
        L = tf.matrix_band_part(q_sqrt, -1, 0)  # R x M x M
        A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1]))
        LTA = tf.matmul(L, A_tiled, transpose_a=True)  # R x M x N
    else:  # pragma: no cover
        raise ValueError("Bad dimension for q_sqrt: %s" % str(q_sqrt.get_shape().ndims))
    if full_cov:
        fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True)  # R x N x N
    else:
        fvar = fvar + tf.reduce_sum(tf.square(LTA), 1)  # R x N

if not full_cov:
    fvar = tf.transpose(fvar)  # N x R

return fmean, fvar  # N x R, R x N x N or N x R
tensorflow.stack
10,819
import tensorflow as tf

        input_mask.append(0)
        segment_ids.append(0)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    label_id = label_map[example.label]
    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

    feature = InputFeatures(
        input_ids=input_ids,
tensorflow.logging.info
10,820
import tensorflow as tf

def fc(input_data, out_dim, non_linear_fn=None, initial_value=None, use_bias=True, scope='fc'):
    with tf.variable_scope(scope):
tensorflow.variable_scope
10,821
import tensorflow as tf

      passed to the Loss or Postprocess functions.
    """
    flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
    class_prediction = tf.contrib.layers.fully_connected(
        flattened_inputs, self._num_classes)
    box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
    return {
        'class_predictions_with_background': tf.reshape(
            class_prediction, [-1, 1, self._num_classes]),
        'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
    }

def postprocess(self, prediction_dict, true_image_shapes, **params):
    """Convert predicted output tensors to final detections. Unused.

    Args:
        prediction_dict: a dictionary holding prediction tensors.
        true_image_shapes: int32 tensor of shape [batch, 3] where each row is
            of the form [height, width, channels] indicating the shapes
tensorflow.reshape
10,822
from tensorflow.python.framework import ops

# Divide total by max to get mean, for both vars and the update ops.
mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')
update = _safe_scalar_div(total_update, max_update, name=scope)

if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_average_precision)

if updates_collections:
    ops.add_to_collections(updates_collections, update)

return mean_average_precision, update
tensorflow.python.framework.ops.add_to_collections
10,823
import tensorflow as tf

        vz.insert(u, r)

    kk = tf.Variable(0, dtype=tf.int64)
    for i in tf.range(start=0, limit=tf.size(vx_keys), delta=1, dtype=None, name='range'):
        for j in tf.range(start=0, limit=tf.size(vz_keys), delta=1, dtype=None, name='range'):
            to_add = tf.cond(
                tf.greater(vz.lookup(vx_keys[i]), -1),
                true_fn=lambda: tf.math.multiply(vx.lookup(vx_keys[i]), vz.lookup(vz_keys[j])),
                false_fn=lambda: tf.constant(0, dtype=tf.int64))
            kk = tf.math.add(kk, to_add)
    kernel[l][m] = kk
    return tf.convert_to_tensor(kernel, dtype=tf.int64)

def dim(self):
tensorflow.constant
10,824
import tensorflow as tf

tf.gfile.MakeDirs(FLAGS.output_dir)

input_files = []
for input_pattern in FLAGS.input_file.split(","):
    input_files.extend(tf.gfile.Glob(input_pattern))

tf.logging.info("*** Input Files ***")
for input_file in input_files:
    tf.logging.info("  %s" % input_file)

tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
    cluster=tpu_cluster_resolver,
    master=FLAGS.master,
    model_dir=FLAGS.output_dir,
    save_checkpoints_steps=FLAGS.save_checkpoints_steps,
    tpu_config=tf.contrib.tpu.TPUConfig(
        iterations_per_loop=FLAGS.iterations_per_loop,
        num_shards=FLAGS.num_tpu_cores,
        per_host_input_for_training=is_per_host))
tensorflow.contrib.cluster_resolver.TPUClusterResolver
10,825
import tensorflow as tf

bsz_per_core = tf.shape(features["input_ids"])[0]

def _transform_features(feature):
    out = tf.reshape(feature, [bsz_per_core, 4, -1])
    out = tf.transpose(out, [2, 0, 1])
    out = tf.reshape(out, [-1, bsz_per_core * 4])
    return out

inp = _transform_features(features["input_ids"])
seg_id = _transform_features(features["segment_ids"])
tensorflow.reshape
10,826
import tensorflow as tf

tf_gradient_function = tf_gradients_lib.gradients

# ISSUE: https://github.com/cybertronai/gradient-checkpointing/issues/38
def tf_gradients(ys, *args, **kwargs):
    """Decorate tf.gradients calls with explicit device placement to avoid memory
    leaks when splitting model across multiple GPUs"""
    source = ys[0] if isinstance(ys, (list, tuple)) else ys
    device = source.op.node_def.device if isinstance(source, tf.Tensor) else None
    with tf.device(device):
        return tf_gradient_function(ys, *args, **kwargs)

MIN_CHECKPOINT_NODE_SIZE = 1024  # use lower value during testing

# specific versions we can use to do process-wide replacement of tf.gradients
def gradients_speed(ys, xs, grad_ys=None, **kwargs):
tensorflow.device
10,827
import tensorflow as tf

                      cols[3] / height,
                      cols[2] / width], axis=1)
# add batch dimension (assume batch_size==1)
# assert image.get_shape()[0] == 1
boxes = tf.expand_dims(boxes, dim=0)
image = tf.image.draw_bounding_boxes(image, boxes)  # draw the ground-truth boxes on the image
return tf.summary.image('ground_truth', image)

def _add_act_summary(self, tensor):
    tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
    tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
                      tf.nn.zero_fraction(tensor))
tensorflow.summary.image
10,828
import tensorflow as tf

x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)
y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)
y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)
bboxes = tf.concat([x1, y1, x2, y2], axis=1)
return bboxes
tensorflow.concat
10,829
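A minimal sketch of the box-coordinate tf.concat in the record above, using illustrative [N, 1] column tensors.

import tensorflow as tf

x1 = tf.constant([[0.0], [2.0]])
y1 = tf.constant([[1.0], [3.0]])
x2 = tf.constant([[4.0], [5.0]])
y2 = tf.constant([[6.0], [7.0]])
bboxes = tf.concat([x1, y1, x2, y2], axis=1)  # shape [2, 4]: one box per row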
from tensorflow.python.ops import math_ops

with ops.name_scope(None, 'false_negatives', (predictions_idx, labels)):
    labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id)
    fn = set_ops.set_size(set_ops.set_difference(predictions_idx, labels, aminusb=False))
    fn = math_ops.to_double(fn)
    if weights is not None:
        weights = math_ops.to_double(weights)
        fn = math_ops.mul(fn, weights)
    return fn

def _streaming_sparse_false_negative_at_k(predictions_idx, labels, k, class_id=None,
                                          weights=None, name=None):
tensorflow.python.ops.math_ops.mul
10,830
import tensorflow as tf

    )
    print_obj(func_name, "mixed_loss", mixed_loss)

    # Get gradient from returned list of length 1.
    mixed_gradients = tf.gradients(
        ys=mixed_loss,
        xs=[mixed_images],
        name="gradients"
tensorflow.gradients
10,831
import tensorflow as tf

if isinstance(k_size, list):
    filter_shape = [k_size[0], k_size[1]] + [in_channel, out_dims]
else:
    filter_shape = [k_size, k_size] + [in_channel, out_dims]

if w_init is None:
    w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
    b_init = tf.constant_initializer()

w = tf.get_variable('W', filter_shape, initializer=w_init)
b = None
if use_bias:
    b = tf.get_variable('b', [out_dims], initializer=b_init)

conv = tf.nn.atrous_conv2d(value=input_tensor, filters=w, rate=rate,
                           padding=padding, name='dilation_conv')
if use_bias:
    ret = tf.add(conv, b)
tensorflow.get_variable
10,832
from tensorflow.python.ops import check_ops

    Raises:
        TypeError: if `alpha` and `beta` are different dtypes.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[alpha, beta]) as ns:
        with ops.control_dependencies([
            check_ops.assert_positive(alpha),
            check_ops.assert_positive(beta),
        ] if validate_args else []):
            self._alpha = array_ops.identity(alpha, name="alpha")
            self._beta = array_ops.identity(beta, name="beta")
    super(InverseGamma, self).__init__(
        dtype=self._alpha.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        is_continuous=True,
tensorflow.python.ops.check_ops.assert_positive
10,833
import tensorflow as tf

    return final_loss

def contra_traj_lossV1(pred, tgt, temp=10.0):
    # Trajectory-wise contrastive loss
    traj_pred = tf.reduce_mean(pred, axis=1)
    traj_tgt = tf.reduce_mean(tgt, axis=1)
    p1, p2 = tf.split(traj_pred, 2, axis=0)
    t1, t2 = tf.split(traj_tgt, 2, axis=0)
    soft_sign = tf.tanh((t1 - t2) * temp)
    loss = tf.maximum(0.0, soft_sign * ((t1 - t2) - (p1 - p2)))
    loss = tf.reduce_mean(loss)
    return loss

def horizon_sumV1(input, horizon=12):
tensorflow.split
10,834
import tensorflow as tf

per_band_data = band_settings.per_band_sub_model_fn_with_band_name(
    per_band_model_fn,
    features,
    params=params,
    value_if_masked=params.get("value_if_masked", 0.0),
    soft_onehot=Nonlinearity[params.get("input_soft_onehot", "sigmoid").upper()])
return graph_typecheck.assert_shape(
    tf.stack(per_band_data, axis=4),
    [
        params["batch_size"],
        2 * params["window_size"],
        3,
        cutoff_data_for_window_size(params["window_size"]).embedding_size,
        band_settings.nbands,
    ])
tensorflow.stack
10,835
import tensorflow as tf

GAMMA = 0.7
# Actor learning rate
# A_LR = 0.0001
A_LR = 0.001
# Critic learning rate
# C_LR = 0.0002
C_LR = 0.002

class MODEL(object):
    def __init__(self):
        self.sess = tf.Session()
        self.tfs = tf.placeholder(tf.float32, [None, 84, 84, 3], 'state')
        c0 = tf.cast(self.tfs, tf.float32) / 255.
        c1 = tf.nn.relu(self.conv(c0, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2)))
        c2 = tf.nn.relu(
            self.conv(
tensorflow.Session
10,836
from tensorflow.python.ops import variable_scope

coverages = []
attn_dists = []
p_gens = []
vocab_scores = []
sampled_words = []
self.encoder_features = encoder_features

with variable_scope.variable_scope("attention_decoder"):
    # Get the weight vectors v and W_c (W_c is for coverage)
    v = variable_scope.get_variable("v", [options.attention_vec_size])
    v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
    w_c = None
    if options.use_coverage:
tensorflow.python.ops.variable_scope.variable_scope
10,837
import tensorflow as tf

def __init__(self, name, input_dim, out_dim,
             k_h=4, k_w=4, d_h=2, d_w=2, stddev=0.02,
             data_format='NHWC', epsilon=1e-9):
    with tf.variable_scope(name):
        assert data_format == 'NHWC'
tensorflow.variable_scope
10,838
import tensorflow as tf

    return op

if __name__ == '__main__':
    # run it with "python -m tensorpack.tfutils.optimizer"
    x = tf.get_variable('x', shape=[6])
    cost = tf.reduce_sum(tf.abs(x), name='cost')
    opt = tf.train.GradientDescentOptimizer(0.01)
    opt = AccumGradOptimizer(opt, 5)
    min_op = opt.minimize(cost)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    with sess.as_default():
        for k in range(20):
            min_op.run()
            print(x.eval())
tensorflow.Session
10,839
from tensorflow.python.ops import array_ops

    Raises:
        ValueError: If `weights` is not `None` and its shape doesn't match `values`,
            or if either `metrics_collections` or `updates_collections` are not a
            list or tuple.
    """
    check_ops.assert_type(values, dtypes.bool)
    count = _create_local('count', shape=[])

    values = math_ops.to_float(values)
    if weights is not None:
        weights = math_ops.to_float(weights)
        values = math_ops.mul(values, weights)

    value_tensor = array_ops.identity(count)
    update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

    if metrics_collections:
        ops.add_to_collections(metrics_collections, value_tensor)

    if updates_collections:
        ops.add_to_collections(updates_collections, update_op)

    return value_tensor, update_op

def _streaming_true_positives(predictions, labels, weights=None,
                              metrics_collections=None,
tensorflow.python.ops.array_ops.identity
10,840
import tensorflow as tf

tower_grads = []
biases_regularizer = tf.no_regularizer
weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)

with tf.variable_scope(tf.get_variable_scope()):
    for i in range(num_gpu):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i):
tensorflow.get_variable_scope
10,841
import tensorflow as tf

    else:
        gamma = tf.ones([1] * ndims, name='gamma')

    return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)

@staticmethod
tensorflow.nn.batch_normalization
10,842
import tensorflow as tf

        else:
            action = env_.action_space.sample()
        observation, reward, done, info = env_.step(action)
        if done:
            break
        return_ += reward
    logger.info(f"render {cnt_} steps, return = {return_:.6f}")

    res = {'obs': obs, 'return': return_}
    return res

def eval_rollout(runner, p, des):
    logger.info(des)
    runner.reset()
    data, ep_infos = runner.run(p, FLAGS.plan.n_trpo_samples)
    logp = p(data.state).log_prob(data.action).reduce_sum(axis=1).reduce_mean()
    logp = tf.get_default_session().run(logp)
    print("state_mean:", np.mean(data.state))
    print("action_mean:", np.mean(data.action))
    print("warmup_logpac_mean:", logp)

def testeval(policy, runner):
    runner.reset()
    _, ep_infos = runner.run(policy, FLAGS.rollout.n_test_samples)
    returns = [info['return'] for info in ep_infos]
    returns = np.mean(returns)
    return returns

def evaluate(settings, tag):
    res = {}
tensorflow.get_default_session
10,843
import tensorflow as tf

b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0))

rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size])
predictions = tf.matmul(rnn_outputs, W) + b

# batch_size * num_steps: when you define a placeholder in TensorFlow, the shape of
# the input during the session should be the same as the shape of the placeholder
yy = tf.reshape(y, [-1, input_size_y])

"Mean squared error loss"
loss = tf.reduce_mean(tf.square(tf.reshape(predictions, [-1]) - tf.reshape(yy, [-1])))
tensorflow.reshape
10,844
import tensorflow as tf

# slows down learning. In this code, we found that making local steps be much
# smaller than 20 makes the algorithm more difficult to tune and to get to work.
self.runner = RunnerThread(env, pi, 20)

grads = tf.gradients(self.loss, pi.var_list)

tf.summary.scalar("model/policy_loss", pi_loss / bs)
tf.summary.scalar("model/value_loss", vf_loss / bs)
tf.summary.scalar("model/entropy", entropy / bs)
tf.summary.image("model/state", pi.x)
tf.summary.scalar("model/grad_global_norm", tf.global_norm(grads))
tf.summary.scalar("model/var_global_norm", tf.global_norm(pi.var_list))
self.summary_op = tf.summary.merge_all()
tensorflow.summary.scalar
10,845
from tensorflow.python.framework import tensor_shape

with ops.op_scope([value, filter, output_shape], name, "conv2d_transpose") as name:
    value = ops.convert_to_tensor(value, name="value")
    filter = ops.convert_to_tensor(filter, name="filter")
    if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):
        raise ValueError(
            "input channels does not match filter's input channels, "
            "{} != {}".format(value.get_shape()[3], filter.get_shape()[3]))

    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
        raise ValueError("output_shape must have shape (4,), got {}"
                         .format(output_shape_.get_shape()))

    if isinstance(output_shape, (list, np.ndarray)):
        # output_shape's shape should be == [4] if reached this point.
        if not filter.get_shape()[2].is_compatible_with(output_shape[3]):
            raise ValueError(
                "output_shape does not match filter's output channels, "
                "{} != {}".format(output_shape[3], filter.get_shape()[2]))
tensorflow.python.framework.tensor_shape.vector
10,846
import tensorflow as tf

def _terminate_eval():
    tf.logging.info('Timeout passed with no new checkpoints ... terminating eval')
tensorflow.logging.info
10,847
import tensorflow as tf """ Train RNN graph """ def train_rnn(raw_data_x, raw_data_y, val_data_x, val_data_y,g, num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob, epoch_before_val = 50, max_checks_without_progress=50,epoch_overlap=None, verbose=True, save=False): with tf.Session() as sess: "initialize the variables" sess.run(tf.global_variables_initializer()) raw_data_yp = np.insert(raw_data_y,0,0,axis=0)[:-1] val_data_yp = np.insert(val_data_y,0,0,axis=0)[:-1] "see the trainable variables"
tensorflow.Session
10,848
from tensorflow.contrib.framework import deprecated_args

if k is not None:
    name = '%s_at_%d' % (name, k)
else:
    name = '%s_at_k' % (name)
if class_id is not None:
    name = '%s_class%d' % (name, class_id)
return name

@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
            'and reshape labels from [batch_size] to [batch_size, 1].')
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_recall_at_k(predictions, labels, k, ignore_mask=None, weights=None,
                          metrics_collections=None, updates_collections=None, name=None):
    """Computes the recall@k of the predictions with respect to dense labels.

    The `streaming_recall_at_k` function creates two local variables, `total` and
    `count`, that are used to compute the recall@k frequency. This frequency is
    ultimately returned as `recall_at_<k>`: an idempotent operation that simply
    divides `total` by `count`.
tensorflow.contrib.framework.deprecated_args
10,849
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache

if self._summary_writer is None:
    self._summary_writer = SummaryWriterCache.get(estimator.model_dir)
tensorflow.contrib.learn.python.learn.summary_writer_cache.SummaryWriterCache.get
10,850
import tensorflow as tf

    return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])  # N = h * w

s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True)  # [bs, N, N]
beta = tf.nn.softmax(s, axis=-1)  # attention map
o = tf.matmul(beta, hw_flatten(h))  # [bs, N, C]
gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))
o = tf.reshape(o, shape=inputs.shape)  # [bs, h, w, C]
x = gamma * o + inputs

return x

@add_arg_scope
def convolution(inputs, num_outputs, kernel_size,
tensorflow.reshape
10,851
from tensorflow.python.platform import gfile

with sess.graph.device("/cpu:0"):
    v0 = tf.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
    v1 = tf.Variable(222, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2)
tf.initialize_all_variables().run()
self.assertEqual([], save.last_checkpoints)

s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertEqual(2, len(gfile.Glob(s1)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))

s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertEqual(2, len(gfile.Glob(s1)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
self.assertEqual(2, len(gfile.Glob(s2)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
tensorflow.python.platform.gfile.Glob
10,852
from tensorflow.python.ops import control_flow_ops

lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
lambda_t = math_ops.cast(self._lambda_t, var.dtype.base_dtype)

g_t = grad

var_update = state_ops.assign_sub(var, lr_t * (g_t - lambda_t * var))
return control_flow_ops.group(*[var_update])

def _apply_sparse(self, grad, var):
    raise NotImplementedError("Sparse gradient updates are not supported.")
tensorflow.python.ops.control_flow_ops.group
10,853
import tensorflow as tf

# Decay the learning rate exponentially based on the number of steps.
learning_rate = tf.train.exponential_decay(
    FLAGS.learning_rate,
    global_step,
    decay_steps,
    FLAGS.learning_rate_decay_factor,
    staircase=True)

if gradient_clip is not None:
    clipped_grads = [(tf.clip_by_value(grad, -gradient_clip, +gradient_clip), var)
                     for grad, var in avg_grads]
else:
    clipped_grads = avg_grads

if FLAGS.optimizer == 'momentum':
tensorflow.clip_by_value
10,854
import tensorflow as tf

    facts = tf.concat(facts, 2)
    print("querry_size mismatch")
    query = tf.concat(values=[query, query], axis=1)

if time_major:
    # (T,B,D) => (B,T,D)
    facts = tf.array_ops.transpose(facts, [1, 0, 2])

mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
tensorflow.ones_like
10,855
import tensorflow as tf

    return 196.0 * 21.0 / 4096.0
else:
    rec = tf.cast(kw * kh, tf.float32)
    n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.))
    ns = tf.range(0., n_max)
    ns_pow = tf.pow(2., ns)
    ks = tf.round(ns_pow / rec)
    diffs = tf.math.abs(ks / ns_pow - 1 / rec)
    n = tf.argmin(diffs)
    k = ks[n]
    scale = k / tf.pow(2., tf.cast(n, tf.float32))
    scale *= rec
    return scale
tensorflow.math.abs
10,856
import tensorflow as tf

    'num_cpu_threads', 0,
    'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
    'gpu_memory_fraction', 1.,
    'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
    'data_dir', '../PASCAL/VOC_TF/VOC0712TF/',
    'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
    'dataset_name', 'pascalvoc_0712',
    'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
    'num_classes', 21,
    'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
    'dataset_split_name', 'train',
    'The name of the train/test split.')
tf.app.flags.DEFINE_string(
    'model_dir', './logs_v3/',
    'The directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
    'log_every_n_steps', 10,
    'The frequency with which logs are printed.')
tensorflow.app.flags.DEFINE_integer
10,857
import tensorflow as tf

tf.app.flags.DEFINE_integer(
    'save_summary_steps', 100,
    'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
    'save_checkpoints_secs', 3600,
    'The frequency with which the model is saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
    'train_image_size', 384,
    'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
    'heatmap_size', 96,
    'The size of the output heatmap of the model.')
tf.app.flags.DEFINE_string(
    'backbone', 'seresnext50',  # or seresnext50 seresnet50
    'The backbone network to use for feature pyramid.')
tf.app.flags.DEFINE_float(
    'heatmap_sigma', 1.,
    'The sigma of Gaussian which generate the target heatmap.')
tf.app.flags.DEFINE_float(
    'bbox_border', 25.,
    'The nearest distance of the crop border to all keypoints.')
tf.app.flags.DEFINE_integer(
    'train_epochs', 50,
    'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
tensorflow.app.flags.DEFINE_string
10,858
from tensorflow.python.ops import nn

# sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
# Check that we got int32/int64 for classification.
if (not target.dtype.is_compatible_with(dtypes.int64) and
        not target.dtype.is_compatible_with(dtypes.int32)):
    raise ValueError("Target's dtype should be int32, int64 or compatible. "
                     "Instead got %s." % target.dtype)
# sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
if len(target.get_shape()) == 2:
    target = array_ops.squeeze(target, squeeze_dims=[1])
loss_vec = nn.sparse_softmax_cross_entropy_with_logits(logits, target)
return loss_vec

def _run_metrics(predictions, targets, metrics, weights):
    result = {}
    targets = math_ops.cast(targets, predictions.dtype)
    for name, metric in six.iteritems(metrics or {}):
        if "weights" in inspect.getargspec(metric)[0]:
tensorflow.python.ops.nn.sparse_softmax_cross_entropy_with_logits
10,859
import tensorflow as tf

mask3 = tf.constant([[1, 1], [1, 1]], dtype=tf.float32)
mask4 = tf.constant([[0, 0], [0, 0]], dtype=tf.float32)
tensorflow.constant
10,860
import tensorflow as tf

    fd.write(params.to_json())

def collect_params(all_params, params):
    collected = tf.contrib.training.HParams()

    for k in params.values().keys():
        collected.add_hparam(k, getattr(all_params, k))
tensorflow.contrib.training.HParams
10,861
import tensorflow as tf elif activation == "sigmoid": A = tf.nn.sigmoid(tf.multiply(beta,h))
tensorflow.multiply
10,862
import tensorflow as tf

wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
tensorflow.constant_initializer
10,863
import tensorflow as tf

processed_image = utils.process_image(image, mean_pixel)

with tf.variable_scope("inference"):
    image_net = vgg_net(weights, processed_image)
    conv_final_layer = image_net["conv5_3"]
tensorflow.variable_scope
10,864
import tensorflow as tf

    initializer=tf.zeros_initializer())

logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
tensorflow.matmul
10,865
import tensorflow as tf

eval_input_fn = input_fn_builder(
    input_files=input_files,
    max_seq_length=FLAGS.max_seq_length,
    max_predictions_per_seq=FLAGS.max_predictions_per_seq,
    is_training=False)

result = estimator.evaluate(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)

output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
    tf.logging.info("***** Eval results *****")
    for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

if __name__ == "__main__":
    # flags.mark_flag_as_required("input_file")
    # flags.mark_flag_as_required("bert_config_file")
    # flags.mark_flag_as_required("output_dir")
tensorflow.gfile.GFile
10,866
import tensorflow as tf batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "masked_lm_positions": tf.constant( all_masked_lm_positions,
tensorflow.constant
10,867
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

# removed GOAL_SIZE
flat1b = Dense(units=RNN_SIZE - loc_layer_size)(flat1a)

# FC layers for goal_pos input
# goal_layer1 = Dense(units=GOAL_SIZE)(goal_pos)
# goal_layer2 = Dense(units=GOAL_SIZE)(goal_layer1)

# FC layers to find next location
loc_layer1 = Dense(units=loc_layer_size)(prev_loc)
loc_layer2 = Dense(units=loc_layer_size)(loc_layer1)

# Concatenation of the above layers, followed by FC layers
concat = tf.concat([flat1b, loc_layer2], 1)  # goal_layer2
h1 = Dense(units=RNN_SIZE)(concat)
h2 = Dense(units=RNN_SIZE)(h1)
self.h3 = tf.nn.relu(h2 + concat)

# Recurrent network for temporal dependencies
tensorflow.keras.layers.Dense
10,868
import tensorflow as tf

           for i in range(num_dec_timesteps)]
weights = [tf.constant(1.0, shape=[batch_size])
           for i in range(num_dec_timesteps)]

def ForwardBackward(enc_inp, dec_inp, feed_previous):
    scope_name = "fp_{}".format(feed_previous)
    with tf.variable_scope(scope_name):
        dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
        net_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_name)
    optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
    update_op = optimizer.minimize(
        tf.nn.seq2seq.sequence_loss(dec_op, targets, weights),
        var_list=net_variables)
    return dec_op, update_op, net_variables
tensorflow.get_collection
10,869
import tensorflow as tf """Slice encoder hidden state into block_dim. Args: x: Encoder hidden state of shape [-1, hidden_size]. Returns: Sliced states of shape [-1, num_blocks, block_dim]. """ x_sliced = tf.reshape( x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim]) return x_sliced def nearest_neighbor(self, x, means): """Find the nearest element in means to elements in x. Args:
tensorflow.reshape
10,870
import tensorflow as tf

def random_net_distill(x_ph, a_ph, hidden_sizes=(400, 300), activation=tf.nn.relu,
                       output_activation=tf.tanh, action_space=None, dropout_rate=0.1):
    act_dim = a_ph.shape.as_list()[-1]
    act_limit = action_space.high[0]
    with tf.variable_scope('rnd_targ_act'):
        rnd_targ_act = act_limit * mlp(x_ph, list(hidden_sizes) + [act_dim],
                                       activation, output_activation)
    with tf.variable_scope('rnd_pred_act'):
        # rnd_pred_act = act_limit * mlp(x_ph, list(hidden_sizes) + [act_dim],
        #                                activation, output_activation)
        rnd_pred_act_in_dim = x_ph.shape.as_list()[1]
        rnd_pred_act_dropout_mask_generator = DropoutMaskGenerator(
            rnd_pred_act_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
        rnd_pred_act_dropout_mask_phs = \
            rnd_pred_act_dropout_mask_generator.generate_dropout_mask_placeholders()
tensorflow.variable_scope
10,871
import tensorflow as tf

outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)
outputs = tf.concat(outputs, 2)
self.Z_hat = tf.layers.dense(outputs, 1 + fourier_window_size // 2)
self.loss1 = tf.reduce_mean(tf.abs(self.Y_hat - self.Y))
self.loss2 = tf.reduce_mean(tf.abs(self.Z_hat - self.Z))
self.loss = self.loss1 + self.loss2
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)

# In[3]:
tensorflow.train.AdamOptimizer
10,872
import tensorflow as tf

        flat_shape = (np.prod(shape[:-1]), shape[-1])
    else:
        raise NotImplementedError
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == flat_shape else v  # pick the one with the correct shape
    q = q.reshape(shape)
    return (scale * q[:shape[0], :shape[1]]).astype(np.float32)

return _ortho_init

####################################################

def _build_anet(self, name, trainable):
    # define the Actor's new and old network models
    with tf.variable_scope(name):
        c0 = tf.cast(self.tfs, tf.float32) / 255.
        c1 = tf.nn.relu(self.conv(c0, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2)))
        c2 = tf.nn.relu(self.conv(c1, 'c2', nf=64, rf=4, stride=2,
tensorflow.variable_scope
10,873
import tensorflow as tf

# Create a TensorFlow constant equal to the number of classes
C = tf.constant(N_classes, name="C")

one_hot_matrix = tf.one_hot(vect - 1, C, axis=0)  # axis=0 means it is mapping to column vectors
tensorflow.constant
10,874
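A minimal sketch of the one-hot mapping in the record above: with axis=0 the class dimension becomes the rows, so each label maps to a column vector (the labels and class count here are illustrative).

import tensorflow as tf

C = tf.constant(4, name="C")
vect = tf.constant([1, 3, 2])
one_hot_matrix = tf.one_hot(vect - 1, C, axis=0)  # shape [4, 3]
with tf.Session() as sess:
    print(sess.run(one_hot_matrix))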
from tensorflow.python.training import moving_averages

def build_update_ops():
    """Builds the exponential moving average update ops."""
    update_mean_op = moving_averages.assign_moving_average(
        variable=self._moving_mean,
        value=mean,
        decay=self._decay_rate,
        name="update_moving_mean").op
    update_second_moment_op = moving_averages.assign_moving_average(
        variable=self._moving_second_moment,
        value=second_moment,
        decay=self._decay_rate,
        name="update_moving_second_moment").op
    return update_mean_op, update_second_moment_op

def build_no_ops():
tensorflow.python.training.moving_averages.assign_moving_average
10,875
import tensorflow as tf

# loss and optimizer
self.loss = tf.reduce_mean(tf.square(tf.subtract(self.value_estimate, self.target)))
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
    self.loss, global_step=tf.contrib.framework.get_global_step())

def predict(self, state, sess=None):
    sess = sess or tf.get_default_session()
    return sess.run(self.value_estimate, {self.state: [state]})[0][0]
tensorflow.contrib.framework.get_global_step
10,876
import tensorflow as tf "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
tensorflow.flags.DEFINE_string
10,877
import tensorflow as tf

# Under the benchmarking mode, the user can specify whether or not to use
# the forward-only option, which will only compute the loss function.
# forward-only cannot be enabled with eval at the same time.
tf.flags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking')
tf.flags.DEFINE_boolean('forward_only', False,
                        """whether use forward-only or training for benchmarking""")
tf.flags.DEFINE_integer('batch_size', 0, 'batch size per compute device')
tf.flags.DEFINE_integer('num_batches', 100, 'number of batches to run, excluding warmup')
tf.flags.DEFINE_integer('num_warmup_batches', None, 'number of batches to run before timing')
tf.flags.DEFINE_integer('autotune_threshold', None, 'The autotune threshold for the models')
tf.flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on')
tf.flags.DEFINE_integer('display_every', 10,
                        """Number of local steps after which progress is printed out""")
tf.flags.DEFINE_string('data_dir', None,
                       """Path to dataset in TFRecord format (aka Example protobufs).
                       If not specified, synthetic data will be used.""")
tf.flags.DEFINE_string('data_name', None,
                       """Name of dataset: imagenet or flowers.
tensorflow.flags.DEFINE_integer
10,878
import tensorflow as tf

self.feature_size = data_set.feature_size
if self.hparams.ranker_learning_rate < 0:
    self.ranker_learning_rate = tf.Variable(float(self.hparams.learning_rate), trainable=False)
else:
    self.ranker_learning_rate = tf.Variable(float(self.hparams.ranker_learning_rate), trainable=False)
self.learning_rate = self.ranker_learning_rate
# self.weighs_propen=

# Feeds for inputs.
self.is_training = tf.placeholder(tf.bool, name="is_train")
self.docid_inputs = []  # a list of top documents
self.letor_features = tf.placeholder(tf.float32, shape=[None, self.feature_size],
                                     name="letor_features")  # the letor features for the documents
self.labels = []  # the labels for the documents (e.g., clicks)
self.types = []
for i in range(self.max_candidate_num):
    self.docid_inputs.append(tf.placeholder(tf.int64, shape=[None],
tensorflow.placeholder
10,879
import tensorflow as tf

# the goal is to make the loss decrease
adv_x = x - r

if (clip_min is not None) and (clip_max is not None):
    adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)

return adv_x
tensorflow.clip_by_value
10,880
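A minimal sketch of the tf.clip_by_value step in the record above, with illustrative values and bounds.

import tensorflow as tf

adv_x = tf.constant([-1.5, 0.3, 2.0])
clipped = tf.clip_by_value(adv_x, 0.0, 1.0)
with tf.Session() as sess:
    print(sess.run(clipped))  # [0.  0.3 1. ]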
from tensorflow.python.ops import math_ops

    Raises:
        ValueError: If `weights` is not `None` and its shape doesn't match `values`,
            or if either `metrics_collections` or `updates_collections` are not a
            list or tuple.
    """
    check_ops.assert_type(values, dtypes.bool)
    count = _create_local('count', shape=[])

    values = math_ops.to_float(values)
    if weights is not None:
        weights = math_ops.to_float(weights)
        values = math_ops.mul(values, weights)

    value_tensor = array_ops.identity(count)
    update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

    if metrics_collections:
        ops.add_to_collections(metrics_collections, value_tensor)

    if updates_collections:
        ops.add_to_collections(updates_collections, update_op)
tensorflow.python.ops.math_ops.to_float
10,881
import tensorflow as tf

num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes, is_training=False)
predictions = tf.argmax(logits, 1)

with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
tensorflow.argmax
10,882
import tensorflow as tf

inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
    inputs=inputs, filters=filters, kernel_size=3, strides=strides,
    data_format=data_format)

inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
    inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
    data_format=data_format)
inputs = batch_norm(inputs, training, data_format)

inputs += shortcut
tensorflow.nn.relu
10,883
import tensorflow as tf

        The output tensor of the block; shape should match inputs.
    """
    shortcut = inputs
    inputs = batch_norm(inputs, training, data_format)
    inputs = tf.nn.relu(inputs)

    # The projection shortcut should come after the first batch norm and ReLU
    # since it performs a 1x1 convolution.
tensorflow.nn.relu
10,884
import tensorflow as tf

    int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16)

output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3),
                           [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3),
                           [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
tensorflow.concat
10,885
import tensorflow as tf with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
tensorflow.constant
10,886
import tensorflow as tf

                    weights_regularizer=weights_regularizer,
                    biases_regularizer=biases_regularizer,
                    biases_initializer=tf.constant_initializer(0.0)):

    gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(
        self.get_gtboxes_and_label,
        inp=[inputs_list[i][1], inputs_list[i][2], inputs_list[i][3]],
        Tout=[tf.float32, tf.float32])
    gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
    gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])

    if cfgs.ANGLE_RANGE == 180:
        gtboxes_and_label_r_ = tf.py_func(coordinate_present_convert,
                                          inp=[gtboxes_and_label_r, -1],
                                          Tout=tf.float32)
        gtboxes_and_label_r_ = tf.reshape(gtboxes_and_label_r_, [-1, 6])

        gt_encode_label = tf.py_func(angle_label_encode,
                                     inp=[gtboxes_and_label_r_[:, -2], cfgs.ANGLE_RANGE,
tensorflow.reshape
10,887
import tensorflow as tf

    geq = tf.cast(tgt_dif > 0, tf.bool)
    tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
    pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
    loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
    cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / \
        tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)

    unorm_w = tf.exp((tgt_flat1 + tgt_flat2) / temp)
    loss = unorm_w * loss / (tf.reduce_sum(unorm_w))
    a = tf.print(tf.reduce_sum(unorm_w))
    with tf.control_dependencies([a]):
        final_loss = tf.reduce_sum(loss)
    return final_loss, cstr_pct

def contra_traj_lossV8(pred, tgt, horizon=12):
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
    horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0)
    horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0)
    pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
tensorflow.control_dependencies
10,888
import tensorflow as tf

    return tf.maximum(x, leak * x)

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d"):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        # print("c", w.get_shape())
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv

class batch_norm(object):
    def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name

    def __call__(self, x):
tensorflow.nn.bias_add
10,889
import tensorflow

#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import h5py
import numpy as np
import tensorflow

if int(tensorflow.__version__.split(".")[0]) == 2:
    import tensorflow.compat.v1 as tf
else:
    import tensorflow as tf

from datetime import datetime, timedelta

def input_iterator(data_root, subject_id, train=False):
    fnames = [name.split('.')[0]
              for name in os.listdir(os.path.join(data_root, subject_id))
              if not name.startswith('.')]
    fnames.sort()
    for i in range(len(fnames) - 1):
        assert (datetime.strptime(fnames[i+1], "%Y-%m-%d").date()
                - datetime.strptime(fnames[i], "%Y-%m-%d").date()) == timedelta(days=1)
tensorflow.__version__.split
10,890
import tensorflow as tf

    name = 'cond_update_grad'
    op = tf.cond(pred, update_grad, tf.no_op, name=name).op
    return op

if __name__ == '__main__':
    # run it with "python -m tensorpack.tfutils.optimizer"
    x = tf.get_variable('x', shape=[6])
    cost = tf.reduce_sum(tf.abs(x), name='cost')
    opt = tf.train.GradientDescentOptimizer(0.01)
    opt = AccumGradOptimizer(opt, 5)
    min_op = opt.minimize(cost)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    with sess.as_default():
        for k in range(20):
            min_op.run()
            print(x.eval())
tensorflow.train.GradientDescentOptimizer
10,891
import tensorflow as tf

    weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
else:
    # Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
    wp = get_variable('Wp', [state_size, state_size])
    vp = get_variable('vp', [state_size, 1])

    pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
    pos = tf.floor(encoder_input_length * pos)
    pos = tf.reshape(pos, [-1, 1])
    pos = tf.minimum(pos, encoder_input_length - 1)

    idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
    idx = tf.reshape(idx, [-1, attn_length])

    low = pos - encoder.attn_window_size
    high = pos + encoder.attn_window_size

    mlow = tf.to_float(idx < low)
    mhigh = tf.to_float(idx > high)
    m = mlow + mhigh
    m += tf.to_float(idx >= encoder_input_length)
tensorflow.range
10,892
import tensorflow as tf

save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded")
try:
    gfile.DeleteRecursively(save_dir)
except OSError:
    pass  # Ignore
gfile.MakeDirs(save_dir)

with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
    with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(111, name="v0")
    with sess.graph.device("/cpu:1"):
        v1 = tf.Variable(222, name="v1")
    save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2)
    tf.initialize_all_variables().run()
    self.assertEqual([], save.last_checkpoints)

    s1 = save.save(sess, os.path.join(save_dir, "s1"))
    self.assertEqual([s1], save.last_checkpoints)
    self.assertEqual(2, len(gfile.Glob(s1)))
    self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))

    s2 = save.save(sess, os.path.join(save_dir, "s2"))
    self.assertEqual([s1, s2], save.last_checkpoints)
    self.assertEqual(2, len(gfile.Glob(s1)))
    self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
    self.assertEqual(2, len(gfile.Glob(s2)))
    self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
tensorflow.initialize_all_variables
10,893
import tensorflow as tf

    LAMBDA = 10.0
    self.g_loss = tf.add((G_loss_A + G_loss_B),
                         (recon_loss_A + recon_loss_B) * LAMBDA, name='G_loss_total')
    self.d_loss = tf.add(D_loss_A, D_loss_B, name='D_loss_total')

    self.collect_variables('gen', 'discrim')
    add_moving_summary(recon_loss_A, recon_loss_B, self.g_loss, self.d_loss)

def _get_optimizer(self):
    lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
    return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)

def get_data(datadir, isTrain=True):
    if isTrain:
        augs = [
            imgaug.Resize(int(SHAPE * 1.12)),
            imgaug.RandomCrop(SHAPE),
            imgaug.Flip(horiz=True),
tensorflow.get_variable
10,894
import tensorflow as tf

    raise NotImplementedError()

def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()

def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

@classmethod
def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with tf.gfile.Open(input_file, "r") as f:
        reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
        lines = []
        for line in reader:
            lines.append(line)
        return lines

class XnliProcessor(DataProcessor):
    """Processor for the XNLI data set."""

    def __init__(self):
        self.language = "zh"
tensorflow.gfile.Open
10,895
import tensorflow as tf

    classes,
    min_score_thresh=0.65,
    min_iou_thresh=0.5,
    is_class_agnostic=False)
nms_masks_expected2 = tf.stack([mask0, mask1, mask4, mask2])
nms_scores_expected2 = tf.constant([1.0, 0.9, 0.85, 0.8], dtype=tf.float32)
nms_classes_expected2 = tf.constant([1, 2, 2, 3], dtype=tf.int32)

self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy())
self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy())
self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy())
tensorflow.constant
10,896
import tensorflow as tf

def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0,
             is_train=None, scope="native_gru"):
    self.num_layers = num_layers
    self.grus = []
    self.inits = []
    self.dropout_mask = []
    self.scope = scope
    for layer in range(num_layers):
        input_size_ = input_size if layer == 0 else 2 * num_units
        gru_fw = tf.contrib.rnn.GRUCell(num_units)
        gru_bw = tf.contrib.rnn.GRUCell(num_units)
        init_fw = tf.tile(tf.Variable(tf.zeros([1, num_units])), [batch_size, 1])
        init_bw = tf.tile(tf.Variable(tf.zeros([1, num_units])), [batch_size, 1])
        mask_fw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
                          keep_prob=keep_prob, is_train=is_train, mode=None)
        mask_bw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
tensorflow.contrib.rnn.GRUCell
10,897
import tensorflow as tf

elif actL == 'esp' or actL == 'relu':
    # r2 score
    norm = tf.reduce_mean(tf.squared_difference(Y, tf.reduce_mean(Y)))
    accuracy = 1 - tf.divide(tf.reduce_mean(tf.squared_difference(an, Y)), norm)
elif actL == 'softmax':
    # accuracy score for multiclass classification
    Yp = tf.sigmoid(betan * hn)
    correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
    accuracy = tf.reduce_mean(tf.cast(correct, "float"))

# ----------------- Initialize the graph and start the session -----------------
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # Run the initialization
tensorflow.cast
10,898
import tensorflow as tf

if cnn_rnn_zack:
    assert audio_context_len == 1
    assert zack_hack > 0 and zack_hack % 2 == 0

export_feat_tensors = {}

# Input tensors
feats_audio_nunroll = tf.placeholder(
    dtype,
    shape=[batch_size, rnn_nunroll + zack_hack, audio_context_len, audio_nbands, audio_nchannels],
    name='feats_audio')
feats_other_nunroll = tf.placeholder(dtype, shape=[batch_size, rnn_nunroll, nfeats],
                                     name='feats_other')
print('feats_audio: {}'.format(feats_audio_nunroll.get_shape()))
print('feats_other: {}'.format(feats_other_nunroll.get_shape()))

if mode != 'gen':
    targets_nunroll = tf.placeholder(dtype, shape=[batch_size, rnn_nunroll])
    # TODO: tf.ones acts as an overridable placeholder but this is still awkward
    target_weights_nunroll = tf.ones([batch_size, rnn_nunroll], dtype)

# Reshape input tensors to remove nunroll dim; will briefly restore later during RNN if necessary
if cnn_rnn_zack:
    feats_audio = tf.reshape(feats_audio_nunroll,
                             shape=[batch_size, rnn_nunroll + zack_hack, audio_nbands, audio_nchannels])
else:
    feats_audio = tf.reshape(feats_audio_nunroll,
                             shape=[batch_size * rnn_nunroll, audio_context_len, audio_nbands, audio_nchannels])
feats_other = tf.reshape(feats_other_nunroll, shape=[batch_size * rnn_nunroll, nfeats])
if mode != 'gen':
    targets = tf.reshape(targets_nunroll, shape=[batch_size * rnn_nunroll])
    target_weights = tf.reshape(target_weights_nunroll, shape=[batch_size * rnn_nunroll])
tensorflow.placeholder
10,899