Dataset columns: content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
def pack(a: ArrayLike, ox: int, oy: int, wx: int, wy: int, sx: int, sy: int,
         px: int = 0, py: int = 0, is_column: bool = True) -> ShapeletsArray:
    """
    Reverses the :obj:`~shapelets.compute.unpack` operation

    For a thorough explanation of this method, consult the `ArrayFire documentation
    <https://arrayfire.org/docs/group__image__func__wrap.htm>`_

    Parameters
    ----------
    a: ArrayLike
        Input array
    ox: int
        Output size for 1st dimension
    oy: int
        Output size for 2nd dimension
    wx: int
        Window size along 1st dimension
    wy: int
        Window size along 2nd dimension
    sx: int
        Stride along 1st dimension
    sy: int
        Stride along 2nd dimension
    px: int (default: 0)
        Padding along 1st dimension
    py: int (default: 0)
        Padding along 2nd dimension
    is_column: bool (default: True)
        Determines if an output patch is formed from a column (if true) or a row (if false)

    Returns
    -------
    ShapeletsArray
        A new array instance

    Examples
    --------
    >>> import shapelets.compute as sc
    >>> a = sc.array([1,2,3,4,5,6,7,8,9])
    >>> w = sc.unpack(a, 3, 1, 3, 1)
    >>> sc.pack(w, 9, 1, 3, 1, 3, 1)
    [9 1 1 1]
        1
        2
        3
        4
        5
        6
        7
        8
        9
    """
    return _pygauss.pack(a, ox, oy, wx, wy, sx, sy, px, py, is_column)
ac1af1eeb76fed5ecf4a1d68f17a6683415a2f6d
35,259
def maybe_create_token_network(
    token_network_proxy: TokenNetworkRegistry, token_proxy: CustomToken
) -> TokenNetworkAddress:
    """ Make sure the token is registered with the node's network registry. """
    block_identifier = token_network_proxy.rpc_client.get_confirmed_blockhash()
    token_address = token_proxy.address

    token_network_address = token_network_proxy.get_token_network(
        token_address=token_address, block_identifier=block_identifier
    )

    if token_network_address is None:
        _, new_token_network_address = token_network_proxy.add_token(
            token_address=token_address,
            channel_participant_deposit_limit=TokenAmount(UINT256_MAX),
            token_network_deposit_limit=TokenAmount(UINT256_MAX),
            given_block_identifier=block_identifier,
        )
        return new_token_network_address
    else:
        return token_network_address
1c477f8ad02ac99c2039d59dc04eef5f6ea6b1ed
35,260
def rmsprop(grad, init_params, callback=None, num_iters=100,
            step_size=0.1, gamma=0.9, eps=10**-8):
    """Root mean squared prop: See Adagrad paper for details."""
    flattened_grad, unflatten, x = flatten_func(grad, init_params)

    avg_sq_grad = np.ones(len(x))
    for i in range(num_iters):
        g = flattened_grad(x, i)
        if callback:
            callback(unflatten(x), i, unflatten(g))
        avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)
        x = x - step_size * g / (np.sqrt(avg_sq_grad) + eps)
    return unflatten(x)
6c794d5a16dc7b46c055edfb714995f00450eb92
35,261
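A minimal usage sketch for the rmsprop optimizer above. The snippet relies on flatten_func and np being in scope; this sketch assumes the autograd library (whose autograd.misc.flatten.flatten_func matches the helper used here), and the quadratic loss is purely illustrative.

import autograd.numpy as np
from autograd import grad
from autograd.misc.flatten import flatten_func  # the helper the snippet relies on

def loss(params, _iteration):
    # Simple quadratic bowl with its minimum at params == 3.0.
    return np.sum((params - 3.0) ** 2)

trained = rmsprop(grad(loss), init_params=np.zeros(2), num_iters=200)
print(trained)  # should approach [3.0, 3.0]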
import re


def parse_transceiver_dom_sensor(output_lines):
    """
    @summary: Parse the list of transceiver from DB table TRANSCEIVER_DOM_SENSOR content
    @param output_lines: DB table TRANSCEIVER_DOM_SENSOR content output by 'redis' command
    @return: Return parsed transceivers in a list
    """
    res = []
    p = re.compile(r"TRANSCEIVER_DOM_SENSOR\|(Ethernet\d+)")
    for line in output_lines:
        m = p.match(line)
        assert m, "Unexpected line %s" % line
        res.append(m.group(1))
    return res
9a9e069543a8a80b9e741452c37ed1c665b56398
35,262
def get_excerpt(post):
    """Returns an excerpt between ["] and [/"]

    post -- BBCode string"""
    match = _re_excerpt.search(post)
    if match is None:
        return ""
    excerpt = match.group(0)
    excerpt = excerpt.replace(u'\n', u"<br/>")
    return _re_remove_markup.sub("", excerpt)
b417fc18604020e91a9b5b4f4b154fba7fcbcdc1
35,263
def dropblock(net, is_training, keep_prob, dropblock_size,
              data_format='channels_first'):
  """DropBlock: a regularization method for convolutional neural networks.

  DropBlock is a form of structured dropout, where units in a contiguous
  region of a feature map are dropped together. DropBlock works better than
  dropout on convolutional layers due to the fact that activation units in
  convolutional layers are spatially correlated.
  See https://arxiv.org/pdf/1810.12890.pdf for details.

  Args:
    net: `Tensor` input tensor.
    is_training: `bool` for whether the model is training.
    keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock. "None"
        means no DropBlock.
    dropblock_size: `int` size of blocks to be dropped by DropBlock.
    data_format: `str` either "channels_first" for `[batch, channels, height,
        width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    A version of input tensor with DropBlock applied.

  Raises:
    ValueError: if width and height of the input tensor are not equal.
  """
  if not is_training or keep_prob is None:
    return net

  tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(
      dropblock_size, net.shape))

  if data_format == 'channels_last':
    _, width, height, _ = net.get_shape().as_list()
  else:
    _, _, width, height = net.get_shape().as_list()
  if width != height:
    raise ValueError('Input tensor with width!=height is not supported.')

  dropblock_size = min(dropblock_size, width)
  # seed_drop_rate is the gamma parameter of DropBlock.
  seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (
      width - dropblock_size + 1)**2

  # Forces the block to be inside the feature map.
  w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
  valid_block_center = tf.logical_and(
      tf.logical_and(w_i >= int(dropblock_size // 2),
                     w_i < width - (dropblock_size - 1) // 2),
      tf.logical_and(h_i >= int(dropblock_size // 2),
                     h_i < width - (dropblock_size - 1) // 2))

  valid_block_center = tf.expand_dims(valid_block_center, 0)
  valid_block_center = tf.expand_dims(
      valid_block_center, -1 if data_format == 'channels_last' else 0)

  randnoise = tf.random_uniform(net.shape, dtype=tf.float32)
  block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
      (1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
  block_pattern = tf.cast(block_pattern, dtype=tf.float32)

  if dropblock_size == width:
    block_pattern = tf.reduce_min(
        block_pattern,
        axis=[1, 2] if data_format == 'channels_last' else [2, 3],
        keepdims=True)
  else:
    if data_format == 'channels_last':
      ksize = [1, dropblock_size, dropblock_size, 1]
    else:
      ksize = [1, 1, dropblock_size, dropblock_size]
    block_pattern = -tf.nn.max_pool(
        -block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',
        data_format='NHWC' if data_format == 'channels_last' else 'NCHW')

  percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(
      tf.size(block_pattern), tf.float32)

  net = net / tf.cast(percent_ones, net.dtype) * tf.cast(
      block_pattern, net.dtype)
  return net
de401f315590505e5db9b58c78a13fdaa3b7f1bb
35,264
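A hedged usage sketch for dropblock() above. The snippet targets the TF1 API (tf.logging, tf.random_uniform), so this assumes tensorflow.compat.v1 is available; the shapes are illustrative.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# NCHW feature map: batch 8, 32 channels, 56x56 spatial (width == height required).
feature_map = tf.placeholder(tf.float32, shape=[8, 32, 56, 56])
regularized = dropblock(feature_map, is_training=True, keep_prob=0.9,
                        dropblock_size=7, data_format='channels_first')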
def certify(String, cert, password):
    """check a certificate for a string"""
    return certificate(String, password) == cert
05ecd81e4738c4304e7d37c6c530035375ded09b
35,266
def is_same_float(a, b, tolerance=1e-09):
    """Return true if the two float numbers (a, b) are almost equal."""
    abs_diff = abs(a - b)
    return abs_diff < tolerance
a8c10ae330db1c091253bba162f124b10789ba13
35,267
def generate_grid_speed(method, shape, speed_range):
    """Generate a speed distribution according to sampling method.

    Parameters
    ----------
    method : str
        Method for generating the speed distribution.
    shape : tuple
        Shape of grid that the speed distribution should be defined on.
    speed_range : tuple of float
        Minimum and maximum allowed speeds.

    Returns
    -------
    speed : np.ndarray
        Speed values matched to the shape of the grid, and in the allowed
        range, sampled according to input method.
    """
    if method == 'flat':
        speed = speed_range[0] * np.ones(shape)
    elif method == 'random':
        speed = speed_range[0] + np.random.random(shape) * (speed_range[1] - speed_range[0])
    elif method == 'ifft':
        values = []
        for length in shape:
            values.append(ifft_sample_1D(length))
        output = np.einsum(subs[len(values)], *values)
        speed = speed_range[0] + output * (speed_range[1] - speed_range[0])
    elif method == 'fourier':
        output = fourier_sample(shape)
        speed = speed_range[0] + output * (speed_range[1] - speed_range[0])
    elif method == 'mixed_random_ifft':
        if np.random.rand() > 0.5:
            speed = generate_grid_speed('random', shape, speed_range)
        else:
            speed = generate_grid_speed('ifft', shape, speed_range)
    elif method == 'mixed_random_fourier':
        if np.random.rand() > 0.5:
            speed = generate_grid_speed('random', shape, speed_range)
        else:
            speed = generate_grid_speed('fourier', shape, speed_range)
    else:
        raise ValueError(f'Speed sampling method {method} not recognized for this grid shape')
    return speed
700b1a6341bc1f218be04ff957ee3d02f9520adf
35,268
def common_atoms(cycle1, cycle2):
    """
    INPUT: two cycles with type: list of atoms
    OUTPUT: a set of common atoms
    """
    set1 = set(cycle1)
    set2 = set(cycle2)
    return set1.intersection(set2)
1e85887a6199bf88a71709057c79025c0937a420
35,269
def get_two_body_decay_scaled_for_chargeless_molecules(
    ion_pos: Array,
    ion_charges: Array,
    init_ee_strength: float = 1.0,
    register_kfac: bool = True,
    logabs: bool = True,
    trainable: bool = True,
) -> Jastrow:
    """Make molecular decay jastrow, scaled for chargeless molecules.

    The scale factor is chosen so that the log jastrow is initialized to 0 when
    electrons are at ion positions.

    Args:
        ion_pos (Array): an (nion, d) array of ion positions.
        ion_charges (Array): an (nion,) array of ion charges, in units of one
            elementary charge (the charge of one electron)
        init_ee_strength (float, optional): the initial strength of the
            electron-electron interaction. Defaults to 1.0.
        register_kfac (bool, optional): whether to register the computation with
            KFAC. Defaults to True.
        logabs (bool, optional): whether to return the log jastrow (True) or the
            jastrow (False). Defaults to True.
        trainable (bool, optional): whether to allow the jastrow to be trainable.
            Defaults to True.

    Returns:
        Callable: a flax Module with signature (r_ei, r_ee) -> jastrow or log jastrow
    """
    r_ii, charge_charge_prods = physics.potential._get_ion_ion_info(
        ion_pos, ion_charges
    )
    jastrow_scale_factor = 0.5 * jnp.sum(
        jnp.linalg.norm(r_ii, axis=-1) * charge_charge_prods
    )
    jastrow = TwoBodyExpDecay(
        ion_charges,
        init_ee_strength,
        log_scale_factor=jastrow_scale_factor,
        register_kfac=register_kfac,
        logabs=logabs,
        trainable=trainable,
    )
    return jastrow
97ded768c3c0f9b61d3f5fc0e313b072b7ba241f
35,271
def compute_huffman_code(message_probs):
    """
    The input is a dictionary of messages, and their relative probabilities
    (which must add to 1). The output is a dictionary of each message and its
    new codeword (a bytestring) in the Huffman encoding.
    """
    return tree_to_encoding(message_tree(message_probs))
fa760829736810ec908ea2ed9103937477971a2d
35,272
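The helpers message_tree and tree_to_encoding are not shown above. As an illustration of the technique compute_huffman_code wraps, here is a minimal self-contained Huffman sketch using heapq; huffman_codes is a hypothetical helper, not the original implementation.

import heapq

def huffman_codes(message_probs):
    """Return {message: bitstring} for a {message: probability} dict."""
    # Each heap entry: (probability, tiebreaker, {message: code-so-far}).
    heap = [(p, i, {m: ''}) for i, (m, p) in enumerate(message_probs.items())]
    heapq.heapify(heap)
    counter = len(heap)
    while len(heap) > 1:
        p1, _, left = heapq.heappop(heap)
        p2, _, right = heapq.heappop(heap)
        # Merge the two least probable subtrees; prefix their codes with 0/1.
        merged = {m: '0' + c for m, c in left.items()}
        merged.update({m: '1' + c for m, c in right.items()})
        heapq.heappush(heap, (p1 + p2, counter, merged))
        counter += 1
    return heap[0][2]

print(huffman_codes({'a': 0.5, 'b': 0.25, 'c': 0.25}))
# e.g. {'a': '0', 'b': '10', 'c': '11'} -- a prefix-free code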
from typing import Optional
from typing import Dict
from typing import Any


def genomics_cnn(batch_size: int,
                 len_seqs: int,
                 num_motifs: int,
                 len_motifs: int,
                 num_denses: int,
                 num_classes: int = 10,
                 embed_size: int = 4,
                 one_hot: bool = True,
                 l2_weight: float = 0.0,
                 dropout_rate: float = 0.1,
                 before_conv_dropout: bool = False,
                 use_mc_dropout: bool = False,
                 spec_norm_hparams: Optional[Dict[str, Any]] = None,
                 gp_layer_hparams: Optional[Dict[str, Any]] = None,
                 **unused_kwargs: Dict[str, Any]) -> tf.keras.models.Model:
  """Builds Genomics CNN model.

  Args:
    batch_size: (int) Value of the static per_replica batch size.
    len_seqs: (int) Sequence length.
    num_motifs: (int) Number of motifs (= number of filters) to apply to input.
    len_motifs: (int) Length of the motifs (= size of convolutional filters).
    num_denses: (int) Number of nodes in the dense layer.
    num_classes: (int) Number of output classes.
    embed_size: (int) Static size of hidden dimension of the embedding output.
    one_hot: (bool) If using one hot encoding to encode input sequences.
    l2_weight: (float) L2 regularization coefficient.
    dropout_rate: (float) Fraction of the convolutional output units and dense
      layer output units to drop.
    before_conv_dropout: (bool) Whether to use filter wise dropout before the
      convolutional layer.
    use_mc_dropout: (bool) Whether to apply Monte Carlo dropout.
    spec_norm_hparams: (dict) Hyperparameters for spectral normalization.
    gp_layer_hparams: (dict) Hyperparameters for Gaussian Process output layer.
    **unused_kwargs: (dict) Unused keyword arguments that will be ignored by
      the model.

  Returns:
    (tf.keras.Model) The 1D convolutional model for genomic sequences.
  """
  # define layers
  if spec_norm_hparams:
    spec_norm_bound = spec_norm_hparams['spec_norm_bound']
    spec_norm_iteration = spec_norm_hparams['spec_norm_iteration']
  else:
    spec_norm_bound = None
    spec_norm_iteration = None

  conv_layer = models_util.make_conv2d_layer(
      use_spec_norm=(spec_norm_hparams is not None),
      spec_norm_bound=spec_norm_bound,
      spec_norm_iteration=spec_norm_iteration)
  dense_layer = models_util.make_dense_layer(
      use_spec_norm=(spec_norm_hparams is not None),
      spec_norm_bound=spec_norm_bound,
      spec_norm_iteration=spec_norm_iteration)
  output_layer = models_util.make_output_layer(
      gp_layer_hparams=gp_layer_hparams)

  # compute outputs given inputs
  inputs = tf.keras.Input(
      shape=[len_seqs], batch_size=batch_size, dtype=tf.int32)
  x = _input_embedding(
      inputs, VOCAB_SIZE, one_hot=one_hot, embed_size=embed_size)

  # filter-wise dropout before conv,
  # x.shape=[batch_size, len_seqs, vocab_size/embed_size]
  if before_conv_dropout:
    x = models_util.apply_dropout(
        x,
        dropout_rate,
        use_mc_dropout,
        filter_wise_dropout=True,
        name='conv_dropout')

  x = _conv_pooled_block(
      x,
      conv_layer=conv_layer(
          filters=num_motifs,
          kernel_size=(len_motifs, embed_size),
          strides=(1, 1),
          kernel_regularizer=tf.keras.regularizers.l2(l2_weight),
          name='conv'))
  x = models_util.apply_dropout(
      x, dropout_rate, use_mc_dropout, name='dropout1')
  x = dense_layer(
      units=num_denses,
      activation=tf.keras.activations.relu,
      kernel_regularizer=tf.keras.regularizers.l2(l2_weight),
      name='dense')(x)
  x = models_util.apply_dropout(
      x, dropout_rate, use_mc_dropout, name='dropout2')

  if gp_layer_hparams and gp_layer_hparams['gp_input_dim'] > 0:
    # Uses random projection to reduce the input dimension of the GP layer.
    x = tf.keras.layers.Dense(
        gp_layer_hparams['gp_input_dim'],
        kernel_initializer='random_normal',
        use_bias=False,
        trainable=False,
        name='gp_random_projection')(x)
  outputs = output_layer(num_classes, name='logits')(x)

  return tf.keras.Model(inputs=inputs, outputs=outputs)
5553dfda33e2596ba3372d9b7158f9964bfd6cfd
35,273
def setDuration(*args):
    """setDuration(ALInterpolationBangBangAcceleration pObject, float pDuration)"""
    return _almathinternal.setDuration(*args)
74f8bf125266a0174aa39f6e7284854bf066a32f
35,277
def regex(pattern):
    """
    Does a pattern search using the system's `find` utility

    :param pattern: a pattern you intend to search for
    :return: String
    """
    cmd = "find " + MIGRATION_FOLDER + " -type f -exec grep -il " + pattern + " {} +"
    out = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = out.communicate()
    if not stderr:
        return stdout.decode()
    else:
        return stderr.decode()
5b5df47dc85239db2294fa36411d259617756186
35,278
def _mp_fabber_options(wsp):
    """
    :return: General Fabber options for multiphase decoding
    """
    # General options. note that the phase is always PSP number 1
    options = {
        "method" : "vb",
        "noise" : "white",
        "model" : "asl_multiphase",
        "data" : wsp.asldata,
        "mask" : wsp.rois.mask,
        "nph" : wsp.asldata.nphases,
        "ntis" : wsp.asldata.ntis,
        "repeats" : wsp.asldata.rpts[0],  # We have already checked repeats are fixed
        "save-mean" : True,
        "save-model-fit" : True,
        "max-iterations": 30,
        "PSP_byname1" : "phase",
        "PSP_byname1_prec" : 0.2,
    }

    # Spatial mode
    if wsp.mp_spatial:
        options.update({
            "method" : "spatialvb",
        })
        if wsp.mp_spatial_phase:
            # Make the phase a spatial prior
            options["PSP_byname1_type"] = "M"
        else:
            # Make the magnitudes and offsets spatial priors
            prior = 2
            for pld_idx in range(wsp.asldata.ntis):
                options.update({
                    "PSP_byname%i" % prior: "mag%i" % (pld_idx+1),
                    "PSP_byname%i_type" % prior : "M",
                    "PSP_byname%i" % (prior+1): "offset%i" % (pld_idx+1),
                    "PSP_byname%i_type" % (prior+1) : "M",
                })
            # Special case if we have only 1 PLD the magnitude and offset
            # parameters are named differently for compatibility
            options["PSP_byname2"] = "mag"
            options["PSP_byname3"] = "offset"

    # Additional user-specified multiphase fitting options override the above
    options.update(wsp.ifnone("mp_options", {}))

    return options
af1c724bd9b88a0d76e7c7d18a3fa2b19591984e
35,279
def get_player_objects_from_challenge_info(player, should_be_completed=False, search_by_discord_name=True):
    """
    Search for a challenge in the DB corresponding to the player

    param str/int player: The gamertag or id of the player to search for
    param bool should_be_completed: If the challenge should already be completed or not
    param bool search_by_discord_name: Searches for player by full discord_name instead of gamertag
    returns tuple os3_rll.models.player.Player: (p1, p2)
    """
    if isinstance(player, str):
        player = Player.get_player_id_by_username(player, discord_name=search_by_discord_name)

    with Database() as db:
        db.execute_prepared_statement(
            "SELECT `p1`, `p2` FROM `challenges` WHERE (`p1`=%s OR `p2`=%s) AND `winner` IS {} NULL ORDER BY `id` DESC".format(
                "NOT" if should_be_completed else ""
            ),
            (player, player),
        )
        if db.rowcount == 0:
            raise ChallengeException("No challenges found")
        p1, p2 = db.fetchone()
    return Player(p1), Player(p2)
e4a39d3a72063b41d5b9b92f0b1535ba164beeb1
35,280
def get_output(interpreter, score_threshold, labels):
    """Returns list of detected objects."""
    boxes = output_tensor(interpreter, 0)
    class_ids = output_tensor(interpreter, 1)
    scores = output_tensor(interpreter, 2)
    count = int(output_tensor(interpreter, 3))

    def get_label(i):
        id = int(class_ids[i])
        return labels.get(id, id)

    def make(i):
        ymin, xmin, ymax, xmax = boxes[i]
        bbox = BBox(
            xmin=np.maximum(0.0, xmin),
            ymin=np.maximum(0.0, ymin),
            xmax=np.minimum(1.0, xmax),
            ymax=np.minimum(1.0, ymax))
        return DetectedObject(
            label=get_label(i),
            score=scores[i],
            area=bbox.area,
            centroid=bbox.centroid,
            bbox=bbox)

    return [make(i) for i in range(count) if scores[i] >= score_threshold]
83518a28025d7d39bf475cd0f6aa222ad7197659
35,282
def provide_session(func):
    """
    Function decorator that provides a session if it isn't provided.
    If you want to reuse a session or run the function as part of a
    database transaction, you pass it to the function, if not this wrapper
    will create one and close it for you.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        arg_session = 'session'

        func_params = func.__code__.co_varnames
        session_in_args = arg_session in func_params and \
            func_params.index(arg_session) < len(args)
        session_in_kwargs = arg_session in kwargs

        if session_in_kwargs or session_in_args:
            return func(*args, **kwargs)
        else:
            with create_session() as session:
                kwargs[arg_session] = session
                return func(*args, **kwargs)

    return wrapper
d11233a852a8c2f4ac7a95179b588e7b497bbf3a
35,283
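A hedged usage sketch for the provide_session decorator above. It assumes create_session is a context manager yielding a database session (as in Airflow, where this pattern appears); User and my_session are hypothetical names.

@provide_session
def count_users(session=None):
    # `session` is injected by the decorator when the caller omits it.
    return session.query(User).count()

count_users()                    # decorator opens and closes a session
count_users(session=my_session)  # caller-supplied session is used as-is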
def encode_urlencoded_form(fields):
    """
    Encode dict of fields as application/x-www-form-urlencoded.
    """
    body = urlencode(fields, doseq=1)
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    return body, headers
26d4b99070e8d354775f7f2b6e97673dedd582f0
35,284
def process_wrapper(row, seq, cds_seq, seq_lookup, tile_begins, tile_ends, qual,
                    locate_log, mutrate, base, posteriorQC, adjusted_er):
    """
    Wrapper function to process each line (pair of reads)
    """
    mut_parser = locate_mut.MutParser(row, seq, cds_seq, seq_lookup, tile_begins,
                                      tile_ends, qual, locate_log, mutrate, base,
                                      posteriorQC, adjusted_er)
    hgvs, outside_mut, all_df, hgvs_r1_clusters, hgvs_r2_clusters, track_df = mut_parser._main()
    return hgvs, outside_mut, all_df, hgvs_r1_clusters, hgvs_r2_clusters, track_df
19547caddd84c1eb54b32525babf4d8b624aea70
35,285
def default_cfg():
    """
    Set parameter defaults.
    """
    # Simulation specification
    cfg_spec = dict(nfreq=20,
                    start_freq=1.e8,
                    bandwidth=0.2e8,
                    start_time=2458902.33333,
                    integration_time=40.,
                    ntimes=40,
                    cat_name="gleamegc.dat",
                    apply_gains=True,
                    apply_noise=True,
                    ant_pert=False,
                    seed=None,
                    ant_pert_sigma=0.0,
                    hex_spec=(3, 4),
                    hex_ants_per_row=None,
                    hex_ant_sep=14.6,
                    use_ptsrc=True)

    # Diffuse model specification
    cfg_diffuse = dict(use_diffuse=False,
                       nside=64,
                       obs_latitude=-30.7215277777,
                       obs_longitude=21.4283055554,
                       obs_height=1073,
                       beam_pol='XX',
                       diffuse_model='GSM',
                       eor_random_seed=42,
                       nprocs=1)

    # Beam model parameters
    cfg_beam = dict(ref_freq=1.e8,
                    spectral_index=-0.6975,
                    seed=None,
                    perturb_scale=0.0,
                    mainlobe_scale_mean=1.0,
                    mainlobe_scale_sigma=0.0,
                    xstretch_mean=1.0,
                    xstretch_sigma=0.0,
                    ystretch_mean=1.0,
                    ystretch_sigma=0.0,
                    xystretch_same=True,
                    xystretch_dist=None,
                    rotation_dist='',
                    rotation_mean=0.0,
                    rotation_sigma=0.0,
                    mainlobe_width=0.3,
                    nmodes=8,
                    beam_coeffs=[0.29778665, -0.44821433, 0.27338272,
                                 -0.10030698, -0.01195859, 0.06063853,
                                 -0.04593295, 0.0107879, 0.01390283,
                                 -0.01881641, -0.00177106, 0.01265177,
                                 -0.00568299, -0.00333975, 0.00452368,
                                 0.00151808, -0.00593812, 0.00351559])

    # Fluctuating gain model parameters
    cfg_gain = dict(nmodes=8, seed=None)

    # Noise parameters
    cfg_noise = dict(nsamp=1., seed=None, noise_file=None)

    # reflection parameters
    cfg_reflection = dict(amp=1.e-2, dly=800.)

    # xtalk parameters
    cfg_xtalk = dict(amp=1.e-2, dly=400.)

    # Combine into single dict
    cfg = {
        'sim_beam': cfg_beam,
        'sim_spec': cfg_spec,
        'sim_diffuse': cfg_diffuse,
        'sim_noise': cfg_noise,
        'sim_gain': cfg_gain,
        'sim_reflection': cfg_reflection,
        'sim_xtalk': cfg_xtalk,
    }
    return cfg
0b76e2166ce17d6ab42e4f72d7003ba6c03b11f6
35,286
def merge_outputs(*streams: Stream) -> OutputStream:
    """Include all given outputs in one ffmpeg command line."""
    return MergeOutputsNode(streams).stream()
bd6dee8b54843556426a01f691f87f28a62c1a99
35,287
def softmax(vector, theta=1.0):
    """Takes a vector (or matrix of row vectors) and returns an array where
    each row sums to 1, with elements exponentially proportional to the
    respective elements of the input.

    Parameters
    ----------
    vector : array of shape = [N, M]

    theta : float (default = 1.0)
        temperature-like divisor applied to the input prior to exponentiation

    Returns
    -------
    dist : array of shape = [N, M]
        The sum of each row is 1 and the elements are exponentially
        proportional to the respective elements of the input
    """
    w = np.atleast_2d(vector)
    e = np.exp(np.array(w) / theta)
    dist = e / np.sum(e, axis=1).reshape(-1, 1)
    return dist
06b4df32e5a04b49e3eaa1626638dad845c4b4a0
35,288
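A quick numerical check of the softmax above: rows sum to 1, and a larger theta flattens the distribution toward uniform.

import numpy as np

x = np.array([[1.0, 2.0, 3.0]])
print(softmax(x))             # ~[[0.0900, 0.2447, 0.6652]]
print(softmax(x, theta=100))  # ~[[0.3300, 0.3333, 0.3367]], near-uniform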
def scaled_elementary_effect_i(
    model, i_python, init_input_pars, stepsize, sd_i, sd_model
):
    """Scales EE by (SD_i / SD_M)"""
    ee_i = elementary_effect_i(model, i_python, init_input_pars, stepsize)
    return ee_i * (sd_i / sd_model)
c345c4e40a58b0ac9228edb0cd9e96a05d28e1db
35,289
def arch_mnasnet_b1(variant, feat_multiplier=1.0, **kwargs):
    """Creates a mnasnet-b1 model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
    Paper: https://arxiv.org/pdf/1807.11626.pdf.

    Args:
      feat_multiplier: multiplier to number of channels per layer.
    """
    arch_def = [
        # stage 0, 112x112 in
        ['ds_r1_k3_s1_c16_noskip'],
        # stage 1, 112x112 in
        ['ir_r3_k3_s2_e3_c24'],
        # stage 2, 56x56 in
        ['ir_r3_k5_s2_e3_c40'],
        # stage 3, 28x28 in
        ['ir_r3_k5_s2_e6_c80'],
        # stage 4, 14x14 in
        ['ir_r2_k3_s1_e6_c96'],
        # stage 5, 14x14 in
        ['ir_r4_k5_s2_e6_c192'],
        # stage 6, 7x7 in
        ['ir_r1_k3_s1_e6_c320_noskip']
    ]
    model_kwargs = dict(
        block_defs=decode_arch_def(arch_def),
        stem_size=32,
        feat_multiplier=feat_multiplier,
        **kwargs
    )
    return model_kwargs
25a9a0c0f4a026f122c73139cb8464c47fae299a
35,290
def accession(data, location):
    """
    Generate an accession for the given location in data.
    """
    return "{ac}:{gene}".format(
        ac=parent_accession(location),
        gene=data["gene"],
    )
a8023857b812b510990c6161194b8f85832df14a
35,291
def poly_oval(x0, y0, x1, y1, steps=20, rotation=0):
    """return an oval as coordinates suitable for create_polygon"""

    # x0,y0,x1,y1 are as create_oval

    # rotation is in degrees anti-clockwise, convert to radians
    rotation = rotation * pi / 180.0

    # major and minor axes
    a = (x1 - x0) / 2.0
    b = (y1 - y0) / 2.0

    # center
    xc = x0 + a
    yc = y0 + b

    point_list = []

    # create the oval as a list of points
    for i in range(steps):
        # Calculate the angle for this step
        # 360 degrees == 2 pi radians
        theta = (pi * 2) * (float(i) / steps)

        x1 = a * cos(theta)
        y1 = b * sin(theta)

        # rotate x, y
        x = (x1 * cos(rotation)) + (y1 * sin(rotation))
        y = (y1 * cos(rotation)) - (x1 * sin(rotation))

        point_list.append(round(x + xc))
        point_list.append(round(y + yc))

    return point_list
7a188676654ca47a53da33df40ac16387ecaa490
35,292
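A hedged tkinter usage sketch for poly_oval() above; note the function itself needs pi, sin and cos from math in scope.

import tkinter as tk
from math import pi, sin, cos  # required by poly_oval itself

root = tk.Tk()
canvas = tk.Canvas(root, width=200, height=120)
canvas.pack()
# Draw a 30-degree-rotated oval outline inside the bounding box (20,20)-(180,100).
canvas.create_polygon(poly_oval(20, 20, 180, 100, steps=40, rotation=30),
                      fill='', outline='black')
root.mainloop()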
def not_found(request):
    """Error page for 404."""
    response = render(request, "projectile/404.html")
    response.status_code = 404
    return response
fb17b8f1cdcdfa530adb569abc9b006ed2868492
35,293
def reset_universe_id():
    """
    reset_universe_id()

    Resets the auto-generated unique Universe ID counter to 10000.
    """
    return _openmoc.reset_universe_id()
bd7815a749e4d89c8d3a78aea1b24046db9c78bf
35,295
from typing import Union

import numpy as np
import pandas as pd


def native_mean(data: Union[list, np.ndarray, pd.Series]) -> float:
    """
    Calculate Mean of a list.

    :param data: Input data.
    :type data: list, np.ndarray, or pd.Series
    :return: Returns the mean.
    :rtype: float
    :example: *None*
    :note: *None*
    """
    data = _remove_nan(data=_to_list(data=data))
    return sum(data) / len(data)
3d62a3fdbb77ae0dbc8b19eaefee40abe96d3888
35,296
import json


def dataset_info_4_biogps(request, ds_id):
    """ get information about a dataset """
    ds = adopt_dataset(ds_id)
    if ds is None:
        return general_json_response(GENERAL_ERRORS.ERROR_NOT_FOUND, "dataset with this id not found")
    s = json.dumps(ds, cls=ComplexEncoder)
    oj = json.loads(s)
    del oj['metadata']
    oj['id'] = ds.id
    oj['lastmodified'] = ds.lastmodified.strftime('%b.%d, %Y')
    oj['created'] = ds.created.strftime('%b.%d, %Y')
    oj['summary_wrapped'] = ds.summary_wrapped
    oj['owner'] = ds.metadata['owner']
    if oj['owner'] == "ArrayExpress Uploader":
        oj['sample_source'] = 'http://www.ebi.ac.uk/arrayexpress/experiments/'\
            + oj['geo_gse_id'] + '/samples/'
        oj['source'] = 'http://www.ebi.ac.uk/arrayexpress/experiments/'\
            + oj['geo_gse_id']
    if 'sample_geneid' in ds.metadata:
        oj['sample_geneid'] = ds.metadata['sample_geneid']
    if ds.metadata.get('pubmed_id', None):
        oj['pubmed_id'] = ds.metadata['pubmed_id']

    factors = []
    if oj['factors']:
        for e in oj['factors']:
            i = oj['factors'].index(e)
            k = list(ds.metadata['factors'][i])[0]
            if k.startswith('GSM'):
                k = k.rstrip(' 1')
            else:
                k = k.rstrip(' ')
            factors.append({k: e})
    elif 'factors' in ds.metadata:
        # get factors from metadata['factors']
        for e in ds.metadata['factors']:
            k = list(e)[0]
            if 'factorvalue' in e[k]:
                factors.append({k: e[k]['factorvalue']})
    oj['factors'] = factors
    # ret = _contruct_meta(ds)
    # fa = get_ds_factors_keys(ds)
    # ret.update({'factors': ds.factors})
    ts = Tag.objects.get_for_object(ds)
    tags = [t.name for t in ts]
    oj['tags'] = tags
    return general_json_response(detail=oj)
d38f3bde125cb20a48a2842bdc8021f3eb032c8c
35,297
def _set_reducemax_attrs(desc_d, attrs):
    """Add additional attributes for ReduceMax."""
    backend = desc_d['process']
    if backend == 'cuda' and _reducemax_pattern(desc_d)[0]:
        attrs['enable_tile_c0'] = True
        elem_per_thread = 4
        blockdim_x = 64
        blockdim_y = 16
        griddim_x = 1
        griddim_y = _reducemax_pattern(desc_d)[1] / (blockdim_y * elem_per_thread)
        attrs['dim'] = ' 0 0 128 64 b1 t1 0 1 128 128 b0 t0'
        attrs['bind_block'] = str(griddim_x) + ' ' + str(griddim_y)
        attrs['bind_thread'] = str(blockdim_x) + ' ' + str(blockdim_y)
    return attrs
d5b50da5b3097690a0cc2ab27bbac47448f35bb7
35,298
def getTail(compiler, version):
    """
    Function which generates the Tail of a Compiler module file.

    @input compiler :: compiler name ('intel','pgi',..)
    @input version  :: version of the compiler
    @return :: list of Lua lines
    """
    strA = 'local version = "{0}"'.format(version)
    strB = 'local mdir = pathJoin(mroot,"Compiler/{0}",version)'.format(compiler.lower())
    strC = '-- a. compiled with {0}/{1}'.format(compiler.title(), version)
    strD = ' local mdir = pathJoin(mroot,"Compiler",CLUSTERNAME,"{0}",version)'.format(compiler)

    res = ['', '',
           '-- MODULEPATH modification to include packages',
           '-- that are compiled with this version of the compiler',
           '-- and available ON ALL clusters',
           strA,
           'local mroot = os.getenv("MODULEPATH_ROOT")',
           strB,
           'prepend_path("MODULEPATH",mdir)', '', '',
           '-- MODULEPATH modification to include packages',
           '-- that are:',
           strC,
           '-- b. ONLY available ON a specific cluster', '',
           'local CLUSTERNAME = nil',
           'local str = os.getenv("UUFSCELL")', '',
           'if str ~= nil then',
           ' if str == "ash.peaks" then',
           ' CLUSTERNAME = "ash"',
           ' elseif str == "ember.arches" then',
           ' CLUSTERNAME = "em"',
           ' elseif str == "kingspeak.peaks" then',
           ' CLUSTERNAME = "kp"',
           ' elseif str == "lonepeak.peaks" then',
           ' CLUSTERNAME = "lp"',
           ' end', '',
           ' if CLUSTERNAME ~= nil then',
           strD,
           ' prepend_path("MODULEPATH",mdir)',
           ' end',
           'end']
    return res
46df63461d05b26fbc5e5a45e6162a2794f92ed1
35,299
def make_attr_string(attr):
    """Returns an attribute string in the form key="val"."""
    attr_string = ' '.join('%s="%s"' % (k, v) for k, v in attr.items())
    return '%s%s' % (' ' if attr_string != '' else '', attr_string)
7185a6e725349313a4cc67ae643a18d9ab63c871
35,300
def optimize_weights(instrument, sample):
    """Optimize the weights on a particular sample"""
    guess = [1.0] * sample.shape[1]
    bounds = [(0.0, 5.0)] * sample.shape[1]

    def function(w, instrument, sample):
        """This is the function that is minimized iteratively using
        scipy.optimize.minimize to find the best weights (w)"""
        wf = weight_forecast(sample, w)
        # We introduce a capital term, as certain currencies like HKD are very
        # 'numerate', which means we need millions of HKD to get a significant position
        position = instrument.position(forecasts=wf, nofx=True, capital=10E7).rename(instrument.name).to_frame().dropna()
        # position = instrument.position(forecasts=wf, nofx=True).rename(instrument.name).to_frame().dropna()
        l = accountCurve([instrument], positions=position, panama_prices=instrument.panama_prices().dropna(), nofx=True)
        s = l.sortino()
        try:
            assert np.isnan(s) == False
        except:
            print(sample, position)
            raise
        return -s

    result = minimize(function, guess, (instrument, sample),
                      method='SLSQP',
                      bounds=bounds,
                      tol=0.01,
                      constraints={'type': 'eq', 'fun': lambda x: sample.shape[1] - sum(x)},
                      options={'eps': .1},
                      )
    return result.x
f5070a76a7cb2a3210caee01928e9ae5b83055a3
35,301
from datetime import date


def voto(ano):
    """
    FUNCTION THAT VALIDATES THE VOTER'S AGE
    :param ano: year of birth
    :return: age under 16: DOES NOT VOTE
             age between 16 and 17, or over 65: OPTIONAL VOTE
             age between 18 and 65: ELIGIBLE TO VOTE
    """
    idade = date.today().year - ano
    if idade < 16:
        return f'{amarelo}COM {idade} ANOS: NÃO VOTA{fimdacor}'
    elif 16 <= idade < 18 or idade > 65:
        return f'{amarelo}COM {idade} ANOS: VOTO OPCIONAL{fimdacor}'
    else:
        return f'{amarelo}COM {idade} ANOS: APTO A VOTAR{fimdacor}'
ccd103a7fcd31d021c1b563df3c883d8ea81d668
35,303
import curses


def new_border_and_win(ws):
    """
    Returns two curses windows, one serving as the border, the other as
    the inside from these *_FRAME tuples above.
    """
    return (
        curses.newwin(ws[1][1], ws[1][0], ws[0][1], ws[0][0]),
        curses.newwin(ws[1][1] - 2, ws[1][0] - 2, ws[0][1] + 1, ws[0][0] + 1),
    )
cec302bda38ba5fa9d0c88dbfac1c501984a96a0
35,304
import socket


def mk_sock(mcast_host, mcast_ip, mcast_port):
    """
    multicast socket setup
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setsockopt(
        socket.IPPROTO_IP,
        socket.IP_ADD_MEMBERSHIP,
        socket.inet_aton(mcast_ip) + socket.inet_aton(mcast_host),
    )
    sock.bind((mcast_host, mcast_port))
    return sock
702a95e5baec4ecf54206f8f16cfa22d8365f8a0
35,307
def next_line_start_or_here(text, pos):
    """Find next line start, or `pos` if `pos` is already a line start
    """
    if pos == 0 or (pos - 1 < len(text) and text[pos - 1] == "\n"):
        return pos
    return next_line_start(text, pos)
623be9a39064e2a67fc79ca0a12e95477071fc35
35,309
def getimdata(cubenm, verbose=False):
    """Get fits image data
    """
    if verbose:
        print(f'Getting image data from {cubenm}')
    with fits.open(cubenm, memmap=True, mode='denywrite') as hdu:
        dxas = hdu[0].header['CDELT1'] * -1 * u.deg
        dyas = hdu[0].header['CDELT2'] * u.deg
        nx, ny = hdu[0].data[0, 0, :, :].shape[0], hdu[0].data[0, 0, :, :].shape[1]
        old_beam = Beam.from_fits_header(
            hdu[0].header
        )
        datadict = {
            'image': hdu[0].data[0, 0, :, :],
            'header': hdu[0].header,
            'oldbeam': old_beam,
            'nx': nx,
            'ny': ny,
            'dx': dxas,
            'dy': dyas,  # was dxas in the original; dyas is clearly the intended y increment
        }
    return datadict
6ea45ba1dbbf46d9d6518a28e643bb67d3a6d627
35,310
import math


def N(latitude):
    """
    Transverse radius of curvature. Returns radius of curvature in
    east-west direction.

    latitude: Latitude in radians.
    """
    return a / math.sqrt(1 - e2 * pow(math.sin(latitude), 2.0))
ac1bbf3c7f6aa28251d25a584ec283a09b9e01b5
35,311
def cmd(command_id):
    """ A helper function for identifying command functions """
    def decorator(func):
        func.__COMMAND_ID__ = command_id
        return func
    return decorator
cff8664ad18c78629bb3c1b4946be592142711e0
35,312
import json


def getBatchExperiments():
    """
    return the information related to the experiments of a batch

    :return: information about the experiments
    :rtype: Dict
    """
    data = request.json['data']
    experiments = []
    for key in data:
        batch_experiments = queueManager.getBatchExperiments(key)
        for experiment in batch_experiments:
            new_exp = experiment
            new_exp["optimization_data"] = queueManager.getExperimentInfo(
                experiment["batchId"], experiment["experimentId"])
            experiments.append(new_exp)
    return json.dumps(experiments)
1762c7449800ce38f0ffc867aa0962827b561a5b
35,313
def noiseProb(ksStat, Ntot):
    """pvalue = noiseProb(ksStat, Ntot)

    Returns probability of being in same distribution for given K-S statistic"""
    s = Ntot * pow(ksStat, 2)

    # For d values that are in the far tail of the distribution (i.e.
    # p-values > .999), the following lines will speed up the computation
    # significantly, and provide accuracy up to 7 digits.
    if (s > 7.24) or ((s > 3.76) and (Ntot > 99)):
        return 2.0 * exp(-1.0 * (2.000071 + 0.331 / sqrt(Ntot) + 1.409 / Ntot) * s)
    else:
        # Express d as d = (k-h)/Ntot, where k is a +ve integer and 0 < h < 1.
        k = ceil(ksStat * Ntot)
        h = k - ksStat * Ntot
        m = 2 * k - 1.0

        # Create the H matrix, which describes the CDF, as described in
        # Marsaglia, et al.
        if m > 1.0:
            c = [1.0 / gamma(x) for x in np.linspace(2, m + 1, m)]
            r = np.linspace(0, 0, m)
            r[0] = 1.0
            r[1] = 1.0
            T = toeplitz(c, r)
            T[:, 0] = T[:, 0] - [pow(h, x) / gamma(x + 1) for x in np.linspace(1, m, m)]
            # T[m-1, :] = np.fliplr(T[:, 0])
            # Try this because fliplr is throwing a fit...
            L = len(T[:, 0])
            T[m - 1, :] = [T[L - x - 1, 0] for x in range(L)]
            T[m - 1, 1] = (1.0 - 2.0 * pow(h, m) + pow(max(0, 2.0 * h - 1.0), m)) / gamma(m + 1)
        else:
            T = (1.0 - 2.0 * pow(h, m) + pow(max(0, 2.0 * h - 1), m)) / gamma(m + 1)
            T = np.array([T, T])  # Make T appear to be a 2D array

        # Scaling before raising the matrix to a power
        if not np.isscalar(T):
            lmax = abs(np.amax(np.linalg.eig(T)[0]))
            T = np.power(T / lmax, Ntot)
        else:
            lmax = 1.0

        # Pr(D_Ntot < d) = Ntot!/Ntot^Ntot * tkk, where tkk is the kth diagonal
        # element of T^Ntot.
        # p-value = Pr(Dn > d) = 1 - Pr(Dn < d)
        return 1.0 - exp(gammaln(Ntot + 1) + Ntot * log(lmax) - Ntot * log(Ntot)) * T[k - 1, k - 1]
2e4a99c631ef8ba7225aa8e328dfa03de68c18ed
35,314
def _check_nmant(np_type, nmant):
    """ True if fp type `np_type` seems to have `nmant` significand digits

    Note 'digits' does not include implicit digits.  And in fact if there are
    no implicit digits, the `nmant` number is one less than the actual digits.
    Assumes base 2 representation.

    Parameters
    ----------
    np_type : numpy type specifier
        Any specifier for a numpy dtype
    nmant : int
        Number of digits to test against

    Returns
    -------
    tf : bool
        True if `nmant` is the correct number of significand digits, false
        otherwise
    """
    np_type = np.dtype(np_type).type
    max_contig = np_type(2 ** (nmant + 1))  # maximum of contiguous integers
    tests = max_contig + np.array([-2, -1, 0, 1, 2], dtype=np_type)
    return np.all(tests - max_contig == [-2, -1, 0, 0, 2])
9e1905d6efb1f2c5dcdc668128a614a48ef6c7f3
35,315
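A quick check against IEEE-754 formats: double precision has 52 explicit significand bits, so 2**53 is the point past which consecutive integers stop being exactly representable.

import numpy as np

print(_check_nmant(np.float64, 52))  # True
print(_check_nmant(np.float64, 53))  # False
print(_check_nmant(np.float32, 23))  # True: single precision has 23 explicit bits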
import json
import uuid
import time


def timeboard_send_steps_list(self, steps, scenario_name, timeout):
    """
    Change all steps in timeboard

    :param steps: The list of steps extracted from the scenario json file
    :type steps: list
    :param scenario_name: name of the scenario to which the steps belong
    :type scenario_name: string
    :param timeout: maximum time to wait for a reaction from the local manager
    :type timeout: float
    """
    step_list = []
    step_id_to_index = {}
    index = 0
    for step in steps:
        if not step['action']:
            step_list.append({'name': step['name'], 'eta': step['eta']})
            step_id_to_index[step['id']] = index  # fixed: was step[id], indexing with the builtin
            index += 1
    goal = RequestToLocalManagerGoal(action="stepsList", payload=json.dumps({
        'id': str(uuid.uuid4()),
        'timestamp': time.time(),
        'args': {
            'scenarioName': scenario_name,
            'stepsList': step_list
        }
    }))
    return self._send_goal_and_wait(goal, timeout), step_id_to_index
8d3cbbd2f61b1f6c4169737f72972c52ebc14fe1
35,316
from typing import Union
from typing import Dict


def get_indices(s: Union[str, 'ChainedBase']) -> Dict[int, str]:
    """ Retrieve a dict of characters and escape codes with their real index
        into the string as the key.
    """
    codeindices = get_code_indices(s)
    if not codeindices:
        # This function is not for non-escape-code stuff, but okay.
        return {i: c for i, c in enumerate(s)}

    indices = {}
    for codeindex in sorted(codeindices):
        code = codeindices[codeindex]
        if codeindex == 0:
            indices[codeindex] = code
            continue
        # Grab characters before codeindex.
        start = max(indices or {0: ''}, key=int)
        startcode = indices.get(start, '')
        startlen = start + len(startcode)
        indices.update({i: s[i] for i in range(startlen, codeindex)})
        indices[codeindex] = code

    lastindex = max(indices, key=int)
    lastitem = indices[lastindex]
    start = lastindex + len(lastitem)
    textlen = len(s)
    if start < (textlen - 1):
        # Grab chars after last code.
        indices.update({i: s[i] for i in range(start, textlen)})
    return indices
813c1b23a55400a645f6f79208b86d8c9e9d3905
35,317
import re


def parse_shortcodes(post_body):
    """
    I stole this shortcode regex from Wordpress's source. It is very confusing.
    """
    tagregexp = '|'.join([re.escape(t) for t in TAGS_WE_CAN_PARSE.keys()])
    pattern = re.compile(
        '\\[(\\[?)(' + tagregexp + ')\\b([^\\]\\/]*(?:\\/(?!\\])[^\\]\\/]*)*?)(?:(\\/)\\]|\\](?:([^\\[]*(?:\\[(?!\\/\\2\\])[^\\[]*)*)\\[\\/\\2\\])?)(\\]?)'
    )
    return re.sub(pattern, replace_tags, post_body)
2a8a76e7b34a4e176a0236740886d4e7dc89bb20
35,318
from typing import Callable


def on_start(func: Callable) -> FunctionCallback:
    """Decorator for creating a callback from a function. The function will be
    executed when the `Events.START` is triggered. The function should take
    :class:`argus.engine.State` as the first argument.

    Example:

        .. code-block:: python

            import argus
            from argus.engine import State

            @argus.callbacks.on_start
            def start_callback(state: State):
                state.logger.info("Start training!")

            model.fit(train_loader,
                      val_loader=val_loader,
                      callbacks=[start_callback])
    """
    return FunctionCallback(Events.START, func)
d3e6fbad2932b88d9545c0368b75488b21f5f798
35,319
def conditional_entropy(cond_counts):
    r"""
    Compute the conditional entropy of a conditional multinomial distribution
    given a list of lists of counts or Counters for x given each y:

        H(x|y) = \sum_y p(y) \sum_x p(x|y) \log (1 / p(x|y))
    """
    if isinstance(cond_counts, dict):
        cond_counts = list(cond_counts.values())
    cond_ents = [_ent_impl(xs) for xs in cond_counts]
    cond_ent_sum = sum(cond_ent_y * ny for cond_ent_y, ny in cond_ents)
    y_tot = sum(ny for _, ny in cond_ents)
    return cond_ent_sum / y_tot
9695e282f59a6eefe6f83217c554f734d8bdbafb
35,320
def xover_selection(snakes, survivors, opts, num_survivors):
    """ Picks parents from the current generation of snakes for crossover

    params:
        snakes: list, current generation of snakes of class Snake
        survivors: list, snakes of class Snake that survived
        opts: dict, contains hyperparameters
        num_survivors: int, how many survivors there should be

    returns: list of parents of class Snake
    """
    parents = []
    max_num_parents = opts["PopulationSize"] - num_survivors
    while len(parents) < max_num_parents:
        for survivor in survivors:
            if (len(parents) < max_num_parents):
                parents.append(survivor)
        for snake in snakes:
            if snake not in survivors:
                if snake.selected:
                    if (len(parents) < max_num_parents):
                        parents.append(snake)
    return parents
990e65aac637abe8c4c6c8a661f4a039b0900ca4
35,321
def rgb2hsv(image):
    """
    Convert RGB image to HSV.

    Parameters
    ----------
    image : af.Array
        - A 3 D arrayfire array representing a 3 channel image, or
        - A multi dimensional array representing batch of images.

    Returns
    -------
    output : af.Array
        - An HSV image.
    """
    output = Array()
    safe_call(backend.get().af_rgb2hsv(c_pointer(output.arr), image.arr))
    return output
72b76e200e1ff1fba05fe7f3211b19b34b1ef982
35,322
def make_colormap(seq, cmapname='CustomMap'):
    """Return a LinearSegmentedColormap

    seq: a sequence of floats and RGB-tuples. The floats should be increasing
    and in the interval (0,1).
    """
    seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    cdict = {'red': [], 'green': [], 'blue': []}
    for i, item in enumerate(seq):
        if isinstance(item, float):
            r1, g1, b1 = seq[i - 1]
            r2, g2, b2 = seq[i + 1]
            cdict['red'].append([item, r1, r2])
            cdict['green'].append([item, g1, g2])
            cdict['blue'].append([item, b1, b2])
    return mcolors.LinearSegmentedColormap(cmapname, cdict)
44dd4b40c4519244a857aa1e6d62b5be974f42d0
35,323
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
    """Fit coefficients for 4th order polynomial interpolation.

    Args:
        y0 (Tensor): Function value at the start of the interval.
        y1 (Tensor): Function value at the end of the interval.
        y_mid (Tensor): Function value at the mid-point of the interval.
        f0 (Tensor): Derivative value at the start of the interval.
        f1 (Tensor): Derivative value at the end of the interval.
        dt (float64): Width of the interval.

    Returns:
        `[a, b, c, d, e]`: For interpolating with the polynomial
        `p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of
        `x` between 0 (start of interval) and 1 (end of interval).
    """
    a = _dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0, f1, y0, y1, y_mid])
    b = _dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0, f1, y0, y1, y_mid])
    c = _dot_product([-4 * dt, dt, -11, -5, 16], [f0, f1, y0, y1, y_mid])
    d = dt * f0
    e = y0
    return [a, b, c, d, e]
021b045ca7635471cde1c3e3f6a887d093666bb4
35,324
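The returned coefficients are meant to be evaluated on a normalized x in [0, 1]. The companion evaluator is not shown in the snippet; this sketch mirrors how such coefficients are typically consumed, via Horner's rule.

def _interp_evaluate(coefficients, x):
    # Horner's rule for p(x) = a*x**4 + b*x**3 + c*x**2 + d*x + e.
    a, b, c, d, e = coefficients
    return (((a * x + b) * x + c) * x + d) * x + e

# Sanity checks implied by the fit: p(0) == y0 and p(1) == y1.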
def interleave_value(t0, series, begin=False, end=False):
    """Add t0 between every element of *series*"""
    T = []
    if begin:
        T += [t0]
    if len(series) > 0:
        T += [series[0]]
    for t in series[1:]:
        T += [t0, t]
    if end:
        T += [t0]
    return T
33e3d8562a482e897bb3fd8d49f33a1dfed9bfb9
35,325
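For example:

print(interleave_value(0, [1, 2, 3]))                           # [1, 0, 2, 0, 3]
print(interleave_value('-', ['a', 'b'], begin=True, end=True))  # ['-', 'a', '-', 'b', '-']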
def menu():
    """Text-based menu for a real-estate management program (Gestione Immobili)"""
    x = 1
    while x != 0:
        print(" Menu'")
        print(" Gestione Immobiliare")
        print(" INSERIMENTO IMMOBILE .........digita 1 --> ")
        print(" MODIFICA IMMOBILE .........digita 2 --> ")
        print(" CANCELLA IMMOBILE .........digita 3 --> ")
        print(" STAMPA TUTTI GLI IMMOBILI......digita 4 --> ")
        print(" INSERISCI NUOVO CLIENTE........digita 5 --> ")
        print(" STAMPA ANAGRAFICA CLIENTI......digita 6 --> ")
        print(" CERCA IMMOBILE PER INDIRIZZO...digita 7 --> ")
        print(" STAMPA IMMOBILI PER CATALOGO...digita 8 --> ")
        print("\n")
        print(" PER USCIRE ................digita 0 --> ")
        print("\n\n")
        x = input("scegli cosa vuoi fare digita 0 per uscire............... --> ")
        if x == "1":
            return 1
        elif x == "2":
            return 2
        elif x == "3":
            return 3
        elif x == "4":
            return 4
        elif x == "5":
            return 5
        elif x == "6":
            return 6
        elif x == "7":
            return 7
        elif x == "8":
            return 8
        elif x == "0":
            x = 0
        else:
            print(" Scelta non valida - solo numeri da 0 a 8")
            x = 1
    print("Hai scelto di uscire, Grazie!")
    return 0
bfb16f3a50339b6e9ed672e1002e727b10f7cc39
35,326
def named(new_name):
    """
    Sets given string as command name instead of the function name.
    The string is used verbatim without further processing.

    Usage::

        @named('load')
        def do_load_some_stuff_and_keep_the_original_function_name(args):
            ...

    The resulting command will be available only as ``load``. To add aliases
    without renaming the command, check :func:`aliases`.

    .. versionadded:: 0.19
    """
    def wrapper(func):
        setattr(func, ATTR_NAME, new_name)
        return func
    return wrapper
c47d71c4d622fdfcba5854d4f01f2148a07c36ff
35,327
def remove_junk_chars(bucket_name):
    """Remove characters that shouldn't or won't be in a bucket name"""
    name = bucket_name
    names = []

    # Remove junk chars
    junk_chars = ["'", '"', "&#39;", "!"]
    for junk_char in junk_chars:
        name = name.replace(junk_char, "")

    # Remove domains (this can be added later)
    domains = [".com", ".org", ".net", ".edu", ".gov"]
    for domain in domains:
        name = name.replace(domain, "")

    # Replace junk char with space so it can be replaced by a replacement char
    name = name.replace(",", " ")
    name = name.replace(".", " ")
    name = name.replace("*", " ")
    name = name.replace("&", " and ")

    # Remove any duplicate spaces (double space reconstructed; the flattened
    # original would loop forever replacing a single space with itself)
    while "  " in name:
        name = name.replace("  ", " ")

    # Add the name without "and" if it's there
    # (e.g. "Bob & Sue" becomes "Bob and Sue" and "Bob Sue")
    names.append(name.strip())
    if " and " in name:
        names.append(name.replace(" and ", " ").strip())

    return names
cd2a3076215a3a8bb94903e1015391abc00ecfaa
35,328
def create_2x2_arrays(num_arrays):
    """This creates a multi-dimensional array of n 2x2 arrays

    Arguments:
        num_arrays {[int]} -- [this is the number of desired arrays]

    Returns:
        [numpy array] -- [a zero filled n dimensional array with n 2x2 arrays]
    """
    temp_list = [2]
    for temp in range(0, num_arrays):
        temp_list.append(2)
    return np.zeros((temp_list))
26994d64e0dde46083481fd9f1594c9c102065f3
35,329
def calculate_gr(fr_pattern, density, composition):
    """
    Calculates a g(r) pattern from a given F(r) pattern, the material density
    and composition.

    :param fr_pattern: F(r) pattern
    :param density: density in g/cm^3
    :param composition: composition as a dictionary with the elements as keys
        and the abundances as values
    :return: g(r) pattern
    """
    return calculate_gr_raw(fr_pattern,
                            convert_density_to_atoms_per_cubic_angstrom(composition, density))
ac2ad0de0a01e9c629a2affa0a943bb6948e014c
35,330
import random


def get_batch(data_bucket, bucket_id, batch_size=1):
    """ Return one batch to feed into the model """
    # only pad to the max length of the bucket
    encoder_size, decoder_size = config.BUCKETS[bucket_id]
    encoder_inputs, decoder_inputs = [], []

    for _ in range(batch_size):
        encoder_input, decoder_input = random.choice(data_bucket)
        # pad both encoder and decoder, reverse the encoder
        encoder_inputs.append(list(reversed(_pad_input(encoder_input, encoder_size))))
        decoder_inputs.append(_pad_input(decoder_input, decoder_size))

    # now we create batch-major vectors from the data selected above.
    batch_encoder_inputs = _reshape_batch(encoder_inputs, encoder_size, batch_size)
    batch_decoder_inputs = _reshape_batch(decoder_inputs, decoder_size, batch_size)

    # create decoder_masks to be 0 for decoders that are padding.
    batch_masks = []
    for length_id in range(decoder_size):
        batch_mask = np.ones(batch_size, dtype=np.float32)
        for batch_id in range(batch_size):
            # we set mask to 0 if the corresponding target is a PAD symbol.
            # the corresponding decoder is decoder_input shifted by 1 forward.
            if length_id < decoder_size - 1:
                target = decoder_inputs[batch_id][length_id + 1]
            if length_id == decoder_size - 1 or target == config.PAD_ID:
                batch_mask[batch_id] = 0.0
        batch_masks.append(batch_mask)
    return batch_encoder_inputs, batch_decoder_inputs, batch_masks
81c17c41021509a82d9fe08f8614997bee937a3b
35,333
import html
import json


def get_report_formatter(format_name):
    """
    Get the correct report table output function for a named format

    Args:
        format_name (str): Name of the desired format

    Returns:
        A formatting output function if the format is recognized, or None
        if it is not.
    """
    format_name = get_canonical_format_name(format_name)
    if format_name == 'markdown':
        return markdown.report
    if format_name == 'html':
        return html.report
    if format_name == 'json':
        return json.report
    return None
cbb8f623715d3c272e745d3e454053e419e42960
35,334
from typing import Set


def print_rule_tree(rt: Set['FlowRule']):
    """
    Recursively explore rt-contained rules' parents.
    """
    rt = rt.copy()
    res = []
    seen = set()
    while rt:
        rule = rt.pop()
        if rule in seen:
            continue
        seen.add(rule)
        res.insert(0, rule)
        for parent_rule_groups in rule._parent_rules:
            for pr in parent_rule_groups:
                rt.add(pr)
    res.sort(key=human_rule_sorter)
    return res
05b67cda248753042f9f6fbd05dd0b84797a73a3
35,335
def sort(request):
    """
    Valid values for the 'sort' parameter used in the Index
    setops methods (intersection, union, etc.)

    Caution:
        Don't confuse this one with the "sort" fixture used
        for DataFrame.append or concat. That one has
        parameters [True, False].

        We can't combine them as sort=True is not permitted
        in the Index setops methods.
    """
    return request.param
0f1e7bb570b6f8f617a7564695c1a20d71cfbe80
35,336
from datetime import datetime

import pytz


def date_string_to_utc_float_string(date_string, timezone=None):
    """Return a utc_float_string for a given date_string

    - date_string: string form between 'YYYY' and 'YYYY-MM-DD HH:MM:SS.f'
    """
    dt = None
    s = None
    for fmt in [
        '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d %H',
        '%Y-%m-%d', '%Y-%m', '%Y'
    ]:
        try:
            dt = datetime.strptime(str(date_string), fmt)
        except ValueError:
            continue
        else:
            break
    if dt:
        if timezone:
            tz = pytz.timezone(timezone)
            dt = tz.localize(dt).astimezone(pytz.utc)
        s = dt_to_float_string(dt)
    return s
cd1b1db88d6b9add6dcd252c58877030a31038a8
35,337
import pathlib


def _find_nested(d1, d2):
    """
    Find paths in d1 that are nested inside paths in d2.

    Returns tuples with keys `(d1_key, d2_key)` where `d1_path`
    was a subdirectory of `d2_path`.
    """
    # Copy to ensure we don't alter
    # original dicts in parent scope
    d1 = d1.copy()
    d2 = d2.copy()

    # Make paths `pathlib.Path` objects
    d1 = _normalize_paths(d1, type_fn=pathlib.Path)
    d2 = _normalize_paths(d2, type_fn=pathlib.Path)

    return [
        (d1_key, d2_key)
        for d1_key, d1_path in d1.items()
        for d2_key, d2_path in d2.items()
        if d2_path in d1_path.parents
    ]
21b8a9deab9b4ce57987916e98d8ec8c321fd533
35,338
def safe(method):
    """
    Decorator to return safe in case of error.
    """
    def ret(*args, **kw):
        try:
            return method(*args, **kw)
        except Exception as e:
            log.exception(e)
    # return result
    return ret
9d3d99fec1f2e7e53a35858ad3d5d94d10e443c8
35,339
def flashpoint_alert_list_command(client: Client, args: dict) -> CommandResults:
    """
    List alerts from Flashpoint.

    :param client: Client object
    :param args: The command arguments

    :return: Standard command result or no records found message.
    """
    args = validate_alert_list_args(args)

    response = client.http_request("GET", url_suffix=URL_SUFFIX_V1['ALERTS'], params=args)

    alerts = response.get('data', [])
    if not alerts:
        return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format('alerts'))

    readable_output = prepare_hr_for_alerts(alerts)

    token_context = {
        'since': 'N/A',
        'until': 'N/A',
        'size': 'N/A',
        'scroll_id': 'N/A',
        'name': 'flashpoint-alert-list'
    }
    links = response.get('links', {}).get('next', {}).get('href')
    if links:
        token_hr = "To retrieve the next set of result use,"
        context = prepare_context_from_next_href(links)
        for con in context:
            token_context[con] = context[con][0]
            token_hr += "\n" + con + " = " + context[con][0]
        readable_output += token_hr

    for alert in alerts:
        tags = alert.get('tags', {})
        if 'archived' in tags.keys():
            alert['tags']['archived'] = True
        else:
            alert['tags']['archived'] = False
        if 'flagged' in tags.keys():
            alert['tags']['flagged'] = True
        else:
            alert['tags']['flagged'] = False

    outputs = {
        FLASHPOINT_PATHS['ALERT']: alerts,
        FLASHPOINT_PATHS['TOKEN']: token_context
    }
    outputs = remove_empty_elements(outputs)

    return CommandResults(
        outputs=outputs,
        readable_output=readable_output,
        raw_response=response
    )
703f14150c3ac1bce3d70bbe32e2e49f617295e2
35,340
def create_user(db, django_user_model: AbstractUser, test_password: str):
    """ factory for creating users """
    def make_user(username, email, first_name, last_name) -> AbstractUser:
        new_user: AbstractUser = User.objects.create(username=username,
                                                     email=email,
                                                     first_name=first_name,
                                                     last_name=last_name)
        # assign password
        new_user.set_password(test_password)
        new_user.save()
        return new_user

    yield make_user
81390a58e4a30b83515c6fa22cb0d42e16a816e6
35,341
def _read_network_data_from_h5(fname):
    """Read the network stored by the write_network_to_h5 function"""
    bias_accumulator = []
    weight_accumulator = []
    with h5py.File(fname, "r") as hdf:
        n_dense_layers = hdf["n_dense_layers"][...][0]
        activation = list(hdf["activation"].keys())[0]  # Extract activation function
        for i in range(n_dense_layers):
            bias_accumulator.append(hdf[BIAS_OUTPAT.format(i)][...])
            weight_accumulator.append(hdf[WEIGHT_OUTPAT.format(i)][...])
        xmin = hdf["xmin"][...]
        xmax = hdf["xmax"][...]
        ymin = hdf["ymin"][...]
        ymax = hdf["ymax"][...]
    dense_params = [(b, w) for b, w in zip(bias_accumulator, weight_accumulator)]
    return dense_params, xmin, xmax, ymin, ymax, activation
e78a0966d9ed30cb1c9d35447f91c5c22009fd40
35,342
import json


def credentials_from_file(file):
    """Load credentials corresponding to an evaluation from file"""
    with open(file) as file:
        return json.load(file)
8f73c595b4e61757ae454b1674a7177ab4d05059
35,343
import requests


def get_workspace_vars(auth, workspace_id):
    """ Function to get variables created in a workspace """
    headers = {"Content-Type": "application/json"}
    url = f"https://intersight.com/tfc/api/v2/workspaces/{workspace_id}/vars"
    response = requests.get(url, headers=headers, auth=auth)
    response_data = response.json()
    print(response_data)
    workspace_vars = {}
    for var in response_data["data"]:
        var_id = var["id"]
        workspace_vars[var_id] = {}
        workspace_vars[var_id]["var_name"] = var["attributes"]["key"]
        workspace_vars[var_id]["var_value"] = var["attributes"]["value"]
        workspace_vars[var_id]["sensitive"] = var["attributes"]["sensitive"]
        workspace_vars[var_id]["var_description"] = var["attributes"]["description"]
    return workspace_vars
ed05bc7fee86d0303e25fe6ea0b0fd898a08e347
35,344
def locate_btn(game, team_name, mkt_type, verbose=False):
    """
    given selenium game, team_name, and mkt_type,
    find the specific bet button for the given game

    game: selenium obj
    team_name: str
    mkt_type: 0 is point spread, 1 is moneyline, and 2 is over/under
    """
    bet_buttons = get_bet_buttons(game)
    index = btn_index(game, mkt_type, team_name)
    to_click = bet_buttons[index]
    if verbose:
        print(f"btn_index: {index}")
    return to_click
641412d41a1a90153bac456ed56702a3a5abc09c
35,345
def mailchimp_get_endpoint(**kwargs):
    """Endpoint that the mailchimp webhook hits to check that the OSF is responding"""
    return {}, http_status.HTTP_200_OK
6e46145b976dae5c77d5b8049da91464669aab40
35,346
def format_date(value, format='%Y-%m-%d'):
    """Returns a formatted time string

    :param value: The datetime object that should be formatted
    :param format: How the result should look like. A full list of available
        directives is here: http://goo.gl/gNxMHE
    """
    return value.strftime(format)
3f094918610617e644db69415d987fa770a06014
35,347
def Transpose(node):
    """(Simple) transpose

    >>> print(matlab2cpp.qscript("a = [1,2,3]; b = a.'"))
    sword _a [] = {1, 2, 3} ;
    a = irowvec(_a, 3, false) ;
    b = arma::strans(a) ;
    """
    # unknown datatype
    if not node.num:
        return "arma::strans(%(0)s)"

    """
    # colvec -> rowvec
    if node[0].dim == 1:
        node.dim = 2

    # rowvec -> colvec
    elif node[0].dim == 2:
        node.dim = 1
    """

    # not complex type
    #if node.mem < 4:
    #    return "arma::strans(", "", ")"

    return "arma::strans(", "", ")"
16f4eb901ee59c424474b5f55264109dc47e6958
35,348
import numbers
import collections.abc


def get_xml_type(val):
    """Returns the data type for the xml type attribute"""
    if type(val).__name__ in ('str', 'unicode'):
        return 'str'
    if type(val).__name__ in ('int', 'long'):
        return 'int'
    if type(val).__name__ == 'float':
        return 'float'
    if type(val).__name__ == 'bool':
        return 'bool'
    if isinstance(val, numbers.Number):
        return 'number'
    if type(val).__name__ == 'NoneType':
        return 'null'
    if isinstance(val, dict):
        return 'dict'
    if isinstance(val, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
        return 'list'
    return type(val).__name__
95a93523d0c982ed2bbd81bba528198915a7eff3
35,349
def DatetimeToWmiTime(dt):
  """Take a datetime tuple and return it as yyyymmddHHMMSS.mmmmmm+UUU string.

  Args:
    dt: A datetime object.

  Returns:
    A string in CMI_DATETIME format.
    http://www.dmtf.org/sites/default/files/standards/documents/DSP0004_2.5.0.pdf
  """
  td = dt.utcoffset()
  if td:
    offset = (td.seconds + (td.days * 60 * 60 * 24)) / 60
    if offset >= 0:
      str_offset = "+%03d" % offset
    else:
      str_offset = "%03d" % offset
  else:
    str_offset = "+000"
  return u"%04d%02d%02d%02d%02d%02d.%06d%s" % (dt.year, dt.month, dt.day,
                                               dt.hour, dt.minute, dt.second,
                                               dt.microsecond, str_offset)
706faec64a116ad4dc255b6ff9b87b4a8488bcff
35,350
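For example, a UTC+2 timestamp renders with a +120 minute offset field:

from datetime import datetime, timedelta, timezone

dt = datetime(2024, 5, 1, 12, 30, 0, 250,
              tzinfo=timezone(timedelta(hours=2)))
print(DatetimeToWmiTime(dt))  # 20240501123000.000250+120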
def _add_suffix(params, model):
    """Add derivative suffixes to a list of parameters."""
    params_full = params.copy()
    suffix = {
        "basic": {},
        "derivatives": {"derivative1"},
        "power2": {"power2"},
        "full": {"derivative1", "power2", "derivative1_power2"},
    }
    for par in params:
        for suff in suffix[model]:
            params_full.append(f"{par}_{suff}")
    return params_full
178b71bb24dde36c262d115e485b568fe0bef503
35,351
def _random_covariance_matrix(batch_size):
  """Generate a batch of random covariance matrices.

  Args:
    batch_size: Number of elements in the first dimension returned.

  Returns:
    A tensor with dimensions [batch_size, 2, 2].
  """
  # Make a random covariance matrix by taking the outer product of 10 random
  # matrices.
  x = np.random.normal(size=(batch_size, 10, 2))
  sigma = np.matmul(x.transpose(0, 2, 1), x)
  return sigma
19ab668bc43e4213ed5b4a042730f9953424b37f
35,353
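A quick sanity check on the sampler: every matrix in the batch should be symmetric and positive semi-definite (assuming the function above is in scope):

sigmas = _random_covariance_matrix(4)
print(sigmas.shape)                                    # (4, 2, 2)
print(np.allclose(sigmas, sigmas.transpose(0, 2, 1)))  # True
print(all(np.all(np.linalg.eigvalsh(s) > -1e-9) for s in sigmas))  # True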
import numpy as np

from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import tensor_format


def parse_ranges_highlight(ranges_string):
    """Process ranges highlight string.

    Args:
      ranges_string: (str) A string representing a numerical range or a list of
        numerical ranges. See the help info of the -r flag of the print_tensor
        command for more details.

    Returns:
      An instance of tensor_format.HighlightOptions, if ranges_string is a valid
      representation of a range or a list of ranges.
    """
    ranges = None

    def ranges_filter(x):
        r = np.zeros(x.shape, dtype=bool)
        for range_start, range_end in ranges:
            r = np.logical_or(r, np.logical_and(x >= range_start, x <= range_end))
        return r

    if ranges_string:
        ranges = command_parser.parse_ranges(ranges_string)
        return tensor_format.HighlightOptions(
            ranges_filter, description=ranges_string)
    else:
        return None
263c6b11d123277e6a2636c482ba045751a8863d
35,354
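The filtering logic is easy to exercise on its own; a standalone numpy sketch of the closure above, assuming the ranges have already been parsed into (start, end) pairs:

import numpy as np

ranges = [(-1.0, 0.0), (5.0, 10.0)]

def ranges_filter(x):
    r = np.zeros(x.shape, dtype=bool)
    for range_start, range_end in ranges:
        r = np.logical_or(r, np.logical_and(x >= range_start, x <= range_end))
    return r

print(ranges_filter(np.array([-2.0, -0.5, 3.0, 7.0])))  # [False  True False  True]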
def have_instance(nova_connection: NovaConnection, instance_name: str):
    """
    Check if instance_name exists in the same region as nova_connection

    :param nova_connection: NovaConnection
    :param instance_name: str containing the instance name
    :return: bool
    """
    for server in nova_connection.connection.servers.list():
        server_info = dict(server.to_dict())
        if 'name' in server_info and server_info['name'] == instance_name:
            return True
    return False
4174a3817301007f3cb5eaab02bfc8c14b7526fd
35,355
import numpy as np
import torch


def torchify_dict(data: dict):
    """
    Transform np.ndarrays to torch.tensors.

    Parameters
    ----------
    data : dict
        property data of np.ndarrays.

    References
    ----------
    .. [1] https://github.com/ken2403/schnetpack/blob/6617dbf4edd1fc4d4aae0c984bc7a747a4fe9c0c/src/schnetpack/data/atoms.py
    """
    torch_properties = {}
    for pname, prop in data.items():

        if prop.dtype in [np.int32, np.int64]:
            torch_properties[pname] = torch.LongTensor(prop)
        elif prop.dtype in [np.float32, np.float64]:
            torch_properties[pname] = torch.FloatTensor(prop.copy())
        else:
            raise CellDataError(
                "Invalid datatype {} for property {}!".format(type(prop), pname)
            )

    return torch_properties
fbfc2a05e6e6710bae1aee8487c277f463b58225
35,356
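A minimal usage sketch of torchify_dict with a small property dict (reusing the numpy and torch imports from the snippet above):

props = {
    'numbers': np.array([1, 8, 8], dtype=np.int64),
    'positions': np.zeros((3, 3), dtype=np.float32),
}
tensors = torchify_dict(props)
print(tensors['numbers'].dtype)    # torch.int64
print(tensors['positions'].dtype)  # torch.float32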
import tensorflow as tf


def select_black_ou(board):
    """
    Used to compute the valid reach of the king for the side to move

    :param board:
    :return:
    """
    # To check whether the king is in check by a knight, pseudo-compute the
    # knight's reach starting from the king's square
    # The king's normal moves are also computed from this
    # Since this is used twice, register it in a collection
    name = 'black_short_ou'
    collection = tf.get_collection_ref(name)
    if len(collection) == 0:
        selected = tf.to_float(tf.equal(board, Piece.BLACK_OU))
        tf.add_to_collection(name, selected)
    else:
        selected = collection[0]
    return selected
7b30d104b6fb916b7b70025f7812dc2fd26a4bad
35,358
def bytes_to_decimal_bytes(bytes_decimal_str, is_little_endian=False):
    """
    :param bytes_decimal_str: string of decimal digits, consumed two at a time
    :param is_little_endian: byte-order flag (not used by the current implementation)
    :return: the corresponding bytes object
    """
    if not bytes_decimal_str.isdigit():
        raise Exception('bytes_decimal_str is not a numeric string!')
    length = len(bytes_decimal_str)
    tmp_list = [bytes_decimal_str[i:i + 2] for i in range(length) if (i % 2 == 0)]
    hex_list = list()
    for byte_str in tmp_list:
        h_str = hex(int(byte_str))
        hex_list.append(h_str[2:].zfill(2))
    byte_str = bytes.fromhex(''.join(hex_list))
    # print(byte_str)
    return byte_str
2a5dbbfe643919c20e764268ca4046151d80eaf2
35,359
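A minimal usage sketch: each two-digit decimal chunk becomes a single byte (assuming the function above is in scope):

print(bytes_to_decimal_bytes('072105'))  # b'\x07\x15\x05' (bytes 7, 21, 5)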
from .. import sim
from ..support.morlet import MorletSpec, index2ms
from scipy import signal as spsig
import numpy as np
from matplotlib import mlab


def prepareSpectrogram(
    sim=None,
    timeRange=None,
    electrodes=['avg', 'all'],
    pop=None,
    LFPData=None,
    NFFT=256,
    noverlap=128,
    nperseg=256,
    minFreq=1,
    maxFreq=100,
    stepFreq=1,
    smooth=0,
    includeAxon=True,
    logy=False,
    normSignal=False,
    normPSD=False,
    normSpec=False,
    filtFreq=False,
    filtOrder=3,
    detrend=False,
    transformMethod='morlet',
    **kwargs
):
    """
    Function to prepare data for plotting the spectrogram
    """

    data = prepareLFP(
        sim=sim,
        timeRange=timeRange,
        electrodes=electrodes,
        pop=pop,
        LFPData=LFPData,
        NFFT=NFFT,
        noverlap=noverlap,
        nperseg=nperseg,
        minFreq=minFreq,
        maxFreq=maxFreq,
        stepFreq=stepFreq,
        smooth=smooth,
        includeAxon=includeAxon,
        logy=logy,
        normSignal=normSignal,
        normPSD=normPSD,
        normSpec=normSpec,
        filtFreq=filtFreq,
        filtOrder=filtOrder,
        detrend=detrend,
        transformMethod=transformMethod,
        **kwargs)

    print('Preparing spectrogram data...')

    if not sim:
        from .. import sim  # fall back to the module-level sim instance

    if not timeRange:
        timeRange = [0, sim.cfg.duration]

    lfps = np.array(data['electrodes']['lfps'])
    names = data['electrodes']['names']
    electrodes = data['electrodes']

    spect_data = {}
    spect_data['vmin'] = None
    spect_data['vmax'] = None

    # Morlet wavelet transform method
    if transformMethod == 'morlet':

        fs = int(1000.0 / sim.cfg.recordStep)

        spec = []
        spect_data['morlet'] = []
        spect_data['extent'] = []

        freqList = None
        if logy:
            freqList = np.logspace(np.log10(minFreq), np.log10(maxFreq), int((maxFreq - minFreq) / stepFreq))

        for i, elec in enumerate(names):
            lfp_elec = lfps[i, :]
            t_spec = np.linspace(0, index2ms(len(lfp_elec), fs), len(lfp_elec))
            spec.append(MorletSpec(lfp_elec, fs, freqmin=minFreq, freqmax=maxFreq, freqstep=stepFreq, lfreq=freqList))

        vmin = np.array([s.TFR for s in spec]).min()
        vmax = np.array([s.TFR for s in spec]).max()

        if normSpec:
            vmin = 0
            vmax = 1

        spect_data['vmin'] = vmin
        spect_data['vmax'] = vmax

        for i, elec in enumerate(names):
            T = timeRange
            F = spec[i].f
            if normSpec:
                S = spec[i].TFR / vmax
            else:
                S = spec[i].TFR
            spect_data['morlet'].append(S)
            spect_data['extent'].append([np.amin(T), np.amax(T), np.amin(F), np.amax(F)])

    # FFT transform method
    elif transformMethod == 'fft':

        spect_data['fft'] = []

        for i, elec in enumerate(names):
            lfp_elec = lfps[:, i]
            fs = int(1000.0 / sim.cfg.recordStep)
            f, t_spec, x_spec = spsig.spectrogram(
                lfp_elec, fs=fs, window='hanning', detrend=mlab.detrend_none,
                nperseg=nperseg, noverlap=noverlap, nfft=NFFT, mode='psd')
            x_mesh, y_mesh = np.meshgrid(t_spec * 1000.0, f[f < maxFreq])
            spect_data['fft'].append(10 * np.log10(x_spec[f < maxFreq]))

        vmin = np.array(spect_data['fft']).min()
        vmax = np.array(spect_data['fft']).max()
        spect_data['vmin'] = vmin
        spect_data['vmax'] = vmax
        spect_data['xmesh'] = x_mesh
        spect_data['ymesh'] = y_mesh

        # for i, elec in enumerate(electrodes):
        #     plt.pcolormesh(x_mesh, y_mesh, spec[i], cmap=cm.viridis, vmin=vmin, vmax=vmax)

    data['electrodes']['spectrogram'] = spect_data

    return data
209f0f58878a36184d93824d84e07fa4e00a146b
35,361
def mock_func_call(*args, **kwargs):
    """
    Mock function to be used instead of benchmark
    """
    options = Options()
    cost_func = make_cost_function()

    results = []
    result_args = {'options': options,
                   'cost_func': cost_func,
                   'jac': 'jac',
                   'hess': 'hess',
                   'initial_params': [],
                   'params': [],
                   'error_flag': 4}
    result = fitbm_result.FittingResult(**result_args)
    results.append(result)

    failed_problems = []
    unselected_minimizers = {}
    return results, failed_problems, unselected_minimizers
9dd8c1e928f649f8acea70e423f08df29fb9c59c
35,362
import numpy as N
from numpy import linalg as lin


def _full_gauss_den(x, mu, va, log):
    """This function is the actual implementation
    of the gaussian pdf in the full matrix case.

    It assumes all args are conformant, so it should
    not be used directly. Call gauss_den instead.

    Does not check whether va is positive definite (or invertible,
    for that matter), so the inverse computation and/or determinant
    may throw an exception."""
    d = mu.size
    inva = lin.inv(va)
    fac = 1 / N.sqrt((2 * N.pi) ** d * N.fabs(lin.det(va)))

    # we are using a trick with sum to "emulate"
    # the matrix multiplication inva * x without any explicit loop
    # y = -0.5 * N.sum(N.dot((x-mu), inva) * (x-mu), 1)
    y = -0.5 * N.dot(N.dot((x - mu), inva) * (x - mu),
                     N.ones((mu.size, 1), x.dtype))[:, 0]

    if not log:
        y = fac * N.exp(y)
    else:
        y = y + N.log(fac)

    return y
6828755f0dc526009babe48ec31b14977e1187eb
35,363
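A quick cross-check of _full_gauss_den against scipy's reference implementation (np and multivariate_normal are imported here just for the check):

import numpy as np
from scipy.stats import multivariate_normal

mu = np.array([0.5, -1.0])
va = np.array([[2.0, 0.3], [0.3, 1.0]])
x = np.random.normal(size=(5, 2))
print(np.allclose(_full_gauss_den(x, mu, va, log=False),
                  multivariate_normal(mean=mu, cov=va).pdf(x)))  # True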
import arcade


def load_texture_pair(filename):
    """
    Load a texture and its horizontally mirrored pair.
    """
    return [
        arcade.load_texture(filename),
        arcade.load_texture(filename, mirrored=True)
    ]
20cd31cc09dfc6a503e678135b1de406263c8719
35,364
def set_field_value(context, field_value):
    """Populates the given field value into the template context."""
    if field_value:
        context['field_value'] = field_value
    else:
        context['field_value'] = ''
    return ''
68110380f244b78550a04d08ad9bda5df193211e
35,365
import numpy as np
from keras import backend as K  # assumed import; the original module supplies `K`


def get_bbox(src_bbox, offset):
    """Apply offset to src_bbox to recover the original region.

    Used, for example, to apply offset predictions obtained from an RPN
    to anchor boxes in order to produce region proposals.

    Args:
        src_bbox (tensor / ndarray): BoundingBox the offsets are applied to.
            Its shape is :math:`(R, 4)`.
            The second axis holds the BBox coordinates in the order
            :math:`p_{ymin}, p_{xmin}, p_{ymax}, p_{xmax}`.
        offset (tensor / ndarray): Offsets.
            Same shape as src_bbox.
            The second axis holds the offsets
            :math:`t_y, t_x, t_h, t_w`, where
            tx = (x - xa)/wa, ty = (y - ya)/ha, tw = log(w/wa), th = log(h/ha)
            (each is an offset from the anchor; "x" is the predicted
            region's center x and "xa" is the anchor's center x).

    Returns:
        tensor: BoundingBox with the offsets applied.
            Same shape as src_bbox.
            The first axis carries the same information as src_bbox.
            The second axis holds the adjusted coordinates
            :math:`\\hat{g}_{ymin}, \\hat{g}_{xmin}, \\hat{g}_{ymax}, \\hat{g}_{xmax}`.
    """
    if type(src_bbox) == np.ndarray and type(offset) == np.ndarray:
        xp = np
    else:
        xp = K

    if src_bbox.shape[0] == 0:
        return xp.zeros((0, 4), dtype=offset[:, 0].dtype)

    # Convert src_bbox (e.g. anchors) from top-left / bottom-right corners
    # to center coordinates + height + width
    src_height = src_bbox[:, 2] - src_bbox[:, 0]
    src_width = src_bbox[:, 3] - src_bbox[:, 1]
    src_ctr_y = src_bbox[:, 0] + 0.5 * src_height
    src_ctr_x = src_bbox[:, 1] + 0.5 * src_width

    # Split the offsets into center, height and width components
    dy = offset[:, 0]
    dx = offset[:, 1]
    dh = offset[:, 2]
    dw = offset[:, 3]

    # Invert the offset formulas from the paper:
    #   tx = (x - xa)/wa, ty = (y - ya)/ha, tw = log(w/wa), th = log(h/ha)
    # where each t is an offset from the anchor, "x" is the predicted
    # region's center x and "xa" is the anchor's center x.
    ctr_y = dy * src_height + src_ctr_y
    ctr_x = dx * src_width + src_ctr_x
    h = xp.exp(dh) * src_height
    w = xp.exp(dw) * src_width

    # Convert back to top-left and bottom-right corner coordinates
    ymin = ctr_y - 0.5 * h
    xmin = ctr_x - 0.5 * w
    ymax = ctr_y + 0.5 * h
    xmax = ctr_x + 0.5 * w

    bbox = xp.transpose(xp.stack((ymin, xmin, ymax, xmax), axis=0))
    return bbox
3afc2cacbd86a6b507c14b7623a1759148ca71fc
35,366
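A quick numpy sketch of get_bbox: zero offsets return the anchors unchanged, while log-scale height/width offsets grow the box around its center:

anchors = np.array([[0.0, 0.0, 10.0, 10.0]])
print(get_bbox(anchors, np.zeros((1, 4))))
# [[ 0.  0. 10. 10.]]
print(get_bbox(anchors, np.array([[0.0, 0.0, np.log(2.0), np.log(2.0)]])))
# [[-5. -5. 15. 15.]]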
def show_books(object_list):
    """
    Load the template for the given list of books.

    :param object_list: a list of Book model instances
    :return: a dict used as the template context
    """
    if len(object_list) > 0:
        try:
            getattr(object_list[0], 'object')
        except AttributeError:
            pass
        else:
            object_list = map(lambda ele: ele.object, object_list)

    context = {
        'books_list': object_list,
    }
    return context
034707460c73eed6e69578726c860ee55a070ac6
35,367
def load_mesh_from_file(filepath, color=None, alpha=None):
    """
    Load a mesh or volume from files like .obj, .stl, ...

    :param filepath: path to file
    :param color: color applied to the loaded actor
    :param alpha: opacity applied to the loaded actor
    """
    actor = load(str(filepath))
    actor.c(color).alpha(alpha)

    return actor
49d4a1aa576d2df3d18647f0b48cd84662f9f713
35,368
import six


def find_html_form(forms, form_match):
    # type: (Dict[AnyKey, Form], FormSearch) -> Optional[Form]
    """
    Searches for the specified form amongst a group of multiple forms.

    :param forms: Possible forms to distinguish and look for a specific one.
    :param form_match:
        Search criteria to retrieve the specific form.
        Can be a form name, the form index (from all available forms on page) or
        an iterable of key/values of form fields to search for a match
        (first match is used if many are available).
        Also, can be directly the targeted form if already retrieved
        (pass-through operation).
    :return: matched form or ``None`` when not found.
    """
    form = None
    # direct instance match
    if isinstance(form_match, Form):
        form = form_match
    # match by name or index
    elif isinstance(form_match, (int, six.string_types)):
        form = forms[form_match]
    else:
        # select form if all key/value pairs specified match the current one
        for f in forms.values():
            f_fields = [(fk, fv[0].value) for fk, fv in f.fields.items()]
            if all((mk, mv) in f_fields for mk, mv in form_match.items()):
                form = f
                break
    return form
7168a9b2734a7f67bb0e490700689c5e92f18b6e
35,369
import re
from collections import OrderedDict


def parse_dict(s, sep=None):
    """
    parser for (ordered) dicts

    :s: the input string to parse, which should be of the format
        key1 := value1,
        key2 := value2,
        key3 := value3,
        where the last comma is optional
    :sep: separator (default is ':=')
    :returns: OrderedDict(('key1', 'value1'), ...)
    """
    if sep is None:
        sep = ":="

    # deal with the comma on the last line
    s_split = s.split(",")
    if re.match(r"^\s*$", s_split[-1]):
        del s_split[-1]

    # now deal with the dict
    return OrderedDict(
        map(
            lambda s2: tuple(map(lambda s3: s3.strip(), s2.split(sep))),
            s_split
        )
    )
dcfdec6dcc68661f5d27f49a280326bec6cfd90b
35,370
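A minimal usage sketch of parse_dict with the default and a custom separator:

print(dict(parse_dict('alpha := 1, beta := 2,')))
# {'alpha': '1', 'beta': '2'}
print(dict(parse_dict('a = 1, b = 2', sep='=')))
# {'a': '1', 'b': '2'}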
import numpy as np


def compute_pdi(value_matrix, slice_list):
    """
    Computes a 'preference' discordance index

    :param value_matrix:
    :param slice_list:
    :return:
    """
    pdi = ['x']
    pdimax = ['x']
    for i in slice_list:
        incident = compute_incident(value_matrix, i)
        complement = invert_matrix(incident)
        g = sparse_graph(complement, 1)
        Q = dowker_relation(g)
        strct = Q.diagonal()
        tmp = np.zeros(len(strct))
        tmax = np.zeros(len(strct))
        pmax = len(strct) - 1
        for j in range(len(Q[0])):
            q = []
            for k in range(len(Q[0][j])):
                if k != j:  # equality check, not identity, for integer indices
                    q.append(Q[0][j][k])
            if strct[j] < 0:
                tmp[j] = 0
                tmax[j] = pmax
            else:
                val = strct[j] - max(q)
                tmp[j] = val
                tmax[j] = pmax
        pdi.append(tmp)
        pdimax.append(tmax)
    pdi = np.array(pdi[1:]).sum(axis=0)
    pdimax = np.array(pdimax[1:]).sum(axis=0)
    pdi = np.divide(pdi, pdimax)
    return pdi
e4dafa4259a0d8ecdd496a18cca6dc7cd44c69ad
35,371
def sublist_search(array1, array2):
    """
    :param array1: sublist
    :param array2: list
    :return: Whether list contains sublist

    Time complexity: O(n * m)
    """
    # Empty array
    if len(array1) == 0:
        return True

    for start_idx in range(len(array2)):
        if check_is_contained(array1, array2, start_idx):
            return True

    return False
905d89b1a831e5f3d0ae2ee3c7fdbe6956bbd507
35,372
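sublist_search leans on a check_is_contained helper that is not part of this snippet; a minimal sketch of what it presumably does (the helper body here is an assumption, only its name comes from the code above):

def check_is_contained(array1, array2, start_idx):
    # Does array2 match array1 element by element starting at start_idx?
    if start_idx + len(array1) > len(array2):
        return False
    return all(array2[start_idx + i] == v for i, v in enumerate(array1))

print(sublist_search([2, 3], [1, 2, 3, 4]))  # True
print(sublist_search([3, 2], [1, 2, 3, 4]))  # False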
from ast import AST


def get_instance_names(node: AST) -> list[str]:
    """Extract names from an assignment node, only for instance attributes.

    Parameters:
        node: The node to extract names from.

    Returns:
        A list of names.
    """
    return [name.split(".", 1)[1] for name in get_names(node) if name.startswith("self.")]
c96d4e0b5a79845f695e8ee6c48a1db544670692
35,374
def is_geq_than(x, y):
    """x is not None and greater than or equal to y"""
    if x is not None:
        if x >= y:
            return True
    return False
706e801cb23673ed094e330ee8582aaf14fe85c3
35,375