Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
import numpy as np

def create_training_patches(images, patch_size, patches_per_image=1, patch_stride=None):
    """
    Returns a batch of image patches, given a batch of images.

    Args:
        images (list, numpy.array): Batch of images.
        patch_size (tuple, list): The (width, height) of the patch to return.
        patches_per_image (int): Number of random patches to generate from each
            image in the input batch. Default is 1.
        patch_stride (int): Stride to use in strided patching. Default is None,
            which does not use strided patching. If an integer is passed then
            strided patching will be used regardless of what is passed to
            'patches_per_image'.

    Returns:
        (numpy.array): Batch of image patches.
    """
    image_patches = []
    for im in images:
        if patch_stride is None:
            for i in range(patches_per_image):
                image_patches.append(get_random_patch(im, patch_size))
        else:
            image_patches += list(get_stride_patches(im, patch_size, patch_stride, 2))
    return np.array(image_patches)
5fce19d2d13f790500e0cbd42934dd6e83c6b084
12,573
import time

def get_prover_options(prover_round_tag='manual',
                       prover_round=-1) -> deephol_pb2.ProverOptions:
    """Returns a ProverOptions proto based on FLAGS."""
    if not FLAGS.prover_options:
        tf.logging.fatal('Mandatory flag --prover_options is not specified.')
    if not tf.gfile.Exists(FLAGS.prover_options):
        tf.logging.fatal('Required prover options file "%s" does not exist.',
                         FLAGS.prover_options)
    prover_options = deephol_pb2.ProverOptions()
    if FLAGS.max_theorem_parameters is not None:
        tf.logging.warning(
            'Overriding max_theorem_parameters in prover options to %d.',
            FLAGS.max_theorem_parameters)
        prover_options.action_generator_options.max_theorem_parameters = (
            FLAGS.max_theorem_parameters)
    with tf.gfile.Open(FLAGS.prover_options) as f:
        text_format.MergeLines(f, prover_options)
    if prover_options.builtin_library:
        tf.logging.warning('builtin_library is deprecated. Do not provide.')
        if str(prover_options.builtin_library) not in ['core']:
            tf.logging.fatal('Unsupported built in library: %s',
                             prover_options.builtin_library)
    if FLAGS.timeout_seconds is not None:
        prover_options.timeout_seconds = FLAGS.timeout_seconds
    if not FLAGS.output:
        tf.logging.fatal('Missing flag --output [recordio_pattern]')
    prover_options.prover_round = deephol_pb2.ProverRound(
        start_seconds=int(round(time.time())),
        tag=prover_round_tag,
        round=prover_round)
    _verify_prover_options(prover_options)

    # Log prover options.
    tf.logging.info('Using prover_options:\n %s', str(prover_options))
    return prover_options
ee1ee9fb7ce573c543f0750d6b8fd1eed98deec9
12,574
from re import findall

def bracketpy(pystring):
    """Find CEDICT-style pinyin in square brackets and correct pinyin.

    Looks for square brackets in the string and tries to convert their
    contents to correct pinyin. It is assumed anything in square brackets
    is CC-CEDICT-format pinyin. e.g.: "拼音[pin1 yin1]" will be converted
    into "拼音 pīnyīn".
    """
    cedpylist = findall(r"(\[.+?\])", pystring)
    for item in cedpylist:
        pystring = pystring.replace(item, " " + pyjoin(item[1:-1]))
    return pystring
0499047ec45e6b9c66c27dd89663667b13ddbfb1
12,575
import psutil

def get_ram_usage_bytes(size_format: str = 'M'):
    """ Size formats include K = Kilobyte, M = Megabyte, G = Gigabyte """
    total = psutil.virtual_memory().total
    available = psutil.virtual_memory().available
    used = total - available

    # Apply size
    if size_format == 'K':
        used = used / 1024
    if size_format == 'M':
        used = used / 1024 / 1024
    if size_format == 'G':
        used = used / 1024 / 1024 / 1024

    return int(used)
990987078b0ad3c2ac2ee76dcf96b7cdf01f0354
12,576
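A quick usage sketch for get_ram_usage_bytes above; printed values naturally depend on the host machine:

print(get_ram_usage_bytes('M'), 'MB in use')
print(get_ram_usage_bytes('G'), 'GB in use')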
# Assumed imports: K as the tensorflow.keras backend, per the docstring below.
import numpy as np
from tensorflow.keras import backend as K

def weighted_crossentropy(weights, name='anonymous'):
    """A weighted version of tensorflow.keras.objectives.categorical_crossentropy

    Arguments:
        weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal
            weights, class 3 10x.
        name: string identifying the loss to differentiate when models have
            multiple losses

    Returns:
        keras loss function named name+'_weighted_loss'
    """
    string_globe = 'global ' + name + '_weights\n'
    string_globe += 'global ' + name + '_kweights\n'
    string_globe += name + '_weights = np.array(weights)\n'
    string_globe += name + '_kweights = K.variable(' + name + '_weights)\n'
    exec(string_globe, globals(), locals())

    fxn_postfix = '_weighted_loss'
    string_fxn = 'def ' + name + fxn_postfix + '(y_true, y_pred):\n'
    string_fxn += '\ty_pred /= K.sum(y_pred, axis=-1, keepdims=True)\n'
    string_fxn += '\ty_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\n'
    string_fxn += '\tloss = y_true * K.log(y_pred) * ' + name + '_kweights\n'
    string_fxn += '\tloss = -K.sum(loss, -1)\n'
    string_fxn += '\treturn loss\n'
    exec(string_fxn, globals(), locals())

    loss_fxn = eval(name + fxn_postfix, globals(), locals())
    return loss_fxn
071ab00a723d54194b4b27c889c638debe82f10a
12,577
def _get_package_type(id):
    """
    Given the id of a package this method will return the type of the
    package, or 'dataset' if no type is currently set
    """
    pkg = model.Package.get(id)
    if pkg:
        return pkg.type or u'dataset'
    return None
c84e137e2b0adaf8719d757f178aa47d4a63c46a
12,578
def _find_protruding_dimensions(f, care, fol):
    """Return variables along which `f` violates `care`."""
    vrs = joint_support([f, care], fol)
    dims = set()
    for var in vrs:
        other_vars = vrs - {var}
        f_proj = fol.exist(other_vars, f)
        care_proj = fol.exist(other_vars, care)
        if (care_proj | ~f_proj) != fol.true:
            dims.add(var)
    return dims
02da5718645652288c9fa6fcedc07198afe49a58
12,579
import random

import numpy as np

def simulate_relatedness(genotypes, relatedness=.5, n_iter=1000, copy=True):
    """
    Simulate relatedness by randomly copying genotypes between individuals.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) where each
        element of the array is an integer corresponding to an allele index
        (-1 = missing, 0 = reference allele, 1 = first alternate allele,
        2 = second alternate allele, etc.).
    relatedness : float, optional
        Fraction of variants to copy genotypes for.
    n_iter : int, optional
        Number of times to randomly copy genotypes between individuals.
    copy : bool, optional
        If False, modify `genotypes` in place.

    Returns
    -------
    genotypes : ndarray, shape (n_variants, n_samples, ploidy)
        The input genotype array but with relatedness simulated.
    """
    # check genotypes array
    genotypes = np.asarray(genotypes)
    assert genotypes.ndim >= 2
    n_variants = genotypes.shape[0]
    n_samples = genotypes.shape[1]

    # copy input array
    if copy:
        genotypes = genotypes.copy()
    else:
        # modify in place
        pass

    # determine the number of variants to copy genotypes for
    n_copy = int(relatedness * n_variants)

    # iteratively introduce relatedness
    for i in range(n_iter):
        # randomly choose donor and recipient
        donor_index = random.randint(0, n_samples-1)
        donor = genotypes[:, donor_index]
        recip_index = random.randint(0, n_samples-1)
        recip = genotypes[:, recip_index]

        # randomly pick a set of variants to copy
        variant_indices = random.sample(range(n_variants), n_copy)

        # copy across genotypes
        recip[variant_indices] = donor[variant_indices]

    return genotypes
e319f4e15c4c08eb90260b77efc25ea330aac4c9
12,581
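A small self-contained usage sketch for simulate_relatedness; the shapes and values are illustrative only:

import numpy as np

g = np.random.randint(0, 2, size=(100, 10, 2))  # 100 variants, 10 diploid samples
related = simulate_relatedness(g, relatedness=0.5, n_iter=50)
assert related.shape == g.shape  # input left untouched because copy=True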
def pages_substitute(content):
    """
    Substitute tags in pages source.
    """
    if TAG_USERGROUPS in content:
        usergroups = UserGroup.objects.filter(is_active=True).order_by('name')
        replacement = ", ".join(f"[{u.name}]({u.webpage_url})" for u in usergroups)
        content = content.replace(TAG_USERGROUPS, replacement)
    return content
13c2138256a0e1afa0ad376994849f2716020540
12,582
def vcfanno(vcf, out_file, conf_fns, data, basepath=None, lua_fns=None):
    """
    annotate a VCF file using vcfanno (https://github.com/brentp/vcfanno)
    """
    if utils.file_exists(out_file):
        return out_file
    if lua_fns is None:
        lua_fns = []
    vcfanno = config_utils.get_program("vcfanno", data)
    with file_transaction(out_file) as tx_out_file:
        conffn = _combine_files(conf_fns, tx_out_file)
        luafn = _combine_files(lua_fns, tx_out_file)
        luaflag = "-lua {0}".format(luafn) if luafn and utils.file_exists(luafn) else ""
        basepathflag = "-base-path {0}".format(basepath) if basepath else ""
        cores = dd.get_num_cores(data)
        cmd = "{vcfanno} -p {cores} {luaflag} {basepathflag} {conffn} {vcf} | sed -e 's/Number=A/Number=1/g' | bgzip -c > {tx_out_file}"
        message = "Annotating {vcf} with vcfanno, using {conffn}".format(**locals())
        do.run(cmd.format(**locals()), message)
    return out_file
808488bd07c56b541694715193df4ae1cb51869c
12,583
def clean(params: dict) -> str:
    """
    Build clean rules for Makefile
    """
    clean = "\t@$(RM) -rf $(BUILDDIR)\n"
    if params["library_libft"]:
        clean += "\t@make $@ -C " + params["folder_libft"] + "\n"
    if params["library_mlx"] and params["compile_mlx"]:
        clean += "\t@make $@ -C " + params["folder_mlx"] + "\n"
    return clean
fb7dd0e7a2fbb080dd8b0d5d4489e9c5ef1367ec
12,584
import re

def mathematica(quero: str, meta: str = '') -> bool:
    """mathematica
    Rudimentary mathematical operations (boolean result)

    Args:
        quero (str): the expression to evaluate.
        meta (str, optional): extra context for the error message. Defaults to ''.

    Returns:
        bool: True if the expression evaluates to True.
    """
    # neo_quero = quero.replace(' ', '').replace('(', '').replace(')', '')
    neo_quero = quero.replace(' ', '')
    if quero == 'True':
        return True
    if quero == 'False':
        return False
    if neo_quero.find('&&') > -1:
        parts = neo_quero.split('&&')
        # print(parts)
        # return bool(parts[0]) and bool(parts[1])
        return logicum(parts[0]) and logicum(parts[1])
    if neo_quero.find('||') > -1:
        parts = neo_quero.split('||')
        # return bool(parts[0]) or bool(parts[1])
        return logicum(parts[0]) or logicum(parts[1])
    # regula = r"(\d*)(.{1,2})(\d*)"
    regula = r"(?P<n1>(\d*))(?P<op>(\D{1,2}))(?P<n2>(\d*))"
    r1 = re.match(regula, neo_quero)
    if r1.group('op') == '==':
        return int(r1.group('n1')) == int(r1.group('n2'))
    if r1.group('op') == '!=':
        return int(r1.group('n1')) != int(r1.group('n2'))
    if r1.group('op') == '<=':
        return int(r1.group('n1')) <= int(r1.group('n2'))
    if r1.group('op') == '>=':
        return int(r1.group('n1')) >= int(r1.group('n2'))
    if r1.group('op') == '<':
        return int(r1.group('n1')) < int(r1.group('n2'))
    if r1.group('op') == '>':
        return int(r1.group('n1')) > int(r1.group('n2'))
    raise ValueError(
        'mathematica: <quaero> [{1}] <op>? [{0}]'.format(str(quero), meta))
b05777e880688d8f3fc90ba9d54098f341054bd7
12,585
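A few sanity checks for mathematica; the comparison branches need nothing beyond the function itself (the '&&'/'||' branches additionally rely on the external logicum helper):

assert mathematica('2 < 3')
assert mathematica('10 >= 10')
assert not mathematica('4 != 4')
assert not mathematica('False')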
def time_pet(power, energy):
    """Usage: time_pet(power, energy)"""
    return energy / power
11e9c82b8c1be84995f9517e04ed5e1270801e27
12,587
def compute_sigma0(T, S, **kwargs):
    """compute the density anomaly referenced to the surface"""
    return compute_rho(T, S, 0, **kwargs) - 1000
300d5552c70e6fd6d8708345aa3eed53795309cf
12,588
import numpy as np
from scipy.sparse import csc_matrix

def get_neighbors_radius(nelx, nely, coord, connect, radius):
    """ Check neighboring elements that have the centroid within the predetermined radius.

    Args:
        nelx (:obj:`int`): Number of elements on the x axis.
        nely (:obj:`int`): Number of elements on the y axis.
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        radius (:obj:`float`): Radius to get elements in the vicinity of each element.

    Returns:
        neighbors, H, centroids
    """
    el_number = nelx * nely

    centroids = np.empty((el_number, 2))
    idx = connect[:, 1:] - 1
    centroids[:, 0] = np.sum(coord[idx, 1], axis=1) / 4
    centroids[:, 1] = np.sum(coord[idx, 2], axis=1) / 4

    ind_rows = []
    ind_cols = []
    data = []
    cols = 0
    neighbors = []
    for el in range(el_number):
        distance = np.sqrt(np.sum((centroids[el] - centroids)**2, axis=1))
        mask = distance <= radius
        neighbor = mask.nonzero()[0] + 1
        neighbors.extend(neighbor - 1)

        hi = radius - distance
        hi_max = np.maximum(0, hi)
        data.extend(hi_max[mask])
        aux = len(hi_max[mask])

        rows = np.repeat(el, aux)
        columns = np.arange(0, aux)
        ind_rows.extend(rows)
        ind_cols.extend(columns)

        if aux > cols:
            cols = aux

    H = csc_matrix((data, (ind_rows, ind_cols)),
                   shape=(nelx*nely, cols)).toarray()
    neighbors = csc_matrix((neighbors, (ind_rows, ind_cols)),
                           shape=(nelx*nely, cols), dtype='int').toarray()
    return neighbors, H, centroids
669e11d3a2890f1485e33e021b2671ff6f197c03
12,589
import copy

def merge_with(obj, *sources, **kwargs):
    """
    This method is like :func:`merge` except that it accepts customizer which is
    invoked to produce the merged values of the destination and source properties.
    If customizer returns ``None``, merging is handled by this method instead. The
    customizer is invoked with five arguments:
    ``(obj_value, src_value, key, obj, source)``.

    Args:
        obj (dict): Destination object to merge source(s) into.
        sources (dict): Source objects to merge from. Subsequent sources
            overwrite previous ones.

    Keyword Args:
        iteratee (callable, optional): Iteratee function to handle merging
            (must be passed in as keyword argument).

    Returns:
        dict: Merged object.

    Warning:
        `obj` is modified in place.

    Example:

        >>> cbk = lambda obj_val, src_val: obj_val + src_val
        >>> obj1 = {'a': [1], 'b': [2]}
        >>> obj2 = {'a': [3], 'b': [4]}
        >>> res = merge_with(obj1, obj2, cbk)
        >>> obj1 == {'a': [1, 3], 'b': [2, 4]}
        True

    .. versionadded:: 4.0.0

    .. versionchanged:: 4.9.3
        Fixed regression in v4.8.0 that caused exception when `obj` was ``None``.
    """
    if obj is None:
        return None

    sources = list(sources)
    iteratee = kwargs.pop("iteratee", None)

    if iteratee is None and sources and callable(sources[-1]):
        iteratee = sources.pop()

    sources = [copy.deepcopy(source) for source in sources]

    if callable(iteratee):
        iteratee = partial(callit, iteratee, argcount=getargcount(iteratee, maxargs=5))
    else:
        iteratee = None

    return _merge_with(obj, *sources, iteratee=iteratee, **kwargs)
94a16ae7d3f3e73ef8e27b32cd38a09c61ad1b2b
12,590
def count_class_nbr_patent_cnt(base_data_list, calculate_type):
    """
    Count the number of patents for each classification number across all the data.
    :param base_data_list:
    :return:
    """
    class_number_patent_cnt_dict = dict()
    for base_data in base_data_list:
        class_number_value = base_data[const.CLASS_NBR]
        calculate_class_number_patent_count_dict(class_number_value,
                                                 class_number_patent_cnt_dict,
                                                 calculate_type)
    return class_number_patent_cnt_dict
6dfe06c2233fbfafc8083dc968d32520564319f8
12,591
def plot_pta_L(df):
    """
    INPUTS
    -df: pandas dataframe containing the data to plot

    OUTPUTS
    -saves pta graphs in .html
    """
    title = generate_title_run_PTA(df, "Left Ear", df.index[0])
    labels = {"title": title,
              "x": "Frequency (Hz)",
              "y": "Hearing Threshold (dB HL)"}
    fig = go.Figure()
    fig.update_layout(title=labels["title"],
                      xaxis_title=labels["x"],
                      yaxis_title=labels["y"],
                      xaxis_type="log",
                      xaxis_range=[np.log10(100), np.log10(20000)],
                      yaxis_range=[80, -20],
                      yaxis_dtick=10,
                      xaxis_showline=True,
                      xaxis_linecolor="black",
                      yaxis_showline=True,
                      yaxis_linecolor="black",
                      yaxis_zeroline=True,
                      yaxis_zerolinewidth=1,
                      yaxis_zerolinecolor="black")
    x, y = data_to_plot_PTA(df, "LE_")
    fig.add_trace(go.Scatter(x=x,
                             y=y,
                             line_color="blue",
                             mode='lines+markers',
                             name=labels["title"],
                             hovertemplate="%{x:1.0f} Hz<br>" +
                                           "%{y:1.0f} dB HL"))
    completed = save_graph_PTA(fig, df, "Left Ear")
    return completed is True
ec82b58a6a476bee5e864b8678bb90d2998d4a02
12,592
def create_graph(edge_num: int, edge_list: list) -> dict:
    """
    Create a graph expressed with adjacency list
    :dict_key   : int (a vertex)
    :dict_value : set (consisting of vertices adjacent to the key vertex)
    """
    a_graph = {i: set() for i in range(edge_num)}
    for a, b in edge_list:
        a_graph[a - 1].add(b - 1)  # every graph needs this line
        a_graph[b - 1].add(a - 1)  # only an undirected graph needs this line
    return a_graph
6ec1a71cf82a3a669090df42ac7d53e1286fda2d
12,593
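A worked example for create_graph; note that edge_num is effectively used as the number of vertices when building the adjacency dict:

g = create_graph(3, [(1, 2), (2, 3), (1, 3)])
# g == {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}  (1-based input, 0-based adjacency sets)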
def get_current_version() -> str:
    """Read the version of the package.

    See https://packaging.python.org/guides/single-sourcing-package-version
    """
    version_exports = {}
    with open(VERSION_FILE) as file:
        exec(file.read(), version_exports)  # pylint: disable=exec-used
    return version_exports["VERSION"]
c283d58881aa381503bb3500bd7d745f25df0f7e
12,595
import random

def seed_story(text_dict):
    """Generate random seed for story."""
    story_seed = random.choice(list(text_dict.keys()))
    return story_seed
0c0f41186f6eaab84a1d197e9335b4c28fd83785
12,596
def _get_rel_att_inputs(d_model, n_heads):  # pylint: disable=invalid-name
    """Global relative attentions bias initialization shared across the layers."""
    assert d_model % n_heads == 0 and d_model % 2 == 0
    d_head = d_model // n_heads

    bias_initializer = init.RandomNormalInitializer(1e-6)
    context_bias_layer = core.Weights(bias_initializer,
                                      shape=(1, n_heads, 1, d_head))
    location_bias_layer = core.Weights(bias_initializer,
                                       shape=(1, n_heads, 1, d_head))
    return context_bias_layer, location_bias_layer
57f58f29a586571f1cc8fa1fc69956a4168cbf16
12,598
def two_time_pad():
    """A one-time pad simply involves the xor of a message with a key to produce a
    ciphertext: c = m ^ k. It is essential that the key be as long as the message,
    or in other words that the key not be repeated for two distinct message blocks.

    Your task: In this problem you will break a cipher when the one-time pad is re-used.

    c_1 = 3801025f45561a49131a1e180702
    c_2 = 07010051455001060e551c571106

    These are two hex-encoded ciphertexts that were formed by applying a "one-time
    pad" to two different messages with the same key. Find the two corresponding
    messages m_1 and m_2.

    Okay, to make your search simpler, let me lay out a few ground rules. First,
    every character in the text is either a lowercase letter or a space, aside from
    perhaps the first character in the first message which might be capitalized. As
    a consequence, no punctuation appears in the messages. Second, the messages
    consist of English words in ASCII. Finally, all of the words within each message
    are guaranteed to come from the set of the 100 most common English words:
    https://en.wikipedia.org/wiki/Most_common_words_in_English.

    Returns:
        Output the concatenation of strings m_1 and m_2. (Don't worry if words get
        smashed together as a result.)
    """
    c_1 = '3801025f45561a49131a1e180702'
    c_2 = '07010051455001060e551c571106'
    # Convert the hexadecimal representation to integers, two hex digits (one
    # byte) at a time, since the xor operations are done on integers.
    c_1_int = [int(c_1[i] + c_1[i+1], 16) for i in range(0, len(c_1), 2)]
    c_2_int = [int(c_2[i] + c_2[i+1], 16) for i in range(0, len(c_1), 2)]
    # xor of the two lists, which are integer representations
    xord = [c_1_int[i] ^ c_2_int[i] for i in range(len(c_1_int))]
    result = construct('', xord)
    if result is None:
        return None
    print(result)
    new_string = ''.join([chr(ord(result[i]) ^ xord[i]) for i in range(len(result))])
    return new_string + result
d4c45312f32b372a065365c78a991969e2bc53be
12,599
def same_datatypes(lst):
    """
    Checks whether a list contains only data of the same type.
    Keys, lengths of the objects etc. are also taken into account.

    :param lst: list to check
    :type lst: list
    :return: Boolean, depending on the outcome of the check
    """
    datatype = type(lst[0]).__name__
    for item in lst:
        if type(item).__name__ != datatype:
            # return False if the list contains different data types
            return False

    # The data types are equal, but are their structures equal too?
    # (for complex data types)
    if datatype == "dict":
        keys = lst[0].keys()
        for item in lst:
            if item.keys() != keys:
                # return False if the keys of the dictionaries differ
                return False
    elif datatype == "list":
        if sum([len(x) for x in lst]) / len(lst) != len(lst[0]):
            # return False if the lists inside the list have different lengths
            return False
        datatypes = list(map(lambda x: type(x).__name__, lst[0]))
        for item in lst:
            if list(map(lambda x: type(x).__name__, item)) != datatypes:
                # return False if the elements of the inner lists have
                # different data types
                return False
    return True
9c49376ec34ed0970171597f77de4c4c224350b4
12,600
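A few illustrative checks for same_datatypes:

assert same_datatypes([1, 2, 3])
assert not same_datatypes([1, 'a'])
assert same_datatypes([{'x': 1}, {'x': 2}])
assert not same_datatypes([{'x': 1}, {'y': 2}])  # same type, different keys
assert same_datatypes([[1, 'a'], [2, 'b']])      # same inner lengths and types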
def _show_stat_wrapper_Progress(count, last_count, start_time, max_count,
                                speed_calc_cycles, width, q, last_speed,
                                prepend, show_stat_function, add_args, i, lock):
    """Calculate the progress statistics and pass them to the display function."""
    count_value, max_count_value, speed, tet, ttg = Progress._calc(
        count, last_count, start_time, max_count, speed_calc_cycles,
        q, last_speed, lock)
    return show_stat_function(count_value, max_count_value, prepend,
                              speed, tet, ttg, width, i, **add_args)
3c98f44acc8de94573ba37a7785df18fc8e72966
12,601
def _to_base58_string(prefixed_key: bytes):
    """
    Convert prefixed_key bytes into Es/EC strings with a checksum

    :param prefixed_key: the EC private key or EC address prefixed with the
        appropriate bytes
    :return: an EC private key string or EC address
    """
    prefix = prefixed_key[:PREFIX_LENGTH]
    assert prefix == ECAddress.PREFIX or prefix == ECPrivateKey.PREFIX, 'Invalid key prefix.'
    temp_hash = sha256(prefixed_key[:BODY_LENGTH]).digest()
    checksum = sha256(temp_hash).digest()[:CHECKSUM_LENGTH]
    return base58.encode(prefixed_key + checksum)
326580e714d6489a193347498c68ef9d90f6f651
12,602
def round_int(n, d):
    """Round a number (float/int) to the closest multiple of a divisor (int)."""
    return round(n / float(d)) * d
372c0f8845994aaa03f99ebb2f65243e6490b341
12,603
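Usage sketch for round_int (ties follow Python's banker's rounding, so these examples avoid exact halves):

assert round_int(23, 5) == 25
assert round_int(22, 5) == 20
assert round_int(7.4, 2) == 8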
def merge_array_list(arg):
    """
    Merge multiple arrays into a single array
    :param arg: lists
    :type arg: list
    :return: The final array
    :rtype: list
    """
    # Check if arg is a list
    if type(arg) != list:
        raise errors.AnsibleFilterError('Invalid value type, should be array')

    final_list = []
    for cur_list in arg:
        final_list += cur_list
    return final_list
649412488655542f27a1e7d377252c060107b57e
12,604
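merge_array_list flattens exactly one level, e.g.:

assert merge_array_list([[1, 2], [3], [4, 5]]) == [1, 2, 3, 4, 5]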
def load_callbacks(boot, bootstrap, jacknife, out, keras_verbose, patience):
    """
    Specifies Keras callbacks, including checkpoints, early stopping,
    and reducing learning rate.

    Parameters
    ----------
    boot
    bootstrap
    jacknife
    out
    keras_verbose
    patience

    Returns
    -------
    checkpointer
    earlystop
    reducelr
    """
    if bootstrap or jacknife:
        checkpointer = tf.keras.callbacks.ModelCheckpoint(
            filepath=out + "_boot" + str(boot) + "_weights.hdf5",
            verbose=keras_verbose,
            save_best_only=True,
            save_weights_only=True,
            monitor="val_loss",
            save_freq="epoch",
        )
    else:
        checkpointer = tf.keras.callbacks.ModelCheckpoint(
            filepath=out + "_weights.hdf5",
            verbose=keras_verbose,
            save_best_only=True,
            save_weights_only=True,
            monitor="val_loss",
            save_freq="epoch",
        )
    earlystop = tf.keras.callbacks.EarlyStopping(
        monitor="val_loss", min_delta=0, patience=patience
    )
    reducelr = tf.keras.callbacks.ReduceLROnPlateau(
        monitor="val_loss",
        factor=0.5,
        patience=int(patience / 6),
        verbose=keras_verbose,
        mode="auto",
        min_delta=0,
        cooldown=0,
        min_lr=0,
    )
    return checkpointer, earlystop, reducelr
0ca09ccaca4424c1a546caade3d809b7f69cbb5e
12,605
def build_sentence_model(cls, vocab_size, seq_length, tokens, transitions,
                         num_classes, training_mode, ground_truth_transitions_visible, vs,
                         initial_embeddings=None, project_embeddings=False,
                         ss_mask_gen=None, ss_prob=0.0):
    """
    Construct a classifier which makes use of some hard-stack model.

    Args:
      cls: Hard stack class to use (from e.g. `spinn.fat_stack`)
      vocab_size:
      seq_length: Length of each sequence provided to the stack model
      tokens: Theano batch (integer matrix), `batch_size * seq_length`
      transitions: Theano batch (integer matrix), `batch_size * seq_length`
      num_classes: Number of output classes
      training_mode: A Theano scalar indicating whether to act as a training model
        with dropout (1.0) or to act as an eval model with rescaling (0.0).
      ground_truth_transitions_visible: A Theano scalar. If set (1.0), allow the
        model access to ground truth transitions. This can be disabled at
        evaluation time to force Model 1 (or 2S) to evaluate in the Model 2 style
        with predicted transitions. Has no effect on Model 0.
      vs: Variable store.
    """
    # Prepare layer which performs stack element composition.
    if cls is spinn.plain_rnn.RNN:
        if FLAGS.use_gru:
            compose_network = partial(util.GRULayer,
                                      initializer=util.HeKaimingInitializer())
        else:
            compose_network = partial(util.LSTMLayer,
                                      initializer=util.HeKaimingInitializer())
        embedding_projection_network = None
    elif cls is spinn.cbow.CBOW:
        compose_network = None
        embedding_projection_network = None
    else:
        if FLAGS.lstm_composition:
            if FLAGS.use_gru:
                compose_network = partial(util.TreeGRULayer,
                                          initializer=util.HeKaimingInitializer())
            else:
                compose_network = partial(util.TreeLSTMLayer,
                                          initializer=util.HeKaimingInitializer())
        else:
            assert not FLAGS.connect_tracking_comp, "Can only connect tracking and composition unit while using TreeLSTM"
            compose_network = partial(util.ReLULayer,
                                      initializer=util.HeKaimingInitializer())

        if project_embeddings:
            embedding_projection_network = util.Linear
        else:
            assert FLAGS.word_embedding_dim == FLAGS.model_dim, \
                "word_embedding_dim must equal model_dim unless a projection layer is used."
            embedding_projection_network = util.IdentityLayer

    # Build hard stack which scans over input sequence.
    sentence_model = cls(
        FLAGS.model_dim, FLAGS.word_embedding_dim, vocab_size, seq_length,
        compose_network, embedding_projection_network, training_mode,
        ground_truth_transitions_visible, vs,
        predict_use_cell=FLAGS.predict_use_cell,
        use_tracking_lstm=FLAGS.use_tracking_lstm,
        tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim,
        X=tokens,
        transitions=transitions,
        initial_embeddings=initial_embeddings,
        embedding_dropout_keep_rate=FLAGS.embedding_keep_rate,
        ss_mask_gen=ss_mask_gen,
        ss_prob=ss_prob,
        connect_tracking_comp=FLAGS.connect_tracking_comp,
        context_sensitive_shift=FLAGS.context_sensitive_shift,
        context_sensitive_use_relu=FLAGS.context_sensitive_use_relu,
        use_input_batch_norm=False)

    # Extract top element of final stack timestep.
    if FLAGS.lstm_composition or cls is spinn.plain_rnn.RNN:
        sentence_vector = sentence_model.final_representations[:, :FLAGS.model_dim // 2].reshape((-1, FLAGS.model_dim // 2))
        sentence_vector_dim = FLAGS.model_dim // 2
    else:
        sentence_vector = sentence_model.final_representations.reshape((-1, FLAGS.model_dim))
        sentence_vector_dim = FLAGS.model_dim

    sentence_vector = util.BatchNorm(sentence_vector, sentence_vector_dim, vs,
                                     "sentence_vector", training_mode)
    sentence_vector = util.Dropout(sentence_vector,
                                   FLAGS.semantic_classifier_keep_rate, training_mode)

    # Feed forward through a single output layer
    logits = util.Linear(
        sentence_vector, sentence_vector_dim, num_classes, vs,
        name="semantic_classifier", use_bias=True)

    return sentence_model.transitions_pred, logits
085d6a0538bfa06a34c543c27efd651c4c46168a
12,606
def read_xls_as_dict(filename, header="top"):
    """ Read a xls file as dictionary.

    @param filename  File name (*.xls or *.xlsx)
    @param header    Header position. Options: "top", "left"
    @return          Dictionary with header as key
    """
    table = read_xls(filename)
    if header == "top":
        return read_table_header_top(table)
    elif header == "left":
        return read_table_header_left(table)
    else:
        return {}
9ed410e42a11ee898466bb2f36b6d02e051b21ec
12,607
def check_hostgroup(zapi, region_name, cluster_id):
    """Check from the region name whether the hostgroup exists.

    :region_name: region name of hostgroup
    :returns: true or false
    """
    return zapi.hostgroup.exists(name="Region [%s %s]" % (region_name, cluster_id))
b237b544ac59331ce94dd1ac471187a60d527a1b
12,608
import tempfile

def matlab_to_tt(ttemps, eng, is_orth=True, backend="numpy", mode="l"):
    """Load matlab.object representing TTeMPS into Python as TT"""
    _, f = tempfile.mkstemp(suffix=".mat")
    eng.TTeMPS_to_Py(f, ttemps, nargout=0)
    tt = load_matlab_tt(f, is_orth=is_orth, mode=mode, backend=backend)
    return tt
e21087e2587368a55ece7a50f576573c5284373a
12,609
def encode_mecab(tagger, string):
    """
    Morphologically analyze a string with MeCab.
    :param tagger: morphological analyzer object
    :param string: input text
    :return tokens: morphological analysis results
    :return indexs: whitespace positions
    """
    string = string.strip()
    if len(string) == 0:
        return [], []
    words = string.split()

    nodes = tagger.pos(" ".join(words))
    tokens = []
    for node in nodes:
        surface = node[0].strip()
        if 0 < len(surface):
            # handle cases like '영치기 영차' in the MeCab output
            for s in surface.split():
                tokens.append(s)

    indexs = []
    index, start, end = -1, 0, 100000
    # verify that the segmentation is consistent
    for i, token in enumerate(tokens):
        if end < len(words[index]):
            start = end
            end += len(token)
        else:
            index += 1
            start = 0
            end = len(token)
            indexs.append(i)  # record the actual start positions among the values
        assert words[index][start:end] == token, f"{words[index][start:end]} != {token}"

    return tokens, indexs
847278728ebe7790d8aef2a125a420d5779adc6b
12,610
def nutrient_limited_growth(X, idx_A, idx_B, growth_rate, half_saturation):
    """ non-linear response with respect to *destination/predator* compartment

    Similar to holling_type_II and is a reparameterization of holling II.
    The response with respect to the origin compartment 'B' is approximately
    linear for small 'B' and converges towards an upper limit governed by the
    'growth_rate' for large 'B'.
    For examples see:
    `Examples <https://gist.github.com/465b/cce390f58d64d70613a593c8038d4dc6>`_

    Parameters
    ----------
    X : np.array
        containing the current state of the contained quantity of each compartment
    idx_A : integer
        index of the element representing the destination/predator compartment
    idx_B : integer
        index of the element representing the origin/prey compartment
    growth_rate : float
        first parameter of the interaction. governs the upper limit of the response.
    half_saturation : float
        second parameter of the interaction. governs the slope of the response.

    Returns
    -------
    df : float
        change in the origin and destination compartments. Calculated by
        df = growth_rate * (B / (half_saturation + B)) * A
    """
    A = X[idx_A]  # quantity of compartment A (predator/consumer)
    B = X[idx_B]  # quantity of compartment B (prey/nutrient)
    df = growth_rate * (B / (half_saturation + B)) * A
    return df
05e66a0e426a404a5356f04f8568ab23548b6dbe
12,611
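Worked numbers for nutrient_limited_growth, showing the saturating response as B grows relative to the half-saturation constant:

import numpy as np

X = np.array([1.0, 50.0])  # A (consumer) = 1.0, B (nutrient) = 50.0
df = nutrient_limited_growth(X, idx_A=0, idx_B=1, growth_rate=2.0, half_saturation=10.0)
# df = 2.0 * (50 / (10 + 50)) * 1.0 ≈ 1.67; as B grows, df approaches growth_rate * A = 2.0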
def aes128_decrypt(AES_KEY, _data):
    """
    AES 128-bit decryption
    :param _data: data to decrypt
    :return: decrypted data with the padding stripped
    """
    # key instance
    newAes = getAesByKey(AES_KEY)
    # decrypt
    data = newAes.decrypt(_data)
    rawDataLength = len(data)
    # strip the padding bytes appended to the end of the data
    paddingNum = ord(data[rawDataLength - 1])
    if paddingNum > 0 and paddingNum <= 16:
        data = data[0:(rawDataLength - paddingNum)]
    return data
520c03a509f63807a62ccb0385e99bc9b674fd67
12,612
def human_readable_size(size, decimals=1):
    """Transform size in bytes into human readable text."""
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if size < 1000:
            break
        size /= 1000
    return f"{size:.{decimals}f} {unit}"
5fb0dc79162d0bc0a945061aa0889735b24fff7b
12,613
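human_readable_size uses decimal (factor-1000) units, e.g.:

assert human_readable_size(999) == "999.0 B"
assert human_readable_size(1_500_000) == "1.5 MB"
assert human_readable_size(2_000_000_000, decimals=2) == "2.00 GB"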
def generichash_blake2b_final(statebuf, digest_size):
    """Finalize the blake2b hash state and return the digest.

    :param statebuf:
    :type statebuf: bytes
    :param digest_size:
    :type digest_size: int
    :return: the blake2 digest of the passed-in data stream
    :rtype: bytes
    """
    _digest = ffi.new("unsigned char[]", crypto_generichash_BYTES_MAX)
    rc = lib.crypto_generichash_blake2b_final(statebuf, _digest, digest_size)
    ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError)
    return ffi.buffer(_digest, digest_size)[:]
a81da8346bafb2f7d8fd40b0d9ff204689d002f8
12,614
def walker_input_formatter(t, obs):
    """
    This function formats the data to give as input to the controller
    :param t:
    :param obs:
    :return: the observation, unchanged
    """
    return obs
651038cd4dc0e8c8ccb89a10a5b20f6031e17ba8
12,615
def build_url_base(url):
    """Normalize and build the final url

    :param url: The given url
    :type url: str
    :return: The final url
    :rtype: str
    """
    normalize = normalize_url(url=url)
    final_url = "{url}/api".format(url=normalize)
    return final_url
a500a6d96ab637182abab966817209324ddc670a
12,616
from typing import Tuple
from typing import List

def build_decoder(
    latent_dim: int,
    input_shape: Tuple,
    encoder_shape: Tuple,
    filters: List[int],
    kernels: List[Tuple[int, int]],
    strides: List[int]
) -> Model:
    """Return decoder model.

    Parameters
    ----------
    latent_dim: int,
        Size of the latent vector.
    input_shape: Tuple,
        Shape of the model input, used to reshape the decoder output.
    encoder_shape: Tuple,
        Output shape of the last convolutional layer of the encoder model.
    filters: List[int],
        List of filters for the convolutional layer.
    kernels: List[Tuple[int, int]],
        List of kernel sizes for the convolutional layer.
    strides: List[int]
        List of strides for the convolutional layer.
    """
    decoder_input = Input(
        shape=(latent_dim,),
        name='decoder_input'
    )
    x = Dense(np.prod(encoder_shape))(decoder_input)
    x = Reshape(encoder_shape)(x)
    x = decoder_blocks(
        x,
        reversed(filters),
        reversed(kernels),
        reversed(strides)
    )
    decoder_output = Conv2DTranspose(
        filters=1,
        kernel_size=kernels[0],
        activation=axis_softmax,
        padding='same',
        name='decoder_output'
    )(x)
    reshape = Reshape(input_shape)(decoder_output)

    # Instantiate Decoder Model
    return Model(
        decoder_input,
        reshape,
        name='decoder'
    )
efdac0fa9df81249e531b2568cd8f91816c209a6
12,617
def execute_with_python_values(executable, arguments=(), backend=None):
    """Execute on one replica with Python values as arguments and output."""
    backend = backend or get_local_backend()

    def put(arg):
        return Buffer.from_pyval(
            arg, device=executable.DeviceOrdinals()[0], backend=backend)

    arguments = [put(arg) for arg in arguments]
    return executable.Execute(arguments).to_py()
26c9352feb2e5c7e6fb46702105245f582218e91
12,618
import numpy as np

def _get_label_members(X, labels, cluster):
    """
    Helper function to get samples of a specified cluster.

    Args:
        X (np.ndarray): ndarray with dimensions [n_samples, n_features]
            data to check validity of clustering
        labels (np.array): clustering assignments for data X
        cluster (int): cluster of interest

    Returns:
        members (np.ndarray): array of dimensions (n_samples, n_features)
            of samples of the specified cluster.
    """
    indices = np.where(labels == cluster)[0]
    members = X[indices]
    return members
18c213f88816108f93ddd38cdd2c934f431ea35a
12,620
from typing import Tuple
from bisect import bisect_left, bisect_right

def get_spectrum_by_close_values(
    mz: list, it: list,
    left_border: float, right_border: float,
    *, eps: float = 0.0
) -> Tuple[list, list, int, int]:
    """
    Function to get a segment of a spectrum by left and right borders.

    :param mz: m/z array
    :param it: intensities
    :param left_border: left border
    :param right_border: right border
    :param eps: epsilon to provide regulation of borders
    :return: the values of the spectrum closest to the left and right borders,
        plus the left and right indices
    """
    mz, it = mz.copy(), it.copy()
    left = bisect_left(mz, left_border - eps)
    right = bisect_right(mz, right_border + eps)
    return mz[left:right].copy(), it[left:right].copy(), left, right
0ec34b044b9105fe1c232baa9b51760cbb96b9d9
12,621
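A usage sketch for get_spectrum_by_close_values on a toy spectrum:

mz = [100.0, 150.0, 200.0, 250.0, 300.0]
it = [10, 20, 30, 40, 50]
sub_mz, sub_it, left, right = get_spectrum_by_close_values(mz, it, 150.0, 250.0)
# sub_mz == [150.0, 200.0, 250.0], sub_it == [20, 30, 40], left == 1, right == 4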
def refresh_wrapper(trynum, maxtries, *args, **kwargs):
    """A @retry argmod_func to refresh a Wrapper, which must be the first arg.

    When using @retry to decorate a method which modifies a Wrapper, a common
    cause of retry is etag mismatch. In this case, the retry should refresh
    the wrapper before attempting the modifications again. This method may be
    passed to @retry's argmod_func argument to effect such a refresh.

    Note that the decorated method must be defined such that the wrapper is
    its first argument.
    """
    arglist = list(args)
    # If we get here, we *usually* have an etag mismatch, so specifying
    # use_etag=False *should* be redundant. However, for scenarios where we're
    # retrying for some other reason, we want to guarantee a fresh fetch to
    # obliterate any local changes we made to the wrapper (because the retry
    # should be making those changes again).
    arglist[0] = arglist[0].refresh(use_etag=False)
    return arglist, kwargs
089b859964e89d54def0058abc9cc7536f5d8877
12,623
import numpy as np

def compute_frames_per_animation(
        attacks_per_second: float,
        base_animation_length: int,
        speed_coefficient: float = 1.0,
        engine_tick_rate: int = 60,
        is_channeling: bool = False) -> int:
    """Calculates frames per animation needed to resolve a certain ability at attacks_per_second.

    Args:
        attacks_per_second: attacks per second of character
        base_animation_length: animation length of ability
        speed_coefficient: speed-up scalar of ability
        engine_tick_rate: server tick rate
        is_channeling: whether or not the ability is a channeling skill

    Returns:
        int: number of frames one cast needs to resolve
    """
    _coeff = engine_tick_rate / (attacks_per_second * speed_coefficient)
    if is_channeling:
        return int(np.floor(_coeff))
    return int(np.ceil((base_animation_length - 1) / base_animation_length * _coeff))
44427cf28152de21de42f0220e75f87717235275
12,624
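Worked example for compute_frames_per_animation at the default 60 Hz tick rate:

frames = compute_frames_per_animation(1.4, 60)
# _coeff = 60 / 1.4 ≈ 42.86; ceil(59/60 * 42.86) = ceil(42.14) = 43 frames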
def pad_rect(rect, move):
    """Returns padded rectangles given specified padding"""
    if rect['dx'] > 2:
        rect['x'] += move[0]
        rect['dx'] -= move[0]
    if rect['dy'] > 2:
        rect['y'] += move[1]
        rect['dy'] -= move[1]
    return rect
48bdbdc9d4736e372afc983ab5966fc80a221d4d
12,625
import asyncio

async def yes_no(ctx: commands.Context,
                 message: str = "Are you sure? Type **yes** within 10 seconds to confirm. o.o"):
    """Yes no helper. Ask a confirmation message with a timeout of 10 seconds.

    ctx - The context in which the question is being asked.
    message - Optional message that the question should ask.
    """
    await ctx.send(message)
    try:
        message = await ctx.bot.wait_for(
            "message", timeout=10,
            check=lambda message: message.author == ctx.message.author)
    except asyncio.TimeoutError:
        await ctx.send("Timed out waiting. :<")
        return False
    if message.clean_content.lower() not in ["yes", "y"]:
        await ctx.send("Command cancelled. :<")
        return False
    return True
2b8ab0bfc51d4be68a42507bad6dbb945465d2e4
12,626
def __validation(size: int, it1: int, it2: int, it3: int, it4: int) -> bool:
    """ Check that a tour is valid
    size: length of the route
    it1, it2, it3, it4: city indices t1, t2i, t2i+1, t2i+2
    return: valid or not
    """
    return between(size, it1, it3, it4) and between(size, it4, it2, it1)
5fcb29f45c456115e8b87f0313e05f327c702849
12,627
def get_type_associations(base_type, generic_base_type):
    # type: (t.Type[TType], t.Type[TValue]) -> t.List[t.Tuple[t.Type[TValue], t.Type[TType]]]
    """Create and return a list of tuples associating generic_base_type derived types with a corresponding base_type derived type."""
    return [item for item in [(get_generic_type(sc_type, generic_base_type), sc_type)
                              for sc_type in get_subclasses(base_type)] if item[1]]
fe18bf72a96d6dfa8fad2c625732e781d54cae4d
12,628
import rpy2.robjects as robj
from rpy2.robjects.packages import importr
import anndata2ri

def identify_empty_droplets(data, min_cells=3, **kw):
    """Detect empty droplets using DropletUtils"""
    importr("DropletUtils")
    adata = data.copy()
    col_sum = adata.X.sum(0)
    if hasattr(col_sum, 'A'):
        col_sum = col_sum.A.squeeze()
    keep = col_sum > min_cells
    adata = adata[:, keep]
    #adata.X = adata.X.tocsc()
    anndata2ri.activate()
    robj.globalenv["X"] = adata
    res = robj.r('res <- emptyDrops(assay(X))')
    anndata2ri.deactivate()
    keep = res.loc[res.FDR < 0.01, :]
    data = data[keep.index, :]
    data.obs['empty_FDR'] = keep['FDR']
    return data
9c2d532d75afb6044836249eb525e86c60511c9b
12,629
def catalog_category_RSS(category_id):
    """ Return an RSS feed containing all items in the specified category_id """
    items = session.query(Item).filter_by(
        category_id=category_id).all()
    doc = jaxml.XML_document()
    doc.category(str(category_id))
    for item in items:
        doc._push()
        doc.item()
        doc.id(item.id)
        doc.name(item.name)
        doc.description(item.description)
        doc.imagepath('"' + item.image + '"')
        doc.category_id(item.category_id)
        doc.user_id(item.user_id)
        doc._pop()
    return doc.__repr__()
13554cf1eba3a83c0fb23a6f848751721579dfea
12,630
def get_caller_name(N=0, allow_genexpr=True):
    """
    get the name of the function that called you

    Args:
        N (int): (defaults to 0) number of levels up in the stack
        allow_genexpr (bool): (default = True)

    Returns:
        str: a function name

    CommandLine:
        python -m utool.util_dbg get_caller_name
        python -m utool get_caller_name
        python ~/code/utool/utool/__main__.py get_caller_name
        python ~/code/utool/utool/__init__.py get_caller_name
        python ~/code/utool/utool/util_dbg.py get_caller_name

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dbg import *  # NOQA
        >>> import utool as ut
        >>> N = list(range(0, 13))
        >>> allow_genexpr = True
        >>> caller_name = get_caller_name(N, allow_genexpr)
        >>> print(caller_name)
    """
    if isinstance(N, (list, tuple, range)):
        name_list = []
        for N_ in N:
            try:
                name_list.append(get_caller_name(N_))
            except AssertionError:
                name_list.append('X')
        return '[' + ']['.join(name_list) + ']'
    parent_frame = get_stack_frame(N=N + 2)
    caller_name = parent_frame.f_code.co_name
    co_filename = parent_frame.f_code.co_filename
    if not allow_genexpr:
        count = 0
        while True:
            count += 1
            if caller_name == '<genexpr>':
                parent_frame = get_stack_frame(N=N + 1 + count)
                caller_name = parent_frame.f_code.co_name
            else:
                break
    #try:
    #    if 'func' in parent_frame.f_locals:
    #        caller_name += '(' + meta_util_six.get_funcname(parent_frame.f_locals['func']) + ')'
    #except Exception:
    #    pass
    if caller_name == '<module>':
        # Make the caller name the filename
        caller_name = splitext(split(co_filename)[1])[0]
    if caller_name in {'__init__', '__main__'}:
        # Make the caller name the filename
        caller_name = basename(dirname(co_filename)) + '.' + caller_name
    return caller_name
6c6ce7690d1bc4bd51037056e27f5dbd73085e29
12,631
from typing import Any

def create_user(
    *,
    db: Session = Depends(deps.get_db),
    user_in: schema_in.UserCreateIn,
) -> Any:
    """
    Create new user.
    """
    new_user = User(**{k: v for k, v in user_in.dict().items() if k != 'password'})
    new_user.hashed_password = get_password_hash(user_in.password)
    new_user.gid = -1
    try:
        db.add(new_user)
        db.commit()
    except IntegrityError:
        db.rollback()
        raise HTTPException(
            status_code=400,
            detail="The user with this username already exists in the system.",
        )
    new_role = Role(uid=new_user.uid, nickname=new_user.nickname,
                    avatar=new_user.avatar, gid=-1)
    new_role.reset()
    db.add(new_role)
    db.commit()
    return GameEnum.OK.digest()
cd8c3036026639d3e29e6dc030335f328d11c144
12,634
import math

def number_format(interp, num_args, number, decimals=0, dec_point='.', thousands_sep=','):
    """Format a number with grouped thousands."""
    if num_args == 3:
        return interp.space.w_False

    ino = int(number)
    dec = abs(number - ino)
    rest = ""
    if decimals == 0 and dec >= 0.5:
        if number > 0:
            ino += 1
        else:
            ino -= 1
    elif decimals > 0:
        s_dec = str(dec)
        if decimals + 2 < len(s_dec):
            if ord(s_dec[decimals + 2]) >= ord('5'):
                dec += math.pow(10, -decimals)
                if dec >= 1:
                    if number > 0:
                        ino += 1
                    else:
                        ino -= 1
                    rest = "0" * decimals
                else:
                    s_dec = str(dec)
            if not rest:
                rest = s_dec[2:decimals + 2]
        else:
            rest = s_dec[2:] + "0" * (decimals - len(s_dec) + 2)
    s = str(ino)
    res = []
    i = 0
    while i < len(s):
        res.append(s[i])
        if s[i] != '-' and i != len(s) - 1 and (len(s) - i - 1) % 3 == 0:
            for item in thousands_sep:
                res.append(item)
        i += 1
    if decimals > 0:
        for item in dec_point:
            res.append(item)
    return interp.space.wrap("".join(res) + rest)
9d5ab0b9ed5dd6054ce4f356e6811c1b155e2062
12,635
from typing import Optional

def _serialization_expr(value_expr: str, a_type: mapry.Type, py: mapry.Py) -> Optional[str]:
    """
    Generate the expression of the serialization of the given value.

    If no serialization expression can be generated (e.g., in case of nested
    structures such as arrays and maps), None is returned.

    :param value_expr: Python expression of the value to be serialized
    :param a_type: the mapry type of the value
    :param py: Python settings
    :return: generated expression, or None if not possible
    """
    result = None  # type: Optional[str]

    if isinstance(a_type, (mapry.Boolean, mapry.Integer, mapry.Float, mapry.String)):
        result = value_expr

    elif isinstance(a_type, mapry.Path):
        if py.path_as == 'str':
            result = value_expr
        elif py.path_as == 'pathlib.Path':
            result = 'str({})'.format(value_expr)
        else:
            raise NotImplementedError(
                "Unhandled path_as: {}".format(py.path_as))

    elif isinstance(a_type, (mapry.Date, mapry.Datetime, mapry.Time)):
        result = '{value_expr}.strftime({dt_format!r})'.format(
            value_expr=value_expr, dt_format=a_type.format)

    elif isinstance(a_type, mapry.TimeZone):
        if py.timezone_as == 'str':
            result = value_expr
        elif py.timezone_as == 'pytz.timezone':
            result = 'str({})'.format(value_expr)
        else:
            raise NotImplementedError(
                'Unhandled timezone_as: {}'.format(py.timezone_as))

    elif isinstance(a_type, mapry.Duration):
        result = '_duration_to_string({})'.format(value_expr)

    elif isinstance(a_type, mapry.Array):
        result = None

    elif isinstance(a_type, mapry.Map):
        result = None

    elif isinstance(a_type, mapry.Class):
        result = "{}.id".format(value_expr)

    elif isinstance(a_type, mapry.Embed):
        result = "serialize_{}({})".format(
            mapry.py.naming.as_variable(a_type.name), value_expr)

    else:
        raise NotImplementedError(
            "Unhandled serialization expression of type: {}".format(a_type))

    return result
5920a4c10dabe2ff061a1f141cd9c9f10faebafa
12,636
import numpy as np
import imagehash

def get_init_hash():
    """Get an initial, empty hash value."""
    return imagehash.ImageHash(np.zeros([8, 8]).astype(bool))
cd4665e6b5cdf232883093dab660aafcc2109a44
12,637
from shapely.geometry import LineString

def get_vertex_between_points(point1, point2, at_distance):
    """Returns vertex between point1 and point2 at a distance from point1.

    Args:
        point1: First vertex having tuple (x,y) co-ordinates.
        point2: Second vertex having tuple (x,y) co-ordinates.
        at_distance: A distance at which to locate the vertex on the line
            joining point1 and point2.

    Returns:
        A Point object.
    """
    line = LineString([point1, point2])
    new_point = line.interpolate(at_distance)
    return new_point
acb5cd76ef7dd3a16592c5fbaf74d6d777ab338c
12,638
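Usage sketch for get_vertex_between_points; shapely's interpolate walks along the segment starting from point1:

p = get_vertex_between_points((0.0, 0.0), (10.0, 0.0), 4.0)
# p.x == 4.0, p.y == 0.0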
def disable_cache(response: Response) -> Response:
    """Prevents cached responses"""
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, public, max-age=0"
    response.headers["Expires"] = "0"
    response.headers["Pragma"] = "no-cache"
    return response
6f63c7e93a7c354c85171652dca51162e15b7137
12,639
import numpy as np

def get_dir(src_point, rot_rad):
    """Rotate the point by `rot_rad` radians."""
    sn, cs = np.sin(rot_rad), np.cos(rot_rad)

    src_result = [0, 0]
    src_result[0] = src_point[0] * cs - src_point[1] * sn
    src_result[1] = src_point[0] * sn + src_point[1] * cs

    return src_result
40b36671c50a6b6b8905eca9915901cd613c2aaa
12,640
import numpy as np
import scipy.sparse as spar

def sparse_gauss_seidel(A, b, maxiters=100, tol=1e-8):
    """Returns the solution to the system Ax = b using the Gauss-Seidel method.

    Inputs:
        A (array) - 2D scipy.sparse matrix
        b (array) - 1D NumPy array
        maxiters (int, optional) - maximum iterations for algorithm to perform.
        tol (float) - tolerance for convergence
    Returns:
        x (array) - solution to system Ax = b.
        x_approx (list) - list of approximations at each iteration.
    """
    if type(A) != spar.csr_matrix:
        A = spar.csr_matrix(A)
    n = A.shape[0]
    x0 = np.zeros(n)
    x = np.ones(n)
    x_approx = []
    for k in range(maxiters):
        x = x0.copy()
        diag = A.diagonal()
        for i in range(n):
            rowstart = A.indptr[i]
            rowend = A.indptr[i+1]
            Aix = np.dot(A.data[rowstart:rowend],
                         x[A.indices[rowstart:rowend]])
            x[i] += (b[i] - Aix) / diag[i]
        if np.max(np.abs(x0 - x)) < tol:
            return x, x_approx
        x0 = x
        x_approx.append(x)
    print("Maxiters hit!")
    return x, x_approx
139fefa8e45d14f32ea9bb4dd25df03762737090
12,641
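A small convergence check for sparse_gauss_seidel on a diagonally dominant system (a standard sufficient condition for Gauss-Seidel to converge):

import numpy as np
import scipy.sparse as spar

A = spar.csr_matrix(np.array([[4.0, 1.0], [1.0, 3.0]]))
b = np.array([1.0, 2.0])
x, x_approx = sparse_gauss_seidel(A, b)
assert np.allclose(x, np.linalg.solve(A.toarray(), b), atol=1e-6)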
def delete_user(user_id):
    """Delete user from Users database and their permissions from
    SurveyPermissions and ReportPermissions.

    :Route: /api/user/<int:user_id>
    :Methods: DELETE
    :Roles: s
    :param user_id: user id
    :return dict: {"delete": user_id}
    """
    user = database.get_user(user_id)
    database.delete_user(user)
    return {"delete": user.id}
c8f86dc20db67a5e3511e082f6308903b1acdaa2
12,642
def selectTopFive(sortedList):
    """
    Select the top five entries from sortedList and return the corresponding
    names and commit counts as lists.
    :param sortedList: authorDict sorted by value in descending order
    :return: size -- [commit counts]
             labels -- [names]
    """
    size = []
    labels = []
    for i in range(5):
        labels.append(sortedList[i][0])
        size.append(sortedList[i][1])
    return size, labels
747ad379ed73aeb6ccb48487b48dc6150350204e
12,643
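selectTopFive expects at least five entries, e.g.:

authors = [('alice', 120), ('bob', 90), ('carol', 75), ('dave', 60), ('eve', 50), ('frank', 10)]
size, labels = selectTopFive(authors)
# labels == ['alice', 'bob', 'carol', 'dave', 'eve'], size == [120, 90, 75, 60, 50]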
def get_license(file):
    """Returns the license from the input file.
    """
    # Collect the license
    lic = ''
    for line in file:
        if line.startswith('#include') or line.startswith('#ifndef'):
            break
        else:
            lic += line
    return lic
126fff2dd0464ef1987f3ab672f6b36b8fa962f7
12,644
def quote_query_string(chars):
    """
    Multibyte character strings are quoted with double quotes, because the
    english analyzer of Elasticsearch decomposes multibyte character strings
    with an OR expression.

    e.g.
    神保町 -> 神 OR 保 OR 町
    "神保町"-> 神保町
    """
    if not isinstance(chars, unicode):
        chars = chars.decode('utf-8')

    token = u''
    qs = u''
    in_escape = False
    in_quote = False
    in_token = False
    for c in chars:
        # backslash escape
        if in_escape:
            token += c
            in_escape = False
            continue
        if c == u'\\':
            token += c
            in_escape = True
            continue
        # quote
        if c != u'"' and in_quote:
            token += c
            continue
        if c == u'"' and in_quote:
            token += c
            qs += token
            token = u''
            in_quote = False
            continue
        # otherwise: not in_quote
        if _is_delimiter(c) or c == u'"':
            if in_token:
                qs += _quote_token(token)
                token = u''
                in_token = False
            if c == u'"':
                token += c
                in_quote = True
            else:
                qs += c
            continue
        # otherwise: not _is_delimiter(c)
        token += c
        in_token = True
    if token:
        qs += _quote_token(token)
    return qs
8d1888df17a617d42e6a0d1b909e08e4f84fa4c9
12,645
def copy_params(params: ParamsDict) -> ParamsDict:
    """copy a parameter dictionary

    Args:
        params: the parameter dictionary to copy

    Returns:
        the copied parameter dictionary

    Note:
        this copy function works recursively on all subdictionaries of the
        params dictionary but does NOT copy any non-dictionary values.
    """
    validate_params(params)
    params = {**params}
    if all(isinstance(v, dict) for v in params.values()):
        return {k: copy_params(params[k]) for k in params}
    return params
8248b31698f6b51103dc34bad7b13373591b10cd
12,646
def watch_list(request):
    """
    Get the watchlist, add a movie to the watchlist, or delete from the watchlist.
    :param request:
    :return:
    """
    if request.method == 'GET':
        watchlist = WatchList.objects.filter(user=request.user)
        serializer = WatchListSerializer(watchlist, many=True)
        return Response(data=serializer.data, status=status.HTTP_200_OK)
    elif request.method == 'POST':
        movie_id = request.data.get('movie_id')
        if movie_id is not None:
            # check if movie is in db
            try:
                movie = Movie_Collected.objects.get(pk=movie_id)
                watchlist = WatchList.objects.filter(user=request.user, movie=movie).exists()
                if watchlist:
                    message = {"error": "Movie already in watchlist"}
                    return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
                else:
                    watchlist = WatchList.objects.create(user=request.user, movie=movie)
                    serializer = WatchListSerializer(watchlist)
                    return Response(data=serializer.data, status=status.HTTP_201_CREATED)
            except Movie_Collected.DoesNotExist:
                return Response(status=status.HTTP_404_NOT_FOUND)
        else:
            message = {'error': 'Movie id is required'}
            return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        movie_id = request.data.get('movie_id')
        if movie_id is not None:
            try:
                movie = Movie_Collected.objects.get(pk=movie_id)
                WatchList.objects.filter(user=request.user, movie=movie).delete()
                return Response(status=status.HTTP_204_NO_CONTENT)
            except Movie_Collected.DoesNotExist:
                return Response(status=status.HTTP_404_NOT_FOUND)
        else:
            message = {'error': 'Movie id is required'}
            return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
c92d39da05546fea9330ffe44cea5dd0c30f6427
12,647
async def user_has_pl(api, room_id, mxid, pl=100):
    """
    Determine if a user has the given power level in a room (100, the
    default, corresponds to admin).
    """
    pls = await api.get_power_levels(room_id)
    users = pls["users"]
    user_pl = users.get(mxid, 0)
    return user_pl == pl
5678af17469202e0b0a0232e066e7ed5c8212ee6
12,648
import cgi
from typing import Optional

def get_cgi_parameter_bool_or_default(form: cgi.FieldStorage,
                                      key: str,
                                      default: bool = None) -> Optional[bool]:
    """
    Extracts a boolean parameter from a CGI form (``"1"`` = ``True``,
    other string = ``False``, absent/zero-length string = default value).
    """
    s = get_cgi_parameter_str(form, key)
    if s is None or len(s) == 0:
        return default
    return is_1(s)
905dfa96628414e3b076fd3345113588f3f6ef08
12,649
def loss_function_1(y_true, y_pred):
    """ Probabilistic output loss """
    a = tf.clip_by_value(y_pred, 1e-20, 1)
    b = tf.clip_by_value(tf.subtract(1.0, y_pred), 1e-20, 1)
    cross_entropy = - tf.multiply(y_true, tf.log(a)) \
                    - tf.multiply(tf.subtract(1.0, y_true), tf.log(b))
    cross_entropy = tf.reduce_mean(cross_entropy, 0)
    loss = tf.reduce_mean(cross_entropy)
    return loss
8426ef13bd56fa3ff11226556d37bf738333a165
12,650
def sanitize_for_json(tag):
    """eugh, the tag's text is in comment strings"""
    return tag.text.replace('<!--', '').replace('-->', '')
211c07864af825ad29dfc806844927db977e6ce0
12,651
import numpy as np

def load_data_and_labels(dataset_name):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    positive_examples = list(open('data/' + str(dataset_name) + '/' + str(dataset_name) + '.pos', encoding="utf-8").readlines())
    # positive_examples = positive_examples[0:1000]
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open('data/' + str(dataset_name) + '/' + str(dataset_name) + '.neg', encoding="utf-8").readlines())
    # negative_examples = negative_examples[0:1000]
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    x_text = [s.split(" ") for s in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]
d753494f3a614850c07f40230c3373eab13b0c6b
12,652
from math import floor
from itertools import zip_longest

def tileswrap(ihtORsize, numtilings, floats, wrapwidths, ints=[], readonly=False):
    """Returns num-tilings tile indices corresponding to the floats and ints, wrapping some floats"""
    qfloats = [floor(f * numtilings) for f in floats]
    Tiles = []
    for tiling in range(numtilings):
        tilingX2 = tiling * 2
        coords = [tiling]
        b = tiling
        for q, width in zip_longest(qfloats, wrapwidths):
            c = (q + b % numtilings) // numtilings
            coords.append(c % width if width else c)
            b += tilingX2
        coords.extend(ints)
        Tiles.append(hashcoords(coords, ihtORsize, readonly))
    return Tiles
e9a9dc439307fc114c9abc939f642ea411acd26e
12,653
def coerce(data, egdata):
    """Coerce a python object to another type using the AE coercers"""
    pdata = pack(data)
    pegdata = pack(egdata)
    pdata = pdata.AECoerceDesc(pegdata.type)
    return unpack(pdata)
dc7499530b77a25c8b51537e2e21115d3ce3ccee
12,654
from typing import Union
from typing import List

def _write_mkaero1(model: Union[BDF, OP2Geom], name: str,
                   mkaero1s: List[MKAERO1], ncards: int,
                   op2_file, op2_ascii, endian: bytes,
                   nastran_format: str='nx') -> int:
    """writes the MKAERO1

    data = (1.3, -1, -1, -1, -1, -1, -1, -1,
            0.03, 0.04, 0.05, -1, -1, -1, -1, -1)
    """
    key = (3802, 38, 271)

    makero1s_temp = []
    makero1s_final = []
    for mkaero in mkaero1s:
        nmachs = len(mkaero.machs)
        nkfreqs = len(mkaero.reduced_freqs)
        assert nmachs > 0, mkaero
        assert nkfreqs > 0, mkaero

        if nmachs <= 8 and nkfreqs <= 8:
            # no splitting required
            makero1s_final.append((mkaero.machs, mkaero.reduced_freqs))
        elif nmachs <= 8 or nkfreqs <= 8:
            # one of machs or kfreqs < 8
            makero1s_temp.append((mkaero.machs, mkaero.reduced_freqs))
        else:
            # both machs and kfreqs > 8
            nloops_mach = int(np.ceil(nmachs/8))
            for i in range(nloops_mach):
                machs_temp = _makero_temp(mkaero.machs, i, nloops_mach)
                assert len(machs_temp) > 0, (i, nloops_mach, machs_temp)
                makero1s_temp.append((machs_temp, mkaero.reduced_freqs))

    for (machs, reduced_freqs) in makero1s_temp:
        nmachs = len(machs)
        nkfreqs = len(reduced_freqs)
        assert nmachs > 0, nmachs
        assert nkfreqs > 0, nkfreqs

        if nmachs <= 8 and nkfreqs <= 8:  # pragma: no cover
            raise RuntimeError(f'this should never happen...nmachs={nmachs} knfreqs={nkfreqs}')
        if nmachs <= 8:
            # nkfreqs > 8
            nloops = int(np.ceil(nkfreqs/8))
            for i in range(nloops):
                reduced_freqs_temp = _makero_temp(reduced_freqs, i, nloops)
                makero1s_final.append((machs, reduced_freqs_temp))
        elif nkfreqs <= 8:
            # nmachs > 8
            nloops = int(np.ceil(nmachs/8))
            for i in range(nloops):
                machs_temp = _makero_temp(machs, i, nloops)
                assert len(machs_temp) > 0, (i, nloops_mach, machs_temp)
                makero1s_final.append((machs_temp, reduced_freqs))
        else:  # pragma: no cover
            raise RuntimeError(f'this should never happen...nmachs={nmachs} knfreqs={nkfreqs}')
            #raise RuntimeError((nmachs, nkfreqs))

    ncards = len(makero1s_final)
    nfields = 16
    nbytes = write_header(name, nfields, ncards, key, op2_file, op2_ascii)

    for machs, reduced_freqs in makero1s_final:
        data = []
        nmachs = len(machs)
        nkfreqs = len(reduced_freqs)
        assert nmachs > 0, machs
        assert nkfreqs > 0, reduced_freqs

        nint_mach = 8 - nmachs
        nint_kfreq = 8 - nkfreqs
        fmt1 = b'%if' % nmachs + b'i' * nint_mach
        fmt2 = b'%if' % nkfreqs + b'i' * nint_kfreq
        spack = Struct(endian + fmt1 + fmt2)

        data.extend(machs.tolist())
        assert nint_mach < 8, nint_mach
        if nint_mach:
            data.extend([-1]*nint_mach)
        data.extend(reduced_freqs.tolist())
        if nint_kfreq:
            data.extend([-1]*nint_kfreq)
        op2_ascii.write(f'  mkaero1 data={data}\n')
        op2_file.write(spack.pack(*data))
    return nbytes
ad45ec25989714685a6a2b2e61d6833a9ab56a6d
12,656
import numpy as np

def _mesh_obj_large():
    """build a large, random mesh model/dataset"""
    n_tri, n_pts = 400, 1000
    node = np.random.randn(n_pts, 2)
    element = np.array([np.random.permutation(n_pts)[:3] for _ in range(n_tri)])
    perm = np.random.randn(n_tri)
    np.random.seed(0)
    el_pos = np.random.permutation(n_pts)[:16]
    return PyEITMesh(node=node, element=element, perm=perm, el_pos=el_pos, ref_node=0)
c2db6a3484dc4923d92519488d0b10d7a7cd75bb
12,657
def cursor():
    """Return a database cursor."""
    return util.get_dbconn("mesosite").cursor()
516cf2a1716204487dd4cff4f063397365b21fa1
12,658
def custom_field_check(issue_in, attrib, name=None):
    """
    This method lets the user fetch custom fields that are not common to
    all projects; if the custom field does not exist, the method returns
    an empty string.
    """
    if hasattr(issue_in.fields, attrib):
        value = str(getattr(issue_in.fields, attrib))
        if name is not None:
            return str("%s : %s" % (name, value))
        return str(value)
    return str("")
d9c051fa922f34242d3b5e94e8534b4dc8038f19
12,659
def header(text, color='black', gen_text=None):
    """Create an HTML header"""

    if gen_text:
        # the original left the <span> unclosed; close it before </center>
        raw_html = (f'<h1 style="margin-top:16px;color: {color};font-size:54px"><center>'
                    f'{text}<span style="color: red">{gen_text}</span></center></h1>')
    else:
        raw_html = f'<h1 style="margin-top:12px;color: {color};font-size:54px"><center>{text}</center></h1>'
    return raw_html
646b0a16b35cd4350feadd75674eea6ab6da6404
12,660
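A quick usage sketch of header:

print(header("Results", color="navy"))
# <h1 style="margin-top:12px;color: navy;font-size:54px"><center>Results</center></h1>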
import tf
from geometry_msgs.msg import Point, PoseStamped, Quaternion

def block_pose(detection, block_size=0.05):
    # type: (AprilTagDetection, float) -> PoseStamped
    """Given a tag detection (id == 0), return the block's pose.

    The block pose has the same orientation as the tag detection, but its
    position is translated to be at the cube's center.

    Args:
        detection: The AprilTagDetection.
        block_size: The block's side length in meters.
    """
    transform = tf.transformations.concatenate_matrices(
        tf.transformations.translation_matrix(
            [detection.pose.pose.position.x,
             detection.pose.pose.position.y,
             detection.pose.pose.position.z]
        ),
        tf.transformations.quaternion_matrix(
            [detection.pose.pose.orientation.x,
             detection.pose.pose.orientation.y,
             detection.pose.pose.orientation.z,
             detection.pose.pose.orientation.w]
        ),
        # push the pose half a block along the tag's -z axis, from the tag
        # face to the cube's center
        tf.transformations.translation_matrix(
            [0, 0, -block_size / 2]
        )
    )
    t = tf.transformations.translation_from_matrix(transform)
    q = tf.transformations.quaternion_from_matrix(transform)

    ps = PoseStamped()
    ps.header.frame_id = detection.pose.header.frame_id
    ps.header.stamp = detection.pose.header.stamp
    ps.pose.position = Point(*t)
    ps.pose.orientation = Quaternion(*q)
    return ps
da6ee3bb1bf8a071ea5859d17dcad07ecd8781a3
12,661
async def batch_omim_similarity( data: models.POST_OMIM_Batch, method: str = 'graphic', combine: str = 'funSimAvg', kind: str = 'omim' ) -> dict: """ Similarity score between one HPOSet and several OMIM Diseases """ other_sets = [] for other in data.omim_diseases: try: disease = Omim.get(other) hpos = ','.join([str(x) for x in disease.hpo]) except KeyError: hpos = '' other_sets.append( models.POST_HPOSet( set2=hpos, name=other ) ) res = await terms.batch_similarity( data=models.POST_Batch( set1=data.set1, other_sets=other_sets ), method=method, combine=combine, kind=kind ) return res
2da8dc25d867f132ec0f571b4d9dff3d7de38c21
12,662
def vector(*, unit: _Union[_cpp.Unit, str, None] = default_unit, value: _Union[_np.ndarray, list]): """Constructs a zero dimensional :class:`Variable` holding a single length-3 vector. :param value: Initial value, a list or 1-D numpy array. :param unit: Optional, unit. Default=dimensionless :returns: A scalar (zero-dimensional) Variable. :seealso: :py:func:`scipp.vectors` """ return _cpp.vectors(dims=[], unit=unit, values=value)
dda09f89ba00ffab789c7ed9f6f6713a45c9bd03
12,663
def read_lidar(filename, **kwargs):
    """Read a LAS file.

    Args:
        filename (str): Path to a LAS file.

    Returns:
        LasData: The LasData object returned by laspy.read.
    """
    # import lazily so the module still loads when laspy is absent
    try:
        import laspy
    except ImportError:
        print(
            "The laspy package is required for this function. Use pip install laspy to install it."
        )
        return

    return laspy.read(filename, **kwargs)
5336c34223216d4a1857cc5c858ccca704508e22
12,664
def get_gene_starting_with(gene_symbol: str, verbose: bool = True):
    """ get the genes that start with the symbol given

    Args:
    - gene_symbol: str
    - verbose: bool

    Returns:
    - list of str
    - None
    """

    gene_symbol = gene_symbol.strip().upper()
    ext = "search/symbol/{}*".format(gene_symbol)
    data = get_api_response("{}/{}".format(URL, ext))
    res = data["response"]["docs"]

    if not res:
        if verbose:
            print("No gene found starting with {}".format(gene_symbol))
        return
    else:
        gene_symbols = [doc["symbol"] for doc in res]

        if verbose:
            print("Found these genes starting with {}:".format(gene_symbol))

            for symbol in gene_symbols:
                print(symbol)

        return gene_symbols
c0f092a93d44dd264f6b251ff3eba565b29abda0
12,665
from time import time

def gen_timestamp():
    """
    Generates a unique (let's hope!), whole-number, unix-time timestamp.
    """
    return int(time() * 1e6)
cb044e7428c062660eb998856245d4cd2c692a7e
12,666
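A quick usage sketch; the value is microseconds since the Unix epoch, so consecutive calls normally differ:

ts = gen_timestamp()
print(ts)  # e.g. 1700000000000000 (microseconds since the Unix epoch)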
import numpy as np

def learningCurve(X, y, Xval, yval, Lambda):
    """returns the train and cross validation set errors for a learning curve.
       In particular, it returns two vectors of the same length - error_train
       and error_val. Then, error_train(i) contains the training error for
       i examples (and similarly for error_val(i)).

       In this function, you will compute the train and test errors for
       dataset sizes from 1 up to m. In practice, when working with larger
       datasets, you might want to do this in larger intervals.
    """
    # trainLinearReg and linearRegCostFunction are defined elsewhere in the
    # exercise code.

    # Number of training examples
    m, _ = X.shape

    error_train = np.zeros(m)
    error_val = np.zeros(m)

    for i in range(m):
        # fit on the first i+1 examples, then evaluate both errors with
        # the regularization term switched off (lambda = 0)
        theta = trainLinearReg(X[:i + 1], y[:i + 1], Lambda)
        error_train[i], _ = linearRegCostFunction(X[:i + 1], y[:i + 1], theta, 0)
        error_val[i], _ = linearRegCostFunction(Xval, yval, theta, 0)

    return error_train, error_val
8cdfdec694cbfadef92375c7cf8eba4da012be59
12,667
from xml.dom import minidom

def decode_xml(text):
    """Parse an XML document into a dictionary.

    This assumes that the document is only 1 level, i.e.:
    <top>
       <child1>content</child1>
       <child2>content</child2>
    </top>
    will be parsed as: child1=content, child2=content"""
    xmldoc = minidom.parseString(text)
    return dict([(x.tagName, x.firstChild.nodeValue)
                 for x in xmldoc.documentElement.childNodes
                 if x.childNodes.length == 1])
826bdc1ff0c4df503fdbc6f7e76b013d907b208b
12,668
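A small usage sketch of the single-level assumption decode_xml makes:

doc = "<top><child1>alpha</child1><child2>beta</child2></top>"
print(decode_xml(doc))  # {'child1': 'alpha', 'child2': 'beta'}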
def _qcheminputfile(ccdata, templatefile, inpfile):
    """
    Generate input file from geometry (list of lines) depending on job type

    :ccdata:       ccData object
    :templatefile: templatefile - tells us which template file to use
    :inpfile:      OUTPUT - expects a path/to/inputfile to write inpfile
    """
    # pt (periodic table) and templates (template registry) are module-level
    # helpers in the original source.
    string = ''

    if hasattr(ccdata, 'charge'):
        charge = ccdata.charge
    else:
        charge = 0
    if hasattr(ccdata, 'mult'):
        mult = ccdata.mult
    else:
        print('Multiplicity not found, set to 1 by default')
        mult = 1

    # $molecule
    string += '$molecule\n'
    string += '{0} {1}\n'.format(charge, mult)

    # Geometry: prepend the element symbol to each coordinate row
    atomnos = [pt.Element[x] for x in ccdata.atomnos]
    atomcoords = ccdata.atomcoords[-1]
    if not isinstance(atomcoords, list):
        atomcoords = atomcoords.tolist()
    for i in range(len(atomcoords)):
        atomcoords[i].insert(0, atomnos[i])
    for atom in atomcoords:
        string += '  {0} {1:10.8f} {2:10.8f} {3:10.8f}\n'.format(*atom)
    string += '$end\n\n'

    # $rem section comes straight from the template file
    with open(templates.get(templatefile), 'r') as templatehandle:
        string += ''.join(templatehandle.readlines())

    return string
3ca565c4c599bccfd3916a0003126b3085cc7254
12,669
def arrangements(ns): """ prime factors of 19208 lead to the "tribonacci" dict; only needed up to trib(4) """ trib = {0: 1, 1: 1, 2: 2, 3: 4, 4: 7} count = 1 one_seq = 0 for n in ns: if n == 1: one_seq += 1 if n == 3: count *= trib[one_seq] one_seq = 0 return count # # one-liner... # return reduce(lambda c, n: (c[0]*trib[c[1]], 0) if n == 3 else (c[0], c[1]+1), ns, (1,0))[0]
01f3defb25624d7a801be87c7336ddf72479e489
12,670
import pythoncom
import win32com.client

def vtpnt(x, y, z=0):
    """Convert a coordinate point to a COM VARIANT array of doubles."""
    return win32com.client.VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R8, (x, y, z))
1e7c79353d010d4dd8daa4b7fa7c39b841ff8ffe
12,671
import datetime

def get_time_delta(pre_date: datetime.datetime):
    """
    Get the difference between the given time and the current time.
    Args:
        pre_date: the earlier datetime to compare against now

    Returns:
        the difference in whole days
    """
    date_delta = datetime.datetime.now() - pre_date
    return date_delta.days
4f8894b06dc667b166ab0ee6d86b484e967501ac
12,672
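A quick usage sketch:

import datetime
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
print(get_time_delta(yesterday))  # 1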
from bs4 import BeautifulSoup def render_checkbox_list(soup_body: object) -> object: """As the chosen markdown processor does not support task lists (lists with checkboxes), this function post-processes a bs4 object created from outputted HTML, replacing instances of '[ ]' (or '[]') at the beginning of a list item with an unchecked box, and instances of '[x]' (or '[X]') at the beginning of a list item with a checked box. Args: soup_body: bs4 object input Returns: modified bs4 object """ if not isinstance(soup_body, BeautifulSoup): raise TypeError('Input must be a bs4.BeautifulSoup object') for ul in soup_body.find_all('ul'): for li in ul.find_all('li', recursive=False): if (li.contents[0].string[:2] == '[]') or (li.contents[0].string[:3] == '[ ]'): unchecked = soup_body.new_tag("input", disabled="", type="checkbox") li.contents[0].string.replace_with(li.contents[0].string.replace('[] ', u'\u2002')) li.contents[0].string.replace_with(li.contents[0].string.replace('[ ] ', u'\u2002')) li.contents[0].insert_before(unchecked) li.find_parent('ul')['style'] = 'list-style-type: none; padding-left: 0.5em; margin-left: 0.25em;' elif (li.contents[0].string[:3] == '[x]') or (li.contents[0].string[:3] == '[X]'): checked = soup_body.new_tag("input", disabled="", checked="", type="checkbox") li.contents[0].string.replace_with(li.contents[0].string.replace('[x] ', u'\u2002')) li.contents[0].string.replace_with(li.contents[0].string.replace('[X] ', u'\u2002')) li.contents[0].insert_before(checked) li.find_parent('ul')['style'] = 'list-style-type: none; padding-left: 0.5em; margin-left: 0.25em;' return soup_body
640f00d726a1268eb71134e29dbde53ef0ec44f5
12,673
import numpy as np

def slowness_to_velocity(slowness):
    """
    Convert a slowness log in µs per unit depth, to velocity in unit depth
    per second.

    Args:
        slowness (ndarray): A value or sequence of values.

    Returns:
        ndarray: The velocity.
    """
    return 1e6 / np.array(slowness)
dbfc3b4206ddf615da634e328328c4b8588e5c7a
12,674
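A quick sanity check: 100 µs per foot of slowness corresponds to 10,000 feet per second:

print(slowness_to_velocity(100.0))          # 10000.0
print(slowness_to_velocity([100.0, 50.0]))  # array([10000., 20000.])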
import numpy as np
import lal

def SingleDetectorLogLikelihoodModelViaArray(lookupNKDict,ctUArrayDict,ctVArrayDict, tref, RA,DEC, thS,phiS,psi, dist,det):
    """
    Compute the quadratic ('term 2') piece of the single-detector log-likelihood:
        -Re[ |F|^2 conj(Y).U.Y + F^2 Y.V.Y ] / (4 (dist/distMpcRef)^2)
    where F is the complex antenna factor, Y the vector of Ylms, and U, V the
    precomputed cross-term matrices for detector `det`.
    ComputeYlmsArray and ComplexAntennaFactor are module-level helpers in the
    original source.
    """
    global distMpcRef

    # N.B.: The Ylms are a function of - phiref b/c we are passively rotating
    # the source frame, rather than actively rotating the binary.
    # Said another way, the m^th harmonic of the waveform should transform as
    # e^{- i m phiref}, but the Ylms go as e^{+ i m phiref}, so we must give
    # - phiref as an argument so Y_lm h_lm has the proper phiref dependence

    U = ctUArrayDict[det]
    V = ctVArrayDict[det]
    Ylms = ComputeYlmsArray(lookupNKDict[det], thS,-phiS)
    if (det == "Fake"):
        F=np.exp(-2.*1j*psi)  # psi is applied through *F* in our model
    else:
        F = ComplexAntennaFactor(det, RA,DEC,psi,tref)

    distMpc = dist/(lal.PC_SI*1e6)

    # Term 2 part 1 : conj(Ylms*F)*crossTermsU*F*Ylms
    # Term 2 part 2:      Ylms*F *crossTermsV*F*Ylms
    term2 = 0.j
    term2 += F*np.conj(F)*(np.dot(np.conj(Ylms), np.dot(U,Ylms)))
    term2 += F*F*np.dot(Ylms,np.dot(V,Ylms))
    term2 = np.sum(term2)
    term2 = -np.real(term2) / 4. /(distMpc/distMpcRef)**2

    return term2
35c27e53833cb54f856adde6815bf51c3feca019
12,675
import numpy as np

def manualcropping(I, pointsfile):
    """This function crops a copy of image I according to points stored
    in a text file (pointsfile) and corresponding to aponeuroses (see
    Args section).

    Args:
        I (array): 3-channel image
        pointsfile (text file): contains points' coordinates. Pointsfile must be
        organized such that:
            - column 0 is the ID of each point
            - column 1 is the X coordinate of each point, that is the corresponding column in I
            - column 2 is the Y coordinate, that is the row in I
            - row 0 is for the txt columns' names
            - rows 1 and 2 are for two points of the scale
            - rows 3 to 13 are aponeuroses' points in panoramic images // rows 3
            to 10 in simple images
            - following rows are for muscle fascicles (and are optional for this
            function)
        Other requirements: pointsfile's name must 1) include the extension and
        2) indicate whether I is panoramic or simple by having 'p' or 's' just
        before the dot of the extension.

    Returns:
        I2 (array) : array of same type as I. It is the cropped image of I
        according to the aponeuroses' points manually picked and stored in
        pointsfile.
        point_of_intersect (tuple) : point at the right of the image; should
        correspond to the point of intersection of deep and upper aponeuroses.
        min_raw, max_raw, min_col, max_col: indices of the location of the
        cropped image in the input raw image
    """
    data = open(pointsfile, 'r')

    #find whether the image is panoramic or simple
    search_point = -1
    while (pointsfile[search_point] != '.') and (search_point > (-len(pointsfile))):
        search_point = search_point-1
    if (search_point == -len(pointsfile)):
        raise TypeError("Input pointsfile's name is not correct. Check extension.")
    else:
        imagetype = pointsfile[search_point-1]

    #extract points from the input file
    picked_points = []
    for line in data:
        line = line.strip('\n')
        x = line.split('\t')
        picked_points.append((x[1], x[2]))

    #keep aponeuroses points according to image type
    if imagetype == 'p':
        #keep points 3 to 13 included
        apos = np.asarray(picked_points[3:14], dtype=np.float64, order='C')
    elif imagetype == 's':
        #keep points 3 to 10 included
        apos = np.asarray(picked_points[3:11], dtype=np.float64, order='C')
    else:
        raise ValueError("pointsfile's name does not fulfill conditions. See docstrings")

    #find max and min indices for rows and columns to crop image I,
    #with a margin of 10 pixels (20 pixels for max_raw).
    #Coordinates are inverted in apos
    min_raw = max(0, np.min(apos[:, 1])-10)
    max_raw = min(I.shape[0], np.max(apos[:, 1])+20)
    min_col = max(0, np.min(apos[:, 0])-10)
    max_col = min(I.shape[1], np.max(apos[:, 0])+10)

    i_cropped = np.copy(I[int(min_raw):int(max_raw), int(min_col):int(max_col), :])

    index = np.argmax(apos[:, 0])
    point_of_intersect = (apos[index][1] - min_raw, apos[index][0] - min_col)

    #close file
    data.close()

    return i_cropped, point_of_intersect, int(min_raw), int(max_raw), int(min_col), int(max_col)
eb3f49b5b46d1966946fc3d00bcae113f51c60d1
12,676
import datetime

# microsecond conversion factors, defined alongside this function in the
# original source
MCS_PER_SECOND = 1000000
MCS_PER_MINUTE = 60 * MCS_PER_SECOND
MCS_PER_HOUR = 60 * MCS_PER_MINUTE

def prepare_time_micros(data, schema):
    """Convert datetime.time to int timestamp with microseconds"""
    if isinstance(data, datetime.time):
        return int(data.hour * MCS_PER_HOUR + data.minute * MCS_PER_MINUTE
                   + data.second * MCS_PER_SECOND + data.microsecond)
    else:
        return data
bfdfe40065db66417bf2b641a24b195f4114687e
12,677
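A quick check of the arithmetic: 01:02:03.000004 lies 3,723,000,004 microseconds past midnight:

import datetime
print(prepare_time_micros(datetime.time(1, 2, 3, 4), None))  # 3723000004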
def get_configs_path_mapping(): """ Gets a dictionary mapping directories to back up to their destination path. """ return { "Library/Application Support/Sublime Text 2/Packages/User/": "sublime_2", "Library/Application Support/Sublime Text 3/Packages/User/": "sublime_3", "Library/Preferences/IntelliJIdea2018.2/": "intellijidea_2018.2", "Library/Preferences/PyCharm2018.2/": "pycharm_2018.2", "Library/Preferences/CLion2018.2/": "clion_2018.2", "Library/Preferences/PhpStorm2018.2": "phpstorm_2018.2", }
d7617139a36ca2e1d4df57379d6af73e3b075c84
12,678
def num_from_bins(bins, cls, reg):
    """
    :param bins: list of (low, high) tuples describing equal-width bins
    :param cls: int
        Classification result: index of the predicted bin
    :param reg: float
        Regression result: offset from the bin center, in units of bin width
    :return: computed value
    """
    bin_width = bins[0][1] - bins[0][0]
    bin_center = float(bins[cls][0] + bins[cls][1]) / 2
    return bin_center + reg * bin_width
468e56075cf214f88d87298b259f7253d013a3f3
12,680
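A worked example, assuming three equal-width bins:

bins = [(0.0, 1.0), (1.0, 2.0), (2.0, 3.0)]
# class 1 selects the (1.0, 2.0) bin; reg offsets from its center in bin widths
print(num_from_bins(bins, cls=1, reg=0.25))  # 1.5 + 0.25 * 1.0 = 1.75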
def rotate90(matrix: list) -> tuple:
    """Return the matrix (a sequence of row strings) rotated 90 degrees
    clockwise, as a tuple of row strings."""
    return tuple(''.join(column)[::-1] for column in zip(*matrix))
770a8a69513c4f88c185778ad9203976d5ee6147
12,681
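A quick usage sketch on a 2x2 grid of row strings:

grid = ("ab",
        "cd")
print(rotate90(grid))  # ('ca', 'db')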
def get_aspect(jdate, body1, body2):
    """
    Return the aspect and orb between two bodies on a given Julian date,
    as a tuple (body1, body2, aspect index, orb).
    Return None if there is no aspect.
    """
    if body1 > body2:
        body1, body2 = body2, body1
    dist = distance(long(jdate, body1), long(jdate, body2))
    for i_asp, aspect in enumerate(aspects['value']):
        orb = get_orb(body1, body2, i_asp)
        if i_asp == 0 and dist <= orb:
            return body1, body2, i_asp, dist
        elif aspect - orb <= dist <= aspect + orb:
            return body1, body2, i_asp, aspect - dist
    return None
11a9b05fbc924290e390329395361da0c856541e
12,682