Columns: content (string, length 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
def onBoard(top, left=0):
    """Simplifies a lot of logic to tell if the coords are within the board"""
    return 0 <= top <= 9 and 0 <= left <= 9
2b2007ae2e3acdbb9c04c3df1397cffce97c6717
10,247
def name(path):
    """Extracts the resource name."""
    return path[1 + path.rfind('/'):]
7e8448e5b1c62c30e7ae28c80580731aa9f6f9fb
10,250
def report_dfa(q_0, dfa, g):
    """Give a complete description of the DFA as a string."""
    output = ""
    output += "----------- DFA ----------- " + "Goal: " + g + " ----------- \n"
    transitions = [(i, dfa[(i, sigma)], sigma) for i, sigma in dfa]
    transitions.sort()
    for i, j, sigma in transitions:
        output += f"delta({i},{sigma}) = {j}\n"
    output += f"q_0 = {q_0}\n"
    return output
99d86557b9e1aede93f46e1ef78ecb1fe6cdbf30
10,251
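A usage sketch for report_dfa above; the toy DFA (a dict mapping (state, symbol) pairs to successor states) and the goal label are invented for illustration:

# Hypothetical two-state DFA over {'a', 'b'}.
dfa = {
    (0, 'a'): 1, (0, 'b'): 0,
    (1, 'a'): 1, (1, 'b'): 0,
}
print(report_dfa(0, dfa, g="end-in-a"))
# Prints one "delta(state,symbol) = next" line per sorted transition,
# followed by "q_0 = 0".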
def score(n):
    """Compute the letter grade for a score.

    Grades: score >= 90 -> 'A', score >= 80 -> 'B',
            score >= 70 -> 'C', score >= 60 -> 'D'.

    Parameters:
    - n: the score

    Returns: 'A', 'B', 'C' or 'D'
    """
    if n >= 90:
        return 'A'
    if n >= 80:
        return 'B'
    if n >= 70:
        return 'C'
    if n >= 60:
        return 'D'
fb854dbdb77e31b75a70eddf6d001814100c44d3
10,252
import os

def normalize_path(p):
    """Normalize filesystem path (case and origin).

    If two paths are identical, they should be equal when normalized.
    """
    return os.path.normcase(os.path.abspath(p))
747948440eeebd99fca79ec413a616eaaba4dbdd
10,253
def algo_options():
    """Return the list of default options for supported algorithms."""
    return {"grid": "",
            "hubbard": "",
            "medial": "-merge -burst -expand",
            "octree": "",
            "spawn": ""}
f52c12efe477bd50a49e7053cca97fc9e4072433
10,254
def shutting_down(globals=globals):
    """Whether the interpreter is currently shutting down.

    For use in finalizers, __del__ methods, and similar; it is advised
    to early bind this function rather than look it up when calling it,
    since at shutdown module globals may be cleared.
    """
    # At shutdown, the attribute may have been cleared or set to None.
    v = globals().get('_shutting_down')
    return v is True or v is None
01f601b989611b06a438a3be3e15937eff389038
10,255
def _err(msg=None):
    """To return a string to signal "error" in output table"""
    if msg is None:
        msg = 'error'
    return '!' + msg
029fae3786df61160867696fb71cc94b2edf0506
10,256
def add_xls_tag(file_name):
    """Check the file_name to ensure it has a ".xlsx" extension; if not, add it."""
    # Inspect the last five characters, where the extension would sit
    # (a leading slice such as file_name[:-5] would test the wrong end).
    if file_name[-5:] != ".xlsx":
        return file_name + ".xlsx"
    else:
        return file_name
e053d9f7dad8e638122e93d05e49c5a06de3e664
10,258
import os
import json

def Open(filename):
    """Open/read a Zype file with the Open() function.

    Usage: Open(filename), where filename is the Zype file's name.
    """
    if os.path.isfile(filename):
        with open(filename) as Zype:
            content = Zype.read()
            content = content.replace('<', ' "')
            content = content.replace('>', '":')
            content = content.replace(';', ',')
            content = content.replace('{', '{')  # no-op replacement
            content = content.replace('}', ' }')
            content = content.replace("'", '"')
            content = '{\n' + content + '\n}'
            content = content.replace('(', '{')
            content = content.replace(')', ' }')
            return json.loads(content)
20a48af9c7e7a0f74e72fa02c7241915b5c5b78c
10,260
import sys
import os

def check_enableusersite():
    """Check if user site directory is safe for inclusion.

    The function tests for the command line flag (including environment var),
    process uid/gid equal to effective uid/gid.

    None: Disabled for security reasons
    False: Disabled by user (command line option)
    True: Safe and enabled
    """
    if sys.flags.no_user_site:
        return False

    if hasattr(os, "getuid") and hasattr(os, "geteuid"):
        # check process uid == effective uid
        if os.geteuid() != os.getuid():
            return None
    if hasattr(os, "getgid") and hasattr(os, "getegid"):
        # check process gid == effective gid
        if os.getegid() != os.getgid():
            return None

    return True
466ce7da62b6578db0f99ca31c7271ef3bc2d832
10,262
def pedestal_ids_file(base_test_dir):
    """Mock pedestal ids file for testing."""
    pedestal_ids_dir = base_test_dir / "auxiliary/PedestalFinder/20200117"
    pedestal_ids_dir.mkdir(parents=True, exist_ok=True)
    file = pedestal_ids_dir / "pedestal_ids_Run01808.0000.h5"
    file.touch()
    return file
edda4c192e577774e0cc4a38aaa7d838127e7dcd
10,263
def get_fa_icon_class(app_config):
    """Return Font Awesome icon class to use for app."""
    if hasattr(app_config, "fa_icon_class"):
        return app_config.fa_icon_class
    else:
        return 'fa-circle'
3766abb1f80b7ccea9e09a5edf011f1586e9b49e
10,264
from typing import Any

def get_valid_ref(ref: Any) -> str:
    """Checks flow reference input for validity

    :param ref: Flow reference to be checked
    :return: Valid flow reference, either 't' or 's'
    """
    if ref is None:
        ref = 't'
    else:
        if not isinstance(ref, str):
            raise TypeError("Error setting flow reference: Input is not a string")
        if ref not in ['s', 't']:
            raise ValueError("Error setting flow reference: "
                             "Input is not 's' or 't', but {}".format(ref))
    return ref
16c66dc9e0568bcd33e1cc9956ed31b5ae47595e
10,267
def return_dict():
    """
    "interfaces": {
        "Tunnel0": {"state": "UP"},
        "Tunnel1": {"state": "DOWN"}
    }
    """
    return {"interfaces": {"Tunnel0": {"state": "UP"},
                           "Tunnel1": {"state": "DOWN"}}}
2c3e71341c425166d11aba9175e3a98027c3d53e
10,268
import torch

def replace_magnitude(x, mag):
    """Extract the phase from x and apply it on mag.

    x [B,2,F,T] : A tensor, where [:,0,:,:] is real and [:,1,:,:] is imaginary
    mag [B,1,F,T] : A tensor containing the absolute magnitude.
    """
    phase = torch.atan2(x[:, 1:], x[:, :1])  # atan2(imag, real)
    return torch.cat([mag * torch.cos(phase), mag * torch.sin(phase)], dim=1)
b535e4271c6fc4fe508af358fbe3eee59a95cc6b
10,269
def cut_rod2(p, n, r={}):
    """Cut rod.

    Same functionality as the original but implemented top-down with
    memoization. Note the mutable default argument acts as a memo table
    shared across calls; pass a fresh dict explicitly if the price table
    p changes between calls.
    """
    q = r.get(n, None)
    if q:
        return q
    else:
        if n == 0:
            return 0
        else:
            q = 0
            for i in range(n):
                q = max(q, p[i] + cut_rod2(p, n - (i + 1), r))
            r[n] = q
            return q
2fa1433dffb9099709e466025645c4981f289692
10,270
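A brief usage sketch for cut_rod2 above, using the classic rod-cutting price table (prices are illustrative; a fresh memo dict is passed to avoid the shared-default pitfall noted in the docstring):

prices = [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]  # price of a rod of length i+1
print(cut_rod2(prices, 4, {}))  # 10 (two pieces of length 2: 5 + 5)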
def _set_karma(bot, trigger, change, reset=False):
    """Helper function for increasing/decreasing/resetting user karma."""
    channel = trigger.sender
    user = trigger.group(2).split()[0]
    if reset:
        bot.db.set_nick_value(user, 'karma', 0)
        return
    karma = bot.db.get_nick_value(user, 'karma')
    karma = int(karma) if karma else 0
    if karma or change > 0:
        bot.db.set_nick_value(user, 'karma', karma + change)
        return karma + change
    else:
        return -1
72531c032625c365e057057edb84672b86a4faac
10,271
def monthly_file_name(var, model, rcp):
    """Create file connections for different variables, scenarios and models.

    Presently, ensemble member r1i1p1 is assumed, as well as a 200601-210012
    time span for the monthly data.
    """
    # e.g. hfls_Amon_CNRM-CM5_rcp85_r1i1p1_200601-210012.nc
    f = var + "_" + "Amon_" + model + "_rcp" + rcp + "_r1i1p1_200601-210012.nc"
    return f
7aeea6d3724c470e076645ddb96877e9fa15843f
10,276
def calc_distance(l):
    """Calculate the total distance between list items and their positions."""
    # l needs to start from zero: get the lowest number and subtract it
    # from all numbers
    min_l = min([x[1] for x in l if x[1] != ''], default=0)
    l = [[x[0], (x[1] - min_l)] for x in l if x[1] != '']
    distance = 0
    for idx, item in enumerate(l):
        if len(item) > 1 and item[1] != '':  # check if we found a sense
            diff = abs(item[1] - idx)  # how far away we are in our token list
            distance += diff
    return distance
57d52eb4bb4b772024d68cb397f2e315825a25df
10,277
def lam(x, lam0, alpha=4.0):
    """Return classic alpha model lambda value(s) for input value(s)."""
    return lam0 * (1.0 - x**alpha)
37a10d890acc2b983c09300b865e58fe9dc2f7fe
10,278
def subset(part, whole):
    """Test whether `part` is a subset of `whole`. Both must be iterable.

    Note consumable iterables will be consumed by the test!

    This is a convenience function.

    Examples::

        assert subset([1, 2, 3], [1, 2, 3, 4, 5])
        assert subset({"cat"}, {"cat", "lynx"})
    """
    return all(elt in whole for elt in part)
4200ea95e0c9ff03d0b7589812d0313c5d13cfac
10,279
from pathlib import Path

def stem(fname, include_suffix=False):
    """/blah/my_file.json.gz --> my_file"""
    path = Path(fname)
    stem = path.stem
    # If a filename has multiple suffixes, take them all off.
    stem = stem[: stem.index(".")] if "." in stem else stem
    if include_suffix:
        stem = stem + "".join(path.suffixes)
    return stem
9e4c557dff8583f9129215f91f8e123f062ccf52
10,280
import sqlite3

def username_lookup(userName):
    """Look up user from availability.db by unique username."""
    # connect to db
    conn = sqlite3.connect("availability.db")
    # create cursor
    c = conn.cursor()
    # check if lookup initiated for specific user or for all users
    if userName:
        # query for username in db
        c.execute("SELECT * FROM users WHERE username = ?", (userName,))
    else:
        c.execute("SELECT * FROM users")
    # store user/users list into variable
    users = c.fetchall()
    # commit connection and close
    conn.commit()
    conn.close()
    # return user
    return users
36c0927a79791eca8d69f574799490c716f2fe05
10,281
def get_backend(config):
    """Build a result backend from configuration.

    :param config: object exposing backend_class, backend_kwargs
        and backend_url
    """
    klass = config.backend_class
    kwargs = config.backend_kwargs
    url = config.backend_url
    return klass(url, **kwargs)
57d6c82f99888ae992503492a8edfa802315a3cc
10,282
def atol(s, base=None):  # real signature unknown; restored from __doc__
    """
    atol(s [,base]) -> long

    Return the long integer represented by the string s in the given base,
    which defaults to 10.  The string s must consist of one or more digits,
    possibly preceded by a sign.  If base is 0, it is chosen from the leading
    characters of s, 0 for octal, 0x or 0X for hexadecimal.  If base is 16,
    a preceding 0x or 0X is accepted.  A trailing L or l is not accepted,
    unless base is 0.
    """
    return 0
f39aa403cbad98c448a35305c97c595bc3a77c02
10,283
def tokens_to_sovatoms(tokens: float) -> int:
    """Convert tokens to sovatoms."""
    return int(tokens * 100000000)
23f4bc3a1afc4520b5e2416332702d0a594c5039
10,284
def _frame_only(frame, skeleton_data):
    """Save only one frame from skeleton data."""
    result = []
    for raw_descriptor in skeleton_data:
        if raw_descriptor[0] == frame:
            result.append(raw_descriptor)
    return result
1581ddc52eedc81dbe2cc71ed66524b6af5f4b3d
10,285
def sort_node_id(a, b):
    """Comparator ordering a node pair by id.

    @param a, b: node pair to be sorted by id
    @type a, b: treenode
    """
    # Return 0 for equal ids so the comparator satisfies the usual contract.
    if a.id > b.id:
        return 1
    elif a.id < b.id:
        return -1
    else:
        return 0
2db33f63f009d4d50c922b4daf45dbac56a60312
10,286
def __Boundary_outside__(self):
    """Is the boundary on the outside of the mesh?"""
    return self.leftCell() is not None and self.rightCell() is None
cce3ffadc5e2598beae8b320b4d2f5122d6de904
10,287
def flow_cell_mode(info_reads):
    """Return flow cell sequencing mode."""
    res = ''
    if info_reads:
        read_lens = [a['num_cycles'] for a in info_reads
                     if not a['is_indexed_read']]
        if len(read_lens) == 1:
            res = '1x{}'.format(read_lens[0])
        elif len(set(read_lens)) == 1:
            res = '2x{}'.format(read_lens[0])
        else:
            res = ' + '.join(map(str, read_lens))
        index_lens = [a['num_cycles'] for a in info_reads
                      if a['is_indexed_read']]
        if index_lens:
            res += '/'
            if len(set(index_lens)) == 1:
                res += '{}x{}'.format(len(index_lens), index_lens[0])
            else:
                res += '+'.join(map(str, index_lens))
    return res or '?x?'
0563d0c163c2bd42b05d07fb30940669a8ef3513
10,289
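A usage sketch for flow_cell_mode above; the read-info dicts mirror the keys the function reads ('num_cycles', 'is_indexed_read') and are invented for illustration:

# Paired-end 2x151 run with two 8-cycle index reads.
reads = [
    {'num_cycles': 151, 'is_indexed_read': False},
    {'num_cycles': 8,   'is_indexed_read': True},
    {'num_cycles': 8,   'is_indexed_read': True},
    {'num_cycles': 151, 'is_indexed_read': False},
]
print(flow_cell_mode(reads))  # 2x151/2x8
print(flow_cell_mode([]))     # ?x?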
def sortKey(e):
    """Sort the chores based on their start time.

    e[0] is the start time for all the chores in my array of chores.
    """
    return int(e[0])
00d37751d23fe6210b406485cbd76906075b2578
10,290
def merge_into_dict(original, secondary):
    """Merge two dictionaries into the first and return it.

    This is simply a convenience wrapper around the dictionary update
    method. In addition to the update it returns the original dict to
    allow for chaining.

    Args:
        original: The dict which will be updated.
        secondary: The dict which will be copied.

    Returns:
        The updated original dictionary.
    """
    original.update(secondary)
    return original
899d40396885f0775f2cbaa865702ed0e5706dba
10,291
def normalize_obs(obs_dict, obs_normalization_stats):
    """
    Normalize observations using the provided "mean" and "std" entries
    for each observation key. The observation dictionary will be
    modified in-place.

    Args:
        obs_dict (dict): dictionary mapping observation key to np.array or
            torch.Tensor. Leading batch dimensions are optional.
        obs_normalization_stats (dict): this should map observation keys to
            dicts with a "mean" and "std" of shape (1, ...) where ... is the
            default shape for the observation.

    Returns:
        obs_dict (dict): obs dict with normalized observation arrays
    """
    # ensure we have statistics for each modality key in the observation
    assert set(obs_dict.keys()).issubset(obs_normalization_stats)

    for m in obs_dict:
        mean = obs_normalization_stats[m]["mean"]
        std = obs_normalization_stats[m]["std"]

        # check shape consistency
        shape_len_diff = len(mean.shape) - len(obs_dict[m].shape)
        assert shape_len_diff in [0, 1], "shape length mismatch in @normalize_obs"
        assert mean.shape[shape_len_diff:] == obs_dict[m].shape, \
            "shape mismatch in @normalize_obs"

        # handle case where obs dict is not batched by removing stats batch dimension
        if shape_len_diff == 1:
            mean = mean[0]
            std = std[0]

        obs_dict[m] = (obs_dict[m] - mean) / std

    return obs_dict
c866b6d18df13b9e6903b7d96024d48cde99d2ff
10,292
def get_closest_language(differences):
    """Return the three closest candidate languages.

    Args:
        differences (list): tuples of (language, difference score).

    Returns:
        tuple: the three languages with the lowest difference scores.
    """
    # print(differences)
    differences_sorted = sorted(differences, key=lambda item: item[1])
    return (differences_sorted[0][0],
            differences_sorted[1][0],
            differences_sorted[2][0])
55e231e249c8bae5116be0b2feac96d184a6cbee
10,293
def remap_labels(labels, mappings):
    """Remap label values in place according to a {source: target} dict.

    :param labels: array of integer labels
    :param mappings: dict mapping a source label (int or str) to a target label
    :return: the remapped labels
    """
    # Mappings are applied sequentially, so a chain such as {1: 2, 2: 3}
    # sends original 1s all the way to 3.
    for source, target in mappings.items():
        labels[labels == int(source)] = target
    return labels
df418b76c7831ac315e095cd9bf75b7596b04a91
10,296
def get_bprop_l2_loss(self):
    """Grad definition for `L2Loss` operation."""

    def bprop(x, out, dout):
        dx = x * dout
        return (dx,)

    return bprop
6f52c07d9133939f5a7dc6d12a2416169f32147e
10,297
def change_dimension_shape_resolution(changed_dimension_shape_conflict):
    """Create a resolution for a changed shape prior."""
    return changed_dimension_shape_conflict.ChangeDimensionResolution(
        changed_dimension_shape_conflict
    )
c6f93f080ed14876a4724924dc848fb835364c33
10,298
def extract_tracklist_begin_num(content):
    """Return list of track names extracted from messy web content.

    The name of a track is defined as a line which begins with a number
    (excluding whitespace).
    """
    tracklist = []
    for line in content.splitlines():
        # str.strip returns a new string, so rebind the result.
        line = line.strip()
        # Empty line
        if not line:
            continue
        if line[0].isdigit():
            tracklist.append(line)
    return tracklist
7d860fb0ea444ae0d9bd536a4644fa6b1c11a826
10,299
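A short usage sketch for extract_tracklist_begin_num above, with invented page text:

content = """Album: Example LP
  1. Intro
2. The Second Track

Reviews
3. Outro (live)"""
print(extract_tracklist_begin_num(content))
# ['1. Intro', '2. The Second Track', '3. Outro (live)']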
def net_solar_radiation(rs, albedo=0.23):
    """
    Calculate the fraction of the solar radiation that is not reflected from
    the surface (Allen et al. 1998).

    Parameters
    ----------
    rs : float
        Solar radiation (MJ m-2 day-1).
    albedo : float, optional
        Albedo (-), default is 0.23 (value for a hypothetical grass
        reference crop).

    Returns
    -------
    float
        Net solar radiation (MJ m-2 day-1).
    """
    return (1 - albedo) * rs
41a8930966db4a658d1c3ec31e754987bc9ed387
10,300
def text_color(message='{}', color_code='\033[0;37m'):
    """Set text to a color; the default color is white."""
    no_color = '\033[0m'
    return f'{color_code}{message}{no_color}'
bede08b771b33ce26bbb0ee4fd42f3712d224cc1
10,301
def get_max_elements_in_row(row: list) -> int:
    """Loop through the dataset and get the max elements in a list if one is
    found in the list, or 1 if all items are strings.
    """
    max_array_count = 0
    # get the max height of the cells
    for item in row:
        if isinstance(item, list) and len(item) > max_array_count:
            max_array_count = len(item)
        elif isinstance(item, str) and max_array_count == 0:
            max_array_count = 1
    return max_array_count
0f8573bbabe95c03e3eda5949ff9c1c209b0f5fa
10,302
def get_adventure():  # noqa: E501
    """Get all adventures  # noqa: E501

    :rtype: List[Adventure]
    """
    return "do some magic!"
8ca2cb6b8e0aa004ca7fa54ec35f907c3351cab8
10,303
def cst_efield_2freq_cut_healpix(cst_efield_2freq_cut_healpix_main):
    """Make function-level cut-down HEALPix 2-freq efield beam."""
    return cst_efield_2freq_cut_healpix_main.copy()
b7b2388efbe4f6ca1e22c08e46ec70d391c68ce8
10,304
def shift_chord(chord, shift):
    """Shift chord"""
    if chord < 12:
        new_chord = (chord + shift) % 12
    elif chord < 24:
        new_chord = (chord - 12 + shift) % 12 + 12
    else:
        new_chord = chord
    return new_chord
bc72df71282d8ae7d15e591bb2a46e5d00f1cd30
10,305
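A small demonstration of shift_chord above; only the wrapping behavior, which follows directly from the code, is shown (what the 0-11 and 12-23 ranges encode musically is not stated in the source):

print(shift_chord(11, 4))   # 3  (wraps within 0-11)
print(shift_chord(23, 4))   # 15 (wraps within 12-23)
print(shift_chord(24, 4))   # 24 (out-of-range chords are left unchanged)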
def register_new_user(access, username, password):
    """Register a new user & handle duplicate detection."""
    if access.user_data(username) is not None:
        raise ValueError("User '%s' already exists!" % username)
    if username in access.pending_users():
        raise ValueError("User '%s' has already registered!" % username)
    access.register(username, password)
    if access.need_admin():
        access.approve_user(username)
        access.set_user_admin(username, True)
        return True
    return False
25b98c4def9da81d71176aed196f61b2e71d64c5
10,306
def get_not_constant_species(model):
    """Get species of the model that are not constant.

    @param model: libsbml.model
    @type model: libsbml.model
    @return: list of species
    @rtype: list[libsbml.species]
    """
    def not_const(s):
        return not (s.getConstant() or s.getBoundaryCondition())
    # Wrap in list() so the return type matches the documented rtype
    # (in Python 3, filter returns a lazy iterator).
    return list(filter(not_const, model.getListOfSpecies()))
abb376899405fa1623ec5ca646d0399e067fd5cc
10,307
from math import floor
from typing import Tuple
from typing import Union

def conv_output_shape(
    h_w: Tuple[int, int],
    kernel_size: Union[int, Tuple[int, int]] = 1,
    stride: int = 1,
    padding: int = 0,
    dilation: int = 1,
) -> Tuple[int, int]:
    """
    Calculates the output shape (height and width) of the output of a
    convolution layer. kernel_size, stride, padding and dilation correspond
    to the inputs of the torch.nn.Conv2d layer
    (https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html)

    :param h_w: The height and width of the input.
    :param kernel_size: The size of the kernel of the convolution
        (can be an int or a tuple [width, height])
    :param stride: The stride of the convolution
    :param padding: The padding of the convolution
    :param dilation: The dilation of the convolution
    """
    if not isinstance(kernel_size, tuple):
        kernel_size = (int(kernel_size), int(kernel_size))
    h = floor(
        ((h_w[0] + (2 * padding) - (dilation * (kernel_size[0] - 1)) - 1) / stride) + 1
    )
    w = floor(
        ((h_w[1] + (2 * padding) - (dilation * (kernel_size[1] - 1)) - 1) / stride) + 1
    )
    return h, w
ea757a413a3b38ea024b85dd6987beda8a3b9aeb
10,310
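A quick check of conv_output_shape above against a familiar case (the numbers follow from the standard Conv2d output formula):

# A 3x3 kernel with stride 1 and padding 1 preserves spatial size...
print(conv_output_shape((224, 224), kernel_size=3, stride=1, padding=1))  # (224, 224)
# ...while stride 2 halves it (rounding down).
print(conv_output_shape((224, 224), kernel_size=3, stride=2, padding=1))  # (112, 112)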
def sum_all(grid_values):
    """Calculate the sum of all grid values.

    :param grid_values: values of grid
    :return: metrics score
    """
    score = sum(grid_values)
    return score
a7741961c240cb8edbff050566b7f9341a203af7
10,311
def _path(from_object, to_object):
    """
    Calculates the 'path' of objects starting from 'from_object'
    to 'to_object', along with the index of the first common
    ancestor in the tree.

    Returns (index, list) tuple.
    """
    if from_object._root != to_object._root:
        raise ValueError('No connecting path found between ' +
                         str(from_object) + ' and ' + str(to_object))

    other_path = []
    obj = to_object
    while obj._parent is not None:
        other_path.append(obj)
        obj = obj._parent
    other_path.append(obj)
    object_set = set(other_path)

    from_path = []
    obj = from_object
    while obj not in object_set:
        from_path.append(obj)
        obj = obj._parent
    index = len(from_path)

    i = other_path.index(obj)
    while i >= 0:
        from_path.append(other_path[i])
        i -= 1
    return index, from_path
0ccfe54d36832b8dce3c55168f02abb3c79261ef
10,312
def is_type_name(type_name):
    """Does not handle keywords."""
    return type_name[0].isupper()
fd8f4e7cdc0bac7f00bc41ecaa87533103907db5
10,314
def process(data, action):
    """Execute the topmost operation on the stack and push the result back."""
    if not action.function:
        return True
    # If the operation lacks arguments, or a value is unsuitable, report an error
    try:
        # Take the required number of arguments for the operation
        if action.arguments == 1:
            a = data.Q.pop()
            data.Q.append(action(a))
        elif action.arguments == 2:
            b = data.Q.pop()
            a = data.Q.pop()
            data.Q.append(action(a, b))
        return True
    except (TypeError, ValueError):
        return None
84bce6a3d942e1db308b3fe4848da9044cc856d4
10,315
def get_key_in_nested_dict(nested_dict, target_key):
    """
    Traverses the passed dict to find and return the value of target_key
    in the dict.

    :param nested_dict: dictionary to search in
    :param target_key: key you are looking for
    :return: values of key
    """
    for key in nested_dict:
        if key == target_key:
            return nested_dict[key]
        elif type(nested_dict[key]) is dict:
            # Only return when the recursive search found something;
            # otherwise keep scanning the remaining keys.
            res = get_key_in_nested_dict(nested_dict[key], target_key)
            if res is not None:
                return res
        elif type(nested_dict[key]) is list:
            # Guard against empty lists before peeking at the first element.
            if nested_dict[key] and type(nested_dict[key][0]) is dict:
                for item in nested_dict[key]:
                    res = get_key_in_nested_dict(item, target_key)
                    if res is not None:
                        return res
f0e677cc02ccbab7a4e3c09ba8bf1694c52b1818
10,316
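A usage sketch for get_key_in_nested_dict above (data invented for illustration):

cfg = {
    "server": {"host": "localhost", "port": 8080},
    "users": [{"name": "ada"}, {"name": "bob", "token": "xyz"}],
}
print(get_key_in_nested_dict(cfg, "port"))   # 8080
print(get_key_in_nested_dict(cfg, "token"))  # xyz (found after the dict branch misses)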
import argparse
import sys

def _parse_args() -> argparse.Namespace:
    """
    Creates an arg parser for AsciiQrCode and parses the supplied args.

    :return: The parsed args.
    """
    parser = argparse.ArgumentParser(description='Processes ASCII QR Codes.')
    parser.add_argument('-v', '--verbose',
                        action='store_true',
                        default=False,
                        help='Enable verbose logging.')
    parser.add_argument('--dump-qr-code',
                        action='store_true',
                        default=False,
                        help='Write the created QR code image to disc.')
    parser.add_argument('file',
                        type=str,
                        help='A file containing an ASCII QR code.')

    # Print help when no args are supplied.
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    return parser.parse_args()
bea8fb2e58b55e3e749036e171aa7bc5796d330b
10,318
def unique(data):
    """Remove duplicates from the list in place (after sorting).

    In Python 3, mixing types raises
    TypeError: '<' not supported between instances of 'int' and 'str',
    so all members of the list need to be of the same type.
    """
    data.sort()
    l = len(data) - 1
    i = 0
    while i < l:
        if data[i] == data[i + 1]:
            del data[i]
            i -= 1
            l -= 1
        i += 1
    return data
5102a0d868741f9dca745e11972987ce74e78b8a
10,320
def PtknsToStr(path_tokens):
    """
    There are three ways to store paths:
      As a single string: '/Protein/Phe/Ca'  <- the format entered by the user
      As a list of tokens: ['Protein', 'Phe', 'Ca']  <- split into tokens
      As a list of nodes in a tree (pointers to nodes in a tree hierarchy)
    This function converts between the first two formats.
    """
    text = ''
    if len(path_tokens) > 0:
        text = path_tokens[0]
        for i in range(1, len(path_tokens)):
            text += '/' + path_tokens[i]
    else:
        text = ''
    return text
8dd64e6213f8a49f5b20619261a4b3d8daa8d95d
10,321
def traverse_grid(start_cell, direction, num_steps):
    """
    Helper function that iterates over the cells in a grid in a linear
    direction and returns a list of their indices.
    """
    indices = list()
    for step in range(num_steps):
        row = start_cell[0] + step * direction[0]
        col = start_cell[1] + step * direction[1]
        indices.append((row, col))
    return indices
0ce6f91759c36a63b103ebff2cddd8dd50ca837b
10,322
def __format_size(file_size):
    """Format file size information depending on the amount of bytes."""
    file_size = int(file_size)
    list_units = ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
    list_index = 0
    while file_size > 1000:
        file_size = float(file_size) / 1000
        list_index += 1
        if list_index > len(list_units) - 1:
            list_index = len(list_units) - 1
            break
    if list_index > 0:
        file_size = round(file_size, 1)
        unit = list_units[list_index]
    else:
        if file_size == 1:
            unit = "byte"
        else:
            unit = "bytes"
    return "%s %s" % (str(file_size), unit)
d893e09281fce75cb5d0ea011cca623767ae6cbb
10,323
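A few sample calls to __format_size above (decimal, power-of-1000 units, per the code):

print(__format_size(1))          # 1 byte
print(__format_size(999))        # 999 bytes
print(__format_size(1500))       # 1.5 kB
print(__format_size(2500000))    # 2.5 MB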
def parse_wiggle_header(header):
    """Parse a wiggle header line into (chrom, start, step)."""
    _, chrom, start, step = header.strip().split()
    foo, chrom = chrom.split('=')
    assert foo == 'chrom', 'Unexpected wiggle header: {}'.format(header)
    bar, start = start.split('=')
    assert bar == 'start', 'Unexpected wiggle header: {}'.format(header)
    loo, step = step.split('=')
    assert loo == 'step', 'Unexpected wiggle header: {}'.format(header)
    # NB: wiggle coordinates start at 1, make 0-based here
    return chrom, int(start) - 1, int(step)
87525a16b849fca7f78b786c220c7861de085b82
10,324
def _path_in_ignore_dirs(source_path: str, ignore_dirs: list) -> bool:
    """Checks if the source path is to be ignored using a case sensitive match

    Arguments:
        source_path: the path to check
        ignore_dirs: the list of directories to ignore
    Return:
        Returns True if the path is to be ignored, and False if not
    """
    # Break apart the source path for later checking
    if '/' in source_path:
        source_parts = source_path.split('/')
        test_join_char = '/'
    elif '\\' in source_path:
        source_parts = source_path.split('\\')
        test_join_char = '\\'
    else:
        source_parts = [source_path]
        test_join_char = '/'

    for one_dir in ignore_dirs:
        # Check for multiple folders
        if '/' in one_dir:
            dir_parts = one_dir.split('/')
        elif '\\' in one_dir:
            dir_parts = one_dir.split('\\')
        else:
            dir_parts = [one_dir]

        # See if the entire path to check is to be found
        if test_join_char.join(dir_parts) not in source_path:
            continue

        # Check path particles to ensure the entire path names match and not
        # just parts of path names: 'test' vs. 'testing' for example
        parts_matched = 0
        for one_part in dir_parts:
            if one_part not in source_parts:
                break
            parts_matched += 1
        if parts_matched == len(dir_parts):
            return True

    return False
5d392392e735bdbc1e60cca9414fca8a1eb3ec0c
10,325
import argparse

def init_parser():
    """Initialize and return the command line parser."""
    autoddoc_description = (
        "AutoDDoc 0.1\n"
        "Documentation generator script for D using DDoc.\n"
        "Copyright Ferdinand Majerech 2011.\n\n"
        "AutoDDoc scans subdirectories of the current directory for D or DDoc\n"
        "sources (.d, .dd or .ddoc) and generates documentation using settings\n"
        "from a configuration file.\n"
        "NOTE: AutoDDoc will only work if package/module hierarchy matches the\n"
        "directory hierarchy, so e.g. module 'pkg.util' would be in file './pkg/util.d' .")
    autoddoc_epilog = (
        "\nTutorial:\n"
        "1. Copy the script to your project directory and move into that directory.\n"
        "   Relative to this directory, module names must match their filenames,\n"
        "   so e.g. module 'pkg.util' would be in file './pkg/util.d' .\n"
        "2. Generate AutoDDoc configuration file. To do this, type\n"
        "   './autoddoc.py -g'. This will generate file 'autoddoc.cfg' .\n"
        "3. Edit the configuration file. Set project name, version, output\n"
        "   directory and other parameters.\n"
        "4. Generate the documentation by typing './autoddoc.py' .\n")
    parser = argparse.ArgumentParser(
        description=autoddoc_description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=autoddoc_epilog,
        add_help=True)
    parser.add_argument(
        "config_file", nargs="?", default="autoddoc.cfg",
        help="Configuration file to use to generate documentation. "
             "Can not be used with any optional arguments. "
             "If not specified, 'autoddoc.cfg' is assumed. "
             "Examples: 'autoddoc.py config.cfg' "
             "will generate documentation using file 'config.cfg' . "
             "'autoddoc.py' will generate documentation "
             "using file 'autoddoc.cfg' if it exists.",
        metavar="config_file")
    parser.add_argument(
        "-g", "--gen-config", nargs="?", const="autoddoc.cfg",
        help="Generate default AutoDDoc configuration file. "
             "config_file is the filename to use. If not specified, "
             "autoddoc.cfg is used.",
        metavar="config_file")
    parser.add_argument(
        "-s", "--gen-style", nargs="?", const="autoddoc_style.css",
        help="Generate default AutoDDoc style sheet. "
             "css_file is the filename to use. If not specified, "
             "autoddoc_style.css is used.",
        metavar="css_file")
    parser.add_argument(
        "-i", "--gen-index", nargs="?", const="autoddoc_index.dd",
        help="Generate default AutoDDoc documentation index. "
             "index_file is the filename to use. If not specified, "
             "autoddoc_index.dd is used.",
        metavar="index_file")
    return parser
49090858be2baed2f0efcf375c195267d5eacc38
10,328
import requests

def open_recipe_file(file, recipes_path=None, github_repo='bioconda/bioconda-recipes'):
    """Open a file at a particular location and return contents as string."""
    if recipes_path:
        return open(f'{recipes_path}/{file}').read()
    else:
        # if no clone of the repo is available locally, download from GitHub
        r = requests.get(f'https://raw.githubusercontent.com/{github_repo}/master/{file}')
        if r.status_code == 404:
            raise OSError
        else:
            return r.content
ce5fc3c054bc937203966459e9981a5befdae40b
10,329
def confidence_interval(data, column_name, confidence_level):
    """
    Get a confidence interval from a bootstrap dataframe column.

    Parameters
    ----------
    data : pandas dataframe
        the bootstrap dataframe generated by :py:func:`.bootstrapLE`
    column_name : string
        the statistic that you want the interval for, specified by the name
        of the column containing it
    confidence_level : float
        a real number between 0 and 1 that represents the desired confidence
        level, e.g. 0.95 for 95%.

    Returns
    -------
    list
        a two-element list with the lower bound and upper bound.
    """
    results = data[column_name].tolist()
    results.sort()
    lower_bound = int((1 - confidence_level) / 2 * len(results)) - 1
    upper_bound = int((confidence_level + 1) / 2 * len(results)) - 1
    if lower_bound < 0:
        lower_bound = 0
    return [round(float(results[lower_bound]), 1),
            round(float(results[upper_bound]), 1)]
69668a88030c0a2d6d90dbc1b834cbab76d0ec17
10,330
def dist_to_freq(dist: float, bw: float, ts: float) -> int:
    """Convert a distance to a frequency via f = 2 * dist * bw / (c * ts),
    with c = 299792458 m/s (speed of light); the result is truncated to int.
    """
    return int(2 * dist * bw / (299792458 * ts))
5e232569e25e663ca6b13a7acdd0c5f1b2afd5aa
10,331
def pig_latin(wrd):
    """Returns the Pig Latin version of a word.

    For words that begin with a consonant, take the consonant/consonant
    cluster and move it to the end of the word, adding the suffix 'ay'
    to the end of the word.

    For words that begin with a vowel, leave the word as is and add the
    suffix 'way' to the end of the word.

    >>> pig_latin('dog')
    'ogday'
    >>> pig_latin('brush')
    'ushbray'
    >>> pig_latin('elephant')
    'elephantway'
    >>> pig_latin('awesome')
    'awesomeway'
    >>> pig_latin('rhythm')
    'rhythmay'
    """
    idx = next((i for i, v in enumerate(wrd) if v in 'aeiou'), len(wrd))
    return wrd[idx:] + (wrd[:idx] or 'w') + 'ay'
2eba5f4aaff1391e60f4097e526539d5b486c9bd
10,334
def parse_units(units_str):
    """
    Extract and parse the units.

    Extract the bounds over which the expression is assumed to apply.

    Parameters
    ----------
    units_str

    Returns
    -------

    Examples
    --------
    >>> parse_units('Widgets/Month [-10,10,1]')
    ('Widgets/Month', (-10.0, 10.0, 1.0))
    >>> parse_units('Month [0,?]')
    ('Month', (0.0, None))
    >>> parse_units('Widgets [0,100]')
    ('Widgets', (0.0, 100.0))
    >>> parse_units('Widgets')
    ('Widgets', (None, None))
    >>> parse_units('[0, 100]')
    ('', (0.0, 100.0))
    """
    if not len(units_str):
        return units_str, (None, None)

    if units_str[-1] == "]":
        units, lims = units_str.rsplit("[")  # types: str, str
    else:
        units = units_str
        lims = "?, ?]"

    lims = tuple(
        [float(x) if x.strip() != "?" else None
         for x in lims.strip("]").split(",")]
    )

    return units.strip(), lims
18f35a06aedfc9d026cfa70a1217b0b5b0420ca5
10,335
def seg_pixel_accuracy_nd(label_imask,
                          pred_imask,
                          vague_idx=-1,
                          use_vague=False,
                          macro_average=True,
                          empty_result=0.0):
    """
    The segmentation pixel accuracy (for MXNet nd-arrays).

    Parameters
    ----------
    label_imask : mx.nd.array
        Ground truth index mask (maybe batch of).
    pred_imask : mx.nd.array
        Predicted index mask (maybe batch of).
    vague_idx : int, default -1
        Index of masked pixels.
    use_vague : bool, default False
        Whether to use pixel masking.
    macro_average : bool, default True
        Whether to use micro or macro averaging.
    empty_result : float, default 0.0
        Result value for an image without any classes.

    Returns
    -------
    float or tuple of two floats
        PA metric value.
    """
    assert (label_imask.shape == pred_imask.shape)
    if use_vague:
        mask = (label_imask != vague_idx)
        sum_u_ij = mask.sum().asscalar()
        if sum_u_ij == 0:
            if macro_average:
                return empty_result
            else:
                return 0, 0
        sum_u_ii = ((label_imask == pred_imask) * mask).sum().asscalar()
    else:
        sum_u_ii = (label_imask == pred_imask).sum().asscalar()
        sum_u_ij = pred_imask.size
    if macro_average:
        return float(sum_u_ii) / sum_u_ij
    else:
        return sum_u_ii, sum_u_ij
7da093ef624dee07fd335021ae2317d53583e612
10,336
import importlib
import inspect

def getCoursesFromModels():
    """Import and inspect the Models.py file, adding the names of its classes
    to a list. The User class is skipped so it is not added to the list of
    courses.

    Returns:
        list: List of string names of classes in the Models.py file.
    """
    coursesList = []
    mod = importlib.import_module('project.models')
    for name, obj in inspect.getmembers(mod, inspect.isclass):
        if name != "User":
            coursesList.append(name.lower())
    coursesList.sort()
    return coursesList
fd3685ca68e98afd9384ad0711e43ac68e02c94d
10,337
def extract_seed(path, key):
    """
    Scrape the 5-character seed from the path and return it as an integer.

    :param path: path to the tsv file containing results
    :param key: substring preceding the seed, "batch-train" for splits,
        "seed-" for shuffles
    """
    try:
        i = path.find(key) + len(key)
        return int(path[i:i + 5])
    except ValueError:
        return 0
c5073ad43e8a966aba5382894490aa6cbc871271
10,338
def get_neighbor_dict_from_table(db, table_name):
    """
    Return a dict with bgp neighbor ip as key and neighbor name as value.

    :param table_name: config db table name
    :param db: config_db
    """
    neighbor_dict = {}
    neighbor_data = db.get_table(table_name)
    try:
        for entry in neighbor_data:
            neighbor_dict[entry] = (neighbor_data[entry].get('name')
                                    if 'name' in neighbor_data[entry]
                                    else 'NotAvailable')
        return neighbor_dict
    except Exception:
        return neighbor_dict
17d8230956c36db8c79bd98f0eec63320b6e77d0
10,339
import pickle

def get_cells_to_calculate_variance(file_path, percentage_of_extreme_cells=15):
    """
    Get a list of cells to use to calculate the methylation and variance.

    We have a list of all possible cells with their average methylation and
    variance, and we want to take the given percentage of top and bottom cells.

    :param file_path: path to a pickled list of (cell, value) pairs
    :return: list of cells
    """
    with open(file_path, "rb") as f:
        all_cells = pickle.load(f)

    all_cells.sort(key=lambda x: x[1])
    low = percentage_of_extreme_cells * len(all_cells) / 100 + 1
    top = len(all_cells) - percentage_of_extreme_cells * len(all_cells) / 100 - 1
    cells = all_cells[:int(low)] + all_cells[int(top):]
    return [i[0] for i in cells]
c151b03cb873f179eb946dd12f6a286aeea8e7a3
10,340
def calc_buckets(
    start: float, upper_bound: float, /, *, increment_ratio: float = 1.20
) -> tuple[float, ...]:
    """Calculate histogram buckets on a logarithmic scale."""
    # See https://amplitude.com/blog/2014/08/06/optimal-streaming-histograms
    # for more details.
    result: list[float] = []
    while start <= upper_bound:
        result.append(start)
        start *= increment_ratio
    return tuple(result)
541fdb81b150a81d24b515acaf53365afb4b62b4
10,341
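A small demonstration of calc_buckets above; each bucket edge grows by the increment ratio (a ratio of 2.0 is chosen here so the floats stay exact):

print(calc_buckets(1.0, 16.0, increment_ratio=2.0))
# (1.0, 2.0, 4.0, 8.0, 16.0)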
def from_hexpoints(s):
    """Parses a string from its codepoints as hex.

    Given a string containing one or more code points (as hex values),
    parse it to a string. For example, the input "006E 006F" produces
    output "no".
    """
    return "".join([chr(int(cp, 16)) for cp in s.split()])
55b5088d000cf90059ca93d7e3895aee043d554e
10,342
def basicFitness(individual, env):
    """
    The trivial case, where fitness is just the result of passing through
    the environment.
    """
    return individual.result
7d108bac92ce390699b66e1ead5b08080856c5be
10,344
import os

def solution_name() -> str:
    """
    Get the Solution Name from environment variable

    :return: the solution name
    """
    return os.environ["SOLUTION_NAME"]
a43b79ac9ad39b22ab65de02ec6845f3058e259e
10,345
def find_missing_integer(arr):
    """Find the first missing positive integer in an unsorted array of integers."""
    if not arr:
        return 1

    # segregate integers: positives on the left, non-positives (<= 0) on the right
    left = 0
    right = len(arr) - 1
    while left < right:
        if arr[left] > 0:
            left += 1
        elif arr[right] <= 0:
            right -= 1
        else:
            arr[left], arr[right] = arr[right], arr[left]
            left += 1
            right -= 1

    # count the positive entries; the partition loop can stop one short
    # when the boundary element itself is positive
    n = left + 1 if arr[left] > 0 else left

    # mark the slot of each positive integer <= n as negative
    for val in arr[:n]:
        pos = abs(val)
        if pos <= n:
            arr[pos - 1] = -abs(arr[pos - 1])

    # the first still-positive slot reveals the missing integer
    for idx, elt in enumerate(arr[:n]):
        if elt > 0:
            return idx + 1
    return n + 1
d31b5a42ed9bd43d7d036098e8491b4b839c5873
10,346
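A few checks for find_missing_integer above (inputs invented; note the function mutates its argument in place):

print(find_missing_integer([3, 4, -1, 1]))  # 2
print(find_missing_integer([1, 2, 3]))      # 4
print(find_missing_integer([7, 8, -5]))     # 1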
import re
import argparse

def valid_labels_regex(
    arg_value,
    pat=re.compile(
        r"^all|random|([a-z|0-9|,|\.]+)|([a-z|0-9|\.]+-[a-z]+-[1-9]-[0-9]-[1-9]-[c|h],?)+$"
    ),
):
    """Validate a labels argument against the allowed pattern."""
    # Note: '|' inside the character classes matches a literal pipe rather
    # than acting as alternation; the pattern is preserved as-is.
    if not pat.match(arg_value):
        raise argparse.ArgumentTypeError
    return arg_value
7f278654cf7c1630ce6751aaacfa85d0b08e65cb
10,347
def get_html_fields(fields):
    """
    Converts a fields table with mandatory keywords type, text, id and
    partially optional keywords value, allowed_values, checked and help
    into html input lines.

    The tal in the templates has become too complicated therefore the
    python code handles most of the conditions.
    """
    html_fields = []
    for field in fields:
        html_field = {}
        for key in ["help", "text", "id"]:
            if key in field:
                html_field[key] = field[key]
        if field["type"] == "text":
            # a space before the type attribute keeps id="..." and
            # type="text" as separate attributes
            html_field["input_html"] = ('<input name="' + field["id"] +
                                        '" value="' + field["value"] +
                                        '" id="' + field["id"] +
                                        '" type="text">\n')
        if field["type"] == "select":
            html_field["input_html"] = ('<select name="' + field["id"] +
                                        '" value="' + field["value"] + '">\n')
            for option in field["allowed_values"]:
                html_field["input_html"] += "<option>" + option + "</option>\n"
            html_field["input_html"] += "</select>\n"
        if field["type"] == "checkbox":
            html_field["input_html"] = ('<input name="' + field["id"] +
                                        '" value="' + field["value"] +
                                        '" id="' + field["id"] +
                                        '" type="checkbox"')
            if "checked" in field:
                html_field["input_html"] += ' checked="checked"'
            html_field["input_html"] += ">\n"
        html_fields.append(html_field)
    return html_fields
9269d26a86892791d5ef4847546600d629c8f7c6
10,350
def get_metadata_from_attributes(Object, skip_attributes=None, custom_classes=None):
    """
    Get metadata dict from attributes of an object.

    Parameters
    ----------
    Object : object
        Object from which the attributes are taken.
    skip_attributes : list, optional
        If given, these attributes are skipped (next to the methods of the
        class of Object). The default is None.
    custom_classes : dict, optional
        Dict where keys are classes and values are functions that specify
        how objects of this class should be stored in metadata_dict.
        The default is None.

    Returns
    -------
    metadata_dict : dict
        dict where keys-values are attributes from Object.
    """
    if skip_attributes is None:
        skip_attributes = []
    skip_attributes += dir(type(Object))  # methods of class will be skipped as attributes

    if custom_classes is None:
        custom_classes = {}

    metadata_dict = {}
    for a in dir(Object):
        if a not in skip_attributes:
            a_val = getattr(Object, a)
            if a_val is None:
                metadata_dict[a] = "None"
            elif type(a_val) in custom_classes:
                # treat class as specified in custom_classes dict
                metadata_dict[a] = custom_classes[type(a_val)](a_val)
            elif callable(a_val):
                # only get docstrings from callables
                metadata_dict[a] = a_val.__doc__
            else:
                metadata_dict[a] = a_val
    return metadata_dict
0c6eacae223ea94e128ff5b507fcaa5a71034c38
10,351
import zipfile
import json

def _GetExtensionInfoFromCRX(crx_path):
    """Parse an extension archive and return information.

    Note: The extension name returned by this function may not be valid
    (e.g. in the case of a localized extension name). Its use is just
    meant to be informational.

    Args:
        crx_path: path to crx archive to look at.

    Returns:
        Tuple consisting of: (crx_version, extension_name)
    """
    crx_zip = zipfile.ZipFile(crx_path)
    manifest_contents = crx_zip.read('manifest.json')
    decoded_manifest = json.loads(manifest_contents)
    crx_version = decoded_manifest['version']
    extension_name = decoded_manifest['name']
    return (crx_version, extension_name)
8a5f2ef2547c67d65195334df89d589fbff54dcf
10,352
def update_state(td_state, job_results):
    """
    Update the torsiondrive state with the completed compute jobs.

    The state is updated in place and also returned for convenience.

    Parameters
    ----------
    td_state : dict
        The current torsiondrive state
    job_results : dict
        A dictionary of completed jobs and job ID's

    Returns
    -------
    dict
        The updated td_state (the same object that was passed in).
    """
    for grid_id_str, job_result_tuple_list in job_results.items():
        if grid_id_str not in td_state['grid_status']:
            td_state['grid_status'][grid_id_str] = []
        td_state['grid_status'][grid_id_str] += job_result_tuple_list
    return td_state
476d19fb9946045dc555f815d680d0475f253003
10,353
from typing import Tuple

def yaml_remove_split(docstr: str) -> Tuple[str, str]:
    """Extract parameter summary within :yaml: tags in docstring,
    and clean up the :yaml: tag for the full docstring.

    Return cleaned up docstring, and summary version."""
    key = ":yaml:"
    summary = ""
    i_start = docstr.find(key)
    while i_start >= 0:
        i_key_stop = i_start + len(key) + 1  # end of :yaml:
        i_stop = docstr.find("`", i_key_stop) + 1  # end of content after it
        fullkey = docstr[i_start:i_stop]  # includes `content` after key
        summary = docstr[i_key_stop:(i_stop - 1)]  # just the content
        docstr = docstr.replace(fullkey, summary)
        # Search for any other keys:
        i_start = docstr.find(key)
    return docstr, summary
e0e3e34871c56c5cc176fcd371e5402846771078
10,354
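A usage sketch for yaml_remove_split above (the docstring text is invented):

doc = "Sets the limit. :yaml:`max_connections` controls the pool size."
cleaned, summary = yaml_remove_split(doc)
print(cleaned)  # Sets the limit. max_connections controls the pool size.
print(summary)  # max_connections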
def trace_size_converter(value, params, key):
    """Convert trace size from seconds to samples."""
    params = params[key]
    if value is None:
        return None
    if not value:
        return value
    if not params['frequency']:
        raise AttributeError('No frequency specified for trace-size argument!')
    return int(float(value) * params['frequency'])
87ec3707b36ee76bed365c4a91d4af829161b4b5
10,355
def decode_chrome_leveldb_bytes(value):
    """Decode the encoded localstorage values from the leveldb file."""
    if value[0] == 0:
        return value[1:].decode('utf-16le')
    elif value[0] == 1:
        return value[1:].decode('utf-8')
    else:
        msg = "Unable to process Chrome LevelDB bytes in unknown format: {}".format(value)
        print(msg)
        raise ValueError(msg)
e200c3da3a2f75c215fa13fc12eb12f1034b3e14
10,357
import requests
import logging

def get_url_descriptor(url):
    """Return a url descriptor or None on failure."""
    try:
        resp = requests.get(url)
    except requests.exceptions.ConnectionError:
        logging.error("Error connecting to {}".format(url))
        return None
    if resp.ok:
        return resp
    return None
8e09711e08200980c1778fd47673dcaaba67c47b
10,358
def year_isnt_int(YEARS):
    """A function to test whether any year is not an integer."""
    return any(type(x) != int for x in YEARS)
d23d093f4655846d07228019b4110e2ef3180c69
10,359
import os

def get_pictures(pictures_dir):
    """Return a list of picture files found in pictures_dir."""
    pictures = []
    for directory, subdirs, files in os.walk(pictures_dir):
        for fname in files:
            if fname.lower().endswith(('.jpg', '.png')):
                pictures.append(os.path.join(directory, fname))
    return pictures
5bb257fdf3bb910d8c0b45bbc8368a23d17ca7bf
10,360
import json

def isJsonDict(s):
    """Take a string and determine whether it is valid JSON encoding a dict."""
    try:
        data = json.loads(s)
        return type(data) == dict
    except ValueError:
        return False
222841fc39f78a45f0c682a83282e714a26dc0ed
10,361
from datetime import datetime

def blog():
    """
    The blog section will show the user an infinite cascade of blog posts
    that will load based on the user scroll.
    """
    return dict(
        title="Blog",
        year=datetime.now().year
    )
51a6447ef8081cf8053afbbacfcd145efefca5e4
10,363
import json

def read_dependencies(file_path):
    """Reads a json file and creates an iterable of unique dependencies.

    Args:
        file_path: The path to the runtime dependencies file.

    Returns:
        An iterable with unique dependencies.
    """
    deps = None
    with open(file_path) as deps_file:
        deps = json.load(deps_file)
    deps_set = set()
    for _, dep_list in deps.items():
        deps_set.update(dep_list)
    return deps_set
52756a443b3a7b0ee6ed1a932b3d1b7984fe4189
10,364
def get_tags_by_month_for_users(data, usernames):
    """
    Initialize a list of (month, set) pairs; for each tweet in data whose
    username is in the username list, add each of its hashtags to that
    month's set.

    :param data: list of twitter data
    :param usernames: list of usernames in twitter data
    :return data_list: list of month numbers and sets of unique hashtags
    """
    data_list = [(i, set()) for i in range(1, 13)]
    for lists in data:
        username = lists[0]
        month = lists[1] - 1
        hashtags = lists[2]
        if username in usernames:
            for tag in hashtags:
                data_list[month][1].add(tag)
    return data_list
ca2fbfb2137b47fe23c7d113f002e528cb55b3ae
10,365
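A small usage sketch for get_tags_by_month_for_users above (records invented; each is [username, month, hashtags]):

tweets = [
    ["ada", 1, ["#python", "#oss"]],
    ["bob", 1, ["#golf"]],
    ["ada", 3, ["#python"]],
]
result = get_tags_by_month_for_users(tweets, ["ada"])
print(result[0])  # (1, {'#python', '#oss'})  -- set order may vary
print(result[2])  # (3, {'#python'})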
def get_face_rectangles(azure_response):
    """
    Returns the rectangles corresponding to the faces detected by Azure.

    :param azure_response: Response from Azure Face request as dictionary.
    :return: The rectangles of any detected face with the format:
        (width, height, left, top)
    """
    result = []
    for face in azure_response:
        result.append(face.face_rectangle)
    return result
d07e40f1b4c648c52ea660179f8d8c1f4957f0db
10,367
def retr_spacegroup_number(xtalstr, ini0):
    """
    Retrieve the space group number.

    When choosing real space grids, some space groups imply restrictions
    on the number of points along the different lattice vectors.
    To impose the correct restrictions we need to know the space group
    number of the current system. This function adds this number to the
    dictionary.
    """
    ini0["spacegroup"] = xtalstr.spacegroup_number
    return ini0
f86827aa089b79c054fe8c4d78c5a6da48f0463e
10,368
import requests
import numpy as np
from requests.exceptions import ReadTimeout

def post_nadlan_sviva_rest(body, only_neighborhoods=True, subject=None,
                           add_coords=True):
    """Take a body from a request and post it to the nadlan.gov.il deals REST API.

    Sviva = other parameters: demographics, etc.

        subject 1: nadlan
        subject 2: area services
        subject 4: demography
        subject 8: education
        subject 16: environment
        subject 32: greenarea
        subject 64: transaccess
    """
    if only_neighborhoods:
        if body['DescLayerID'] != 'NEIGHBORHOODS_AREA':
            raise ValueError('DescLayerID is {}.. only NEIGHBORHOODS_AREA allowed.'.format(
                body['DescLayerID']))
    if subject is not None:
        url = ('https://www.nadlan.gov.il/Nadlan.REST//Mwa/GetDetails'
               '?subjects={}&pointBuffer=1500'.format(subject))
    else:
        url = ('https://www.nadlan.gov.il/Nadlan.REST//Mwa/GetPreInfo'
               '?subjects=127&pointBuffer=1500')
    try:
        r = requests.post(url, json=body, timeout=10)
    except ReadTimeout:
        raise ValueError('TimeOut')
    if r.status_code != 200:
        raise ValueError("couldn't get a response ({}).".format(r.status_code))
    result = r.json()
    if result is not None:
        if add_coords:
            try:
                result['X'] = body['X']
                result['Y'] = body['Y']
            except TypeError:
                result['X'] = np.nan
                result['Y'] = np.nan
        return result
    else:
        raise ValueError('no data')
c659b5ac32b6b5317d614460def207fe9fc20820
10,370
import numpy

def positions_join(positions_sequence):
    """Return concatenated positions arrays."""
    return numpy.concatenate(positions_sequence)
f2743b00ac6b3c19e2c8efe0209e8a3f938389b7
10,371
def callback_on_failure():
    """Simple callback on failure"""
    return False
98fcb160561ff4c6c25f49e95610394b56580a04
10,372
def snd(pair):
    """Return the second element of pair"""
    return pair[1]
a7893d60324ebdd358a0a0bb86bd2842d2617629
10,374
from numpy import log

def log_mean(T_hi, T_lo, exact=False):
    """Compute the logarithmic mean."""
    if exact:
        return (T_hi - T_lo) / log(T_hi / T_lo)
    else:
        # series expansion of the logarithmic mean, accurate when
        # T_hi is close to T_lo (where the exact form degenerates to 0/0)
        d = T_hi - T_lo
        return T_hi - d/2*(1 + d/6/T_hi*(1 + d/2/T_hi))
dcae1d753c416a117dc2f8502454d18f9413fc36
10,376
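A quick comparison of the two branches of log_mean above; for nearby temperatures the series tracks the exact value closely (sample values invented):

print(log_mean(310.0, 300.0, exact=True))  # ~304.97
print(log_mean(310.0, 300.0))              # ~304.97 (agrees to roughly 1e-2)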
def reduceSet(setList):
    """Step through a list of sets and combine sets that overlap.

    This will create a unique list of sets, where each set contains the
    headers of reads that are optical duplicates.
    """
    setList2 = [setList[0]]
    for item1 in setList[1:]:
        inSetList2 = False
        for item2 in setList2:
            if item1 & item2:  # If there is an intersection
                item2 |= item1  # Combine sets and go to next item in setList
                inSetList2 = True
                break
        if not inSetList2:
            # If I could not find any overlaps then append set to setList2
            setList2.append(item1)
    return setList2
4e7e763ae78d5cf97fa122f991b01529e12c86a5
10,377
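A small demonstration of reduceSet above (sets invented); note the merge is single-pass, so groups that only become connected through a later set may need another pass:

groups = [{'r1', 'r2'}, {'r2', 'r3'}, {'r9'}]
print(reduceSet(groups))
# [{'r1', 'r2', 'r3'}, {'r9'}]  -- element order within sets may vary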
def weird_compare(interval, start, end):
    """
    This is a function that is used later on in data_upscale, to add a
    corrective integer (either 1 or 0), when that function checks whether
    the image it is working on has reached the border. This function is not
    expected to be used anywhere outside data_upscale.

    :param interval: Width in pixels of the target zoom images compared to
        the original image.
    :param start: Position in pixels of the start of the ROI (either in X
        or Y direction)
    :param end: Position in pixels of the end of the ROI (either in X or Y
        direction)
    :return: 0 or 1, depending on where the ROI lies on the direction.
    """
    if interval - (end - start) % interval <= start % interval:
        return 1
    else:
        return 0
f4b67c2b740a318ad659d1001bd14d402dad9e21
10,378