content : string (lengths 35 – 762k)
sha1 : string (lengths 40 – 40)
id : int64 (0 – 3.66M)
def interface_details():
    """Get interface details, CLI view"""
    if success_login_form is None:
        return redirect(url_for('base_blueprint.login'))
    else:
        return render_template('more_int_detials.html',
                               details=GetDetails.more_int_details(device, username, password, ssh_port,
                                                                   request.form.get('details')))
f5643644583babb92a188d04c77a59582db69b52
29,951
def complement_sequence(sequence: str, reverse: bool = False) -> str:
    """Complement the given sequence, with optional reversing.

    Args:
        sequence: Input sequence
        reverse: Whether or not to perform reverse complementation

    Returns:
        Complemented (and optionally reversed) string
    """
    sequence = sequence.upper()
    if reverse:
        sequence = reversed(sequence)
    return ''.join(NUCLEOTIDE_COMPLEMENT[c] for c in sequence)
74c85857f3abf669cfaa43d05d1f0190a448bb54
29,952
def calc_llr(tree_dict: StrDict) -> int:
    """
    Calculate the longest linear route for a synthetic route

    :param tree_dict: the route
    """
    return calc_depth(tree_dict) // 2
18c4cf62e434ee1e7c5902871feb323c1b72a96d
29,953
def _GetArmVersion(arch):
    """Returns arm_version for the GN build with the given architecture."""
    if arch == 'armeabi':
        return 6
    elif arch == 'armeabi-v7a':
        return 7
    elif arch in ['arm64-v8a', 'x86', 'x86_64']:
        return None
    else:
        raise Exception('Unknown arch: ' + arch)
fbad0d1066fe4a7e81d2341291b436f5dd98fff0
29,954
def package_copy(r, id, type, revision_number=None, version_name=None):
    """ Copy package - create a duplicate of the Package, set author as current user
    """
    revision = get_package_revision(id, type, revision_number, version_name)
    """
    it may be useful to copy your own package ...
    if r.user.pk == revision.author.pk:
        return HttpResponseForbidden('You are the author of this %s' % revision.package.get_type_name())
    """
    try:
        package = Package.objects.get(
            full_name=revision.package.get_copied_full_name(),
            author__username=r.user.username
        )
        return HttpResponseForbidden(
            'You already have a %s with that name' % revision.package.get_type_name()
        )
    except:
        package = revision.package.copy(r.user)
        revision.save_new_revision(package)
        return render_to_response("json/%s_copied.json" % package.get_type_name(),
                                  {'revision': revision},
                                  context_instance=RequestContext(r),
                                  mimetype='application/json')
8f244a6e8b1309b8b129f316698047b6c78f0186
29,955
def contract(equation, *operands, **kwargs):
    """
    Wrapper around :func:`opt_einsum.contract` that caches contraction paths.

    :param bool cache_path: whether to cache the contraction path.
        Defaults to True.
    """
    backend = kwargs.pop('backend', 'numpy')
    cache_path = kwargs.pop('cache_path', True)
    if not cache_path:
        return opt_einsum.contract(equation, *operands, backend=backend, **kwargs)

    # memoize the contraction path
    out = kwargs.pop('out', None)
    kwargs_key = tuple(kwargs.items())
    shapes = tuple(tuple(t.shape) for t in operands)
    key = equation, shapes, kwargs_key
    if key in _PATH_CACHE:
        expr = _PATH_CACHE[key]
    else:
        expr = opt_einsum.contract_expression(equation, *shapes, **kwargs)
        _PATH_CACHE[key] = expr
    return expr(*operands, backend=backend, out=out)
c7ac17fcee8eef036181e0ee2a96d0b0d4a38593
29,956
import array


def _compute_jn_pcoa_avg_ranges(jn_flipped_matrices, method):
    """Computes PCoA average and ranges for jackknife plotting

    returns 1) an array of jn_averages
            2) an array of upper values of the ranges
            3) an array of lower values for the ranges

    method: the method by which to calculate the range
        IQR: Interquartile Range
        ideal fourths: Ideal fourths method as implemented in scipy
    """
    x, y = shape(jn_flipped_matrices[0])
    all_flat_matrices = [matrix.ravel() for matrix in jn_flipped_matrices]
    summary_matrix = vstack(all_flat_matrices)
    matrix_sum = numpy_sum(summary_matrix, axis=0)
    matrix_average = matrix_sum / float(len(jn_flipped_matrices))
    matrix_average = matrix_average.reshape(x, y)
    if method == 'IQR':
        result = matrix_IQR(summary_matrix)
        matrix_low = result[0].reshape(x, y)
        matrix_high = result[1].reshape(x, y)
    elif method == 'ideal_fourths':
        result = idealfourths(summary_matrix, axis=0)
        matrix_low = result[0].reshape(x, y)
        matrix_high = result[1].reshape(x, y)
    elif method == "sdev":
        # calculate std error for each sample in each dimension
        sdevs = zeros(shape=[x, y])
        for j in xrange(y):
            for i in xrange(x):
                vals = array([pcoa[i][j] for pcoa in jn_flipped_matrices])
                sdevs[i, j] = vals.std(ddof=1)
        matrix_low = -sdevs / 2
        matrix_high = sdevs / 2
    return matrix_average, matrix_low, matrix_high
bba0003df771b60a55b11b67a7df7cb36039d69f
29,957
from typing import List


def get_readmission_label_keys(time_windows: List[int]) -> List[str]:
    """Get label keys for readmission.

    Args:
        time_windows: list<int> of the considered time windows (in days) for readmission.

    Returns:
        list<str> of labels for readmission within X days
    """
    return [f"{READMISSION_LABEL_BASE}_{t}_days" for t in time_windows]
1ba53ef818aadb719832d23afb250fb817b1e087
29,958
def multiplication(image1, image2):
    """
    Multiply (pixel-wise) the two input images and return the result.

    <gui>
        <item name="image1" type="Image" label="Image 1"/>
        <item name="image2" type="Image" label="Image 2"/>
        <item name="result" type="Image" role="return" initializer="output=True" label="Result"/>
    </gui>
    """
    return pixelwise_operation(image1, image2, itk.MultiplyImageFilter)
2813758eba743155960d617eb03aafa937a4cfc0
29,960
def ioat_scan_accel_engine(client, pci_whitelist):
    """Scan and enable IOAT accel engine.

    Args:
        pci_whitelist: Python list of PCI addresses in
                       domain:bus:device.function format or
                       domain.bus.device.function format
    """
    params = {}
    if pci_whitelist:
        params['pci_whitelist'] = pci_whitelist
    return client.call('ioat_scan_accel_engine', params)
714e40288b2ba141d113c0951bf2c171ebcc76d3
29,961
import zipfile
import csv


def load_test(tstfile):
    """Load a test from file.

    This reads a test from a csv file.

    Parameters
    ----------
    tstfile : :class:`str`
        Path to the file
    """
    # default version string
    version_string = "1.0.0"
    try:
        with zipfile.ZipFile(tstfile, "r") as zfile:
            info = TxtIO(zfile.open("info.csv"))
            data = csv.reader(info)
            first_line = _nextr(data)
            if first_line[0] == "wtp-version":
                version_string = first_line[1]
                header = _nextr(data)
            else:
                header = first_line
            version = version_parse(version_string)
            _check_version(version)
            if header[0] != "Testtype":
                raise ValueError(
                    f"load_test: expected 'Testtype' but got '{header[0]}'"
                )
            if header[1] == "PumpingTest":
                routine = _load_pumping_test
            else:
                raise ValueError(f"load_test: unknown test type '{header[1]}'")
    except Exception as exc:
        raise LoadError(f"load_test: couldn't load test '{tstfile}'") from exc
    return routine(tstfile)
c8b4e7f2dfc7e627afd1ae58a64723a4deed8248
29,962
def reorder_instruments(curr_instruments):
    """
    Dialog to remove and add instruments at certain indexes.

    :param curr_instruments: initial list of instruments
    :return: The list of instruments in the new order
    """
    while True:
        instruments_with_indexes(curr_instruments)
        tmp_instruments = [instrument for instrument in curr_instruments]
        old_idx = prompt("Enter the index of the instrument to move or [enter] to finish: ",
                         validator=IndexValidator(len(tmp_instruments) - 1)) or None
        if old_idx is None:
            break
        move_instrument = tmp_instruments.pop(int(old_idx))
        instruments_with_indexes(tmp_instruments)
        new_idx = prompt(f"Enter the index to insert {move_instrument.part_name()}: ",
                         validator=IndexValidator(len(tmp_instruments), allow_empty=False))
        tmp_instruments.insert(int(new_idx), move_instrument)
        print("New instrument order: ")
        instruments_with_indexes(tmp_instruments)
        correct = prompt("Is this correct? [Y/n] ", default='Y', validator=YNValidator())
        if answered_yes(correct):
            curr_instruments = [instrument for instrument in tmp_instruments]
    return curr_instruments
03c042e086d99c9e5ab52c37a2272af82411c777
29,963
def compute_propeller_with_normal_position(arg_class, cabin_arr): """ compute propeller array and connected arm array :param cabin_arr: numpy array of cabin :param arg_class: argument class :return: propeller_arr, arm_arr """ l1 = arg_class.l1 l2 = arg_class.l2 l3 = arg_class.l3 # fuselage length l = l1 + l2 + l3 # propeller setting ratio txs = arg_class.txs # the ratio of setting position corresponding to overall length angles = arg_class.angles # angle of arm which is connected with a propeller # outer line of the collection of propeller radius = arg_class.radius # the radius of each propeller pr = arg_class.pr # the arm length lp = arg_class.lp # setting shift zdiffp = arg_class.zdiffp # setting coefficient for arm on z axis k = arg_class.k # argm radius arm_r = arg_class.arm_r # propeller number(because of symmetric, get the half number of propellers) half_propeller_number = len(txs) # coords of joint point joint_points = [] for idx in range(half_propeller_number): point = [l * txs[idx], np.max(cabin_arr[:, 1]), idx * zdiffp] joint_points.append(point) # coords of propellers at left side propeller_arr_l = [] # coords of propellers at right side propeller_arr_r = [] for angle, joint_point in zip(angles, joint_points): angle = 180 - angle angle = angle * np.pi / 180.0 # get center coords center = np.array([joint_point[0] + (radius + pr) * np.cos(angle), joint_point[1] + (radius + pr) * np.sin(angle), joint_point[2]]) # z range z = np.linspace(-k * lp + joint_point[2], (1 - k) * lp + joint_point[2], 30) for zi in z: # x range(create circle) x = np.linspace(center[0] - pr, center[0] + pr, 30) for xi in x: target = np.sqrt(pr ** 2 - (xi - center[0]) ** 2) yui = center[1] + target yli = center[1] - target # left side plu = [xi, yui, zi] pll = [xi, yli, zi] propeller_arr_l.append(plu) propeller_arr_l.append(pll) # right side pru = [xi, -yui, zi] prl = [xi, -yli, zi] propeller_arr_r.append(pru) propeller_arr_r.append(prl) propeller_arr_r = np.array(propeller_arr_r) propeller_arr_l = np.array(propeller_arr_l) # put together propeller arr propeller_arr = np.concatenate([propeller_arr_l, propeller_arr_r], axis=0) # create arm arm_arr = [] # right part x = np.linspace(0, radius + pr, 30) for xi in x: y = np.linspace(-arm_r, arm_r, 30) for yi in y: target = np.sqrt(arm_r ** 2 - yi ** 2) zui = target zli = -target pu = [xi, yi, zui] pl = [xi, yi, zli] for idx in range(half_propeller_number): rep_j = joint_points[idx] angle = angles[idx] # turn over 3d on z axis against upper part angle_u = -1 * (180 - angle) * np.pi / 180.0 t_arr_u = turnover_3d(angle_u, np.array([0, 0, 1])) # turn over 3d on z axis against left part angle_l = 180 * np.pi / 180.0 t_arr_l = turnover_3d(angle_l, np.array([0, 0, 1])) puu = np.dot(t_arr_u.T, np.array(pu)) + np.array(rep_j) pll = np.dot(t_arr_l.T, puu) + np.array([l, 0, -2 * zdiffp * idx + (half_propeller_number - 1) * zdiffp]) arm_arr.append(puu.tolist()) arm_arr.append(pll.tolist()) arm_arr = np.array(arm_arr) return propeller_arr, arm_arr
c58aa5939f1b4fef05c9bfa09781310a9b64ab52
29,964
def get_dims_linear(weight_mat_layers, weight_dict):
    """
    Returns a list of dimensions of layers of an mlp
    in decreasing order.
    """
    dims = []
    for ix, layer in enumerate(weight_mat_layers):
        dim_out, dim_in = weight_dict[layer].shape
        if ix == 0:
            dims.extend([dim_in, dim_out])
        else:
            dims.append(dim_out)
    return dims
eba82695a5c3bd1f850703b172e1f0a7b84fa010
29,965
def yices_distinct(n, arg):
    """Returns (distinct arg[0] ... arg[n-1])."""
    return libyices.yices_distinct(n, arg)
7d37cf6a2193cb4bb0d1d46f4f9986afbe35ad50
29,966
def is_cog_contributor():
    """Check if whoever used the command is in the bots contributors."""
    async def predicate(ctx):
        if str(ctx.author.id) in ctx.bot.contributors:
            return True
        else:
            raise NotAContributorError(
                f"Command {ctx.command.name} raised an error: {str(ctx.author)} is not a contributor.")
    return commands.check(predicate)
d0a7d8096f03ce1bbeed2e6c6265c46d0ae1022a
29,967
def tuplify2d(x):
    """Convert ``x`` to a tuple of length two.

    It performs the following conversion:

    .. code-block:: python

        x => x if isinstance(x, tuple) and len(x) == 2
        x => (x, x) if not isinstance(x, tuple)

    Args:
        x (any): the object to be converted

    Returns:
        tuple:
    """
    if isinstance(x, tuple):
        assert len(x) == 2
        return x
    return (x, x)
64170b14dbe7eb8885d21f45acff6b43979f1219
29,968
def init_mako(app, **kw):
    """
    Initializes the Mako TemplateLookup based on the application configuration
    and updates the _request_ctx_stack before each request
    """
    def get_first(dicts, keys, default=None):
        # look in one or more dictionaries returning the first found value
        for d in dicts:
            found = filter(lambda x: x in d, keys)
            if found:
                return d[found[0]]
        return default

    dirs = get_first([kw, app.config],
                     map(lambda x: 'MAKO_%s' % x, ('DIRS', 'DIRECTORIES', 'DIR', 'DIRECTORY')),
                     default='.')
    if type(dirs) == str:
        dirs = dirs.split(' ')

    get = app.config.get
    kw['input_encoding'] = kw.pop('input_encoding', get('MAKO_INPUT_ENCODING', 'utf-8'))
    kw['output_encoding'] = kw.pop('output_encoding', get('MAKO_OUTPUT_ENCODING', 'utf-8'))
    kw['module_directory'] = kw.pop('module_directory', get('MAKO_CACHEDIR', None))
    kw['collection_size'] = kw.pop('collection_size', get('MAKO_CACHESIZE', -1))
    kw['imports'] = kw.pop('imports', get('MAKO_IMPORTS', None))

    lookup = TemplateLookup(directories=dirs, **kw)

    @app.before_request
    def before_request():
        _request_ctx_stack.top._mako_lookup = lookup

    return app
60713b06cde3be9eca72207aea69a30d9061cffc
29,970
import time


def erase_devices():
    """Erase all the drives on this server.

    This method performs sanitize erase on all the supported physical drives
    in this server. This erase cannot be performed on logical drives.

    :returns: a dictionary of controllers with drives and the erase status.
    :raises exception.HPSSAException, if none of the drives support
        sanitize erase.
    """
    server = objects.Server()

    for controller in server.controllers:
        drives = [x for x in controller.unassigned_physical_drives
                  if (x.get_physical_drive_dict().get('erase_status', '') == 'OK')]
        if drives:
            controller.erase_devices(drives)

    while not has_erase_completed():
        time.sleep(300)

    server.refresh()
    status = {}
    for controller in server.controllers:
        drive_status = {x.id: x.erase_status
                        for x in controller.unassigned_physical_drives}
        sanitize_supported = controller.properties.get('Sanitize Erase Supported', 'False')
        if sanitize_supported == 'False':
            msg = ("Drives overwritten with zeros because sanitize erase "
                   "is not supported on the controller.")
        else:
            msg = ("Sanitize Erase performed on the disks attached to "
                   "the controller.")
        drive_status.update({'Summary': msg})
        status[controller.id] = drive_status
    return status
5f9a7a2328b24cb0fb45ea560f570b596c0326d7
29,971
def earlyon(time, duration, *args):
    """
    Some lights have a slight delay before they turn on (capacitors that need
    to be charged up?). This takes the current time and subtracts that delay
    so the code looks like they turn on at the right time, but we really send
    the command a little bit early to give the illusion that they're all in
    sync
    """
    duration = int(duration, 10)
    cmd = '"'+('" "'.join(args))+'"'
    if args[-1] == "on":
        return [(time-duration, cmd)]
    else:
        return [(time, cmd)]
5671d46ffe42bd456689cffc3ce3e1f6731101c8
29,972
def downsample_seg_to_mip(seg, mip_start, mip_end):
    """
    Downsample a segmentation to the desired mip level.

    Args:
        seg (3darray): A volume segmentation.
        mip_start (int): The MIP level of seg.
        mip_end (int): The desired MIP level.

    Returns:
        3darray: seg downsampled to :param: mip_end
    """
    assert mip_end > mip_start

    mip = mip_start
    while mip < mip_end:
        seg = downsample_seg(seg)
        mip += 1

    return seg
9245c6f1b0602f284a7d565758e322af083e6242
29,973
def example(name):
    """Renders a sample page with the name specified in the URL."""
    return template('<b>Hello {{name}}</b>!', name=name)
df52c3ed0708698d7049223b5ea1b7d98f8c3eb7
29,974
def tri(N, M=None, k=0, dtype=float):
    """Creates an array with ones at and below the given diagonal.

    Args:
        N (int): Number of rows.
        M (int): Number of columns. ``M == N`` by default.
        k (int): The sub-diagonal at and below which the array is filled. Zero
            is the main diagonal, a positive value is above it, and a negative
            value is below.
        dtype: Data type specifier.

    Returns:
        cupy.ndarray: An array with ones at and below the given diagonal.

    .. seealso:: :func:`numpy.tri`
    """
    if M is None:
        M = N

    out = cupy.empty((N, M), dtype=dtype)

    return _tri_kernel(M, k, out)
e7de7d0bc41563450d7e98071a14ca3b85f250c5
29,975
def UpdateGClientBranch(webkit_rev, magic_gclient_branch):
    """Update the magic gclient branch to point at |webkit_rev|.

    Returns: true if the branch didn't need changes."""
    target = FindSVNRev(webkit_rev)
    if not target:
        print "r%s not available; fetching." % webkit_rev
        subprocess.check_call(['git', 'fetch', GetRemote()],
                              shell=(os.name == 'nt'))
        target = FindSVNRev(webkit_rev)
    if not target:
        print "ERROR: Couldn't map r%s to a git revision." % webkit_rev
        sys.exit(1)

    current = RunGit(['show-ref', '--hash', magic_gclient_branch])
    if current == target:
        return False  # No change necessary.

    subprocess.check_call(['git', 'update-ref', '-m', 'gclient sync',
                           magic_gclient_branch, target],
                          shell=(os.name == 'nt'))
    return True
52de6ec5139052de914d29d44b926938227894db
29,976
def calculate_trajectories(particles, daughters, alpha=1.):
    """Calculates the trajectories of the particles.

    Args:
        particles: a dataframe with the particle information.
        daughters: a dataframe where each line represents a daughter for the particles.
        alpha: for how long stable tracks should be propagated.
    """
    particles_for_lines = particles.copy()
    distances_to_primary_vertex = _distances_to_primary_vertex(particles_for_lines)
    alpha = 1.1 * distances_to_primary_vertex.max()
    particles_for_lines['NDaughters'] = daughters.groupby('Id').apply(len)
    particles_for_lines['NDaughters'] = particles_for_lines['NDaughters'].fillna(0.).astype(int)

    # Particles with daughters
    lines_daughters = particles_for_lines.join(daughters, how='inner')
    lines_daughters = lines_daughters.join(particles_for_lines[['Vx', 'Vy', 'Vz']],
                                           on='DaughterId', rsuffix='_decay')

    # Particles WITHOUT daughters
    lines_single = _add_line_continuation(particles_for_lines[particles_for_lines['NDaughters'] == 0],
                                          alpha, '_decay')

    lines = pd.concat([lines_daughters, lines_single])
    decay_length = _decay_length(lines)
    return lines[decay_length > 0]
fb9422ac315dc1c2b6e6781cfef93e8235dd7f2d
29,978
def site_title(request, registry, settings):
    """Expose website name from ``tm.site_title`` config variable to templates.

    This is the default ``<title>`` tag.

    Example:

    .. code-block:: html+jinja

        <meta>
            <title>My page - {{ site_title }}</title>
        </meta>
    """
    # Use .get() for BBB
    return settings.get("tm.site_title", "")
fcc61acecabb163ef6e55ed2fde7d4d025a8082a
29,979
import typing from pathlib import Path import importlib import inspect import ast def linkcode_resolve(repo_link: str, domain: str, info: dict[str, str]) -> typing.Optional[str]: """ Function called by linkcode to get the URL for a given resource. See for more details: https://www.sphinx-doc.org/en/master/usage/extensions/linkcode.html#confval-linkcode_resolve """ if domain != "py": raise Exception("Unknown domain passed to linkcode function.") symbol_name = info["fullname"] build_root = get_build_root() # Import the package to find files origin = build_root / info["module"].replace(".", "/") search_locations = [] if origin.is_dir(): search_locations.append(origin.absolute().as_posix()) origin = origin / "__init__.py" else: origin = Path(origin.absolute().as_posix() + ".py") if not origin.exists(): raise Exception(f"Could not find `{info['module']}` as a package or file.") # We can't use a normal import (importlib.import_module), because the module can conflict with another copy # in multiversion builds. We load the module from the file location instead spec = importlib.util.spec_from_file_location(info["module"], origin, submodule_search_locations=search_locations) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) symbol = [module] for name in symbol_name.split("."): symbol.append(getattr(symbol[-1], name)) symbol_name = name try: lines, start = inspect.getsourcelines(symbol[-1]) end = start + len(lines) except TypeError: # Find variables by parsing the ast source = ast.parse(inspect.getsource(symbol[-2])) while isinstance(source.body[0], ast.ClassDef): source = source.body[0] for ast_obj in source.body: if isinstance(ast_obj, ast.Assign): names = [] for target in ast_obj.targets: if isinstance(target, ast.Tuple): names.extend([name.id for name in target.elts]) else: names.append(target.id) if symbol_name in names: start, end = ast_obj.lineno, ast_obj.end_lineno break else: raise Exception(f"Could not find symbol `{symbol_name}` in {module.__name__}.") _, offset = inspect.getsourcelines(symbol[-2]) if offset != 0: offset -= 1 start += offset end += offset file = Path(inspect.getfile(module)).relative_to(build_root).as_posix() try: sha = git.Repo(build_root).commit().hexsha except git.InvalidGitRepositoryError: # We are building a historical version, no git data available sha = build_root.name url = f"{repo_link}/blob/{sha}/{file}#L{start}" if end != start: url += f"-L{end}" return url
1fd4571b81f98c82c57dae43a2be957380dca91f
29,980
def deterministic_hash(items):
    """
    Intermediary hashing function that allows deterministic hashing of a list of items.

    :param items: List of items to hash
    :return: Numeric, deterministic hash, returns 0 if item is none
    """
    h = 0
    for item in items:
        if not item:
            pass
        elif not isinstance(item, (int, long)):
            h ^= bytes2long(item)
        else:
            h ^= item
    return h
da3950039762e2b499f522cbd891a87d98633bd9
29,982
def fetch_known_transcripts_with_gene_label(cursor, datasets):
    """ Fetch known transcripts along with the gene they belong to """
    datasets = format_for_IN(datasets)
    query = """SELECT DISTINCT gene_ID,transcript_ID FROM observed
                   LEFT JOIN transcript_annotations AS ta ON ta.ID = observed.transcript_ID
                   WHERE (ta.attribute = 'transcript_status' AND ta.value = 'KNOWN')
                   AND observed.dataset IN """ + datasets
    cursor.execute(query)
    known_transcripts = [(x[0], x[1], "FSM_transcript") for x in cursor.fetchall()]
    return known_transcripts
92dbd97ee79672ff0986c2caecf90ab95f05fa70
29,985
def change_data():
    """Edit the user profile."""
    form = ChangeDataForm()
    if form.validate_on_submit():
        current_user.age = form.age.data
        current_user.country = form.country.data
        current_user.city = form.city.data
        current_user.telegram = form.telegram.data
        current_user.git = form.git.data
        db.session.commit()
        flash('Profile updated successfully.', 'success')
        return redirect(
            url_for('user_profile', username=current_user.username))
    elif request.method == 'GET':
        form.age.data = current_user.age
        form.country.data = current_user.country
        form.city.data = current_user.city
        form.telegram.data = current_user.telegram
        form.git.data = current_user.git
    return render_template(
        'profile/change_data.html',
        form=form,
    )
6dc8299a07733fe7291d1c8b646848f6e1b60c60
29,986
from io import StringIO


def run_checks(root, parent, cmds, scmds, paths='', opts={}):
    """Run the checks given in 'cmds', expected to have well-known signatures,
    and report results for any which fail.

    Return failure if any of them did.

    NB: the function name of the commands passed in is used to name the NOT
    file which excepts files from them."""
    ret = 0
    for cmd in cmds:
        s = StringIO()
        exclude = not_check(root, cmd.__name__)
        result = cmd(root, parent, gen_files(root, parent, paths, exclude), output=s)
        ret |= result
        if result != 0:
            print(s.getvalue())
    for cmd in scmds:
        s = StringIO()
        exclude = not_check(root, cmd.__name__)
        result = cmd(root, parent, gen_links(root, parent, paths, exclude), output=s)
        ret |= result
        if result != 0:
            print(s.getvalue())
    return ret
bde53f0f0fca0b6d12f6cf58b631cc841a0d567f
29,988
def numpy_to_rdkit(adj, nf, ef, sanitize=False):
    """
    Converts a molecule from numpy to RDKit format.

    :param adj: binary numpy array of shape (N, N)
    :param nf: numpy array of shape (N, F)
    :param ef: numpy array of shape (N, N, S)
    :param sanitize: whether to sanitize the molecule after conversion
    :return: an RDKit molecule
    """
    if rdc is None:
        raise ImportError('`numpy_to_rdkit` requires RDKit.')
    mol = rdc.RWMol()
    for nf_ in nf:
        atomic_num = int(nf_)
        if atomic_num > 0:
            mol.AddAtom(rdc.Atom(atomic_num))
    for i, j in zip(*np.triu_indices(adj.shape[-1])):
        if i != j and adj[i, j] == adj[j, i] == 1 and not mol.GetBondBetweenAtoms(int(i), int(j)):
            bond_type_1 = BOND_MAP[int(ef[i, j, 0])]
            bond_type_2 = BOND_MAP[int(ef[j, i, 0])]
            if bond_type_1 == bond_type_2:
                mol.AddBond(int(i), int(j), bond_type_1)
    mol = mol.GetMol()
    if sanitize:
        rdc.SanitizeMol(mol)
    return mol
93295c556037ffa3e84373b73ca1308a9a1d53b7
29,989
from typing import Dict
from typing import Any
from pathlib import Path


def get_path(key: str, **kwargs: Dict[str, Any]) -> Path:
    """Get a file path string system variable as a pathlib.Path instance.

    See signature of get() for parameter details."""
    return Path(get(key, **kwargs))
9fe34573ced90c266ef7b73a430cc95ba4d09bc5
29,990
def get_reduced_tree(tree, reduce_by): """ Given a tree decomposition in tree and a required size of reduction, produces a new tree decomposition with treewidth reduced by the requested size and a list of eliminated nodes. We use a greedy algorithm to find nodes to eliminate. This algorithm deletes variable subtrees from the maximal node. The variables corresponding to larger subtrees are deleted first. If the length of subtrees are equal then subtrees passing through more nodes are removed first Parameters ---------- tree : networkx.Graph tree decomposition we need to reduce reduce_by : int reduce treewidth by this amount Returns ------- new_tree : networkx.Graph() reduced tree decomposition eliminated_nodes : list list of eliminated nodes """ max_clique = find_max_cliques(tree)[0] treewidth = len(max_clique) - 1 current_treewidth = treewidth if reduce_by < 0 or reduce_by > treewidth - 1: raise ValueError( 'Requested reduce_by: {}, allowed range: [0, {}]'.format( reduce_by, treewidth-1)) eliminated_nodes = [] new_tree = tree while current_treewidth > treewidth - reduce_by: nodes_by_subwidth = get_subtree_by_length_width( tree, list(max_clique)) # get (node, path length, total node's subtree width) nodes_in_rmorder = [(node, len(nodes_by_subwidth[node]), sum(nodes_by_subwidth[node])) for node in nodes_by_subwidth] # sort by path length, then by total width of subtree nodes_in_rmorder = sorted( nodes_in_rmorder, key=lambda x: (x[1], x[2])) rmnode = nodes_in_rmorder[-1][0] new_tree = rm_element_in_tree(new_tree, rmnode) eliminated_nodes.append(rmnode) max_clique = find_max_cliques(new_tree)[0] current_treewidth = len(max_clique) - 1 assert len(list(new_tree.selfloop_edges())) == 0 return new_tree, eliminated_nodes
a644ae326ef86e9b53bb3c3c510e46740038c8d3
29,992
from pathlib import Path


def temp_path(suffix=""):
    """Return the path of a temporary directory."""
    directory = mkdtemp(suffix=suffix)
    return Path(directory)
2cd196a2a1974816d49d75fd10a0d43b03c12612
29,993
def maybe_utf8(value):
    """Encode to utf-8, only if the value is Unicode."""
    if isinstance(value, unicode):
        return value.encode("utf-8")
    return value
82e15ef35527e064a2b5bf3934c135985d60e1fe
29,994
def parse_id_as_interval(id_string, regex):
    """ The fasta ids contain the locus information. """
    match = regex.match(id_string)
    genome = match.group("genome")
    seqid = match.group("seqid")
    start_tmp = int(match.group("start"))
    end_tmp = int(match.group("end"))

    start = min([start_tmp, end_tmp])
    end = max([start_tmp, end_tmp])
    del start_tmp
    del end_tmp

    return (genome, seqid, start, end)
7d35bdd7b4418d1edcd433cd39b9defc9050c6f6
29,995
def map_sentences_to_indices_of_vectors(sentences, word_to_index_glove, unknown_token):
    """ map sentences to integers that represent the index of each word in the glove vocabulary """
    # the list to be returned
    mapped_sentences = []
    # get the index of the unknown token
    unknown_token_index = word_to_index_glove[unknown_token]
    # iterate for each sentence
    for sentence in sentences:
        # get the split sentence
        split_sentence = sentence.split()
        # map it to the corresponding indices
        mapped_sentence = [word_to_index_glove.get(word, unknown_token_index) for word in split_sentence]
        # append it to the list
        mapped_sentences.append(mapped_sentence)
    # return the list
    return mapped_sentences
04a27bd4ccd5ac9d0366218107ee36b61d4a7655
29,996
import numpy def jordan_wigner_dual_basis_jellium(grid, spinless=False, include_constant=False): """Return the jellium Hamiltonian as QubitOperator in the dual basis. Args: grid (Grid): The discretization to use. spinless (bool): Whether to use the spinless model or not. include_constant (bool): Whether to include the Madelung constant. Returns: hamiltonian (QubitOperator) """ # Initialize. n_orbitals = grid.num_points() volume = grid.volume_scale() if spinless: n_qubits = n_orbitals else: n_qubits = 2 * n_orbitals hamiltonian = QubitOperator() # Compute vectors. momentum_vectors = {} momenta_squared_dict = {} for indices in grid.all_points_indices(): momenta = momentum_vector(indices, grid) momentum_vectors[indices] = momenta momenta_squared_dict[indices] = momenta.dot(momenta) # Compute the identity coefficient and the coefficient of local Z terms. identity_coefficient = 0. z_coefficient = 0. for k_indices in grid.all_points_indices(): momenta = momentum_vectors[k_indices] if momenta.any(): momenta_squared = momenta.dot(momenta) identity_coefficient += momenta_squared / 2. identity_coefficient -= (numpy.pi * float(n_orbitals) / (momenta_squared * volume)) z_coefficient += numpy.pi / (momenta_squared * volume) z_coefficient -= momenta_squared / (4. * float(n_orbitals)) if spinless: identity_coefficient /= 2. # Add identity term. identity_term = QubitOperator((), identity_coefficient) hamiltonian += identity_term # Add local Z terms. for qubit in range(n_qubits): qubit_term = QubitOperator(((qubit, 'Z'),), z_coefficient) hamiltonian += qubit_term # Add ZZ terms and XZX + YZY terms. zz_prefactor = numpy.pi / volume xzx_yzy_prefactor = .25 / float(n_orbitals) for p in range(n_qubits): index_p = grid_indices(p, grid, spinless) position_p = position_vector(index_p, grid) for q in range(p + 1, n_qubits): index_q = grid_indices(q, grid, spinless) position_q = position_vector(index_q, grid) difference = position_p - position_q skip_xzx_yzy = not spinless and (p + q) % 2 # Loop through momenta. zpzq_coefficient = 0. term_coefficient = 0. for k_indices in grid.all_points_indices(): momenta = momentum_vectors[k_indices] momenta_squared = momenta_squared_dict[k_indices] if momenta_squared == 0: continue cos_difference = numpy.cos(momenta.dot(difference)) zpzq_coefficient += (zz_prefactor * cos_difference / momenta_squared) if skip_xzx_yzy: continue term_coefficient += (xzx_yzy_prefactor * cos_difference * momenta_squared) # Add ZZ term. qubit_term = QubitOperator(((p, 'Z'), (q, 'Z')), zpzq_coefficient) hamiltonian += qubit_term # Add XZX + YZY term. if skip_xzx_yzy: continue z_string = tuple((i, 'Z') for i in range(p + 1, q)) xzx_operators = ((p, 'X'),) + z_string + ((q, 'X'),) yzy_operators = ((p, 'Y'),) + z_string + ((q, 'Y'),) hamiltonian += QubitOperator(xzx_operators, term_coefficient) hamiltonian += QubitOperator(yzy_operators, term_coefficient) # Include the Madelung constant if requested. if include_constant: hamiltonian += QubitOperator((),) * (2.8372 / grid.scale) # Return Hamiltonian. return hamiltonian
d52a5a102297213de830f58c8190337589d0d9ca
29,998
def boxcar_decay(tbins, t0, area_box, height_box, area_decay): """ Compute the lightcurve from one or more boxcar-decay functions. Parameters ---------- tbins : array edges of the time bins used for the lightcurve t0 : float or array start times of the boxcar-decays area_box : float or array areas of the boxcar portion of the boxcar-decays height_box : float or array heights of the boxcar-decays area_decay : float or array areas of the decay portions of the boxcar-decays Returns ------- y : array lightcurve values Notes ----- This function is a bottleneck when creating a lightcurve from a long series of flares. If this code is to be adapted for quick simulation of years-long series of flares, this is where the speedup needs to happen. """ # politely let user know that, in this instance, astropy Quantities are not wanted if any(isinstance(x, u.Quantity) for x in [tbins, t0, area_box, height_box, area_decay]): raise ValueError('No astropy Quantity input for this function, please.') # this is going to have to be ugly for it to be fast, I think # standardize t0, area_box, height_box, and area_decay for array input t0, area_box, height_box, area_decay = [np.reshape(a, [-1]) for a in [t0, area_box, height_box, area_decay]] # compute end of box, start of decay t1 = t0 + area_box/height_box # correct for portions hanging over ends of tbins t0 = np.copy(t0) t0[t0 < tbins[0]] = tbins[0] t1[t1 > tbins[-1]] = tbins[-1] # initialize y array y = np.zeros((len(t0), len(tbins)-1)) i_rows = np.arange(y.shape[0]) # add starting portion of box to first bin that is only partially covered by it i0 = np.searchsorted(tbins, t0, side='right') frac = (tbins[i0] - t0)/(tbins[i0] - tbins[i0-1]) y[i_rows, i0-1] += frac*height_box # add box to bins fully covered by it inbox = (tbins[None, :-1] > t0[:, None]) & (tbins[None, 1:] < t1[:, None]) y += height_box[:,None]*inbox # add ending fraction of box to last bin that is partially covered by it i1 = np.searchsorted(tbins, t1, side='left') frac = (t1 - tbins[i1-1])/(tbins[i1] - tbins[i1-1]) y[i_rows, i1-1] += frac*height_box # deal with any cases where the box was entirely within a bin j = i0 == i1 y[i_rows[j], i0[j]-1] = area_box[j]/(tbins[i0][j] - tbins[i0-1][j]) # add decay # compute cumulative decay integral at all time points amp_decay = height_box tau_decay = area_decay / amp_decay with np.errstate(over='ignore', invalid='ignore'): Idecay = -amp_decay[:,None]*tau_decay[:,None]*np.exp(-(tbins[None,:] - t1[:,None])/tau_decay[:,None]) ydecay = np.diff(Idecay, 1)/np.diff(tbins) keep = tbins[:-1] > t1[:, None] y[keep] += ydecay[keep] # add fractional piece of exponential i1 = np.searchsorted(tbins, t1, side='right') inrange = i1 < len(tbins) i_rows, i1 = i_rows[inrange], i1[inrange] Idecay1 = -amp_decay*tau_decay ydecay1 = (Idecay[i_rows, i1] - Idecay1[i_rows])/(tbins[i1] - tbins[i1-1]) y[i_rows, i1-1] += ydecay1 return np.sum(y, 0)
31beb8d6cab940bd75a814121833535819c17e69
29,999
def updateShaderState(self):
    """Updates all shader program variables. """
    if not self.ready():
        return

    opts = self.opts
    self.shader.load()

    voxValXform = self.imageTexture.voxValXform
    voxValXform = [voxValXform[0, 0], voxValXform[0, 3], 0, 0]
    invNumLabels = 1.0 / (opts.lut.max() + 1)

    self.shader.setFragParam('voxValXform', voxValXform)
    self.shader.setFragParam('invNumLabels', [invNumLabels, 0, 0, 0])

    self.shader.unload()

    return True
611b093ce51e99e5c7c1e3da5dcc7cd1a8c07b01
30,000
import _winreg
from cake.registry import queryString


def _getMinGWInstallDir():
    """Returns the MinGW install directory.

    Typically: 'C:\MinGW'.

    @return: The path to the MinGW install directory.
    @rtype: string

    @raise WindowsError: If MinGW is not installed.
    """
    possibleSubKeys = [
        r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\MinGW",
        r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\{AC2C1BDB-1E91-4F94-B99C-E716FE2E9C75}_is1",
    ]
    # Try all known registry locations.
    for subKey in possibleSubKeys:
        try:
            return queryString(_winreg.HKEY_LOCAL_MACHINE, subKey, "InstallLocation")
        except WindowsError:
            # If this is the last possibility, re-raise the exception.
            if subKey is possibleSubKeys[-1]:
                raise
262f65aaf413fe718b753ffa706c97c80b73f349
30,001
import urllib.request
from xml.dom import minidom


def nextbus(a, r, c="vehicleLocations", e=0):
    """Returns the most recent latitude and longitude of the selected bus
    line using the NextBus API (nbapi)

    Arguments: a=agency, r=route, c=command,
    e=epoch timestamp for start date of track, 0 = the last 15 minutes
    """
    nbapi = "http://webservices.nextbus.com"
    nbapi += "/service/publicXMLFeed?"
    nbapi += "command={}&a={}&r={}&t={}".format(c, a, r, e)
    xml = minidom.parse(urllib.request.urlopen(nbapi))
    # If more than one vehicle, just get the first
    bus = xml.getElementsByTagName("vehicle")[0]
    if bus:
        at = bus.attributes
        return (at["lat"].value, at["lon"].value)
    else:
        return (False, False)
08ce12f3a1d96572014dc528de255f89e5b0ee46
30,002
def latest_blog_posts(context, num):
    """
    Displays the most recent blog posts. It takes an argument, num and
    displays so many posts depending on the value.
    """
    latest_blog_posts = Post.objects.all()[:num].select_related()
    return {
        'latest_blog_posts': latest_blog_posts
    }
74ee23753e674e1dd1f8fa8af92002c739a28ee3
30,003
from typing import List from typing import Dict import copy def _expand_array_paths_to_preserve(paths: List[DetailedPath]) -> Dict[str, List[int]]: """ Used by "filter_element_match" - Returns a dictionary of string paths mapped to array indices that we want to preserve. :param paths: A list of lists of detailed paths (containing strings and array indices) to elements that matched query criteria :return: A dict where the keys are a dot-separated path to an array, and the values are a list of indices in that array that we want to keep. If there are no indices in the original path, that path will be ignored. Some paths may be expanded into multiple paths where there are multiple levels of indices (arrays of arrays). :Example: _expand_array_paths_to_preserve([["F", 1, 2], ["F", 1, 3], ["G", "H"], ["L", 1, "M"]]) {'F': [1], 'F.1': [2, 3], 'L': [1]} This data will be used to remove all elements from row["F"][1] that are not at index 2, and 3. We'll then remove all elements from "F" that are not at index [1], and all elements from "L" that are not at index 1. """ # Break path into multiple paths if array elements in path expanded: List[DetailedPath] = [] for path in paths: while path != [] and not isinstance(path[-1], int): path.pop() new_path: DetailedPath = [] for elem in path: new_path.append(elem) if isinstance(elem, int) and new_path not in expanded: expanded.append(copy.deepcopy(new_path)) # Combine paths where the key is a dot-separated path to the array, and the value are the indices # of the array we want to preserve merge_paths: Dict[str, List[int]] = defaultdict(list) for path in expanded: merge_paths[join_detailed_path(path[0:-1])].append(path[-1]) # type: ignore return merge_paths
7f44a717285bc30c3162d39dcf18a1cdc3920bed
30,004
def get_request_fixture_names(request):
    """Get list of fixture names for the given FixtureRequest.

    Get the internal and mutable list of fixture names in the enclosing scope of
    the given request object.

    Compatibility with pytest 3.0.
    """
    return request._pyfuncitem._fixtureinfo.names_closure
665fff4538f3817b6eb882f9a873683d69003bfd
30,005
def _unique(values, *, return_inverse=False):
    """Helper function to find unique values with support for python objects.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Parameters
    ----------
    values : ndarray
        Values to check for unknowns.

    return_inverse : bool, default=False
        If True, also return the indices of the unique values.

    Returns
    -------
    unique : ndarray
        The sorted unique values.

    unique_inverse : ndarray
        The indices to reconstruct the original array from the unique array.
        Only provided if `return_inverse` is True.
    """
    if values.dtype == object:
        return _unique_python(values, return_inverse=return_inverse)
    # numerical
    out = np.unique(values, return_inverse=return_inverse)

    if return_inverse:
        uniques, inverse = out
    else:
        uniques = out

    # np.unique will have duplicate missing values at the end of `uniques`
    # here we clip the nans and remove it from uniques
    if uniques.size and is_scalar_nan(uniques[-1]):
        nan_idx = np.searchsorted(uniques, np.nan)
        uniques = uniques[:nan_idx + 1]
        if return_inverse:
            inverse[inverse > nan_idx] = nan_idx

    if return_inverse:
        return uniques, inverse
    return uniques
4768b0e055cfd9a42b5332f7a47aa608aa7b90c0
30,006
def _create_range_tool( data, min_time, max_time, plot_range, width, height, time_column: str = None, y: str = "y_index", ): """Create plot bar to act as as range selector.""" ext_min = min_time - ((max_time - min_time) * 0.15) ext_max = max_time + ((max_time - min_time) * 0.15) plot_height = max(120, int(height * 0.20)) rng_select = figure( x_range=(ext_min, ext_max), title="Range Selector", plot_height=plot_height, plot_width=width, x_axis_type="datetime", y_axis_type=None, tools="", toolbar_location=None, ) help_str = ( "Drag the middle or edges of the selection box to change " + "the range in the main chart" ) rng_select.add_layout( Title(text=help_str, align="right", text_font_size="10px"), "below" ) rng_select.xaxis[0].formatter = _get_tick_formatter() if isinstance(data, dict): for _, series_def in data.items(): rng_select.circle( x=series_def["time_column"], y=y, color=series_def["color"], source=series_def["source"], ) elif isinstance(data, pd.DataFrame): rng_select.circle( x=time_column, y=y, color="blue", source=ColumnDataSource(data) ) range_tool = RangeTool(x_range=plot_range) range_tool.overlay.fill_color = "navy" range_tool.overlay.fill_alpha = 0.2 rng_select.ygrid.grid_line_color = None rng_select.add_tools(range_tool) rng_select.toolbar.active_multi = range_tool return rng_select
42cf2d4f5986dd454a9aa968ce4db4136b5acd1f
30,007
def pivot(df, index, column, value):
    """
    Pivot a dataframe. Reverse operation of melting. Useful for configuring evolution
    See pandas' pivot_table documentation for more details

    Args:
        - index (list): indexes argument of pd.pivot_table
        - column (str): column name to pivot on
        - value (str): column name containing the value to fill the pivoted df
    """
    if df.dtypes[value].type == np.object_:
        df = pd.pivot_table(df, index=index, columns=column, values=value,
                            aggfunc=lambda x: ' '.join(x))
    else:
        df = pd.pivot_table(df, index=index, columns=column, values=value)
    df = df.reset_index()
    return df
b9a8c63d5ce320f4a156c8f42b92173dd1d86ca0
30,008
def split_pdf_image_into_row_image_block(pdf_image):
    """
    split the whole pdf image into row image block
    :param pdf_image: the whole color pdf image
    :return:
    """
    gray_image = cv2.cvtColor(pdf_image, cv2.COLOR_BGR2GRAY)
    binarized_image = cv2.adaptiveThreshold(
        src=gray_image,
        maxValue=255,
        adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv2.THRESH_BINARY,
        blockSize=11,
        C=2
    )

    # sum along the row axis
    row_sum = np.sum(binarized_image, axis=1)
    idx_row_sum = np.argwhere(row_sum < row_sum.max())[:, 0]

    split_idx = []
    start_idx = idx_row_sum[0]
    for index, idx in enumerate(idx_row_sum[:-1]):
        if idx_row_sum[index + 1] - idx > 5:
            end_idx = idx
            split_idx.append((start_idx, end_idx))
            start_idx = idx_row_sum[index + 1]
    split_idx.append((start_idx, idx_row_sum[-1]))

    pdf_image_splits = []
    for index in range(len(split_idx)):
        idx = split_idx[index]
        pdf_image_split = pdf_image[idx[0]:idx[1], :, :]
        pdf_image_splits.append(pdf_image_split)

    return pdf_image_splits
9a304f54167c4fbb7739c7022b12c5b574240861
30,009
def get_tokens():
    """ Get all the active tokens in the database."""
    return query_db('select token from token')
960c613147d3d55a3e56dcef06fc974a4c553929
30,010
def get_stats(beta0, n, sigma, lam, pen, ntrials=100, maxiter=100):
    """
    run ntrials regression problems
    return mean of the mse, and 95% confidence interval
    """
    if pen is None:
        mses = run_trials_ols(beta0, n, sigma, ntrials=ntrials)
    else:
        mses = run_trials(beta0, n, sigma, lam, pen, ntrials=ntrials, maxiter=maxiter)
    mmean = np.mean(mses)
    qs = np.quantile(mses, [0.025, 0.875])
    return mmean, qs
e6357f64b40eb424e295088920f5f8dcba558896
30,012
def squeeze_output_dim_0(initial_ndims, point_types):
    """Determine if the output needs to squeeze a singular dimension 0.

    The dimension 0 is squeezed iff all input parameters:
    - contain one sample,
    - have the corresponding dimension 0 squeezed,
    i.e. if all input parameters have ndim strictly less than the ndim
    corresponding to their vectorized shape.
    """
    for ndim, point_type in zip(initial_ndims, point_types):
        vect_ndim = POINT_TYPES_TO_NDIMS[point_type]
        assert ndim <= vect_ndim
        if ndim == vect_ndim:
            return False
    return True
448291f75d758867e65c1693de6c40ab80a7b642
30,013
import math


def quaternion_from_matrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.

    If isprecise is True, the input matrix is assumed to be a precise rotation
    matrix and a faster algorithm is used.
    """
    M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
    if isprecise:
        q = np.empty((4,))
        t = np.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            i, j, k = 0, 1, 2
            if M[1, 1] > M[0, 0]:
                i, j, k = 1, 2, 0
            if M[2, 2] > M[i, i]:
                i, j, k = 2, 0, 1
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
            q = q[[3, 0, 1, 2]]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0],
                      [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
                      [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
                      [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]])
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = np.linalg.eigh(K)
        q = V[[3, 0, 1, 2], np.argmax(w)]
    if q[0] < 0.0:
        np.negative(q, q)
    return q
dcc4ee7e6b2493a96a78b7da45b6f6edb12be550
30,014
def check_version(stdout):
    """Check version of Ensembl-VEP.

    Example of the first part of an output from the command `vep --help`:

        #----------------------------------#
        # ENSEMBL VARIANT EFFECT PREDICTOR #
        #----------------------------------#

        Versions:
          ensembl              : 104.1af1dce
          ensembl-funcgen      : 104.59ae779
          ensembl-io           : 104.1d3bb6e
          ensembl-variation    : 104.6154f8b
          ensembl-vep          : 104.3

        Help: [email protected] , [email protected]
        Twitter: @ensembl
    """
    vep_version = int(
        float(
            next(
                (line for line in stdout.split("\n") if "ensembl-vep" in line)
            ).split()[2]
        )
    )
    return vep_version
5c3b716db7016f1b612f764fb54e3b25d970b0f2
30,015
def tf_config():
    """ Default tensorflow config. """
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    return config
fdc28c9968457af92afb18f8315e07c04890ff50
30,016
def customize_response_cros(data):
    """Build a customized cross-origin (CORS) response."""
    response = make_response(jsonify(data))
    # Set the response headers
    # Domains allowed to use the response data; the host field of the request header could also be used as a filter.
    response.headers["Access-Control-Allow-Origin"] = '*'
    # Allowed request methods
    response.headers["Access-Control-Allow-Methods"] = 'POST'
    # Allowed request headers
    response.headers["Access-Control-Allow-Headers"] = "x-requested-with,content-type"
    return response
c702644bc9faae45057263a207e42e110d125165
30,019
def shortdateformat(value, default_value=None):
    """
    Example value: datetime.strptime("2018-07-25 10:15:00", "%Y-%m-%d %H:%M:%S")
    Example output: '25 July'

    "shortdateformat" was designed for use in summary tables where space is tight
    and dates are shown on their own line. The original intended use was in
    conjunction with "timeformat" in admin app summary tables.

    It is now (Jan 2018) also used in briefs-frontend on the "Publish your
    requirements" page only.

    ** USE OUR STANDARD dateformat RATHER THAN THIS UNLESS THERE IS A GOOD REASON NOT TO **
    """
    return _format_date(value, default_value, DISPLAY_SHORT_DATE_FORMAT, localize=False)
31ce7fa824df3d746d5e81e960864fabb1329307
30,020
def build_suffix_array(text):
    """
    Build suffix array of the string text and return a list result of the
    same length as the text such that the value result[i] is the index
    (0-based) in text where the i-th lexicographically smallest suffix of
    text starts.
    """
    order = sort_characters(text)
    classes = compute_char_classes(text, order)
    shiftlen = 1
    while shiftlen < len(text):
        order = sort_doubled(text, shiftlen, order, classes)
        classes = update_classes(order, classes, shiftlen)
        shiftlen *= 2
    return order
05eb036cb749e030d84bb2d494a447faa7f93e6c
30,021
from mne.utils import _time_mask


def compute_auc(dip, tmin=-np.inf, tmax=np.inf):
    """Compute the AUC values for a DipoleFixed object."""
    if not isinstance(dip, DipoleFixed):
        raise TypeError('dip must be a DipoleFixed, got "%s"' % (type(dip),))
    pick = pick_types(dip.info, meg=False, dipole=True)
    if len(pick) != 1:
        raise RuntimeError('Could not find dipole data')
    time_mask = _time_mask(dip.times, tmin, tmax, dip.info['sfreq'])
    data = dip.data[pick[0], time_mask]
    return np.sum(np.abs(data)) * len(data) * (1. / dip.info['sfreq'])
cedfeb7934ee86e6c1ae702ba0cbecceb83c90db
30,023
def left_join(ht1, ht2):
    """
    :param ht1: left hash table
    :param ht2: right hash table
    :return: list of joined values from both hash tables
    """
    results = []
    for item in ht1.table:
        while item is not None:
            key = item.val[0]
            joined = [key, ht1.get(key), ht2.get(key)]
            results.append(joined)
            item = item.next
    return results
8f34e03d055a32ea337b27cd800eeb393d136dfa
30,024
from typing import Tuple
import torch
import re


def load_pretrained_cifar10_model(
    path: str,
    resnet_size: int = 32,
) -> Tuple[nn.Module, DifferentiableNormalize]:
    """
    Loads a pretrained CIFAR-10 ResNet from the given path along with its
    associated normalizer.
    """
    model: nn.Module = getattr(cifar_resnets, f'resnet{resnet_size}')()
    model_state = torch.load(path, map_location=torch.device('cpu'))
    model.load_state_dict({re.sub(r'^module\.', '', k): v
                           for k, v in model_state['state_dict'].items()})

    normalizer = DifferentiableNormalize(
        mean=config.CIFAR10_MEANS,
        std=config.CIFAR10_STDS,
    )

    return model, normalizer
2d1a907b2d90459661bdd0e578828ac0949d68e3
30,025
import logging def create_project(**kwargs): # noqa: E501 """Creates a project with an original network file. Creates a project with an original network file. # noqa: E501 :param designation: :type designation: str :param description: :type description: str :param network_designation: :type network_designation: str :param network_directed: :type network_directed: bool :param network_multigraph: :type network_multigraph: bool :param network_file: Binary object which contains the network file with a standard network format. :type network_file: str :param additional_network_file: Binary object which contains an additional network file with a standard network format (especailly used for CSV imports). :type additional_network_file: str :param file_format: :type file_format: str :rtype: Project """ body = dict(kwargs.items()).get('body') file = dict(kwargs.items()).get('network_file') additional_file = dict(kwargs.items()).get('additional_network_file') # Try to process and safe the file before accessing the Database try: file_format = body.get('file_format') network_file = NetworkFile(file_format, file, additional_file) node_list = network_file.parse_nodes() except Exception: logging.exception("Exception while handling the input file") e = http_exceptions.InternalServerError( description='Something went wrong! Please check if your network file is correct.') raise e try: db = DatabaseConnector.get_db_instance() project_id = db.add_project( designation=body.get('designation'), description=body.get('description') ) original_network_id = db.add_original_network_to_project( designation=body.get('network_designation'), directed=body.get('network_directed'), multigraph=body.get('network_multigraph'), project_id=project_id ) predicted_network_id = db.add_predicted_network_to_project( designation=body.get('network_designation'), project_id=project_id ) nodes = db.add_nodes(node_list, original_network_id, predicted_network_id) edge_list = network_file.parse_edges(nodes) db.add_edges_to_original_network(edge_list, original_network_id) for node in nodes: attribute_list = network_file.parse_attributes(node[0]) if attribute_list: db.add_node_attributes(attribute_list, node[1]) graph = build_original_graph('project_id', project_id) save_predicted_graph_to_db(graph.copy(), predicted_network_id) default_evaluation_setup = { "random_seed": 42, "with_validation": False, "train_sampling_ratio": 0.8, "test_sampling_ratio": 0.9, "ml_preprocessing": False } db.add_or_update_evaluation_result(project_id, default_evaluation_setup) return Project( id=project_id, designation=body.get('designation'), description=body.get('description'), original_network_id=original_network_id, predicted_network_id=predicted_network_id ) except Exception: logging.exception("Exception occured while inserting data in the database") e = http_exceptions.InternalServerError( description='Something went wrong! The input file seems to be wrong and the data could not be loaded into the database.') raise e
c57951358383b18f35fdf9c6ca899f09979565ae
30,027
def _get_boto_client(cluster, access_key, secret_key):
    """
    Returns a boto client object that can be used to communicate with the
    Object Storage cluster.
    """
    client = boto.connect_s3(aws_access_key_id=access_key,
                             aws_secret_access_key=secret_key,
                             host=BASE_URL_TEMPLATE.format(cluster),
                             calling_format=OrdinaryCallingFormat())

    # set this for later use
    client.obj_cluster = cluster

    return client
4351e74610948c17c8c35ae86a5fa1ca15f4158f
30,028
def triangleArea(a: Vec3, b: Vec3, c: Vec3) -> float:
    """ Calculate area of triangle

    :return: area
    """
    return cross3(b - a, c - a).length() / 2.0
ad5c73e07421c01f3db3a9d4592ec218307c8a80
30,029
async def read_object_name(app, device_id, addr):
    """
    Execute a single request using `ReadPropertyRequest`.
    This will read the `objectName` property of a remote device.

    :param app: An app instance
    :param device_id: BACnet device id (integer number)
    :param addr: The network address of the remote device
    :return: The object name value
    """
    return await app.execute_request(
        ReadPropertyRequest(
            objectIdentifier=('device', device_id),
            propertyIdentifier='objectName',
            destination=Address(addr)
        )
    )
3e9e023be615911ca0f43ff2a0aec62cec695d25
30,030
import requests
from bs4 import BeautifulSoup


def jws_omex_dict():
    """ Returns dictionary of available JWS combine archives.

    :return: { id: download_url } dict
    """
    jws_omex = {}

    num_omex = 0
    for page_iter in range(NUM_PAGES):
        url = URL.format(page_iter+1)  # 1 based counting
        page = requests.get(url)
        if page.status_code == 200:
            soup = BeautifulSoup(page.content, 'html.parser')
            # select all <a> in <td>
            items = soup.select('td a')
            # only interested in the download links
            links = [a.get("href") for a in items if "combinearchive?download=1" in a.get('href')]
            print("N(page={}) = {}".format(page_iter+1, len(links)))
            num_omex += len(links)

            for url in links:
                tokens = url.split('/')
                name = tokens[3]
                jws_omex[name] = "http://jjj.mib.ac.uk" + url

    # pprint.pprint(jws_omex)
    print('---------')
    print(num_omex)

    return jws_omex
21e378038bbce07a166d315a183a08ce69f9a544
30,031
def ztrsv(A, x, Uplo=CblasLower, TransA=CblasNoTrans, Diag=CblasNonUnit):
    """
    returns x'

    This function computes inv(op(A)) x for x, where op(A) = A, A^T, A^H for
    TransA = CblasNoTrans, CblasTrans, CblasConjTrans.

    When Uplo is CblasUpper then the upper triangle of A is used, and when
    Uplo is CblasLower then the lower triangle of A is used. If Diag is
    CblasNonUnit then the diagonal of the matrix is used, but if Diag is
    CblasUnit then the diagonal elements of the matrix A are taken as unity
    and are not referenced.
    """
    xn = array_typed_copy(x)
    _gslwrap.gsl_blas_ztrsv(Uplo, TransA, Diag, A, xn)
    return xn
11b3420e09718fabc907b484142b67ce4f828c4f
30,032
def get_trainable_vars(name=None):
    """Return the trainable variables.

    Parameters
    ----------
    name : str
        the scope

    Returns
    -------
    list of tf.Variable
        trainable variables
    """
    return tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES,
        scope=name)
3c5b005613a7a6f0cd0420e60337ed7bf88bf92f
30,033
def player_to_string(game: reversi.ReversiGame, player_colour: str, player: ai_players.Player) \
        -> str:
    """
    Returns the string representation of the type of the player.

    Preconditions:
        - player_colour in {'white', 'black'}
    """
    if game.get_human_player() == 1 and player_colour == 'black':
        return 'Human'
    elif game.get_human_player() == -1 and player_colour == 'white':
        return 'Human'
    else:  # the player is one of the AI players
        if isinstance(player, ai_players.RandomPlayer):
            return 'Random Moves'
        elif (isinstance(player, ai_players.MinimaxPlayer)
              or isinstance(player, ai_players.MinimaxABPlayer)):
            return 'Minimax ' + str(player.depth)
a1e6dfe184d471616fac7a1ab1bbb2d959c5457c
30,034
from typing import Any


def encode_pd_timestamp(v: pd.Timestamp) -> Any:
    """
    Specializes :func:`encode` for invocations where ``v`` is an instance of
    the :class:`~pandas.Timestamp` class.
    """
    return {
        "__kind__": kind_inst,
        "class": "pandas.Timestamp",
        "args": encode([str(v)]),
        "kwargs": {"freq": v.freqstr if v.freq else None},
    }
fec08229d2a9b8f7115986e4c97677b401200adf
30,035
async def getWebUserAmount(cls: "WebIndex", where: str = "1=1", values: tuple = ()) -> int:
    """ simply gives a number of all matched user """
    res: list = cls.Web.BASE.PhaazeDB.selectQuery(
        f"SELECT COUNT(*) AS `I` FROM `user` WHERE {where}", values)
    return res[0]['I']
a019a8de0644a7295cfafdce80a544ae31bcb225
30,036
def randomise_spikes(spiketrain, n_surrogates=1, decimals=None): """ Generates surrogates of a spike train by spike time randomization. The surrogates are obtained by keeping the spike count of the original `spiketrain`, but placing the spikes randomly in the interval `[spiketrain.t_start, spiketrain.t_stop]`. The generated independent `neo.SpikeTrain` objects follow Poisson statistics (exponentially distributed inter-spike intervals). Parameters ---------- spiketrain : neo.SpikeTrain The spike train from which to generate the surrogates. n_surrogates : int, optional Number of surrogates to be generated. Default: 1 decimals : int or None, optional Number of decimal points for every spike time in the surrogates. If None, machine precision is used. Default: None Returns ------- list of neo.SpikeTrain Each surrogate spike train obtained independently from `spiketrain` by randomly distributing its spikes in the interval `[spiketrain.t_start, spiketrain.t_stop]`. Examples -------- >>> import quantities as pq >>> import neo ... >>> st = neo.SpikeTrain([100, 250, 600, 800] * pq.ms, t_stop=1 * pq.s) >>> print(randomise_spikes(st)) # doctest: +SKIP [<SpikeTrain(array([ 131.23574603, 262.05062963, 549.84371387, 940.80503832]) * ms, [0.0 ms, 1000.0 ms])>] >>> print(randomise_spikes(st, n_surrogates=2)) # doctest: +SKIP [<SpikeTrain(array([ 84.53274955, 431.54011743, 733.09605806, 852.32426583]) * ms, [0.0 ms, 1000.0 ms])>, <SpikeTrain(array([ 197.74596726, 528.93517359, 567.44599968, 775.97843799]) * ms, [0.0 ms, 1000.0 ms])>] >>> print(randomise_spikes(st, decimals=0)) # doctest: +SKIP [<SpikeTrain(array([ 29., 667., 720., 774.]) * ms, [0.0 ms, 1000.0 ms])>] """ # Create surrogate spike trains as rows of a Quantity array sts = ((spiketrain.t_stop - spiketrain.t_start) * np.random.random(size=(n_surrogates, len(spiketrain))) + spiketrain.t_start).rescale(spiketrain.units) # Round the surrogate data to decimal position, if requested if decimals is not None: sts = sts.round(decimals) # Convert the Quantity array to a list of SpikeTrains, and return them return [neo.SpikeTrain(np.sort(st), t_start=spiketrain.t_start, t_stop=spiketrain.t_stop, sampling_rate=spiketrain.sampling_rate) for st in sts]
cf8d911f73a3a62b9586ea41c4683ba84a91b8a1
30,037
import numpy as np


def interface_PSO(theta, args):
    """
    Interfaces the PSO with the ANFIS: each particle gets its own ANFIS
    instance, whose cost is evaluated independently.

    Shapes:
        theta           (nPop, n_var)
        learners        (nPop, )
        J               (nPop, )
    """
    args_PSO = (args[0], args[1])
    learners = args[2]
    nPop = theta.shape[0]

    J = np.zeros(nPop)
    for i in range(nPop):
        J[i] = learners[i].create_model(theta[i, :], args_PSO)

    return J
a725db597ccf4a5928c305ad1493bcd0c99b94a5
30,038
def corr2d(X, K):
    """Compute the 2D cross-correlation of input X with kernel K."""
    h, w = K.shape
    Y = mnp.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y
dcbd523879df0f2529a0e68b6c65d829addbc786
30,039
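A minimal usage sketch for the corr2d function above, written with plain NumPy under the assumption that mnp refers to a NumPy-compatible module (e.g. MindSpore's numpy); the classic 3x3 input with a 2x2 kernel gives the expected 2x2 output.

# Hypothetical usage sketch: mnp is assumed to behave like NumPy, so plain
# NumPy is substituted here for illustration.
import numpy as np

def corr2d_np(X, K):
    # Same logic as corr2d above, with np standing in for mnp.
    h, w = K.shape
    Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y

X = np.arange(9, dtype=float).reshape(3, 3)   # [[0,1,2],[3,4,5],[6,7,8]]
K = np.arange(4, dtype=float).reshape(2, 2)   # [[0,1],[2,3]]
print(corr2d_np(X, K))                        # [[19. 25.] [37. 43.]]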
from src.praxxis.sqlite import sqlite_scene
from src.praxxis.sqlite import sqlite_notebook
from src.praxxis.display import display_scene


def history(history_db, library_db, current_scene_db):
    """Displays the notebook history of the scene."""

    curr_scene = sqlite_scene.get_current_scene(history_db)
    notebook_history = sqlite_scene.get_notebook_history(current_scene_db)
    display_scene.display_history(curr_scene, notebook_history)

    # get paths and format for writing into notebook list
    notebooks = []
    for notebook_info in notebook_history:
        # pass the library_db, notebook name, notebook library
        notebook_data = sqlite_notebook.get_notebook(library_db, notebook_info[1])[0]
        notebooks.insert(0, notebook_data)

    sqlite_notebook.write_list(current_scene_db, notebooks)
    return notebooks
c6ac411488f69b61678fb298639a8dfa0a103901
30,040
import numpy as np


def get_optimal_parameters_from_dict(selected_dict, num_features):
    """ Find optimal parameters from dictionary of selected features

    Arguments
    ---------
    selected_dict: dictionary
        keys = parameters
        values = dictionary
            keys = task index
            values = list of list of selected features (for each subsample)
    num_features: int
        Total number of features

    Returns
    -------
    opt_params: string
        The parameter setting with the highest mean consistency index,
        where each task's consistency index is computed over the features
        selected for its subsamples and the mean is taken over tasks.
    """
    opt_params = ''
    opt_ci_mean = -1  # set to -1 because it is the worst case ci value
    for (params, selected_dict_p) in selected_dict.items():
        ci_list = []  # list of ci, one per task, computed with current params
        for (task_idx, sel_list) in selected_dict_p.items():
            ci_of_current_task = consistency_index_k(sel_list, num_features)
            ci_list.append(ci_of_current_task)
        ci_mean = np.mean(ci_list)
        if ci_mean >= opt_ci_mean:
            opt_ci_mean = ci_mean
            opt_params = params
    return opt_params
d473f963c482bcdd8a2eebd65f2e9ae50fe46a32
30,041
async def _get_input_dialog(self: 'TelegramClient', dialog): """ Returns a :tl:`InputDialogPeer`. This is a bit tricky because it may or not need access to the client to convert what's given into an input entity. """ try: if dialog.SUBCLASS_OF_ID == 0xa21c9795: # crc32(b'InputDialogPeer') dialog.peer = await self.get_input_entity(dialog.peer) return dialog elif dialog.SUBCLASS_OF_ID == 0xc91c90b6: # crc32(b'InputPeer') return _tl.InputDialogPeer(dialog) except AttributeError: pass return _tl.InputDialogPeer(await self.get_input_entity(dialog))
a58570d5192713e3813cd3cfc6d1295916684a96
30,042
from datetime import datetime def kep_to_sat(kep,epoch,bstar=0.21109E-4,whichconst=wgs72,afspc_mode=False): """kep_to_sat(kep,epoch,bstar=0.21109E-4,whichconst=wgs72,afspc_mode=False) Converts a set of keplerian elements into a Satellite object. Args: kep(1x6 numpy array): the osculating keplerian elements at epoch epoch(float): the epoch bstar(float): bstar drag coefficient whichconst(float): gravity model. refer pypi sgp4 documentation afspc_mode(boolean): refer pypi sgp4 documentation Returns: Satellite object: an sgp4 satellite object encapsulating the arguments """ deg2rad = np.pi / 180.0; # 0.0174532925199433 xpdotp = 1440.0 / (2.0 * np.pi); # 229.1831180523293 tumin = whichconst.tumin satrec = Satellite() satrec.error = 0; satrec.whichconst = whichconst # Python extension: remembers its consts satrec.satnum = 0 dt_obj = datetime.utcfromtimestamp(epoch) t_obj = dt_obj.timetuple() satrec.epochdays = (t_obj.tm_yday + t_obj.tm_hour/24 + t_obj.tm_min/1440 + t_obj.tm_sec/86400) satrec.ndot = 0 satrec.nddot = 0 satrec.bstar = bstar satrec.inclo = kep[2] satrec.nodeo = kep[4] satrec.ecco = kep[1] satrec.argpo = kep[3] satrec.mo = __true_to_mean(kep[5],kep[1]) satrec.no = 86400/(2*np.pi*(kep[0]**3/398600.4405)**0.5) satrec.no = satrec.no / xpdotp; # rad/min satrec.a = pow( satrec.no*tumin , (-2.0/3.0) ); # ---- find standard orbital elements ---- satrec.inclo = satrec.inclo * deg2rad; satrec.nodeo = satrec.nodeo * deg2rad; satrec.argpo = satrec.argpo * deg2rad; satrec.mo = satrec.mo * deg2rad; satrec.alta = satrec.a*(1.0 + satrec.ecco) - 1.0; satrec.altp = satrec.a*(1.0 - satrec.ecco) - 1.0; satrec.epochyr = dt_obj.year satrec.jdsatepoch = epoch/86400.0 + 2440587.5 satrec.epoch = dt_obj # ---------------- initialize the orbit at sgp4epoch ------------------- sgp4init(whichconst, afspc_mode, satrec.satnum, satrec.jdsatepoch-2433281.5, satrec.bstar, satrec.ecco, satrec.argpo, satrec.inclo, satrec.mo, satrec.no, satrec.nodeo, satrec) return satrec
9f5a9f3d487d9ea924ea1c8858c8b0796e543bf2
30,043
def enumerate (): """ Returns an iterator to the features map. """ return __all_features.iteritems ()
fda0a96102add04c4282a61f99d9a664e76f2bd6
30,044
def _create_forward(out_node): """Create a user-friendly forward function. Ensures that a single value instead of a tuple is returned if the user asked for the gradient with respect to only one input. Args: out_node: The function definition AST. Returns: The function definition with potentially changed return statement. """ retval = out_node.body[0].body[-1] if len(retval.value.elts) == 1: retval.value = retval.value.elts[0] return out_node
80cdd1814d62b282c1cde37c783d97a067264e51
30,045
def _get_trip_from_id(trip_obj_list, trip_id): """ Get a trip from a list, based on a trip id """ found_trip_obj = None for trip_obj in trip_obj_list: if trip_obj.id == trip_id: found_trip_obj = trip_obj break return found_trip_obj
f2bbacfccda1e4ff778ba793ad238f744400f020
30,047
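A small usage sketch for the _get_trip_from_id function above; the trip objects here are hypothetical stand-ins, since only an `id` attribute is required by the helper.

from types import SimpleNamespace

# Hypothetical trip objects: only the `id` attribute matters to the helper.
trips = [SimpleNamespace(id=1, name="Morning run"),
         SimpleNamespace(id=2, name="Evening walk")]

found = _get_trip_from_id(trips, 2)
print(found.name if found else "not found")   # Evening walk
print(_get_trip_from_id(trips, 99))           # None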
def density_plot(df, y_column, models, model_names=(), columns_to_exclude=()): """This function creates the density plot of predicted positive class probability on actual positive and negative data by each model in models in the same plot. It also computes the difference between the distributions on positive and negative data using Bhattacharyya distance, KL distance, and cross entropy (a.k.a. log-loss). Parameters ---------- df : DataFrame Data to be plotted y_column : str Label of the class column models : array-like The model objects to be evaluated model_names : array-like The name of the models to be shown in the legends columns_to_exclude : tuple, optional (default=()) Labels of unwanted columns Returns ------- plot_wrapper : pytalite.plotwrapper.PlotWrapper The PlotWrapper object that contains the information and data of the plot Raises ------ ValueError If models is empty or models and model_names does not have the same length """ # Get X, y array representation of data snd predict probability X, y = df_to_arrays(df, y_column, columns_to_exclude) pos_idx = y == 1 neg_idx = y == 0 n_models = len(models) if n_models == 0: raise ValueError("no models to evaluate") if len(model_names) == 0: model_names = ["model %d" % (i + 1) for i in range(n_models)] if len(model_names) != n_models: raise ValueError("models and model_names must have the same length") # List and array to store data pos_data = np.empty((0, 1000)) neg_data = np.empty((0, 1000)) bds = [] kls = [] ces = [] with plt.style.context(style_path): fig = plt.figure(figsize=(12, 9)) grid = GridSpec(2, 1, height_ratios=[3.5, 3.5], hspace=0) ax1 = fig.add_subplot(grid[0]) ax2 = fig.add_subplot(grid[1]) scores = [] # Compute density curve for all models for model, model_name in zip(models, model_names): y_prob = model.predict_proba(X)[:, 1] # Fit gaussian kernels on the data kernel_pos = st.gaussian_kde(y_prob[pos_idx]) kernel_neg = st.gaussian_kde(y_prob[neg_idx]) xs = np.arange(1000) / 1000 pos_y = kernel_pos(xs) neg_y = kernel_neg(xs) # Normalize the curve pos_norm = (pos_y / pos_y.sum())[np.newaxis, :] neg_norm = (neg_y / neg_y.sum())[np.newaxis, :] # Compute all three scores bd = _bhattacharyya_distance(pos_norm, neg_norm, normalize=True) kl = st.entropy(pos_norm[0], neg_norm[0]) ce = _cross_entropy(pos_norm, neg_norm, normalize=True) # Plot using the kernels line_plot(ax1, xs, pos_y, legend=model_name, line_color=None, line_label=False) line_plot(ax2, xs, neg_y, line_color=None, line_label=False) scores.append("%s: Bhattacharyya Distance: %.4f, KL Distance: %.4f, Cross-Entropy: %.4f" % (model_name, bd, kl, ce)) # Add data pos_data = np.vstack((pos_data, pos_y)) neg_data = np.vstack((neg_data, neg_y)) bds.append(bd) kls.append(kl) ces.append(ce) ylim_max = max(pos_data.max(), neg_data.max()) * 1.1 ylim_min = round(-ylim_max * 0.05, 1) # Add scores to plot as text # ax3.text(0.5, 0.5, "\n".join(scores), va="center", ha="center") config_axes(ax1, xticks=[], ylabel="Positive Density", ylim=(ylim_min, ylim_max)) config_axes(ax2, y_invert=True, xlabel="Probability\n" + "\n".join(scores), ylabel="Negative Density", ylim=(ylim_min, ylim_max)) plt.show() return PlotWrapper(fig, (ax1, ax2), {"probability": xs, "pos_density": pos_data, "neg_density": neg_data, "Bhattacharyya": np.array(bds), "KL": np.array(kls), "cross_entropy": np.array(ces)})
e3120e3b5fc0b07e12e5fa41fe0f288a9d98495c
30,048
def clip_chk(x, lb, ub, allow_nan=False): """Clip all element of `x` to be between `lb` and `ub` like :func:`numpy:numpy.clip`, but also check :func:`numpy:numpy.isclose`. Shapes of all input variables must be broadcast compatible. Parameters ---------- x : :class:`numpy:numpy.ndarray` Array containing elements to clip. lb : :class:`numpy:numpy.ndarray` Lower limit in clip. ub : :class:`numpy:numpy.ndarray` Upper limit in clip. allow_nan : bool If true, we allow ``nan`` to be present in `x` without out raising an error. Returns ------- x : :class:`numpy:numpy.ndarray` An array with the elements of `x`, but where values < `lb` are replaced with `lb`, and those > `ub` with `ub`. """ assert np.all(lb <= ub) # np.clip does not do this check x = np.asarray(x) # These are asserts not exceptions since clip_chk most used internally. if allow_nan: assert np.all(isclose_lte(lb, x) | np.isnan(x)) assert np.all(isclose_lte(x, ub) | np.isnan(x)) else: assert np.all(isclose_lte(lb, x)) assert np.all(isclose_lte(x, ub)) x = np.clip(x, lb, ub) return x
e799e00adb4152a7d2ca2faf95eb82744149e59d
30,049
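A short usage sketch for the clip_chk function above. The helper relies on an isclose_lte function from its host module; a minimal stand-in is assumed here so the example runs on its own.

import numpy as np

# Assumed stand-in for the host module's isclose_lte helper:
# "less than or equal, up to numerical tolerance".
def isclose_lte(a, b):
    return (a <= b) | np.isclose(a, b)

x = np.array([0.0, 0.5, 1.0 + 1e-12])             # slightly above 1.0, but within tolerance
print(clip_chk(x, 0.0, 1.0))                       # [0.  0.5 1. ]

x_nan = np.array([0.2, np.nan])
print(clip_chk(x_nan, 0.0, 1.0, allow_nan=True))   # [0.2 nan]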
def grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """ Convert a grid of (y,x) arc second coordinates to a grid of (y,x) pixel 1D indexes. Pixel coordinates are \
    returned as integers such that they are the pixel from the top-left of the 2D grid going right and then \
    downwards.

    For example:

    The pixel at the top-left, whose 2D index is [0,0], corresponds to 1D index 0.
    The fifth pixel on the top row, whose 2D index is [0,4], corresponds to 1D index 4.
    The first pixel on the second row, whose 2D index is [1,0], has 1D index 10 if a row has 10 pixels.

    The arc-second coordinate grid is defined by the class attribute origin, and coordinates are shifted to this \
    origin before computing their 1D grid pixel indexes.

    The input grid is of shape (total_pixels, 2) and the output grid of 1D indexes is of shape (total_pixels,).

    Parameters
    ----------
    grid_arc_seconds_1d: ndarray
        The grid of (y,x) coordinates in arc seconds which is converted to 1D pixel indexes.
    shape : (int, int)
        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the arc-second grid is shifted.

    Returns
    --------
    ndarray
        A grid of 1D pixel indexes with dimensions (total_pixels,).

    Examples
    --------
    grid_arc_seconds_1d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])

    grid_pixels_1d = grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds_1d, shape=(2,2),
                                                                  pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    grid_pixels = grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds_1d, shape=shape,
                                                               pixel_scales=pixel_scales, origin=origin)

    grid_pixel_indexes = np.zeros(grid_pixels.shape[0])

    for i in range(grid_pixels.shape[0]):

        grid_pixel_indexes[i] = int(grid_pixels[i,0] * shape[1] + grid_pixels[i,1])

    return grid_pixel_indexes
966133fe0a979c913b704a448d02124005a9946d
30,050
import fnmatch def make_test_run_filter( complete: bool = False, failed: bool = False, incomplete: bool = False, name: str = None, newer_than: dt.datetime = None, older_than: dt.datetime = None, passed: bool = False, result_error: bool = False, show_skipped: bool = False, sys_name: str = None, user: str = None): """Generate a filter function for use by dir_db.select and similar functions. This operates on TestAttribute objects, so make sure to pass the TestAttribute class as the transform to dir_db functions. :param complete: Only accept complete tests :param failed: Only accept failed tests :param incomplete: Only accept incomplete tests :param name: Only accept names that match this glob. :param newer_than: Only accept tests that are more recent than this date. :param older_than: Only accept tests older than this date. :param passed: Only accept passed tests :param result_error: Only accept tests with a result error. :param show_skipped: Accept skipped tests. :param sys_name: Only accept tests with a matching sys_name. :param user: Only accept tests started by this user. :return: """ if sys_name == LOCAL_SYS_NAME: sys_vars = system_variables.get_vars(defer=True) sys_name = sys_vars['sys_name'] # select once so we only make one filter. def filter_test_run(test_attrs: TestAttributes) -> bool: """Determine whether the test run at the given path should be included in the set.""" if show_skipped == 'no' and test_attrs.skipped: return False elif show_skipped == 'only' and not test_attrs.skipped: return False if complete and not test_attrs.complete: return False if incomplete and test_attrs.complete: return False if user and test_attrs.user != user: return False if sys_name and sys_name != test_attrs.sys_name: return False if passed and test_attrs.result != TestRun.PASS: return False if failed and test_attrs.result != TestRun.FAIL: return False if result_error and test_attrs.result != TestRun.ERROR: return False if older_than is not None and test_attrs.created > older_than: return False if newer_than is not None and test_attrs.created < newer_than: return False if name and not fnmatch.fnmatch(test_attrs.name, name): return False return True return filter_test_run
57f20287ac957b394b8ac25946da3da42deec8ec
30,051
def get_runnable_tasks(graph): """Parse a graph and return all runnable tasks.""" tasks = [] to_remove = [] # tasks that follow task that raises an error following_err = dict() for tsk in graph.sorted_nodes: if tsk not in graph.sorted_nodes: continue # since the list is sorted (breadth-first) we can stop # when we find a task that depends on any task that is already in tasks if set(graph.predecessors[tsk.name]).intersection(set(tasks)): break _is_runnable = is_runnable(graph, tsk) if _is_runnable is True: tasks.append(tsk) to_remove.append(tsk) elif _is_runnable is False: continue else: # a previous task had an error errored_task = _is_runnable # removing all successors of the errored task for task_err in errored_task: task_to_remove = graph.remove_successors_nodes(task_err) for tsk in task_to_remove: # adding tasks that were removed from the graph # due to the error in the errored_task following_err.setdefault(tsk, []) following_err[tsk].append(task_err.name) # removing tasks that are ready to run from the graph for nd in to_remove: graph.remove_nodes(nd) return tasks, following_err
155206f67986e65b74148e173d6641d994dd08bd
30,052
def get_defaults(lang): """Get the language-specific defaults, if available in spaCy. This allows using lexical attribute getters that depend on static language data, e.g. Token.like_num, Token.is_stop, Doc.noun_chunks etc. lang (unicode): ISO 639-1 language code. RETURNS (Language.Defaults): The language defaults. """ try: lang_cls = get_lang_class(lang) return lang_cls.Defaults except ImportError: return Language.Defaults
3ef08b4bd410407ad8519d1c4f84c6c279aa8257
30,053
def h2(*text, **kwargs): """Return a header 2""" return tydoc().h2(*text, **kwargs)
7b5b465282222c31d84121e6f890da964cb63fd5
30,054
def update_comment(comment_id, data): """ update comment using its id. """ comment = Comment.query.get(comment_id) for attribute in data: setattr(comment, attribute, data[attribute]) db.session.commit() return comment_schema.dump(comment).data
aaeb88479bf82ea3ac09a56d17e09c4ba05eda47
30,055
def RMSE(A, A_tilde): """ Root mean square error. Gives the standard deviation of the residuals (prediction errors). Parameters ---------- A : ndarray Forecast. A_tilde : ndarray Observation. Returns ------- float Root mean square error. """ return np.sqrt((abs(A_tilde - A)**2).mean())
58a5b833725497be224804a71855819afa9fc33f
30,057
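A quick worked example for the RMSE function above.

import numpy as np

A = np.array([1.0, 2.0, 3.0])        # forecast
A_tilde = np.array([1.0, 2.0, 5.0])  # observation

# residuals are [0, 0, 2]; mean squared error is 4/3, so RMSE is ~1.1547
print(RMSE(A, A_tilde))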
def remove_punct(tokens): """ Remove punctuation marks from lists of tokens Parameters ---------- tokens: list a nested list containing lists of tokens or a list of spacy docs Returns ------- filtered_comments: list nested lists of tokens """ filtered_comments = [[token for token in comment if nlp.vocab[token.text].is_punct == False] for comment in tokens] return filtered_comments
c6d7b70a6bf3efe7be3dd144f10ac0f8f5b11e72
30,058
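A usage sketch for the remove_punct function above; it assumes a spaCy pipeline is loaded into the module-level `nlp` that the function references (the model name below is an assumption).

import spacy

# Assumption: the host module exposes a loaded pipeline as `nlp`.
nlp = spacy.load("en_core_web_sm")

comments = ["Great article, thanks!", "Hmm... not sure."]
tokens = [[token for token in nlp(comment)] for comment in comments]

filtered = remove_punct(tokens)
print([[t.text for t in comment] for comment in filtered])
# e.g. [['Great', 'article', 'thanks'], ['Hmm', 'not', 'sure']]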
def to_camel_case(string: str) -> str: """ Converts a ``snake_case`` string to ``camelCase``. :param string: A ``snake_case`` string. :return: A ``camelCase`` version of the input. """ components = string.split("_") return components[0] + "".join(x.capitalize() for x in components[1:])
ae0d82efd9a5a65ef16cc401a0fe302b4f04d524
30,059
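A couple of quick examples for the to_camel_case function above.

print(to_camel_case("snake_case"))           # snakeCase
print(to_camel_case("http_response_code"))   # httpResponseCode
print(to_camel_case("already"))              # already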
import requests def reconnect(user_data): """ Attempt to perform a login to the Unistudium website, saving the cookies in user_data. Returns: "OK" if the login was performed correctly, else a description with the error that can be used to inform the users. """ # Check if user's session exists if 'session' not in user_data: user_data['session'] = requests.Session() # Getting credentials if available try: payload = { "username": user_data['credentials']['username'], "password": user_data['credentials']['password'] } except KeyError: return "Non hai effettuato ancora il login, effettualo tramite il comando /login." # Check if server is alive status_code = requests.head(LOGIN_URL).status_code if status_code != 200: print(Fore.RED + "[CONNECTION] Server irraggiungibile. Status code: " + str(status_code)) return "Non riesco a contattare il server (CODE: %d), riprova più tardi..." % status_code # Check if the login is still valid (without performing a login) if user_data['session'].head(MAIN_URL).status_code == 200: return "OK" # Perform the login if user_data['session'].post(LOGIN_URL, data=payload).url != MAIN_URL: return "Le credenziali fornite non sono valide. Riprova." return "OK"
e05096d69605300680b29bc12784faa3734f40b2
30,060
from typing import Optional from typing import Collection from typing import List from typing import Tuple from typing import Set from typing import Dict import operator def yake( doc: Doc, *, normalize: Optional[str] = "lemma", ngrams: int | Collection[int] = (1, 2, 3), include_pos: Optional[str | Collection[str]] = ("NOUN", "PROPN", "ADJ"), window_size: int = 2, topn: int | float = 10, ) -> List[Tuple[str, float]]: """ Extract key terms from a document using the YAKE algorithm. Args: doc: spaCy ``Doc`` from which to extract keyterms. Must be sentence-segmented; optionally POS-tagged. normalize: If "lemma", lemmatize terms; if "lower", lowercase terms; if None, use the form of terms as they appeared in ``doc``. .. note:: Unlike the other keyterm extraction functions, this one doesn't accept a callable for ``normalize``. ngrams: n of which n-grams to consider as keyterm candidates. For example, `(1, 2, 3)`` includes all unigrams, bigrams, and trigrams, while ``2`` includes bigrams only. include_pos: One or more POS tags with which to filter for good candidate keyterms. If None, include tokens of all POS tags (which also allows keyterm extraction from docs without POS-tagging.) window_size: Number of words to the right and left of a given word to use as context when computing the "relatedness to context" component of its score. Note that the resulting sliding window's full width is ``1 + (2 * window_size)``. topn: Number of top-ranked terms to return as key terms. If an integer, represents the absolute number; if a float, value must be in the interval (0.0, 1.0], which is converted to an int by ``int(round(len(candidates) * topn))`` Returns: Sorted list of top ``topn`` key terms and their corresponding YAKE scores. References: Campos, Mangaravite, Pasquali, Jorge, Nunes, and Jatowt. (2018). A Text Feature Based Automatic Keyword Extraction Method for Single Documents. Advances in Information Retrieval. ECIR 2018. Lecture Notes in Computer Science, vol 10772, pp. 684-691. """ # validate / transform args ngrams = utils.to_collection(ngrams, int, tuple) include_pos = utils.to_collection(include_pos, str, set) if isinstance(topn, float): if not 0.0 < topn <= 1.0: raise ValueError( f"topn = {topn} is invalid; " "must be an int, or a float between 0.0 and 1.0" ) # bail out on empty docs if not doc: return [] stop_words: Set[str] = set() seen_candidates: Set[str] = set() # compute key values on a per-word basis word_occ_vals = _get_per_word_occurrence_values( doc, normalize, stop_words, window_size ) # doc doesn't have any words... 
if not word_occ_vals: return [] word_freqs = {w_id: len(vals["is_uc"]) for w_id, vals in word_occ_vals.items()} word_scores = _compute_word_scores(doc, word_occ_vals, word_freqs, stop_words) # compute scores for candidate terms based on scores of constituent words term_scores: Dict[str, float] = {} # do single-word candidates separately; it's faster and simpler if 1 in ngrams: candidates = _get_unigram_candidates(doc, include_pos) _score_unigram_candidates( candidates, word_freqs, word_scores, term_scores, stop_words, seen_candidates, normalize, ) # now compute combined scores for higher-n ngram and candidates candidates = list( ext_utils.get_ngram_candidates( doc, [n for n in ngrams if n > 1], include_pos=include_pos, ) ) attr_name = _get_attr_name(normalize, True) ngram_freqs = itertoolz.frequencies( " ".join(getattr(word, attr_name) for word in ngram) for ngram in candidates ) _score_ngram_candidates( candidates, ngram_freqs, word_scores, term_scores, seen_candidates, normalize, ) # build up a list of key terms in order of increasing score if isinstance(topn, float): topn = int(round(len(seen_candidates) * topn)) sorted_term_scores = sorted( term_scores.items(), key=operator.itemgetter(1), reverse=False, ) return ext_utils.get_filtered_topn_terms( sorted_term_scores, topn, match_threshold=0.8 )
b467f0598c70dbf1cec70dafec12ac4259720f91
30,061
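A usage sketch for the yake function above. It assumes a spaCy pipeline with a POS tagger is installed (the model name is an assumption) and that the function's private helpers (_get_per_word_occurrence_values, etc.) are importable from its host module, as in textacy.

import spacy

# Assumption: an English pipeline with a tagger/parser is available.
nlp = spacy.load("en_core_web_sm")
doc = nlp(
    "Keyword extraction identifies the terms that best describe a document. "
    "YAKE scores candidate terms using word frequency, position and context."
)

# Top-5 key terms as (term, score) pairs; lower YAKE scores indicate
# more important terms.
for term, score in yake(doc, ngrams=(1, 2), topn=5):
    print(f"{score:.4f}  {term}")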
def parse_qsub_defaults(parsed): """Unpack QSUB_DEFAULTS.""" d = parsed.split() if type(parsed) == str else parsed options={} for arg in d: if "=" in arg: k,v = arg.split("=") options[k.strip("-")] = v.strip() else: options[arg.strip("-")] = "" return options
a5c50aef405d88bcb018af48904a384b090d22a2
30,062
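Two quick examples for the parse_qsub_defaults function above, showing both the string and list input forms.

# String input: flags without '=' map to empty strings.
print(parse_qsub_defaults("-q batch -l nodes=1 walltime=00:10:00"))
# {'q': '', 'batch': '', 'l': '', 'nodes': '1', 'walltime': '00:10:00'}

# List input is accepted as well.
print(parse_qsub_defaults(["-V", "mem=4gb"]))
# {'V': '', 'mem': '4gb'}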
import torch


def load_embeddings(word_map=None, binary=True):
    """
    Creates an embedding tensor for the specified word map, for loading into the model.

    :param word_map: word map. If None, it is built from the combined word2vec and emoji2vec vocabularies.
    :param binary: whether the embedding files are stored in binary word2vec format
    :return: the word map, the embeddings in the same order as the words in the word map, and the embedding dimension
    """
    print("Loading embeddings...")
    wv = KeyedVectors.load_word2vec_format(PATH_WORD2VEC, binary=binary)
    ev = KeyedVectors.load_word2vec_format(PATH_EMOJI2VEC, binary=binary)

    # Find embedding dimension
    emb_dim = wv.vector_size

    if word_map is None:
        word_map = {k: v + 1 for emb in [wv.key_to_index.keys(), ev.key_to_index.keys()] for v, k in enumerate(emb)}
        word_map['<unk>'] = len(word_map) + 1
        word_map['<start>'] = len(word_map) + 1
        word_map['<end>'] = len(word_map) + 1
        word_map['<pad>'] = 0

    vocab = set(word_map.keys())

    # Create tensor to hold embeddings, initialize
    embeddings = torch.FloatTensor(len(vocab), emb_dim)
    _init_embedding(embeddings)

    # Iterate through the vector pairs
    for emb_word in vocab:
        if emb_word in wv.key_to_index:
            embeddings[word_map[emb_word]] = torch.FloatTensor(wv.get_vector(emb_word).copy())
        elif emb_word in ev.key_to_index:
            embeddings[word_map[emb_word]] = torch.FloatTensor(ev.get_vector(emb_word).copy())

    return word_map, embeddings, emb_dim
9c43d00411dd6036297c21569a232d52bd71acac
30,063
from typing import Dict
import json
import requests


def update_business_profile(business: Business, profile_info: Dict) -> Dict:
    """Update the contact information (email and phone) on the business profile."""
    if not business or not profile_info:
        return {'error': babel('Business and profile_info required.')}

    # contact phone is optional
    phone = profile_info.get('phone', '')

    error = {'error': 'Unknown handling'}
    if email := profile_info.get('email'):
        # assume the JSONSchema ensures it is a valid email format
        token = AccountService.get_bearer_token()
        account_svc_entity_url = current_app.config['ACCOUNT_SVC_ENTITY_URL']

        # Create an entity record
        data = json.dumps(
            {'email': email,
             'phone': phone,
             'phoneExtension': ''
             }
        )
        url = ''.join([account_svc_entity_url, '/', business.identifier, '/contacts'])
        rv = requests.post(
            url=url,
            headers={**AccountService.CONTENT_TYPE_JSON,
                     'Authorization': AccountService.BEARER + token},
            data=data,
            timeout=AccountService.timeout
        )
        if rv.status_code in (HTTPStatus.OK, HTTPStatus.CREATED):
            error = None

        if rv.status_code == HTTPStatus.NOT_FOUND:
            error = {'error': 'No business profile found.'}

        if rv.status_code == HTTPStatus.METHOD_NOT_ALLOWED:
            error = {'error': 'Service account missing privileges to update business profiles'}

        if rv.status_code == HTTPStatus.BAD_REQUEST and \
                'DATA_ALREADY_EXISTS' in rv.text:
            put = requests.put(
                url=''.join([account_svc_entity_url, '/', business.identifier]),
                headers={**AccountService.CONTENT_TYPE_JSON,
                         'Authorization': AccountService.BEARER + token},
                data=data,
                timeout=AccountService.timeout
            )
            if put.status_code in (HTTPStatus.OK, HTTPStatus.CREATED):
                error = None
            else:
                error = {'error': 'Unable to update existing business profile.'}

    return error
68c0ce0d9d205d34b02f7933ca3bc0e7179c7a12
30,064
def nextpow2(i): """ Find the next power of 2 for number i """ n = 1 while n < i: n *= 2 return n
5dbe396b222ccf79d3cd2017b32174f9e894a5f2
30,065
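Quick examples for the nextpow2 function above; note that values that are already powers of two are returned unchanged, and inputs of 1 or less return 1.

print(nextpow2(5))     # 8
print(nextpow2(16))    # 16
print(nextpow2(1))     # 1
print(nextpow2(1023))  # 1024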
from typing import Sequence from typing import Dict from typing import List from typing import Set from typing import Tuple def _extra_topo_sort(bad_ordering: Sequence[DiscoveredExtension]) -> Sequence[DiscoveredExtension]: """ Simple depth-first search version of a topological sort, but without recursion. :param bad_ordering: :return: """ lookup: Dict[str, DiscoveredExtension] = {} for node in bad_ordering: lookup[node.name] = node ret: List[DiscoveredExtension] = [] visiting: Set[str] = set() visited: Set[str] = set() remaining: List[Tuple[str, int]] = [(node.name, 0) for node in bad_ordering] # This isn't really necessary, but makes things dependable for testing # and gives a reliable, consistent load order. remaining.sort(key=lambda t: t[0]) log(TRACE, _extra_topo_sort, 'Performing topo sort of {0}', bad_ordering) while remaining: node_name, state = remaining.pop() log(TRACE, _extra_topo_sort, 'Inspecting {0}, {1}', node_name, state) node = lookup[node_name] if state == 0: if node_name in visited: continue if node_name in visiting: # Better exception? This should not happen, based on the previous # searching. raise ValueError('Not a DAG') log(TRACE, _extra_topo_sort, ' - Visiting') visiting.add(node_name) remaining.append((node_name, 1)) for child in node.depends_on: log(TRACE, _extra_topo_sort, ' -- depends on {0}', child.name) remaining.append((child.name, 0)) for child in node.implements: log(TRACE, _extra_topo_sort, ' -- implements {0}', child.name) remaining.append((child.name, 0)) log(TRACE, _extra_topo_sort, 'Remaining to search: {0}', remaining) elif state == 1: log(TRACE, _extra_topo_sort, ' - Finished visit') visiting.remove(node_name) visited.add(node_name) ret.append(node) log(TRACE, _extra_topo_sort, 'Order: {0}', ret) return tuple(ret)
cdaf0229dda2460e68ee24afb987ca9f7e029d4d
30,066