content: string (lengths 35 to 762k) | sha1: string (length 40) | id: int64 (0 to 3.66M)
import re


def parse_user_next_stable(user):
    """
    Parse the specified user-defined string containing the next stable
    version numbers and return the matches in a dictionary.
    """
    try:
        data = re.match(user_version_matcher, user).groupdict()
        if len(data) < 3:
            raise AttributeError
    except AttributeError:
        return False
    return data
3d5de92fdb119a85bc6b5e87a8399cc07e6c9ee8
14,772
import numpy as np
from tqdm import tqdm


def interp_ADCP_2D(
    sadcp,
    mask,
    depth,
    lon,
    lat,
    time,
    time_win=360.0,
    rmax=15.0,
    vmax=2.0,
    range_min=4.0,
):
    """
    This is essentially a loop over the interp_ADCP function with some
    additional NaN handling. Assume data is of the form D[i, j] where each j
    represents a profile and i a depth in that profile.

    Parameters
    ----------
    sadcp : Munch
        Munch structure of sadcp data.
    mask : 2D array
        Mask of boolean values specifying valid depths to interpolate to.
    depth : array
        Depths (m) at which to interpolate ADCP data.
    lon : array
        Longitude of CTD/VMP profile.
    lat : array
        Latitude of CTD/VMP profile.
    time : array
        Time of CTD/VMP profile as matlab datenum.
    time_win : float, optional
        Time window for search (s) centered on time of profile. Data outside
        the time range is excluded.
    rmax : float, optional
        Distance threshold (m) defines a circle around the location of the
        profile. Data outside the circle is excluded.
    vmax : float, optional
        Velocity threshold (m/s) above which we remove velocity data.
    range_min : float, optional
        ADCP minimum range threshold (m) below which we remove data.

    Returns
    -------
    u : 2D array
        Zonal velocity (m/s) interpolated to given depths.
    v : 2D array
        Meridional velocity (m/s) interpolated to given depths.
    w : 2D array
        Vertical velocity (m/s) interpolated to given depths.
    lonm : array
        Mean longitude of ADCP data.
    latm : array
        Mean latitude of ADCP data.
    range_bottom : array
        Minimum beam range to bottom (m).
    n : array
        Number of ADCP profiles in average.
    """
    u = np.full_like(mask, np.nan, dtype=float)
    v = np.full_like(mask, np.nan, dtype=float)
    w = np.full_like(mask, np.nan, dtype=float)
    lonm = np.full_like(time, np.nan)
    latm = np.full_like(time, np.nan)
    range_bottom = np.full_like(time, np.nan)
    n = np.full_like(time, np.nan)

    for i in tqdm(range(time.size)):
        valid = mask[:, i]
        try:
            u_, v_, w_, lon_, lat_, range_bottom_, n_ = interp_ADCP(
                sadcp,
                depth[valid],
                lon[i],
                lat[i],
                time[i],
                time_win=time_win,
                rmax=rmax,
                vmax=vmax,
                range_min=range_min,
            )
        except RuntimeError:
            continue

        # Fill data
        u[valid, i] = u_
        v[valid, i] = v_
        w[valid, i] = w_
        lonm[i] = lon_
        latm[i] = lat_
        range_bottom[i] = range_bottom_
        n[i] = n_

    return u, v, w, lonm, latm, range_bottom, n
ec092d203ef1cfee176bdf9ae05021fd876d444a
14,773
def extract_p(path, dict_obj, default):
    """
    Try to extract a dict value at the given key path; on a key error,
    return the default.

    :param path: the nested dict key path, separated by '.'
        (therefore no dots in key names allowed)
    :param dict_obj: the dictionary object from which to extract
    :param default: a default return value if key error
    :return: extracted value
    """
    if dict_obj is None:
        return default
    keys = path.split('.')
    tmp_iter = dict_obj
    for key in keys:
        try:
            # dict.get() might make KeyError exception unnecessary
            tmp_iter = tmp_iter.get(key, default)
        except KeyError:
            return default
    return tmp_iter
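# Usage sketch added for illustration (not part of the original snippet);
# the nested dict below is hypothetical:
#
#     config = {"db": {"host": "localhost", "port": 5432}}
#     extract_p("db.port", config, 0)   # -> 5432
#     extract_p("db.user", config, "")  # -> ""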
1a563212e229e67751584885c5db5ac19157c37f
14,774
def default_lscolors(env):
    """Gets a default instance of LsColors"""
    inherited_lscolors = os_environ.get("LS_COLORS", None)
    if inherited_lscolors is None:
        lsc = LsColors.fromdircolors()
    else:
        lsc = LsColors.fromstring(inherited_lscolors)
    # have to place this in the env, so it is applied
    env["LS_COLORS"] = lsc
    return lsc
0ad54d1220308a51194a464a2591be6edcc8d0ff
14,775
import logging


def get_indices_by_sent(start, end, offsets, tokens):
    """
    Get sentence index for textbounds
    """
    # iterate over sentences
    sent_start = None
    sent_end = None
    token_start = None
    token_end = None
    for i, sent in enumerate(offsets):
        for j, (char_start, char_end) in enumerate(sent):
            if (start >= char_start) and (start < char_end):
                sent_start = i
                token_start = j
            if (end > char_start) and (end <= char_end):
                sent_end = i
                token_end = j + 1

    assert sent_start is not None
    assert sent_end is not None
    assert token_start is not None
    assert token_end is not None

    if sent_start != sent_end:
        logging.warning("Entity spans multiple sentences, truncating")
        token_end = len(offsets[sent_start])

    toks = tokens[sent_start][token_start:token_end]

    return (sent_start, token_start, token_end, toks)
7ce90b69c63b18ee1c025970f5a645f5f4095d3b
14,776
def get_server_object_by_id(nova, server_id):
    """
    Returns a server with a given id
    :param nova: the Nova client
    :param server_id: the server's id
    :return: an SNAPS-OO VmInst object or None if not found
    """
    server = __get_latest_server_os_object_by_id(nova, server_id)
    return __map_os_server_obj_to_vm_inst(server)
384a5481c41937dfb7fcfdfdcc14bf0123db38a7
14,777
from xml.dom.minidom import parseString

import attr


def parse_string(xml):
    """ Returns a slash-formatted string from the given XML representation.
        The return value is a TokenString (see mbsp.py).
    """
    string = ""
    dom = parseString(xml)
    # Traverse all the <sentence> elements in the XML.
    for sentence in dom.getElementsByTagName(XML_SENTENCE):
        _anchors.clear()      # Populated by calling _parse_tokens().
        _attachments.clear()  # Populated by calling _parse_tokens().
        # Parse the language from <sentence language="">.
        language = attr(sentence, XML_LANGUAGE, "en")
        # Parse the token tag format from <sentence token="">.
        # This information is returned in TokenString.tags,
        # so the format and order of the token tags is retained when exporting/importing as XML.
        format = attr(sentence, XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA])
        format = not isinstance(format, basestring) and format or format.replace(" ", "").split(",")
        # Traverse all <chunk> and <chink> elements in the sentence.
        # Find the <word> elements inside and create tokens.
        tokens = []
        for chunk in children(sentence):
            tokens.extend(_parse_tokens(chunk, format))
        # Attach PNP's to their anchors.
        # Keys in _anchors have linked anchor chunks (each chunk is a list of tokens).
        # The keys correspond to the keys in _attachments, which have linked PNP chunks.
        if ANCHOR in format:
            A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR)
            for id in sorted(A.keys()):
                for token in A[id]:
                    token[i] += "-" + "-".join(["A" + str(a + p) for p in range(len(P[id]))])
                    token[i] = token[i].strip("O-")
                for p, pnp in enumerate(P[id]):
                    for token in pnp:
                        token[i] += "-" + "P" + str(a + p)
                        token[i] = token[i].strip("O-")
                a += len(P[id])
        # Collapse the tokens to string.
        # Separate multiple sentences with a new line.
        tokens = ["/".join([tag for tag in token]) for token in tokens]
        tokens = " ".join(tokens)
        string += tokens + "\n"
    # Return a TokenString, which is a unicode string that transforms easily
    # into a plain str, a list of tokens, or a Sentence.
    try:
        if MBSP:
            from mbsp import TokenString
        return TokenString(string, tags=format, language=language)
    except:
        return TaggedString(string, tags=format, language=language)
e3ccf32bcc148b2c6b9b44259d881f336720fde5
14,782
def join_ad_domain_by_taking_over_existing_computer_using_session(
        ad_session: ADSession, computer_name=None, computer_password=None, old_computer_password=None,
        computer_key_file_path=DEFAULT_KRB5_KEYTAB_FILE_LOCATION) -> ManagedADComputer:
    """ A fairly simple 'join a domain' function using pre-created accounts, which requires minimal input - an AD
    session. Specifying the name of the computer to takeover explicitly is also encouraged.

    Given those basic inputs, the domain's nearest controllers are automatically discovered and an account is found
    with the computer name specified. That account is then taken over so that it can be controlled by the local
    system, and kerberos keys and such are generated for it.

    By providing an AD session, one can build a connection to the domain however they so choose and then use it to
    join this computer, so you don't even need to necessarily use user credentials.

    :param ad_session: The ADSession object representing a connection with the domain to be joined.
    :param computer_name: The name of the computer to take over in the domain. This should be the sAMAccountName
                          of the computer, though if computer has a trailing $ in its sAMAccountName and that is
                          omitted, that's ok. If not specified, we will attempt to find a computer with a name
                          matching the local system's hostname.
    :param computer_password: The password to set for the computer when taking it over. If not specified, a random
                              120 character password will be generated and set.
    :param old_computer_password: The current password of the computer being taken over. If specified, the action
                                  of taking over the computer will use a "change password" operation, which is less
                                  privileged than a "reset password" operation. So specifying this reduces the
                                  permissions needed by the user specified.
    :param computer_key_file_path: The path of where to write the keytab file for the computer after taking it over.
                                   This will include keys for both user and server keys for the computer.
                                   If not specified, defaults to /etc/krb5.keytab
    :returns: A ManagedADComputer object representing the computer taken over.
    """
    # for joining a domain, default to using the local machine's hostname as a computer name
    if computer_name is None:
        computer_name = get_system_default_computer_name()
        logger.warning('No computer name was specified for joining via computer takeover. This is unusual and relies '
                       'implicitly on the computers in the domain matching this library in terms of how they decide '
                       'on the computer name, and may cause errors. The name being used is %s', computer_name)

    logger.info('Attempting to join computer to domain %s by taking over account with name %s',
                ad_session.get_domain_dns_name(), computer_name)
    computer = ad_session.take_over_existing_computer(computer_name, computer_password=computer_password,
                                                      old_computer_password=old_computer_password)
    if computer_key_file_path is not None:
        computer.write_full_keytab_file_for_computer(computer_key_file_path)
    logger.info('Successfully joined computer to domain %s by taking over computer with name %s',
                ad_session.get_domain_dns_name(), computer_name)
    return computer
a9fea1126fd775c85cf9a354044315eef03a4ffb
14,783
import numpy as np


def peak_sound_pressure(pressure, axis=-1):
    """
    Peak sound pressure :math:`p_{peak}` is the greatest absolute sound
    pressure during a certain time interval.

    :param pressure: Instantaneous sound pressure :math:`p`.
    :param axis: Axis.

    .. math:: p_{peak} = \\mathrm{max}(|p|)

    """
    return np.abs(pressure).max(axis=axis)
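# Usage sketch added for illustration (not part of the original snippet):
#
#     p = np.array([0.5, -2.0, 1.0])
#     peak_sound_pressure(p)  # -> 2.0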
e3beb4d67dc414fa7aabdc7a9c4a06a5ddb371ab
14,784
def field_filter_query(field, values):
    """Need to define work-around for full-text fields."""
    values = ensure_list(values)
    if not len(values):
        return {'match_all': {}}
    if field in ['_id', 'id']:
        return {'ids': {'values': values}}
    if len(values) == 1:
        if field in ['names', 'addresses']:
            field = '%s.text' % field
            return {'match_phrase': {field: values[0]}}
        return {'term': {field: values[0]}}
    return {'terms': {field: values}}
54d3b394e8dc38b2a0ead3b9d5a81da9f5f6915a
14,785
import logging


def compute_investigation_stats(inv, exact=True, conf=0.95, correct=True):
    """
    Compute all statistics for all protected features of an investigation

    Parameters
    ----------
    inv :
        the investigation
    exact :
        whether exact tests should be used
    conf :
        overall confidence level (1 - familywise error rate)

    Returns
    -------
    all_stats :
        list of all statistics for the investigation
    """
    # count the number of hypotheses to test
    total_hypotheses = num_hypotheses(inv)
    logging.info('Testing %d hypotheses', total_hypotheses)

    #
    # Adjusted Confidence Level (Bonferroni)
    #
    adj_conf = 1 - (1 - conf) / total_hypotheses if correct else conf

    # statistics for all investigations
    all_stats = {sens: compute_stats(ctxts, exact, adj_conf, inv.random_state)
                 for (sens, ctxts) in sorted(inv.contexts.iteritems())}

    # flattened array of all p-values
    all_pvals = [max(stat[-1], 1e-180)
                 for sens_stats in all_stats.values()
                 for stat in sens_stats['stats']]

    # correct p-values
    if correct:
        pvals_corr = multipletests(all_pvals, alpha=1 - conf, method='holm')[1]
    else:
        pvals_corr = all_pvals

    # replace p-values by their corrected value
    idx = 0
    # iterate over all protected features for the investigation
    for (sens, sens_contexts) in inv.contexts.iteritems():
        sens_stats = all_stats[sens]['stats']
        # iterate over all contexts for a protected feature
        for i in range(len(sens_stats)):
            old_stats = sens_stats[i]
            all_stats[sens]['stats'][i] = \
                np.append(old_stats[0:-1], pvals_corr[idx])
            idx += 1

    for (sens, sens_contexts) in inv.contexts.iteritems():
        metric = sens_contexts[0].metric
        # For regression, re-form the dataframes for each context
        if isinstance(metric.stats, pd.DataFrame):
            res = all_stats[sens]
            res = pd.DataFrame(res['stats'], index=res['index'],
                               columns=res['cols'])
            all_stats[sens] = \
                {'stats': np.array_split(res, len(res) / len(metric.stats))}

    all_stats = {sens: sens_stats['stats']
                 for (sens, sens_stats) in all_stats.iteritems()}

    return all_stats
08bf8ab5c4e985c33fdb0bd0d9dfc1dc949f4d83
14,786
def group_bars(note_list):
    """
    Returns a list of bars, where each bar is a list of notes. The start and
    end times of each note are rescaled to units of bars, and expressed
    relative to the beginning of the current bar.

    Parameters
    ----------
    note_list : list of tuples
        List of notes to group into bars.
    """
    bar_list = []
    current_bar = []
    current_bar_start_time = 0
    for raw_note in note_list:
        if raw_note[0] != -1:
            current_bar.append(raw_note)
        elif raw_note[0] == -1:
            quarter_notes_per_bar = raw_note[2] - current_bar_start_time
            current_bar_scaled = []
            for note in current_bar:
                current_bar_scaled.append(
                    (note[0],
                     note[1],
                     min([(note[2] - current_bar_start_time) / quarter_notes_per_bar, 1]),
                     min([(note[3] - current_bar_start_time) / quarter_notes_per_bar, 1])))
            bar_list.append(current_bar_scaled)
            current_bar = []
            current_bar_start_time = raw_note[2]
    return bar_list
3b12a7c7e2395caa3648abf152915ece4b325599
14,787
def get_vmexpire_id_from_ref(vmexpire_ref):
    """Parse a container reference and return the container ID

    The container ID is the right-most element of the URL
    :param vmexpire_ref: HTTP reference of container
    :return: a string containing the ID of the container
    """
    vmexpire_id = vmexpire_ref.rsplit('/', 1)[1]
    return vmexpire_id
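# Usage sketch added for illustration (not part of the original snippet);
# the URL is hypothetical:
#
#     get_vmexpire_id_from_ref("http://host/v1/vmexpires/0f3f9dcb")  # -> "0f3f9dcb"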
e90c34c8489d91fb582a4bf15f874bcb2feaea82
14,788
def create_A_and_B_state_ligand(line, A_B_state='vdwq_q'):
    """Create A and B state topology for a ligand.

    Parameters
    ----------
    line : str
        'Atom line': with atomtype, mass, charge,...
    A_B_state : str
        Interactions in the A state and in the B state.
        vdwq_vdwq: ligand fully interacting in A and B state
        vdwq_vdw: vdw interactions and electrostatics in the A_state, only vdw in the B_state
        vdw_vdwq: charge
        vdw_dummy
        dummy_vdw
        vdwq_dummy

    Returns
    -------
    text : str
        Atoms line for topology file with A and B state parameters
    """
    atom_number = line.split()[0]
    atom_type = line.split()[1]
    residue_nr = line.split()[2]
    residue_name = line.split()[3]
    atom_name = line.split()[4]
    cgnr = line.split()[5]
    charge = line.split()[6]
    mass = line.split()[7]

    # A and B state are the same
    if A_B_state == 'vdwq_vdwq':
        text = line.split(';')[0] + ' ' + atom_type + ' ' + charge + ' ' + mass + '\n'
    # Turn on vdw
    elif A_B_state == 'dummy_vdw':
        charge = str(0.0)
        text = ' ' + atom_number + ' d%s ' % atom_type + ' ' + residue_nr + ' ' + \
               residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + charge + ' ' + mass + ' ' + \
               atom_type + ' ' + charge + ' ' + mass + '\n'
    # Turn vdw off
    elif A_B_state == 'vdw_dummy':
        charge = str(0.0)
        text = ' ' + atom_number + ' ' + atom_type + ' ' + residue_nr + ' ' + \
               residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + charge + ' ' + mass + \
               ' d%s ' % atom_type + ' ' + charge + ' ' + mass + '\n'
    # Turn vdw and electrostatics off
    elif A_B_state == 'vdwq_dummy':
        text = line.split(';')[0] + ' ' + ' d%s ' % atom_type + ' 0.0 ' + mass + '\n'
    # uncharge
    elif A_B_state == 'vdwq_vdw':
        text = line.split(';')[0] + ' ' + ' ' + atom_type + ' 0.0 ' + mass + '\n'
    # charge
    elif A_B_state == 'vdw_vdwq':
        text = ' ' + atom_number + ' ' + atom_type + ' ' + residue_nr + ' ' + \
               residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + str(0.0) + ' ' + \
               mass + ' ' + atom_type + ' ' + charge + ' ' + mass + '\n'
    # Posre off
    elif A_B_state == 'dummy':
        charge = str(0.0)
        text = ' ' + atom_number + ' d%s ' % atom_type + ' ' + residue_nr + ' ' + \
               residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + charge + ' ' + mass + ' ' + '\n'
    # Turn vdw and electrostatics off
    elif A_B_state == 'vdwq':
        text = line.split(';')[0] + '\n'
    else:
        print('Transformation not implemented yet')

    return text
3ac16da60de68013b20ea3f1a6ce3173cd4871a1
14,789
def clean_params(estimator, n_jobs=None):
    """clean unwanted hyperparameter settings

    If n_jobs is not None, set it into the estimator, if applicable

    Return
    ------
    Cleaned estimator object
    """
    ALLOWED_CALLBACKS = (
        "EarlyStopping",
        "TerminateOnNaN",
        "ReduceLROnPlateau",
        "CSVLogger",
        "None",
    )

    estimator_params = estimator.get_params()

    for name, p in estimator_params.items():
        # all potential unauthorized file write
        if name == "memory" or name.endswith("__memory") or name.endswith("_path"):
            new_p = {name: None}
            estimator.set_params(**new_p)
        elif n_jobs is not None and (name == "n_jobs" or name.endswith("__n_jobs")):
            new_p = {name: n_jobs}
            estimator.set_params(**new_p)
        elif name.endswith("callbacks"):
            for cb in p:
                cb_type = cb["callback_selection"]["callback_type"]
                if cb_type not in ALLOWED_CALLBACKS:
                    raise ValueError("Prohibited callback type: %s!" % cb_type)

    return estimator
da639b03ea7dec534130105571c1623128e99143
14,790
def getValidOauth2TxtCredentials(force_refresh=False, api=None):
    """Gets OAuth2 credentials which are guaranteed to be fresh and valid."""
    try:
        credentials = auth.get_admin_credentials(api)
    except gam.auth.oauth.InvalidCredentialsFileError:
        doRequestOAuth()  # Make a new request which should store new creds.
        return getValidOauth2TxtCredentials(force_refresh=force_refresh, api=api)
    if credentials.expired or force_refresh:
        request = transport.create_request()
        credentials.refresh(request)
    return credentials
ff29fe312fe6ca875e53c56482033ca5ccceb71c
14,791
import itertools

import pandas as pd
from shapely import wkt


def get_combinations_sar(products, aoi):
    """Get a dataframe with all possible combinations of products
    and calculate their coverage of the AOI and the temporal
    distance between the products.

    Parameters
    ----------
    products : dataframe
        Search results with product identifiers as index.
    aoi : shapely geometry
        Area of interest (lat/lon).

    Returns
    -------
    combinations : dataframe
        Double-indexed output dataframe. Only combinations
        that contain the AOI are returned (with a 1% margin).
    """
    couples = list(itertools.combinations(products.index, 2))
    combinations = pd.DataFrame(index=pd.MultiIndex.from_tuples(couples))

    for id_a, id_b in couples:
        footprint_a = wkt.loads(products.loc[id_a].footprint)
        footprint_b = wkt.loads(products.loc[id_b].footprint)
        footprint = footprint_a.union(footprint_b)
        combinations.at[(id_a, id_b), 'date_a'] = products.loc[id_a].date
        combinations.at[(id_a, id_b), 'date_b'] = products.loc[id_b].date
        combinations.at[(id_a, id_b), 'cover'] = coverage(aoi, footprint)

    combinations = combinations[combinations.cover >= 99.]
    combinations['dist'] = combinations.date_b - combinations.date_a
    combinations.dist = combinations.dist.apply(lambda x: abs(x.days))
    combinations = combinations.sort_values(by='dist', ascending=True)
    return combinations
045fcf59e9dae17a17d77cb945d2fe63af01e7ae
14,792
import re


def sample(s, n):
    """Show a sample of string s centered at position n"""
    start = max(n - 8, 0)
    finish = min(n + 24, len(s))
    return re.escape(s[start:finish])
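# Usage sketch added for illustration (not part of the original snippet):
#
#     sample("abcdefghij", 2)  # -> "abcdefghij" (window [0:10], then re-escaped)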
565f69224269ed7f5faa538d40ce277714144577
14,793
def getNeededLibraries(binary_filepath):
    """
    Get all libraries given binary depends on.
    """
    if False:
        return getNeededLibrariesLDD(binary_filepath)
    else:
        return getNeededLibrariesOBJDUMP(binary_filepath)
40fcb08fac7877f97cb9fa9f6f198e58c64fe492
14,794
from typing import List, Optional


def load_transformer(input_paths: List[str], input_type: Optional[str] = None) -> Transformer:
    """
    Creates a transformer for the appropriate file type and loads the data
    into it from file.
    """
    if input_type is None:
        input_types = [get_type(i) for i in input_paths]
        for t in input_types:
            if input_types[0] != t:
                error(
                    """
                    Each input file must have the same file type.
                    Try setting the --input-type parameter to enforce a single
                    type.
                    """
                )
        input_type = input_types[0]

    transformer_constructor = get_transformer(input_type)

    if transformer_constructor is None:
        error('Inputs do not have a recognized type: ' + str(get_file_types()))

    t = transformer_constructor()

    for i in input_paths:
        t.parse(i, input_type)

    t.report()

    return t
55eab62cdf5293ad03441fc91663383adcf12da7
14,795
from ocs_ci.ocs.platform_nodes import AWSNodes


def delete_and_create_osd_node_aws_upi(osd_node_name):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for AWS UPI.

    Args:
        osd_node_name (str): the name of the osd node

    Returns:
        str: The new node name

    """
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    az = get_node_az(osd_node)

    aws_nodes = AWSNodes()
    stack_name_of_deleted_node = aws_nodes.get_stack_name_of_node(osd_node_name)

    remove_nodes([osd_node])

    log.info(f"name of deleted node = {osd_node_name}")
    log.info(f"availability zone of deleted node = {az}")
    log.info(f"stack name of deleted node = {stack_name_of_deleted_node}")

    if config.ENV_DATA.get("rhel_workers"):
        node_type = constants.RHEL_OS
    else:
        node_type = constants.RHCOS

    log.info("Preparing to create a new node...")
    node_conf = {"stack_name": stack_name_of_deleted_node}
    new_node_names = add_new_node_and_label_upi(node_type, 1, node_conf=node_conf)

    return new_node_names[0]
c376b8b499a9897723962e5af30984eb4d9f06fa
14,796
import math


def encode_integer_compact(value: int) -> bytes:
    """Encode an integer with signed VLQ encoding.

    :param int value: The value to encode.
    :return: The encoded integer.
    :rtype: bytes
    """
    if value == 0:
        return b"\0"

    if value < 0:
        sign_bit = 0x40
        value = -value
    else:
        sign_bit = 0

    n_bits = value.bit_length()
    n_bytes = 1 + int(math.ceil((n_bits - 6) / 7))

    buf = bytearray(n_bytes)

    for i in range(n_bytes - 1, 0, -1):
        buf[i] = 0x80 | (value & 0x7F)
        value >>= 7

    buf[0] = 0x80 | sign_bit | (value & 0x3F)
    buf[-1] &= 0x7F

    return bytes(buf)
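# Usage sketch added for illustration (not part of the original snippet);
# expected outputs traced by hand from the encoding rules above:
#
#     encode_integer_compact(0)   # -> b"\x00"
#     encode_integer_compact(1)   # -> b"\x01"
#     encode_integer_compact(-1)  # -> b"\x41" (sign bit 0x40 set)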
daf9ed4a794754a3cd402e8cc4c3e614857941fe
14,797
def kin_phos_query(kin_accession):
    """
    Query to pull related phosphosites using kinase accession

    :param kin_accession: string kinase accession
    :return: Flask_Table Phosphosite_results object
    """
    session = create_sqlsession()
    q = session.query(Kinase).filter_by(kin_accession=kin_accession)
    kin = q.first()
    # subset of information about substrate phosphosites sites.
    subsets = kin.kin_phosphorylates
    table = Phosphosite_results(subsets)
    session.close()
    return table
94f5f7d987dface90ff5d061525d2277173ed271
14,798
def max_surplus(redemptions, costs, traders):
    """
    Calculates the maximum possible surplus
    """
    surplus = 0
    transactions = 0.5 * traders
    for redemption, cost in zip(redemptions, costs):
        if redemption >= cost:
            surplus += (redemption - cost) * transactions
    return surplus
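# Worked example added for illustration (not part of the original snippet):
#
#     max_surplus([10, 8], [5, 9], 2)
#     # transactions = 1.0; only the (10, 5) pair trades profitably,
#     # so the result is (10 - 5) * 1.0 = 5.0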
6dd452de1b8726c475c9b95d8c24a2f57fe71516
14,799
import string


def generate_create_account_key():
    """
    Generates a random account creation key. Implementation is very similar
    to generate_reset_key().
    """
    chars = string.ascii_lowercase + string.digits
    return misc_utils.generate_random_string(constants.CREATE_ACCOUNT_KEY_LENGTH,
                                             chars=chars)
e52405a325b787b9473da5530c909bfdcff0d9b4
14,800
import re


def parse_dblife(file):
    """Parse an DBLife file, returning a tuple:
        positions: list of (x,y) co-ordinates
        comments: all comments in file, as a list of strings, one per line.
    """
    lines = file.split("\n")
    comments = []
    positions = []

    x = 0
    y = 0
    dblife_pattern = r"((\d*)(\.|O|o|\*))*"

    for line in lines:
        line = line.strip()
        if line.startswith("!"):
            comments.append(line[2:])

        # check if this is part of the pattern
        if re.match(dblife_pattern, line):
            count = 0
            for char in line:
                # repeat counts
                if char.isdigit():
                    count *= 10
                    count += int(char)
                # blanks
                if char in ".":
                    if count != 0:
                        x += int(count)
                    else:
                        x += 1
                    count = 0
                # ons
                if char in "oO*":
                    if count != 0:
                        for i in range(count):
                            positions.append((x, y))
                            x += 1
                    else:
                        positions.append((x, y))
                        x += 1
                        count = 0
                    count = 0
            # newlines
            y += 1
            x = 0
            count = 0
    return positions, comments
b2d54240280b657c82d8a70da9e9f0ce47a92c7a
14,801
from logging import Logger
from typing import Any, Callable

from sqlalchemy.exc import DBAPIError, OperationalError
from sqlalchemy.orm.exc import NoResultFound


def db_handle_error(logger: Logger, default_return_val: Any) -> Any:
    """Handle operational database errors via decorator."""
    def decorator(func: Callable) -> Any:
        def wrapper(*args, **kwargs):  # type: ignore
            # Bypass attempt to perform query and just return default value
            is_db_disabled: bool = app_config.get(
                'BROWSE_DISABLE_DATABASE') or False
            if is_db_disabled:
                if logger:
                    logger.info(
                        'Database is disabled per BROWSE_DISABLE_DATABASE')
                return default_return_val
            try:
                return func(*args, **kwargs)
            except NoResultFound:
                return default_return_val
            except (OperationalError, DBAPIError) as ex:
                if logger:
                    logger.warning(
                        f'Error executing query in {func.__name__}: {ex}')
                return default_return_val
            except Exception as ex:
                if logger:
                    logger.warning(
                        f'Unknown exception in {func.__name__}: {ex}')
                raise
        return wrapper
    return decorator
1a807bc7a49abc9b50970145c520e823103f3607
14,802
from pathlib import Path
from typing import Iterable, List, Optional


def climb_directory_tree(starting_path: PathOrStr, file_patterns: Iterable[str]) -> Optional[List[Path]]:
    """Climb the directory tree looking for file patterns."""
    current_dir: Path = Path(starting_path).absolute()
    if current_dir.is_file():
        current_dir = current_dir.parent

    while current_dir.root != str(current_dir):
        for root_file in file_patterns:
            found_files = list(current_dir.glob(root_file))
            if found_files:
                return found_files
        current_dir = current_dir.parent
    return None
80f036da4cf5564a2b96359ea67db19602333420
14,803
def serve_file(request, token, require_requester=True, verify_requester=True,
               signer=None):
    """Basic view to serve a file.

    Uses ``evaluate_request`` under the hood. Please refer to that function to
    view information about exceptions.

    :param request: the file request
    :type request: bgfiles.models.FileRequest
    :param token: the token
    :type token: str
    :param require_requester: whether we expect the token to contain the request
    :type require_requester: bool
    :param verify_requester: whether we need to verify the current user is the requester
    :type verify_requester: bool
    :param signer: signer to use
    :return: django.http.HTTPResponse
    """
    file_request, data = evaluate_request(request, token,
                                          require_requester=require_requester,
                                          verify_requester=verify_requester,
                                          signer=signer)
    return toolbox.serve(file_request)
98bfae971e141130e94932afb8fdee2a285f2a5a
14,804
import numpy as np


def d2_rho_heterodyne(t, rho_vec, A, args):
    """
    Need to cythonize, docstrings
    """
    M = A[0] + A[3]
    e1 = cy_expect_rho_vec(M, rho_vec, 0)
    d1 = spmv(M, rho_vec) - e1 * rho_vec
    M = A[0] - A[3]
    e1 = cy_expect_rho_vec(M, rho_vec, 0)
    d2 = spmv(M, rho_vec) - e1 * rho_vec
    return [1.0 / np.sqrt(2) * d1, -1.0j / np.sqrt(2) * d2]
6628c1a7299ee7842a839fd63b00857808bcd3ec
14,805
from os import environ
from pathlib import Path


def get_venv():
    """Return virtual environment path or throw an error if not found"""
    env = environ.get("VIRTUAL_ENV", None)
    if env:
        return Path(env)
    else:
        raise EnvironmentError("No virtual environment found.")
44dd4660198a8f5538cbe91ffe52468adc8ee0e8
14,806
def load_user(user_id):
    """Login manager load user method."""
    return User.query.get(int(user_id))
40d5f35aa88163a6ab69c1da7bad6634225f2cf3
14,808
import numpy as np


def test_interpolate_energy_dispersion():
    """Test of interpolation of energy dispersion matrix using a simple dummy model."""
    x = [0.9, 1.1]
    y = [8., 11.5]
    n_grid = len(x) * len(y)
    n_offset = 1
    n_en = 30
    n_mig = 20
    clip_level = 1.e-3

    # define simple dummy bias and resolution model using two parameters x and y
    def get_bias_std(i_en, x, y):
        i_en = i_en + 3 * ((x - 1) + (y - 10.))
        de = n_en - i_en
        de[de < 0] = 0.
        bias = de**0.5 + n_mig / 2
        rms = 5 - 2 * (i_en / n_en)
        bias[i_en < 3] = 2 * n_mig  # return high values to zero out part of the table
        rms[i_en < 3] = 0
        return bias, rms

    en = np.arange(n_en)[:, np.newaxis]
    mig = np.arange(n_mig)[np.newaxis, :]

    # auxiliary function to compute profile of the 2D distribution
    # used to check if the expected and interpolated matrixes are similar
    def calc_mean_std(matrix):
        n_en = matrix.shape[0]
        means = np.empty(n_en)
        stds = np.empty(n_en)
        for i_en in np.arange(n_en):
            w = matrix[i_en, :]
            if np.sum(w) > 0:
                means[i_en] = np.average(mig[0, :], weights=w)
                stds[i_en] = np.sqrt(np.cov(mig[0, :], aweights=w))
            else:  # we need to skip the empty columns
                means[i_en] = -1
                stds[i_en] = -1
        return means, stds

    # generate true values
    interp_pars = (1, 10)
    bias, sigma = get_bias_std(en, *interp_pars)
    mig_true = np.exp(-(mig - bias)**2 / (2 * sigma**2))
    mig_true[mig_true < clip_level] = 0

    # generate a grid of migration matrixes
    i_grid = 0
    pars_all = np.empty((n_grid, 2))
    mig_all = np.empty((n_grid, n_en, n_mig, n_offset))
    for xx in x:
        for yy in y:
            bias, sigma = get_bias_std(en, xx, yy)
            mig_all[i_grid, :, :, 0] = np.exp(-(mig - bias)**2 / (2 * sigma**2))
            pars_all[i_grid, :] = (xx, yy)
            i_grid += 1

    # do the interpolation and compare the results with expected ones
    mig_interp = interp.interpolate_energy_dispersion(mig_all, pars_all, interp_pars,
                                                      method='linear')

    # check if all the energy bins have normalization 1 or 0 (can happen because of empty bins)
    sums = np.sum(mig_interp[:, :, 0], axis=1)
    assert np.logical_or(np.isclose(sums, 0., atol=1.e-5),
                         np.isclose(sums, 1., atol=1.e-5)).min()

    # now check if we reconstruct the mean and sigma roughly fine after interpolation
    bias0, stds0 = calc_mean_std(mig_true)  # true
    bias, stds = calc_mean_std(mig_interp[:, :, 0])  # interpolated

    # first remove the bins that are empty in true value
    idxs = bias0 > 0
    bias0 = bias0[idxs]
    bias = bias[idxs]
    stds0 = stds0[idxs]
    stds = stds[idxs]

    # allowing for a 0.6 bin size error on the interpolated values
    assert np.allclose(bias, bias0, atol=0.6, rtol=0.)
    assert np.allclose(stds, stds0, atol=0.6, rtol=0.)
73c60f2b01d20b6e399dfb15da2c3c4b8622a90c
14,809
def _transpose_list_array(x):
    """Transposes a list matrix
    """
    n_dims = len(x)
    assert n_dims > 0
    n_samples = len(x[0])
    rows = [None] * n_samples
    for i in range(n_samples):
        r = [None] * n_dims
        for j in range(n_dims):
            r[j] = x[j][i]
        rows[i] = r
    return rows
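# Usage sketch added for illustration (not part of the original snippet):
#
#     _transpose_list_array([[1, 2], [3, 4]])  # -> [[1, 3], [2, 4]]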
8815526c6485475aeaf791c2b1350449730b94f6
14,810
def load_businessgroup(request):
    """ Business Group Dependent/Chained Dropdown List """
    business_type_id = request.GET.get('business_type')
    business_group_list = BusinessGroup.objects.filter(
        business_type_id=business_type_id).order_by('name')
    context = {'business_group_list': business_group_list}
    return render(request,
                  'app_sme12/form_partial/bus_group_dropdown_list_options.html',
                  context)
8801fbd6ae99ed939721d94bd7f3539b5b050d0a
14,811
def seed_normalization(train_X, train_Y, test_X, testY, nor_method=0, merge=0, column=0):
    """
    0 for minmax, 1 for standard, 2 for nothing
    :param nor_method:
    :param merge: whether to normalize the training and test sets together
    :return:
    """
    # imp_mean = SimpleImputer(missing_values=np.nan, strategy="mean")
    imp_mean = KNNImputer(n_neighbors=10, weights="uniform")
    train_X = imp_mean.fit_transform(train_X)
    test_X = imp_mean.fit_transform(test_X)
    if column == 0:
        if nor_method == 0:
            scaler = MinMaxScaler()
        elif nor_method == 1:
            scaler = StandardScaler()
        elif nor_method == 2:
            scaler = Normalizer()
        elif nor_method == 3:
            scaler = Pipeline([('min_max', MinMaxScaler()),
                               ('standard', StandardScaler())])
        else:
            return train_X, train_Y, test_X, testY
        if merge == 0:
            scaler.fit(np.vstack((train_X, test_X)))
            train_X = scaler.transform(train_X)
            test_X = scaler.transform(test_X)
        elif merge == 1:
            scaler.fit(train_X)
            train_X = scaler.transform(train_X)
            test_X = scaler.transform(test_X)
        else:
            train_X = scaler.fit_transform(train_X)
            test_X = scaler.fit_transform(test_X)
            # scaler.fit(np.vstack((train_X, test_X)))
        return train_X, train_Y, test_X, testY
    else:
        # column-wise z-score normalization
        train_X = train_X.T
        x_mean = np.mean(train_X, axis=0)
        x_std = np.std(train_X, axis=0)
        train_X = (train_X - x_mean) / x_std
        test_X = test_X.T
        x_mean = np.mean(test_X, axis=0)
        x_std = np.std(test_X, axis=0)
        test_X = (test_X - x_mean) / x_std
        return train_X.T, train_Y, test_X.T, testY
bae1181b6cca53444f09a69abaa3958e8500f71c
14,812
import pathlib


def combine_matrix_runs(path, runs, pacc_file):
    """Combine a set of transition matrix files.

    Args:
        path: The base path containing the data to combine.
        runs: The list of runs to combine.
        pacc_file: The name of the file to combine.

    Returns:
        A TransitionMatrix object with the combined data.
    """
    true_path = pathlib.Path(path)
    return combine_matrices([read_matrix(true_path / run / pacc_file)
                             for run in runs])
dee47a3f4bfb6229a5c6aec531a0b50df5275a0b
14,813
import json


def get_pkg_descr(package, version=None, last_modified=None):
    """
    Get package description from registry
    """
    json_data = fetch_page('http://registry.npmjs.org/%s' % package,
                           last_modified=last_modified)

    if json_data is None:  # NB: empty string is not None but will fail the check
        return None
    else:
        return json.loads(json_data)
9be485ee3e63f25da995b6d454a8dd15de4b7a66
14,814
def has_pattern(str_or_strlist):
    """When passed a string, equivalent to calling looks_like_pattern. When
    passed a string list, returns True if any one of the strings looks like a
    pattern, False otherwise."""
    strlist = [str_or_strlist] if isinstance(str_or_strlist, str) else str_or_strlist
    return len([s for s in strlist if looks_like_pattern(s)]) > 0
902069f01a59b5e42c25635271dc27375732437b
14,815
def update_hidden_area(*args):
    """update_hidden_area(hidden_area_t ha) -> bool"""
    return _idaapi.update_hidden_area(*args)
19739a98283203ece9f29d4fe073633318c0c2a4
14,816
def after_update_forecast_datasets(msg, config, checklist):
    """Calculate the list of workers to launch after the
    update_forecast_datasets worker ends.

    :arg msg: Nowcast system message.
    :type msg: :py:class:`nemo_nowcast.message.Message`

    :arg config: :py:class:`dict`-like object that holds the nowcast system
                 configuration that is loaded from the system configuration
                 file.
    :type config: :py:class:`nemo_nowcast.config.Config`

    :arg dict checklist: System checklist: data structure containing the
                         present state of the nowcast system.

    :returns: Worker(s) to launch next
    :rtype: list
    """
    next_workers = {
        "crash": [],
        "failure fvcom forecast": [],
        "failure nemo forecast": [],
        "failure nemo forecast2": [],
        "failure wwatch3 forecast": [],
        "failure wwatch3 forecast2": [],
        "success fvcom forecast": [],
        "success nemo forecast": [],
        "success nemo forecast2": [],
        "success wwatch3 forecast": [],
        "success wwatch3 forecast2": [],
    }
    if msg.type.startswith("success"):
        model = msg.type.split()[1]
        run_type = msg.type.split()[2]
        try:
            run_date = checklist[f"{model.upper()} run"][run_type]["run date"]
        except KeyError:
            # FVCOM run has model config prefixed to run type
            run_date = checklist[f"{model.upper()} run"][f"x2 {run_type}"]["run date"]
        next_workers[msg.type].append(
            NextWorker("nowcast.workers.ping_erddap", args=[f"{model}-forecast"])
        )
        if model == "nemo":
            next_workers[msg.type].extend(
                [
                    NextWorker(
                        "nowcast.workers.make_plots",
                        args=["nemo", run_type, "publish", "--run-date", run_date],
                    ),
                    NextWorker(
                        "nowcast.workers.make_surface_current_tiles",
                        args=[run_type, "--run-date", run_date],
                    ),
                ]
            )
    return next_workers[msg.type]
3ef9a6d37f871900e96f6227fea2f7678843acca
14,817
def index(request):
    """Homepage for this app.
    """
    with open('index.html') as fp:
        return HttpResponse(fp.read())
b9ce38f59443e38e5d27ff7f153a834e1c11b429
14,818
def SECH(*args) -> Function:
    """
    The SECH function returns the hyperbolic secant of an angle.
    Learn more: https://support.google.com/docs/answer/9116560
    """
    return Function("SECH", args)
594921375aaa7d4fb409e1a4792a6752f81b6bb2
14,819
import anndata as ad
import numpy as np
import pandas as pd
from scipy.io import mmread


def read_ATAC_10x(matrix, cell_names='', var_names='', path_file=''):
    """
    Load sparse matrix (including matrices corresponding to 10x data) as
    AnnData objects. Read the mtx file, the tsv file corresponding to
    cell_names and the bed file containing the variable names.

    Parameters
    ----------
    matrix: sparse count matrix
    cell_names: optional, tsv file containing cell names
    var_names: optional, bed file containing the feature names

    Return
    ------
    AnnData object
    """
    mat = mmread(''.join([path_file, matrix]))
    mat = mat.toarray()
    mat = np.matrix(mat.transpose())

    with open(path_file + cell_names) as f:
        barcodes = f.readlines()
        barcodes = [x[:-1] for x in barcodes]

    with open(path_file + var_names) as f:
        var_names = f.readlines()
        var_names = ["_".join(x[:-1].split('\t')) for x in var_names]

    adata = ad.AnnData(mat, obs=pd.DataFrame(index=barcodes),
                       var=pd.DataFrame(index=var_names))
    adata.uns['omic'] = 'ATAC'

    return adata
9f2073d7582f93db2f714f401fc0fb5e0762a2fc
14,820
def get_html_subsection(name):
    """
    Return a subsection as HTML, with the given name

    :param name: subsection name
    :type name: str
    :rtype: str
    """
    return "<h2>{}</h2>".format(name)
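# Usage sketch added for illustration (not part of the original snippet):
#
#     get_html_subsection("Results")  # -> "<h2>Results</h2>"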
2e0f37a7bb9815eda24eba210d8518e64595b9b7
14,821
import numpy as np


def compute_norms(items):
    """
    Compute the norms of the item vectors provided.

    Arguments:
    items -- a hashmap which maps itemIDs to the characteristic vectors
    """
    norms = {}
    for item in items:
        norms[item] = np.sqrt(np.sum(np.square(items[item])))
    return norms
ff0a805b6a143b7b52c653226b69aed8319eb5ce
14,822
def do_part_1():
    """
    Solves part 1
    """
    digested_lines = list(map(digest_line, input_lines(2)))
    # Poor man's partial
    doubles = sum(map(lambda l: contains_nple(l, reps=2), digested_lines))
    triples = sum(map(lambda l: contains_nple(l, reps=3), digested_lines))
    print(doubles * triples)
    return doubles * triples
75fa72804d8721b4332d74f00c0bea4d82bcdd02
14,823
import torch


def create_Rz_batch(a):
    """
    Creates a batch of rotation matrices about z of angles a.

    Input (batch)
    Output (batch, 3, 3)
    """
    return torch.stack([
        torch.stack([torch.cos(a), torch.sin(a), torch.zeros_like(a)], dim=1),
        torch.stack([-torch.sin(a), torch.cos(a), torch.zeros_like(a)], dim=1),
        torch.stack([torch.zeros_like(a), torch.zeros_like(a), torch.ones_like(a)], dim=1)
    ], dim=2)
7abed1ef608c9985605096679d28c86f5fabab8e
14,824
import numpy as np
import torch


def get_upsample_filter(size):
    """Make a 2D bilinear kernel suitable for upsampling"""
    factor = (size + 1) // 2
    if size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:size, :size]
    filter = (1 - abs(og[0] - center) / factor) * \
             (1 - abs(og[1] - center) / factor)
    return torch.from_numpy(filter).float()
8c286e6c20f3400c5206f3f15514a65dc8f3b0b5
14,825
import math


def lst2gmst(longitude, hour, minute=None, second=None, longitudeDirection='W',
             longitudeUnits='DEGREES'):
    """
    Converts Local Sidereal Time to Greenwich Mean Sidereal Time.

    Parameters
    ----------
    longitude : float (any numeric type)
        The longitude of the site to calculate the Local Sidereal Time.
        Defaults are Longitude WEST and units DEGREES, but these can be
        changed with the optional parameters lonDirection and lonUnits.
    hour : int (or float)
        If an integer, the function will expect a minute and second. If a
        float, it will ignore minute and second and convert from decimal
        hours to hh:mm:ss.
    minute : int
        Ignored if hour is a float.
    second : int (any numeric type, to include microseconds)
        Ignored if hour is a float.
    longitudeDirection : string
        Default is longitude WEST, 'W', but you can specify EAST by passing 'E'.
    longitudeUnits : string
        Default units are 'DEGREES', but this can be switched to radians by
        passing 'RADIANS' in this parameter.

    Returns
    -------
    hour : int
        The hour of the calculated GMST
    minute : int
        The minutes of the calculated GMST
    second : float
        The seconds of the calculated GMST

    Examples
    --------
    >>> lst2gmst(70.3425, hour=14, minute=26, second=18)
    (19, 7, 40.20000000000607)

    >>> lst2gmst(5.055477, hour=14.4383333333333333, longitudeDirection='E',
    ...          longitudeUnits='RADIANS')
    (19, 7, 40.20107388985991)
    """
    if minute is not None and second is not None:
        hours = sex2dec(hour, minute, second)
    elif minute is None and second is None:
        hours = hour
    else:
        raise AssertionError('minute and second must either be both set, or both unset.')

    if longitudeUnits.upper() == 'DEGREES':
        longitudeTime = longitude / 15.0
    elif longitudeUnits.upper() == 'RADIANS':
        longitudeTime = longitude * 180.0 / math.pi / 15.0

    if longitudeDirection.upper() == 'W':
        gmst = hours + longitudeTime
    elif longitudeDirection.upper() == 'E':
        gmst = hours - longitudeTime
    else:
        raise AssertionError('longitudeDirection must be W or E')

    gmst = gmst % 24.0

    return dec2sex(gmst)
4e651dde2b5dadb1af5d00bc1813272190f07cdf
14,826
import ast


def filter_funcs(node) -> bool:
    """Filter to get function names and remove dunder names"""
    if not isinstance(node, ast.FunctionDef):
        return False
    elif node.name.startswith('__') or node.name.endswith('__'):
        return False
    else:
        return True
022181afa887965af0f2d4c5ec33de07b8a3c089
14,827
from secrets import token_urlsafe
from typing import Optional


def create_api_token(
    creator_id: UserID,
    permissions: set[PermissionID],
    *,
    description: Optional[str] = None,
) -> ApiToken:
    """Create an API token."""
    num_bytes = 40
    token = token_urlsafe(num_bytes)

    db_api_token = DbApiToken(
        creator_id, token, permissions, description=description
    )
    db.session.add(db_api_token)
    db.session.commit()

    return _db_entity_to_api_token(db_api_token)
044d041bd013cb5b0ebc9c534b8c0162c3996172
14,829
from typing import Tuple


def match_image_widths(
    image_i1: Image, image_i2: Image
) -> Tuple[Image, Image, Tuple[float, float], Tuple[float, float]]:
    """Automatically chooses the target width (larger of the two inputs),
    and scales both images to that width.

    Args:
        image_i1: 1st image to match width.
        image_i2: 2nd image to match width.

    Returns:
        Scaled image_i1.
        Scaled image_i2.
        Scaling factor (W, H) for image_i1.
        Scaling factor (W, H) for image_i2.
    """
    max_width = max(image_i1.width, image_i2.width)

    # scale image_i1
    new_width = int(max_width)
    new_height = int(image_i1.height * new_width / image_i1.width)
    scale_factor_i1 = (new_width / image_i1.width, new_height / image_i1.height)
    scaled_image_i1 = resize_image(image_i1, new_height, new_width)

    # scale image_i2
    new_width = int(max_width)
    new_height = int(image_i2.height * new_width / image_i2.width)
    scale_factor_i2 = (new_width / image_i2.width, new_height / image_i2.height)
    scaled_image_i2 = resize_image(image_i2, new_height, new_width)

    return scaled_image_i1, scaled_image_i2, scale_factor_i1, scale_factor_i2
86f043a3069202b2dfc3eb24f9ac10e9077b2237
14,831
import copy
from typing import Any, Dict, Optional, Union


def get_parameter_value_and_validate_return_type(
    domain: Optional[Domain] = None,
    parameter_reference: Optional[Union[Any, str]] = None,
    expected_return_type: Optional[Union[type, tuple]] = None,
    variables: Optional[ParameterContainer] = None,
    parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Optional[Any]:
    """
    This method allows for the parameter_reference to be specified as an object
    (literal, dict, any typed object, etc.) or as a fully-qualified parameter name.
    In either case, it can optionally validate the type of the return value.
    """
    if isinstance(parameter_reference, dict):
        parameter_reference = dict(copy.deepcopy(parameter_reference))

    parameter_reference = get_parameter_value(
        domain=domain,
        parameter_reference=parameter_reference,
        variables=variables,
        parameters=parameters,
    )

    if expected_return_type is not None:
        if not isinstance(parameter_reference, expected_return_type):
            raise ge_exceptions.ProfilerExecutionError(
                message=f"""Argument "{parameter_reference}" must be of type "{str(expected_return_type)}" \
(value of type "{str(type(parameter_reference))}" was encountered).
"""
            )

    return parameter_reference
9cdd3106a0397a63a13d71b1c0ce5815a41e47ed
14,832
def diff_tags(list_a, list_b):
    """
    Return human readable diff string of tags changed between two tag lists

    :param list_a: Original tag list
    :param list_b: New tag list
    :return: Difference string
    """
    status_str = text_type("")
    tags_added = [tag for tag in list_b if tag not in list_a]
    tags_removed = [tag for tag in list_a if tag not in list_b]
    if tags_added and tags_removed:
        status_str += "added: {0}".format(text_type(tags_added))
        status_str += " removed: {0}".format(text_type(tags_removed))
    elif tags_added:
        status_str += "added: {0}".format(text_type(tags_added))
    elif tags_removed:
        status_str += "removed: {0}".format(text_type(tags_removed))
    if not status_str:
        status_str = "no changes required."
    return status_str
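# Usage sketch added for illustration (not part of the original snippet);
# assumes text_type is str (e.g. six.text_type on Python 3):
#
#     diff_tags(['a', 'b'], ['b', 'c'])  # -> "added: ['c'] removed: ['a']"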
e9f69bcdee0e2cb6fd260c56f8bbfe5f568afc63
14,833
import numpy


def distance_on_great_circle(start_point, direction, distance):
    """compute the location of a point a specified distance along a great circle

    NOTE: This assumes a spherical earth. The error introduced in the location
    is pretty small (~15 km for a 13000 km path), but it totally screws with
    the altitude. YOU SHOULD NOT USE THE ALTITUDE COMING OUT OF THIS,
    ESPECIALLY IF YOU HAVE ANY MEANINGFUL DISTANCE

    Arguments:
        start_point: the starting point of the great circle. The direction is
            given in a NED frame at this point. Numpy (3,) array in radians, lla
        direction: a NED vector indicating the direction of the great circle
        distance: the length of the great circle arc (m)

    Returns:
        end_point: the end of a great circle path of length <distance> from
            <start_point> with initial <direction>
    """
    start_xyz = geodesy.conversions.lla_to_xyz(start_point)
    direction = geometry.conversions.to_unit_vector(direction)

    delta_xyz = geodesy.conversions.ned_to_xyz(
        direction, numpy.array(start_point, ndmin=2))

    rotation_axis = -geometry.conversions.to_unit_vector(
        numpy.cross(start_xyz, delta_xyz))
    rotation_magnitude = distance / environments.earth.constants['r0']

    rotation_quaternion = geometry.quaternion.Quaternion()
    rotation_quaternion.from_axis_and_rotation(
        rotation_axis, rotation_magnitude)

    end_point_xyz = rotation_quaternion.rot(start_xyz)
    end_point = geodesy.conversions.xyz_to_lla(end_point_xyz)
    return end_point
e39c62435c208cb2ea4e951b91b641cfbfcd45a8
14,834
def construct_tree_framework(bracket):
    """Given the tree in bracket form, creates a tree with labeled leaves and
    unlabeled inner nodes."""
    if type(bracket) == int:
        # base case, creates leaf
        return Node(bracket)
    else:
        # recursive step, inner nodes
        root = Node(None,
                    construct_tree_framework(bracket[0]),
                    construct_tree_framework(bracket[1]))
        return root
a54651fcc5604f46985b11d0d783c76f4368a9d0
14,835
import numpy as np


def eckart_transform(atommasses, atomcoords):
    """Compute the Eckart transform.

    This transform is described in https://gaussian.com/vib/.

    Parameters
    ----------
    atommasses : array-like
        Atomic masses in atomic mass units (amu).
    atomcoords : array-like
        Atomic coordinates.

    Returns
    -------
    array-like

    Examples
    --------
    >>> from overreact import _datasets as datasets

    >>> data = datasets.logfiles["tanaka1996"]["Cl·@UMP2/cc-pVTZ"]
    >>> eckart_transform(data.atommasses, data.atomcoords)
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])
    >>> data = datasets.logfiles["symmetries"]["dihydrogen"]
    >>> eckart_transform(data.atommasses, data.atomcoords)
    array([[...]])
    >>> data = datasets.logfiles["symmetries"]["water"]
    >>> eckart_transform(data.atommasses, data.atomcoords)
    array([[-9.42386999e-01,  0.00000000e+00,  0.00000000e+00,  2.99716727e-01,
            -2.86166258e-06, -7.42376895e-02, -1.19022276e-02,  4.33736541e-03,
            -1.28081683e-01],
           [-0.00000000e+00, -9.42386999e-01,  0.00000000e+00,  1.40934586e-02,
            -1.34562803e-07,  1.01850683e-01, -1.52466204e-01, -2.78628770e-01,
            -2.13218735e-02],
           [-0.00000000e+00, -0.00000000e+00, -9.42386999e-01, -1.47912143e-01,
             1.41224899e-06, -1.40724409e-01, -3.86450545e-02, -1.77596105e-02,
            -2.61565554e-01],
           [-2.36544652e-01, -0.00000000e+00, -0.00000000e+00, -5.97037403e-01,
            -6.33525274e-01,  2.70812665e-02, -2.34354970e-01,  8.09905642e-02,
             3.52169811e-01],
           [-0.00000000e+00, -2.36544652e-01, -0.00000000e+00, -2.80742485e-02,
            -2.97900030e-02, -6.93753868e-01,  5.78451116e-01,  2.06337502e-01,
             2.89647600e-01],
           [-0.00000000e+00, -0.00000000e+00, -2.36544652e-01,  2.94641819e-01,
             3.12648820e-01, -1.12274948e-02, -4.19760855e-01,  1.83772848e-01,
             7.41205673e-01],
           [-2.36544652e-01, -0.00000000e+00, -0.00000000e+00, -5.97025305e-01,
             6.33536675e-01,  2.68679525e-01,  2.81773098e-01, -9.82705016e-02,
             1.58103880e-01],
           [-0.00000000e+00, -2.36544652e-01, -0.00000000e+00, -2.80736797e-02,
             2.97905391e-02,  2.87983715e-01,  2.89697972e-02,  9.03711399e-01,
            -2.04701877e-01],
           [-0.00000000e+00, -0.00000000e+00, -2.36544652e-01,  2.94635849e-01,
            -3.12654446e-01,  5.71869440e-01,  5.73721626e-01, -1.13019078e-01,
             3.00863871e-01]])
    """
    atommasses = np.asarray(atommasses)
    natom = len(atommasses)
    dof = 3 * natom

    moments, axes, atomcoords = inertia(atommasses, atomcoords, align=False)

    x = np.block(
        [
            np.ones(natom)[:, np.newaxis],
            np.zeros(natom)[:, np.newaxis],
            np.zeros(natom)[:, np.newaxis],
        ]
    )
    y = np.block(
        [
            np.zeros(natom)[:, np.newaxis],
            np.ones(natom)[:, np.newaxis],
            np.zeros(natom)[:, np.newaxis],
        ]
    )
    z = np.block(
        [
            np.zeros(natom)[:, np.newaxis],
            np.zeros(natom)[:, np.newaxis],
            np.ones(natom)[:, np.newaxis],
        ]
    )
    x *= np.sqrt(atommasses[:, np.newaxis])
    y *= np.sqrt(atommasses[:, np.newaxis])
    z *= np.sqrt(atommasses[:, np.newaxis])

    D_trans = np.block([x.reshape(1, dof).T, y.reshape(1, dof).T, z.reshape(1, dof).T])
    D_rot = np.array(
        [
            np.cross((atomcoords @ axes)[i], axes[:, j]) / np.sqrt(atommasses[i])
            for i in range(natom)
            for j in range(3)
        ]
    )
    D = np.block([D_trans, D_rot])
    return np.linalg.qr(D, mode="complete")[0]
833b18ecdb299d3183da24c3b9d40227e387a385
14,836
def as_java_array(gateway, java_type, iterable):
    """Creates a Java array from a Python iterable, using the given py4j gateway"""
    java_type = gateway.jvm.__getattr__(java_type)
    lst = list(iterable)

    arr = gateway.new_array(java_type, len(lst))

    for i, e in enumerate(lst):
        jobj = as_java_object(gateway, e)
        arr[i] = jobj

    return arr
d8a14a6506a0cbde6f09b4d071f6968da3e4d17d
14,837
import numpy as np
import scipy


def match(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Finds the matrix R that minimizes the Frobenius norm of RA - B,
    where R is orthonormal.

    Args:
        a (np.ndarray[samples, features]): the first matrix to match
        b (np.ndarray[samples, features]): the second matrix to match

    Returns:
        np.ndarray: the orthonormal matching matrix R
    """
    tus.check_ndarrays(
        a=(a, ('samples', 'features'), ('float32', 'float64')),
        b=(b, (('samples', a.shape[0]), ('features', a.shape[1])), a.dtype)
    )
    m = b @ a.T
    u, _, vh = scipy.linalg.svd(m)
    return np.real(u @ vh)
461f3f05ab1164bfbac3f9e8f6ccd5622791a6ff
14,838
import json
import time

import pandas as pd
import requests


def macro_cons_silver_amount():
    """
    Holdings report for the world's largest silver ETF, iShares Silver Trust;
    data range from 2006-04-29 to the present.
    :return: pandas.Series
        2006-04-29    263651152
        2006-05-02    263651152
        2006-05-03    445408550
        2006-05-04    555123947
        2006-05-05    574713264
                        ...
        2019-10-17     Show All
        2019-10-18     Show All
        2019-10-21     Show All
        2019-10-22     Show All
        2019-10-23     Show All
    """
    t = time.time()
    res = requests.get(
        JS_CONS_SLIVER_ETF_URL.format(
            str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
        )
    )
    json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["白银"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["总价值(美元)"]

    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "etf",
        "attr_id": "2",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    temp_df = temp_df.append(temp_se)
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "silver_amount"

    url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
    r = requests.get(url)
    data_json = r.json()
    append_temp_df = pd.DataFrame(data_json["values"]).T
    append_temp_df.columns = [item["name"] for item in data_json["keys"]]
    temp_append_df = append_temp_df["总价值"]
    temp_append_df.name = "silver_amount"

    temp_df = temp_df.reset_index()
    temp_df["index"] = temp_df["index"].astype(str)
    temp_df = temp_df.append(temp_append_df.reset_index())
    temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
    temp_df.index = pd.to_datetime(temp_df["index"])
    del temp_df["index"]
    temp_df = temp_df[temp_df != 'Show All']
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.astype(float)
    return temp_df
d60bac23a480c056d237dda6b16eb267b2f54ee5
14,839
import random


def shuffle(answers):
    """
    Returns mixed answers and the index of the correct one,
    assuming the first answer is the correct one.
    """
    indices = list(range(len(answers)))
    random.shuffle(indices)
    correct = indices.index(0)
    answers = [answers[i] for i in indices]
    return answers, correct
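# Usage sketch added for illustration (not part of the original snippet);
# the answer strings are hypothetical:
#
#     mixed, correct = shuffle(["right", "wrong A", "wrong B"])
#     assert mixed[correct] == "right"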
e597b4aeb65fecf47f4564f2fddb4d76d484707a
14,840
from pathlib import Path
from typing import Union

import pandas as pd


def annotations_to_xml(annotations_df: pd.DataFrame, image_path: Union[str, Path],
                       write_file=True) -> str:
    """
    Load annotations from dataframe (retinanet output format) and convert them
    into xml format (e.g. RectLabel editor / LabelImg).

    Args:
        annotations_df (DataFrame): Format [xmin,ymin,xmax,ymax,label,...]
        image_path: string/Path path to the file where these bboxes are found
        write_file: Writes the xml at the same path as the image it describes.
            Overwrites the existent file, if any.

    Returns:
        XML

        <annotation>
            <folder>unlabeled_imgs</folder>
            <filename>autumn-forest-from-above-2210x1473.jpeg</filename>
            <path>/work/trees/unlabeled_imgs/autumn-forest-from-above-2210x1473.jpeg</path>
            <source>
                <database>Unknown</database>
            </source>
            <size>
                <width>2210</width>
                <height>1473</height>
                <depth>3</depth>
            </size>
            <segmented>0</segmented>
            <object>
                <name>tree</name>
                <pose>Unspecified</pose>
                <truncated>0</truncated>
                <difficult>0</difficult>
                <bndbox>
                    <xmin>718</xmin>
                    <ymin>603</ymin>
                    <xmax>792</xmax>
                    <ymax>705</ymax>
                </bndbox>
            </object>
        </annotation>
    """
    image_path = Path(image_path)
    out_dict = {
        'folder': image_path.parent.name,
        'filename': image_path.name,
        'path': str(image_path),
        'segmented': 0
    }

    xml_out = '<annotation>\n'
    xml_out += dict2xml(out_dict, indent=" ") + '\n'
    xml_out += "\n".join([__annotation_row_to_dict(row) for _, row in annotations_df.iterrows()])
    xml_out += '\n</annotation>\n'

    if write_file:
        # annotations file should be near its image
        file_path = image_path.parent / f'{image_path.stem}.xml'
        with open(file_path, 'w+') as the_file:
            the_file.write(xml_out)

    return xml_out
68ab235299da7026b77feb715e260f3e1749ec3b
14,841
def depth(sequence, func=max, _depth=0):
    """
    Find the nesting depth of a nested sequence
    """
    if isinstance(sequence, dict):
        sequence = list(sequence.values())
    depth_list = [
        depth(item, func=func, _depth=_depth + 1)
        for item in sequence
        if (isinstance(item, dict) or util_type.is_listlike(item))
    ]
    if len(depth_list) > 0:
        return func(depth_list)
    else:
        return _depth
84b6e7ccaa0f7924fa4a775eca41edf8422222d0
14,842
def tei_email(elem_text):
    """
    create TEI element <email> with given element text
    """
    email = etree.Element("email")
    email.text = elem_text
    return email
cd3d6cf53f7ea5a29c4a02a4ea0d0a2d2144645c
14,843
from pathlib import Path
from typing import List


def get_requirements(req_file: str) -> List[str]:
    """
    Extract requirements from provided file.
    """
    req_path = Path(req_file)
    requirements = req_path.read_text().split("\n") if req_path.exists() else []
    return requirements
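# Usage sketch added for illustration (not part of the original snippet);
# "requirements.txt" and its contents are hypothetical:
#
#     get_requirements("requirements.txt")
#     # -> one list entry per line of the file (including a trailing ""
#     #    if the file ends with a newline), or [] if the file is missing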
3433cd117bbb0ced7ee8238e36f20c69e15c5260
14,845
def get_gmail_account(slug):
    """
    Return the details of the given account - just pass in the slug
    e.g. get_account('testcity')
    """
    service = get_gapps_client()
    if not service:
        return None
    try:
        return service.users().get(userKey=make_email(slug)).execute()
    except HttpError:
        return None
959685e6f40b8333103e47f9ce8c50050ca95961
14,847
def unisolate_machine_command(): """Undo isolation of a machine. Returns: (str, dict, dict). Human readable, context, raw response """ headers = ['ID', 'Type', 'Requestor', 'RequestorComment', 'Status', 'MachineID', 'ComputerDNSName'] machine_id = demisto.args().get('machine_id') comment = demisto.args().get('comment') machine_action_response = unisolate_machine_request(machine_id, comment) machine_action_data = get_machine_action_data(machine_action_response) entry_context = { 'MicrosoftATP.MachineAction(val.ID === obj.ID)': machine_action_data } human_readable = tableToMarkdown("The request to stop the isolation has been submitted successfully:", machine_action_data, headers=headers, removeNull=True) return human_readable, entry_context, machine_action_response
d981005753030a1be50a3c0ff40022241096ea2f
14,848
def func(*listItems):
    """
    1. Iterate over all of the arguments.
    2. Collect every element of each argument that is itself a list into one list.
    3. Keep the ints, sort them in descending order, and return the largest one.
    """
    tmp_list = []
    for item in listItems:
        if isinstance(item, list):
            for i in item:
                tmp_list.append(i)
    tmp_list = list(filter(lambda k: isinstance(k, int), tmp_list))
    tmp_list.sort(reverse=True)
    max_value = tmp_list[0]
    return max_value
adbef2744871f1d8f714cbf2a71d4321e3fb72f5
14,849
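# Behaviour sketch for func() above (assumed in scope): only arguments that
# are lists are scanned, and only their int elements compete for the maximum.
print(func([3, 1, "x"], [7, 2], 99))  # -> 7 (the bare 99 is ignored)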
def factory(name: str): """Factory function to return a processing function for Part of Speech tagging. Parameters: ----------- name : str Identifier, e.g. 'spacy-de', 'stanza-de', 'flair-de', 'someweta-de', 'someweta-web-de' Example: -------- import nlptasks as nt import nlptasks.pos sequences = [['Die', 'Kuh', 'ist', 'bunt', '.']] myfn = nt.pos.factory("spacy-de") idseqs, TAGSET = myfn(sequences, maxlen=4) """ if name in ("spacy", "spacy-de"): return spacy_de elif name in ("stanza", "stanza-de"): return stanza_de elif name == "flair-de": return flair_de elif name in ("someweta", "someweta-de"): return someweta_de elif name in ("someweta-web", "someweta-web-de"): return someweta_web_de else: raise Exception(f"Unknown PoS tagger: '{name}'")
9166613ba98beeb56dcc5766217d951ff13f9b38
14,850
def align_dataframes(framea, frameb, fill_value = 0.0): """Use pandas DataFrame structure to align two-dimensional data :param framea: First pandas dataframe to align :param frameb: Other pandas dataframe to align :param fill_value: default fill value (0.0 float) return: tuple of aligned frames """ zeroframe = frameb.copy() zeroframe[:] = fill_value aligneda = framea.add(zeroframe, fill_value = fill_value) zeroframe = framea.copy() zeroframe[:] = fill_value alignedb = frameb.add(zeroframe, fill_value = fill_value) return aligneda, alignedb
86a5e8c399ab47a10715af6c90d0901c2207597c
14,852
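# A short sketch of align_dataframes() above (assumed in scope): both frames
# are padded to a common set of rows and columns.
import pandas as pd

a = pd.DataFrame({"x": [1, 2]}, index=["r1", "r2"])
b = pd.DataFrame({"y": [3]}, index=["r2"])
aligned_a, aligned_b = align_dataframes(a, b)
print(aligned_a.shape, aligned_b.shape)  # both (2, 2)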
import numpy as np


def flip_ud(img):
    """ Expects shape to be (num_examples, modalities, depth, width, height) """
    return np.flip(img.copy(), 3)
f7a14641a89f5a170cb3d19b412acdbcbe3ac2f3
14,853
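# Sanity-check sketch for flip_ud() above (assumed in scope): axis 3 (width)
# is the one that gets reversed.
import numpy as np

img = np.arange(6).reshape(2, 1, 1, 3, 1)
flipped = flip_ud(img)
print(img[0, 0, 0, :, 0], flipped[0, 0, 0, :, 0])  # [0 1 2] [2 1 0]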
def data_context_service_interface_pointuuid_otsi_service_interface_point_spec_otsi_capability_get(uuid): # noqa: E501 """data_context_service_interface_pointuuid_otsi_service_interface_point_spec_otsi_capability_get returns tapi.photonic.media.OtsiCapabilityPac # noqa: E501 :param uuid: Id of service-interface-point :type uuid: str :rtype: TapiPhotonicMediaOtsiCapabilityPac """ return 'do some magic!'
99b3ed0e843f0dd405cd0d0b618a4da92fbdcf55
14,854
def _get_trial_event_times(events, units, trial_cond_name): """ Get median event start times from all unit-trials from the specified "trial_cond_name" and "units" - aligned to GO CUE :param events: list of events """ events = list(events) + ['go'] event_types, event_times = (psth.TrialCondition().get_trials(trial_cond_name) * (experiment.TrialEvent & [{'trial_event_type': eve} for eve in events]) & units).fetch('trial_event_type', 'trial_event_time') period_starts = [(event_type, np.nanmedian((event_times[event_types == event_type] - event_times[event_types == 'go']).astype(float))) for event_type in events[:-1] if len(event_times[event_types == event_type])] present_events, event_starts = list(zip(*period_starts)) return np.array(present_events), np.array(event_starts)
c7198fdba392d7b5301109175408d3c0d95adbb9
14,855
from typing import Iterable


def select_region(selections, positions, region):
    """Return the selections whose positions fall inside the given region."""
    if not region:
        return selections
    region = list(region) + [None, None]
    assert all([x is None or isinstance(x, Iterable) and len(x) == 2 for x in region]), \
        'region should be a collection of (min, max) pairs for x, y, z'
    output = []
    for sel in selections:
        for regi, reg in enumerate(region[:3]):
            if reg:
                if reg[0] <= positions[sel][regi] <= reg[1]:
                    output.append(sel)
    return output
b9efc393b7d60773554130ded49d9dc9e00081e5
14,857
def summarize_center_and_dispersion(
        analysis_layer,
        summarize_type=["CentralFeature"],
        ellipse_size=None,
        weight_field=None,
        group_field=None,
        output_name=None,
        context=None,
        gis=None,
        estimate=False,
        future=False):
    """
    .. image:: _static/images/summarize_center_and_dispersion/summarize_center_and_dispersion.png

    The ``summarize_center_and_dispersion`` method finds central features and directional distributions.
    It can be used to answer questions such as:

    * Where is the center?
    * Which feature is the most accessible from all other features?
    * How dispersed, compact, or integrated are the features?
    * Are there directional trends?

    ==================== =========================================================
    **Argument**         **Description**
    -------------------- ---------------------------------------------------------
    analysis_layer       Required feature layer. The point, line, or polygon
                         features to be analyzed. See :ref:`Feature Input<FeatureInput>`.
    -------------------- ---------------------------------------------------------
    summarize_type       Required list of strings. The method with which to
                         summarize the ``analysis_layer``.

                         Choice list: ["CentralFeature", "MeanCenter",
                         "MedianCenter", "Ellipse"]
    -------------------- ---------------------------------------------------------
    ellipse_size         Optional string. The size of the output ellipse in
                         standard deviations.

                         Choice list: ['1 standard deviations',
                         '2 standard deviations', '3 standard deviations']

                         The default ellipse size is '1 standard deviations'.
    -------------------- ---------------------------------------------------------
    weight_field         Optional field. A numeric field in the ``analysis_layer``
                         to be used to weight locations according to their
                         relative importance.
    -------------------- ---------------------------------------------------------
    group_field          Optional field. The field used to group features for
                         separate directional distribution calculations. The
                         ``group_field`` can be of integer, date, or string type.
    -------------------- ---------------------------------------------------------
    output_name          Optional string. If provided, the method will create a
                         feature service of the results. You define the name of
                         the service. If ``output_name`` is not supplied, the
                         method will return a feature collection.
    -------------------- ---------------------------------------------------------
    context              Optional string. Context contains additional settings
                         that affect task execution. For
                         ``summarize_center_and_dispersion``, there are two
                         settings.

                         #. Extent (``extent``) - a bounding box that defines the
                            analysis area. Only those features in the input layer
                            that intersect the bounding box will be buffered.
                         #. Output Spatial Reference (``outSR``) - the output
                            features will be projected into the output spatial
                            reference.
    -------------------- ---------------------------------------------------------
    estimate             Optional boolean. If True, the number of credits to run
                         the operation will be returned.
    -------------------- ---------------------------------------------------------
    future               Optional boolean. If True, the result will be a GPJob
                         object and results will be returned asynchronously.
    ==================== =========================================================

    :returns: list of items if ``output_name`` is supplied; else, a Python dictionary with the following keys:

        "central_feature_result_layer" : layer (FeatureCollection)

        "mean_feature_result_layer" : layer (FeatureCollection)

        "median_feature_result_layer" : layer (FeatureCollection)

        "ellipse_feature_result_layer" : layer (FeatureCollection)

    .. code-block:: python

        # USAGE EXAMPLE: To find central features and mean center of earthquakes over past months.
        central_features = summarize_center_and_dispersion(analysis_layer=earthquakes,
                                                           summarize_type=["CentralFeature","MeanCenter"],
                                                           ellipse_size='2 standard deviations',
                                                           weight_field='mag',
                                                           group_field='magType',
                                                           output_name='find central features and mean center of earthquake over past months')
    """
    gis = _arcgis.env.active_gis if gis is None else gis
    return gis._tools.featureanalysis.summarize_center_and_dispersion(
        analysis_layer,
        summarize_type,
        ellipse_size,
        weight_field,
        group_field,
        output_name,
        context,
        estimate=estimate,
        future=future)
a1fc44cb1781bb11f39dda597fe884552ec07a99
14,858
import numpy as np


def length_entropy(r: np.ndarray, minlen: int = 2) -> float:
    """Calculate entropy of diagonal lengths in RQ matrix.

    Args:
        r (np.ndarray[bool, bool]): Recurrence matrix
        minlen (int): Minimum length of a line

    Returns:
        float: Shannon entropy of distribution of segment lengths
    """
    dlens = diagonal_lengths(r, minlen)
    counts = _dlen_counts(dlens, minlen, r.shape[0])
    return entropy(counts)
fe20e36aade8bae5e8a2fc139ad887495818f336
14,859
import numpy as np


def rotate(posList, axis, angle):
    """Rotate the points about a given axis by a given angle."""
    # normalize axis, turn angle into radians
    axis = axis / np.linalg.norm(axis)
    angle = np.deg2rad(angle)
    # rotation matrix construction (Rodrigues' rotation formula)
    ux, uy, uz = axis
    sin, cos = np.sin(angle), np.cos(angle)
    rotMat = np.array([[cos+ux*ux*(1.-cos), ux*uy*(1.-cos)-uz*sin, ux*uz*(1.-cos)+uy*sin],
                       [uy*ux*(1.-cos)+uz*sin, cos+uy*uy*(1.-cos), uy*uz*(1.-cos)-ux*sin],
                       [uz*ux*(1.-cos)-uy*sin, uz*uy*(1.-cos)+ux*sin, cos+uz*uz*(1.-cos)]])
    # rotate points
    return np.transpose(np.dot(rotMat, np.transpose(posList)))
0719bf548f5d952e78f0b2551f2edcd9510b1eca
14,861
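# Quick sketch for rotate() above (assumed in scope): a 90-degree turn about
# the z axis maps the x unit vector onto the y unit vector.
import numpy as np

points = np.array([[1.0, 0.0, 0.0]])
print(np.round(rotate(points, np.array([0, 0, 1]), 90.0), 6))  # [[0. 1. 0.]]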
def _make_frame_with_filename(tb, idx, filename): """Return a copy of an existing stack frame with a new filename.""" frame = tb[idx] return FrameSummary( filename, frame.lineno, frame.name, frame.line)
c775b77c3c282ed598adc25996fb418a9b85529e
14,862
import numpy as np


def median(X):
    """
    Middle value after sorting all values by size, or mean of the two middle values.

    Parameters
    ----------
    X : np.array
        Dataset. Should be a two-dimensional array.

    Returns
    -------
    a: np.array
        One-dimensional array that contains the median for each feature.
    """
    return np.nanmedian(X, axis=0)
232d1ce560c4030b01b048cb9087d5e8c49b39ec
14,863
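# Sketch for median() above (assumed in scope): NaNs are ignored per feature column.
import numpy as np

X = np.array([[1.0, 10.0], [3.0, np.nan], [5.0, 30.0]])
print(median(X))  # [ 3. 20.]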
def _filter_none_values(d: dict): """ Filter out the key-value pairs with `None` as value. Arguments: d dictionary Returns: filtered dictionary. """ return {key: value for (key, value) in d.items() if value is not None}
bed2629e4fa96a391e15b043aa3a0d64c75d6ed0
14,864
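# Sketch for _filter_none_values() above (assumed in scope): only None is
# dropped; falsy values such as 0 or "" survive.
print(_filter_none_values({"a": 0, "b": None, "c": ""}))  # {'a': 0, 'c': ''}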
def new_project(request):
    """
    If this is a new project, call crud_project without a slug and with
    action set to "New".
    """
    return crud_project(request, slug=None, action="New")
fc224a23fb2ecc39fce20a927c57be0ff74ed9d1
14,865
def get_Simon_instance(simon_instance): """Return an instance of the Simon family as a `Cipher`.""" if simon_instance == SimonInstance.simon_32_64: default_rounds = 32 n = 16 m = 4 z = "11111010001001010110000111001101111101000100101011000011100110" elif simon_instance == SimonInstance.simon_48_96: default_rounds = 36 n = 24 m = 4 z = "10001110111110010011000010110101000111011111001001100001011010" elif simon_instance == SimonInstance.simon_64_128: default_rounds = 44 n = 32 m = 4 z = "11011011101011000110010111100000010010001010011100110100001111" else: raise ValueError("invalid instance of Simon") class SimonKeySchedule(RoundBasedFunction): """Key schedule function.""" num_rounds = default_rounds input_widths = [n for _ in range(m)] output_widths = [n for _ in range(default_rounds)] @classmethod def set_num_rounds(cls, new_num_rounds): cls.num_rounds = new_num_rounds cls.input_widths = [n for _ in range(min(m, new_num_rounds))] cls.output_widths = [n for _ in range(new_num_rounds)] @classmethod def eval(cls, *master_key): if cls.num_rounds <= m: return list(reversed(master_key))[:cls.num_rounds] k = [None for _ in range(cls.num_rounds)] k[:m] = list(reversed(master_key)) for i in range(m, cls.num_rounds): tmp = RotateRight(k[i - 1], 3) if m == 4: tmp ^= k[i - 3] tmp ^= RotateRight(tmp, 1) k[i] = ~k[i - m] ^ tmp ^ int(z[(i - m) % 62]) ^ 3 return k class SimonEncryption(Encryption, RoundBasedFunction): """Encryption function.""" num_rounds = default_rounds input_widths = [n, n] output_widths = [n, n] round_keys = None @classmethod def set_num_rounds(cls, new_num_rounds): cls.num_rounds = new_num_rounds @classmethod def eval(cls, x, y): for i in range(cls.num_rounds): x, y = (y ^ SimonRF(x) ^ cls.round_keys[i], x) cls.add_round_outputs(x, y) return x, y class SimonCipher(Cipher): key_schedule = SimonKeySchedule encryption = SimonEncryption _simon_instance = simon_instance @classmethod def set_num_rounds(cls, new_num_rounds): cls.key_schedule.set_num_rounds(new_num_rounds) cls.encryption.set_num_rounds(new_num_rounds) @classmethod def test(cls): old_num_rounds = cls.num_rounds cls.set_num_rounds(default_rounds) if cls._simon_instance == SimonInstance.simon_32_64: plaintext = (0x6565, 0x6877) key = (0x1918, 0x1110, 0x0908, 0x0100) assert cls(plaintext, key) == (0xc69b, 0xe9bb) elif cls._simon_instance == SimonInstance.simon_48_96: plaintext = (0x726963, 0x20646e) key = (0x1a1918, 0x121110, 0x0a0908, 0x020100) assert cls(plaintext, key) == (0x6e06a5, 0xacf156) elif cls._simon_instance == SimonInstance.simon_64_128: plaintext = (0x656b696c, 0x20646e75) key = (0x1b1a1918, 0x13121110, 0x0b0a0908, 0x03020100) assert cls(plaintext, key) == (0x44c8fc20, 0xb9dfa07a) else: raise ValueError("invalid instance of Simon") cls.set_num_rounds(old_num_rounds) return SimonCipher
41ce1cdfdb58b15af8167f5e0d03fcd0beb94c80
14,866
from math import floor


def clamp(val: float) -> int:
    """Clamp a number to the 0-255 range expected of an RGB component

    This ensures that we don't have negative values, or values exceeding
    one byte. Additionally, all inputs are floored to integers.

    Args:
        val (float): Raw float value to clamp

    Returns:
        int: Clamped R/G/B value
    """
    return floor(min(max(0, val), 255))
b908afd06f8e5bf9b98f2729424e0b007c62a18a
14,868
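# Sketch for clamp() above (assumed in scope): out-of-range values are pinned
# to 0/255 and in-range floats are floored.
print(clamp(-12.5), clamp(64.9), clamp(300.0))  # 0 64 255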
import numpy as np
import scipy.sparse.csgraph


def componental_mfpt(trans: np.ndarray, **kwargs) -> np.ndarray:
    """Compute Markov mean first passage times per connected component of the chain."""
    n_comps, comp_labels = scipy.sparse.csgraph.connected_components(
        trans, **kwargs
    )
    hier_trans = transition_matrix(trans)
    absorbing = np.isclose(np.diag(hier_trans), 1)
    if n_comps == 1 and not absorbing.any():
        return mfpt(hier_trans)
    else:
        times = np.full_like(hier_trans, fill_value=np.inf)
        # for each autonomous subsystem
        for comp_i in range(n_comps):
            is_comp = (comp_labels == comp_i)
            absorbing_i = np.flatnonzero(absorbing & is_comp)
            nonabsorbing_i = np.flatnonzero(~absorbing & is_comp)
            times[nonabsorbing_i[:, None], nonabsorbing_i] = mfpt(
                hier_trans[nonabsorbing_i[:, None], nonabsorbing_i]
            )
            times[absorbing_i, absorbing_i] = 1
        return times
76c9ade340668e5f564b874bc808170b2d0903cb
14,869
def atleast_1d(*arys): """ Convert inputs to arrays with at least one dimension. Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. Parameters ---------- array1, array2, ... : array_like One or more input arrays. Returns ------- ret : ndarray An array, or sequence of arrays, each with ``a.ndim >= 1``. Copies are made only if necessary. See Also -------- atleast_2d, atleast_3d Examples -------- >>> np.atleast_1d(1.0) array([ 1.]) >>> x = np.arange(9.0).reshape(3,3) >>> np.atleast_1d(x) array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., 8.]]) >>> np.atleast_1d(x) is x True >>> np.atleast_1d(1, [3, 4]) [array([1]), array([3, 4])] """ res = [] for ary in arys: ary = asanyarray(ary) if len(ary.shape) == 0 : result = ary.reshape(1) else : result = ary res.append(result) if len(res) == 1: return res[0] else: return res
440782c7c5a5b231425cc1c1282110e983dd8dc2
14,871
def lines2str(lines, sep = "\n"): """Merge a list of lines into a single string Args: lines (list, str, other): a list of lines or a single object sep (str, optional): a separator Returns: str: a single string which is either a concatenated lines (using a custom or the default separator) or a str(lines) result """ if isinstance(lines, str): return lines if hasattr(lines, '__iter__'): return sep.join(lines) return str(lines)
a9209bd8eda92f42a287725aaeccfcb35dab24cd
14,872
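# Sketch for lines2str() above (assumed in scope): strings pass through,
# iterables are joined, and anything else is stringified.
print(lines2str("one line"))            # one line
print(lines2str(["a", "b"], sep=", "))  # a, b
print(lines2str(42))                    # 42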
def evaluate(board): """ Evaluates chess board input parameter(s): board --> The chess board to be evaluated return parameter(s): score --> The board evaluation """ score = 0 for i in range(len(board)): for j in range(len(board[i])): # Add piece value and it's current square value (A Queen on d4 will be worth 900 + 5) piece_value = piece_values[board[i][j]] + \ square_values[board[i][j]][i][j] # Add piece value to overall board score score += piece_value return score
6b02f085ab47d241f7639143d82570e97891975a
14,874
def get_atom(value): """atom = [CFWS] 1*atext [CFWS] An atom could be an rfc2047 encoded word. """ atom = Atom() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) atom.append(token) if value and value[0] in ATOM_ENDS: raise errors.HeaderParseError( "expected atom but found '{}'".format(value)) if value.startswith('=?'): try: token, value = get_encoded_word(value) except errors.HeaderParseError: # XXX: need to figure out how to register defects when # appropriate here. token, value = get_atext(value) else: token, value = get_atext(value) atom.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) atom.append(token) return atom, value
f7d93daabfc96138e79443eb2e5c7e7a9b28fbbc
14,875
def create_page():
    """Create a new page."""
    tags = dbutils.get_tags()
    return render_template('edit.html', title='新建', edit=False, tags=tags)
48c14aabc76ff3c4886b2ff7f1340035936d81ce
14,876
def calculate_class_weights(): """ :return: class-wise true-label-area / false-label-area as a dictionary """ df = collect_stats() df = df.fillna(0) df = df.pivot(index = 'Class', columns = 'ImageId', values = 'TotalArea') df = df.sum(axis=1) df = df / (2500. - df) return df.to_dict()
42d006daee27d1400d76e9233e64e4d09683573c
14,877
def get_data_layer(roidb, num_classes): """return a data layer.""" if cfg.TRAIN.HAS_RPN: if cfg.IS_MULTISCALE: layer = GtDataLayer(roidb) else: layer = RoIDataLayer(roidb, num_classes) else: layer = RoIDataLayer(roidb, num_classes) return layer
32035fc837b402949a5fb75af0ad5dbe26a2e129
14,878
def split_on_first_brace(input,begin_brace = "{",end_brace = "}",error_replacement="brace_error"): """ input: string with {Something1} Something2 output: tuple (Something1,Something2) """ if error_replacement=="chapter_error": print(input[:20]) input = remove_empty_at_begin(input) if len(input) == 0: #raise RuntimeError("hi") print("first brace empty string ERROR") return error_replacement,input if input[0] != begin_brace: print("first brace NOT Brace ERROR") return error_replacement,input brace_count = 0 out1 = "" for elem in input: out1 += elem if elem == begin_brace: brace_count = brace_count + 1 if elem == end_brace: brace_count = brace_count - 1 if brace_count == 0: break out2 = input[len(out1):] out1 = out1[1:-1] return out1, out2
201f6789f9db65aa98b857c923e3ed0484aaea89
14,879
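# Sketch for split_on_first_brace() above (assumed in scope, along with its
# helper remove_empty_at_begin, not shown, which strips leading whitespace):
# braces are matched with a counter, so nested groups stay in the first chunk.
print(split_on_first_brace("{a {b} c} tail"))  # ('a {b} c', ' tail')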
import warnings


def check_count(value, total_count, dimension_type):
    """Check the value for count."""
    value = validate(value, "count", int)
    if value > total_count:
        raise ValueError(
            f"Cannot set the count, {value}, more than the number of coordinates, "
            f"{total_count}, for the {dimension_type} dimensions."
        )

    if value < total_count:
        warnings.warn(f"The number of labels, {total_count}, is truncated to {value}.")

    return value
aed0b31e041c3c8ca791533697c2ad9e292a8fcc
14,881
import json def request_certificate(request): """Request the on-demand creation of a certificate for some user, course. A request doesn't imply a guarantee that such a creation will take place. We intentionally use the same machinery as is used for doing certification at the end of a course run, so that we can be sure users get graded and then if and only if they pass, do they get a certificate issued. """ if request.method == "POST": if request.user.is_authenticated(): # Enter your api key here xqci = CertificateGeneration( api_key=settings.APPSEMBLER_FEATURES['ACCREDIBLE_API_KEY'] ) username = request.user.username student = User.objects.get(username=username) course_key = CourseKey.from_string( request.POST.get('course_id') ) course = get_course(course_key) status = certificate_status_for_student( student, course_key)['status'] if status in [CertificateStatuses.unavailable, CertificateStatuses.notpassing, CertificateStatuses.error]: logger.info( 'Grading and certification requested for user {} in course {} via /request_certificate call'.format(username, course_key)) status = xqci.add_cert(student, course_key, course=course) return HttpResponse( json.dumps( {'add_status': status} ), content_type='application/json') return HttpResponse( json.dumps( {'add_status': 'ERRORANONYMOUSUSER'} ), content_type='application/json')
73605a4d02d515656ddbd4cb63e1c810d65f5f2e
14,882
def get_useable_checkers():
    """
    List the available checker plugins.
    :return:
    """
    useable_checkers = list()
    for (checker_name, checker_instance) in CHECKER_INSTANCE_DICT.items():
        if checker_instance.useable:
            useable_checkers.append(checker_instance)
    return useable_checkers
9548c4423f2176081e59957a823afb986f134c7a
14,883
def get_training_roidb(imdb):
    """Returns a roidb (Region of Interest database) for use in training."""
    if cfg.TRAIN.USE_FLIPPED:
        print('Appending horizontally-flipped training examples...')
        imdb.append_flipped_images()
        print('done')

    print('Preparing training data...')
    wrdl_roidb.prepare_roidb(imdb)
    print('done')

    return imdb.roidb
adec6258d6ffa810aef475ec5257ef92a762f1fa
14,884
def _used_in_calls(schedule_name: str, schedule: ScheduleBlock) -> bool:
    """Recursively find if the schedule calls a schedule with name ``schedule_name``.

    Args:
        schedule_name: The name of the callee to identify.
        schedule: The schedule to parse.

    Returns:
        True if ``schedule`` calls a ``ScheduleBlock`` with name ``schedule_name``.
    """
    blocks_have_schedule = False

    for block in schedule.blocks:
        if isinstance(block, Call):
            if block.subroutine.name == schedule_name:
                return True
            else:
                blocks_have_schedule = blocks_have_schedule or _used_in_calls(
                    schedule_name, block.subroutine
                )

        if isinstance(block, ScheduleBlock):
            blocks_have_schedule = blocks_have_schedule or _used_in_calls(schedule_name, block)

    return blocks_have_schedule
9d6ff0b3a415047252ef2148aa6e59e229531ef7
14,885
def font_size_splitter(font_map):
    """
    Split fonts into 4 categories (small, medium, large, xlarge) by the
    maximum letter length in each font.

    :param font_map: input fontmap
    :type font_map : dict
    :return: split fonts as dict
    """
    small_font = []
    medium_font = []
    large_font = []
    xlarge_font = []
    fonts = set(font_map.keys()) - set(RANDOM_FILTERED_FONTS)
    for font in fonts:
        length = max(map(len, font_map[font][0].values()))
        if length <= FONT_SMALL_THRESHOLD:
            small_font.append(font)
        elif length <= FONT_MEDIUM_THRESHOLD:
            medium_font.append(font)
        elif length <= FONT_LARGE_THRESHOLD:
            large_font.append(font)
        else:
            xlarge_font.append(font)
    return {
        "small_list": small_font,
        "medium_list": medium_font,
        "large_list": large_font,
        "xlarge_list": xlarge_font}
d047e182df8d9015997c2debd6269012cb779df5
14,886
def dm2skin_normalizeWeightsConstraint(x): """Constraint used in optimization that ensures the weights in the solution sum to 1""" return sum(x) - 1.0
79024cb70fd6cbc3c31b0821baa1bcfb29317043
14,888
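# A minimal sketch of how the constraint above (assumed in scope) might be fed
# to scipy.optimize.minimize; the quadratic objective and the target weights
# are purely illustrative.
import numpy as np
from scipy.optimize import minimize

target = np.array([0.7, 0.2, 0.1])  # hypothetical "ideal" weights
res = minimize(
    lambda x: np.sum((x - target) ** 2),
    x0=np.full(3, 1.0 / 3.0),
    bounds=[(0.0, 1.0)] * 3,
    constraints={"type": "eq", "fun": dm2skin_normalizeWeightsConstraint},
)
print(np.round(res.x, 3), res.x.sum())  # solution weights sum to 1.0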