Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
def getfrom(v):
    """ pass through function for using the filter_for decorator directly """
    return v
7a01fecbac63bca67fef10bfb39f8641e0cacda7
7,131
def t_POUND(t):
    """header token"""
    # If the header is preceded by a newline
    if t.value[0] == '\n':
        t.value = str(len(t.value) - 1)
    else:
        t.value = str(len(t.value))
    return t
8625bb266d4ad9eec9f08a3edc81483c41e87aff
7,132
import re


def _split_article_sentences(article_text):
    """
    Receives a string containing the plain text of a Wikipedia article.
    Returns a list containing the sentences of the received article text.
    """
    def not_image_thumb(paragraph):
        """
        True if the paragraph is wikitext for displaying an image through
        the "thumb" keyword, i.e. thumb|25px|left
        """
        return not bool(re.match(r"\|thumb\|?|\|?thumb\|", paragraph))

    def split_sentences(text):
        """ Returns a list of the sentences in the text """
        sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?|\!)\s', text)
        return sentences

    paragraphs = article_text.split("\n")
    # Remove empty paragraphs, composed of 0 or more space characters
    paragraphs = list(filter(lambda x: not bool(re.match(r'^\s*$', x)), paragraphs))
    # Remove the paragraphs corresponding to an image thumb
    paragraphs = list(filter(not_image_thumb, paragraphs))
    # Split the paragraphs into sentences,
    # i.e. [paragraph1, paragraph2] -> [[p1_sent1, p1_sent2], [p2_sent1]]
    sentences = list(map(split_sentences, paragraphs))
    # Flatten into a single list, i.e. [[1, 2], [3]] -> [1, 2, 3]
    sentences = [j for i in sentences for j in i]
    # Remove sentences not ending in '.!?'
    sentences = list(filter(lambda x: bool(re.match(r'.+[\?|\!|\.]$', x)), sentences))
    return sentences
5fca6ac2113a7c54456132d3afb2171bd8644603
7,133
import os


def ext(f):
    """Changes the extension of a given file to jpeg"""
    return os.path.splitext(f)[0] + '.jpg'
51e6acce6ff2a7116e4194aed425e5b81a5f15f1
7,134
def _compute_fscores(a_stat, a_fscore_stat):
    """
    Compute macro- and micro-averaged F-scores

    @param a_stat - statistics dictionary on single classes
    @param a_fscore_stat - verbose statistics with F-scores for each
      particular class (will be updated in this method)

    @return 6-tuple with macro- and micro-averaged precision, recall,
      and F-scores
    """
    macro_P = micro_P = macro_R = micro_R = macro_F1 = micro_F1 = 0.
    n_classes = len(a_stat)
    if not n_classes:
        # return the full 6-tuple promised by the docstring
        return (0., 0., 0., 0., 0., 0.)
    macro_F1 = micro_F1 = iF1 = iprec = 0.
    total_tp = total_fp = total_fn = 0
    # obtain statistics for all classes
    for iclass, (tp, fp, fn) in a_stat.items():
        total_tp += tp
        total_fp += fp
        total_fn += fn
        if tp or (fp and fn):
            iprec = tp / float(tp + fp) if (tp or fp) else 0.
            ircall = tp / float(tp + fn) if (tp or fn) else 0.
            if iprec or ircall:
                iF1 = 2 * (iprec * ircall) / (iprec + ircall)
                macro_P += iprec
                macro_R += ircall
                macro_F1 += iF1
            else:
                iF1 = 0
        else:
            iF1 = 0
        a_fscore_stat[iclass].append(iF1)
    # compute macro- and micro-averaged scores
    macro_P /= float(n_classes)
    macro_R /= float(n_classes)
    macro_F1 /= float(n_classes)
    if total_tp or (total_fp and total_fn):
        micro_P = total_tp / float(total_tp + total_fp)
        micro_R = total_tp / float(total_tp + total_fn)
        micro_F1 = 2 * micro_P * micro_R / (micro_P + micro_R)
    return (macro_P, micro_P, macro_R, micro_R, macro_F1, micro_F1)
a5e2cf4eff650e56a4c6b8c83955e3740d6bb937
7,135
def parse_employee_info(record_list):
    """
    Parses the employee record information

    Example input:
    [('3c7ca263-9383-4d61-b507-2c8bd367567f', 123456, 'Austin', 'Grover',
      <memory at 0x11059def0>, True, 1, '00000000-0000-0000-0000-000000000000',
      datetime.datetime(2020, 2, 23, 16, 53, 25, 531305))]

    Args:
        Employee Record list

    Returns:
        list of dictionary records containing employee information.
    """
    data_list = []
    for record in record_list:
        user_password = memoryview(record[4]).tobytes().decode("utf-8")
        data_record = {
            "id": record[0],
            "employeeid": record[1],
            "firstname": record[2],
            "lastname": record[3],
            "password": user_password,
            "active": record[5],
            "classification": record[6],
            "managerid": record[7],
            "createdon": record[8],
        }
        data_list.append(data_record)
    return data_list
a0c4e9fb57bc452119f362525e9c16f516911922
7,139
import math


def gas_release_rate(P1, P2, rho, k, CD, area):
    """
    Gas mass flow (kg/s) through a hole at critical (sonic) or subcritical
    flow conditions. The formula is based on Yellow Book equation 2.22:
    Methods for the calculation of physical effects, CPR 14E,
    van den Bosch and Weterings (Eds.), 1996

    Parameters
    ----------
    P1 : float
        Upstream pressure
    P2 : float
        Downstream pressure
    rho : float
        Fluid density
    k : float
        Ideal gas k (Cp/Cv)
    CD : float
        Coefficient of discharge
    area : float
        Orifice area

    Returns
    ----------
    : float
        Gas release rate / mass flow of discharge
    """
    if P1 > P2:
        if P1 / P2 > ((k + 1) / 2) ** ((k) / (k - 1)):
            flow_coef = 1
        else:
            flow_coef = (
                2 / (k - 1)
                * (((k + 1) / 2) ** ((k + 1) / (k - 1)))
                * ((P2 / P1) ** (2 / k))
                * (1 - (P2 / P1) ** ((k - 1) / k))
            )
        return (
            math.sqrt(flow_coef)
            * CD
            * area
            * math.sqrt(rho * P1 * k * (2 / (k + 1)) ** ((k + 1) / (k - 1)))
        )
    else:
        return 0
8749b85457e3f9e24e08f2a9f5f059066e3518b8
7,140
def email2words(email):
    """Return a slightly obfuscated version of the email address.

    Replaces @ with ' at ', and . with ' dot '.
    """
    return email.replace('@', ' at ').replace('.', ' dot ')
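For instance (an added doctest-style illustration, not part of the original record):

>>> email2words('user@example.com')
'user at example dot com'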
cd8dff104ace7eaad00164ba1161d1c49ce4a0e3
7,141
from typing import Union
from typing import Callable


def _get_callable_str(*, callable_: Union[Callable, str]) -> str:
    """
    Get a callable string (label).

    Parameters
    ----------
    callable_ : Callable or str
        Target function or method or property or dunder method name.

    Returns
    -------
    callable_str : str
        A callable string (label).
    """
    if isinstance(callable_, str):
        callable_str: str = callable_
    else:
        callable_str = callable_.__name__
    return callable_str
2b968e3f5ff79701e6f63bb75548ecb228ec5ed7
7,143
def join_race_and_gender(c_id_string, artist_df):
    """
    takes in a string of c_id(s), ex: "8210" or "8210, 5670"
    returns a total count of male, female, white, black, asian, aian, mix, hispanic
    """
    male = 0
    female = 0
    white = 0
    black = 0
    asian = 0
    aian = 0
    mix = 0
    hispanic = 0
    if type(c_id_string) != str:
        # this means that it is a 'nan'
        return None, None
    c_ids = str(c_id_string).split(', ')
    for c_id in c_ids:
        artist_row = artist_df[artist_df['constituent_id'] == c_id]
        if len(artist_row) == 0:
            continue  # nothing to count for this id
        try:
            gender = artist_row['gender'].iloc[0]
            if gender == 'male':
                male += 1
            elif gender == 'female':
                female += 1
            else:
                pass
        except Exception:
            pass
        try:
            race = artist_row['race'].iloc[0]
            if race == 'white':
                white += 1
            elif race == 'black':
                black += 1
            elif race == 'asian':
                asian += 1
            elif race == 'aian':
                aian += 1
            elif race == 'mix':
                mix += 1
            elif race == 'hispanic':
                hispanic += 1
            else:
                pass
        except Exception:
            pass
    return male, female, white, black, asian, aian, mix, hispanic
3793049d85a12d77058f9574be34b197b5a84760
7,144
import re


def to_mb(s):
    """Simple function to convert `disk_quota` or `memory` attribute string
    values into MB integer values.
    """
    if s is None:
        return s
    if s.endswith('M'):
        return int(re.sub('M$', '', s))
    elif s.endswith('G'):
        return int(re.sub('G$', '', s)) * 1000
    return 512
870f276552ef90bbd5034551ea8ade0f5160491b
7,145
def length_left(lumber):
    """
    Convenience function for calculating the length left in a piece of lumber

    :param lumber: a piece of Lumber
    :return: length remaining
    """
    return lumber.length_left()
57dfd5e160abdc086759dd41df013181f1217f9d
7,146
def find_matching_paren_pair(s):
    """
    Find the first matching pair of parentheses and return their positions
    """
    paren_level = -1
    open_pos = 0
    for i in range(0, len(s)):
        if s[i] == "(":
            paren_level += 1
            if paren_level == 0:
                open_pos = i
        elif s[i] == ")":
            if paren_level == 0:
                return (open_pos, i)
            paren_level -= 1
    raise SyntaxError("Unterminated list '{val}'".format(val=s))
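A quick usage sketch of the index pairing (added example):

>>> find_matching_paren_pair('foo(bar)baz')
(3, 7)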
c50ce61ca96f1babb951d2c1051461be8633d783
7,147
def IsMerge(op):
    """Return true if `op` is a Merge."""
    return op.type == "Merge" or op.type == "RefMerge"
8b5c7373cd698d23bd1b0df78a5986dded8960ec
7,148
def divisors(integer):
    """
    Create a function named divisors/Divisors that takes an integer n > 1
    and returns an array with all of the integer's divisors (except for 1
    and the number itself), from smallest to largest. If the number is prime
    return the string '(integer) is prime' (null in C#) (use Either String a
    in Haskell and Result<Vec<u32>, String> in Rust).

    :param integer: an integer value.
    :return: a list of values which are divisors of an integer.
    """
    values = [x for x in range(2, integer) if integer % x == 0]
    return values if values else "{} is prime".format(integer)
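Expected behavior, shown doctest-style (added example):

>>> divisors(12)
[2, 3, 4, 6]
>>> divisors(13)
'13 is prime'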
6fccf9c5ec49f8ddd852b5aad071f187739e5f0b
7,152
import re


def fraction_to_word(aText, fractions):
    """Spell out fractions written with a '/'.
    """
    aText = re.sub(r"1/2", fractions[0], aText)
    aText = re.sub(r"1/3", fractions[1], aText)
    aText = re.sub(r"2/3", fractions[2], aText)
    aText = re.sub(r"1/4", fractions[3], aText)
    aText = re.sub(r"3/4", fractions[4], aText)
    aText = re.sub(r"1/5", fractions[5], aText)
    return aText
15a99870f2161a354aa69fadc484435b9d37d477
7,154
def create_message_context_properties(message_type, message_id, source,
                                      identifier, is_cloud_event_format) -> dict:
    """Create message context properties dict from input param values."""
    return {
        'type': message_type,
        'message_id': message_id,
        'source': source,
        'identifier': identifier,
        'is_cloud_event_format': is_cloud_event_format
    }
c5edd78abb5e584089456f4d50a656ce46a7666c
7,155
import os
import json
import sys


def use_resultdb():
    """Checks the luci context to determine if resultdb is configured."""
    ctx_filename = os.environ.get("LUCI_CONTEXT")
    if ctx_filename:
        try:
            with open(ctx_filename) as ctx_file:
                ctx = json.load(ctx_file)
                rdb = ctx.get('resultdb', {})
                return rdb.get('hostname') and rdb.get('current_invocation')
        except (OSError, ValueError):
            print(
                "Failed to open LUCI_CONTEXT; skip enabling resultdb integration",
                file=sys.stderr)
            return False
    return False
cc589171a8f07d7560a8987dffe6b56602fbdd3f
7,156
def contours_by_Imax(data, minI_factor=8., cl_factor=1.2, num_contours=10):
    """Calculate the contours for the plot based on the maximum intensity."""
    maxI = data.max()
    minI = maxI / minI_factor
    return [minI * cl_factor ** x for x in range(num_contours)]
c214c37ba0c924051d6b410f7237262a81c3087e
7,157
from functools import reduce
import operator


def get_nested_value(the_map, dot_separated_key):
    """Given a nested dictionary map, get the value specified by a
    dot-separated key where dots denote an additional depth.
    Taken from stack overflow (http://stackoverflow.com/a/12414913).
    """
    keys = dot_separated_key.split(".")
    return reduce(operator.getitem, keys[:-1], the_map)[keys[-1]]
082d199adc51376592bd215ab32a309807e6089e
7,158
import pathlib


def exists(path: str) -> bool:
    """Checks if path exists

    e.g.
    j.sals.fs.exists("/home/rafy/testing_make_dir/test1") -> True
    j.sals.fs.exists("/home/rafy/testing_make_dir/fasdljd") -> False

    Args:
        path (str): path to check for existence

    Returns:
        bool: True if exists
    """
    return pathlib.Path(path).exists()
a3b3717c947656042d3ddcf9e1107d3f6ec9c06d
7,161
def _create_cql_update_query(key_space: str, table_name: str,
                             set_columns_value_dict: dict,
                             primary_key_values: dict) -> str:
    """ This function will create an update CQL query"""
    cql_update = "UPDATE " + key_space + "." + table_name + " SET "
    for key in set_columns_value_dict:
        cql_update += key + " = '" + set_columns_value_dict[key] + "', "
    cql_update = cql_update[:-2] + " WHERE "
    # Now where conditions of primary key or composite key
    for key in primary_key_values:
        if primary_key_values[key]["type"] == "str":
            cql_update += key + " = '" + primary_key_values[key]["value"] + "', "
        else:
            cql_update += key + " = " + primary_key_values[key]["value"] + ", "
    cql_update = cql_update[:-2]
    cql_update += ";"
    return cql_update
b133feef14daefe7f5cc82936c2399bad3139672
7,162
import os
import dill


def write_lib_kde(kde, outname, libID):
    """Write a dict of KDE objects for one library.

    Parameters
    ----------
    kde : {taxon:kde}
    outname : str
        output file name
    libID : int|str
        library ID

    Returns
    -------
    str : file name of output.
    """
    prefix = os.path.splitext(outname)[0]
    outFile = ''.join([prefix, '_', 'libID', str(libID), '.pkl'])
    with open(outFile, 'wb') as outFH:
        dill.dump(kde, outFH)
    return outFile
99ac957d151ac5ad7f100a88c3c0a326e10b7e11
7,163
def extract_file_number(file_name):
    """ Extract the file number from a file name """
    file_name = str(file_name)
    dot_contents = file_name.split('.')
    hyp_contents = dot_contents[0].split('-')
    base_name = hyp_contents[len(hyp_contents) - 1]
    return int(base_name[:-1])
0b29ba7e75ddfcdc31832641d51f5f0a507021b0
7,164
def add_address(x, y):
    """Returns a string representation of the sum of the two parameters.

    x is a hex string address that can be converted to an int.
    y is an int.
    """
    return "{0:08X}".format(int(x, 16) + y)
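For example (added illustration): 0xFF + 1 = 0x100, zero-padded to eight hex digits:

>>> add_address('FF', 1)
'00000100'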
3e6fef3d5de0216c68c980b24d9f1ab05bc0a043
7,165
import re


def replaceRoadWithRd(a_string):
    """assumes a_string is a string
    returns a string, with "Rd." where a_string has "Road"
    """
    pattern = "[Rr]oad"
    replacement = "Rd."
    result = re.sub(pattern, replacement, a_string)
    return result
08cafdc84c61fc4131f923bf65f271b1d08be0c9
7,166
def extract_fcp_data(raw_data, status):
    """
    Extract data from smcli System_WWPN_Query output.
    Input: raw data returned from smcli
    Output: data extracted would be like:
        status:Free
        fcp_dev_no:1D2F
        physical_wwpn:C05076E9928051D1
        channel_path_id:8B
        npiv_wwpn:NONE
        status:Free
        fcp_dev_no:1D29
        physical_wwpn:C05076E9928051D1
        channel_path_id:8B
        npiv_wwpn:NONE
    """
    raw_data = raw_data.split('\n')

    # clear blank lines
    data = []
    for i in raw_data:
        i = i.strip(' \n')
        if i == '':
            continue
        else:
            data.append(i)

    # process data into one list of dicts
    results = []
    for i in range(0, len(data), 5):
        temp = data[i + 1].split(':')[-1].strip()
        # only return results that match the status
        if temp.lower() == status.lower():
            results.extend(data[i:i + 5])

    return '\n'.join(results)
bbe9de1f075fa68a4130c44afea9d388b1a678d5
7,167
import os


def find_table_config_pairs(tag, paths):
    """
    Args:
        str tag: part of filename to filter files to be used
        dict paths: hard-coded paths to output folders
    Out:
        list path_pairs: dicts with keys "config", "meet_table" and "tag"
    """
    aa = os.listdir(paths["configs"])
    bb = os.listdir(paths["meet_tables"])
    path_pairs = []
    for a in aa:  # only few files, so nested loop is ok
        for b in bb:
            if tag in a and tag in b:
                # leave only "_usercomment_timestamp" part of filenames
                a_tag = a.split('.')[0][len('config_'):]
                b_tag = b.split('.')[0][len('meet_table_'):]
                if a_tag == b_tag:
                    path_pair = {
                        "config": os.path.join(paths["configs"], a),
                        "meet_table": os.path.join(paths["meet_tables"], b),
                        "tag": a_tag}
                    path_pairs.append(path_pair)
    # alphabetically sort pairs according to the user provided tag name
    # (allows adding stat lines to all_stats.csv in sensible order)
    path_pairs = sorted(path_pairs, key=lambda entry: entry['tag'])
    return path_pairs
6399890b43a9eccb6853246773a93ee2c66449a7
7,168
import json


def decode_resource_id_options(request):
    """
    Extract resource ID options from a HTTP request, making sure the keys
    have the same names as the ResourceIdentifier object's fields.
    """
    return {
        # Resource ID
        'resource_id': request['id'],
        'target_platforms': json.dumps(request['target_platforms'])
            if 'target_platforms' in request else None,
        # Font options
        'character_regex': request.get('regex', None),
        'tracking': int(request['tracking']) if 'tracking' in request else None,
        'compatibility': request.get('compatibility', None),
        # Bitmap options
        'memory_format': request.get('memory_format', None),
        'storage_format': request.get('storage_format', None),
        'space_optimisation': request.get('space_optimisation', None),
    }
97f61b5c1ffb82cb9f29caebb9a85d42348fbfa7
7,169
import json


def load_data(data):
    """
    Wrapper to load json data, to be compatible with Python3.

    Returns:
        JSON data

    Keyword arguments:
        data: might be bytes or str
    """
    if type(data) == bytes:
        return json.loads(data.decode("utf-8"))
    else:
        return json.loads(data)
c33c661c2a42d162d06c3e17487e072908fd0bf4
7,170
def indices_for(df, nprocs):
    """
    group rows in dataframe and assign each group to each process

    Args:
        df: Pandas dataframe object
        nprocs: number of processes used

    Returns:
        indices grouped to each process
    """
    N = df.shape[0]
    L = int(N / nprocs)
    indices = []
    for i in range(nprocs):
        for j in range(L):
            indices.append(i)
    for i in range(N - (nprocs * L)):
        indices.append(nprocs - 1)
    return indices
c68408e6fcf70b885ca86fb80f8c31b0bd07e334
7,172
def _dynamic_range(fig, range_padding=0.05, range_padding_units='percent'):
    """Automatically rescales figure axes range when source data changes."""
    fig.x_range.range_padding = range_padding
    fig.x_range.range_padding_units = range_padding_units
    fig.y_range.range_padding = range_padding
    fig.y_range.range_padding_units = range_padding_units
    return fig
e3c82605ab20ad6ab3c6f2a6e6d32479a9012ed2
7,173
def continuous_ob(orderbooks):
    """
    Creates a continuous orderbook timeseries for all orderbooks included as
    input, i.e. for all timestamps that one orderbook has and the others
    don't, the latter repeat the information of the former. With this, the
    output delivers two or more historical orderbooks with the same timestamps.

    Parameters
    ----------
    orderbooks: dict
        With 2 or more orderbooks data.

    Returns
    -------
    r_ts_orderbooks: dict
        With the 2 or more orderbooks, now all of them with the same timestamps.
    """
    exchanges = list(orderbooks.keys())
    # joined, sorted list of all the timestamps among all the exchanges
    timestamps = sorted({ts for exchange in exchanges
                         for ts in orderbooks[exchange].keys()})
    # the original body was left unfinished; below is a minimal sketch of the
    # forward-fill described in the docstring: when an exchange has no entry
    # for a timestamp, repeat its previous orderbook
    r_ts_orderbooks = {}
    for exchange in exchanges:
        filled = {}
        previous = None
        for ts in timestamps:
            previous = orderbooks[exchange].get(ts, previous)
            if previous is not None:
                filled[ts] = previous
        r_ts_orderbooks[exchange] = filled
    return r_ts_orderbooks
03a5124a58f51625b1a63535f8b7ca3d06058aa7
7,174
def transform_observation(attribute, data):
    """ place to hook any transforms on the data """
    output = data
    if (attribute == 'observation_time'):
        output = data[16:]
    return output
875d35a6c66cf57fe7ac4995f913b8dbe70e63f3
7,175
import numpy


def nanallclose(x, y, rtol=1.0e-5, atol=1.0e-8):
    """Numpy allclose function which allows NaN

    Args:
        * x, y: Either scalars or numpy arrays

    Returns:
        * True or False

    Note:
        Returns True if all non-nan elements pass.
    """
    xn = numpy.isnan(x)
    yn = numpy.isnan(y)
    if numpy.any(xn != yn):
        # Presence of NaNs is not the same in x and y
        return False
    if numpy.all(xn):
        # Everything is NaN.
        # This will also take care of x and y being NaN scalars
        return True
    # Filter NaN's out
    if numpy.any(xn):
        x = x[numpy.logical_not(xn)]
        y = y[numpy.logical_not(yn)]
    # Compare non NaN's and return
    return numpy.allclose(x, y, rtol=rtol, atol=atol)
053fdc260d215304f0e913a468775dd4ce86dba9
7,176
from typing import Tuple
from typing import List
from typing import Dict
from typing import Any


def info() -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """
    Get input and output schemas
    :return: OpenAPI specifications. Each specification is assigned as
        (input / output)
    """
    input_sample = [
        {
            'name': "a",
            'type': "string",
            'required': True,
            'example': "1"
        },
        {
            'name': "b",
            'type': "string",
            'example': "2"
        }
    ]

    output_sample = [
        {
            'name': "integer",
            'type': "string",
            'required': True,
            'example': '42'
        }
    ]

    return input_sample, output_sample
282f60cacac69865736c58712c4bfbdb2f5e2c24
7,180
def nats_to_params(nat1, nat2, nat3, nat4):
    """ natural parameters to distribution parameters """
    alpha = nat1 + (1. / 2)
    nu = -2 * nat4
    mu = nat3 / nu
    beta = -nat2 - (nu * (mu ** 2) / 2)
    return alpha, beta, mu, nu
56fba0a886d35948edf570c6eb379f6e0175b589
7,181
import sympy


def get_symbol_list(number_of_symbols):
    """ Returns a list of sympy expressions, each being an identity of a
    variable. To be used for the input layer."""
    return sympy.symbols(['x_{}'.format(i + 1)
                          for i in range(number_of_symbols)], real=True)
2d81bfc1a162e52b42bf69d4cf68786717ba6f3c
7,183
def asst70_gene_descriptors(moa_abl1):
    """Create assertion70 gene_descriptors test fixture."""
    return [moa_abl1]
a68a349aaebf107cdd645e213fb56f268046a619
7,184
def build_table_separator(outer_left='╠', inner='╬', outer_right='╣'):
    """
    Builds a formatted unicode separator row for the stopwatch table.
    :return: formatted unicode separator
    """
    return f"{outer_left}{'═' * 14}{inner}{'═' * 18}{inner}{'═' * 20}{outer_right}"
22d9aef799e2245a072e9032b9d8dcc2f138386e
7,186
def compute_cog(d):
    """
    Args:
        d: dict
    Returns:
        percentage
    """
    accuracy = (d['22'] + d['25'] + d['23'] + d['24']) / 27
    return round(accuracy * 100, 3)
a83cb11f4d5fdec3bb63533352eb4fd0b62782ea
7,187
import json


def read_file(file):
    """Reads in a data file and generates insert statements for the sql dump."""
    rs = "\n"
    with open(file, "r", encoding='utf-8') as f:
        data = json.load(f)
        # Insert conf
        rs += '''INSERT INTO Conference (conf_name, time, location) VALUES ("{}", "{}", "{}");\n'''\
            .format(data["name"], data["datetime"], data["location"])
        # Insert paper
        for paper in data["papers"]:
            ps = ""
            ps += """INSERT INTO Paper (PK_conf, paper_title, paper_keywords, paper_link, paper_type, paper_time) VALUES ("{}", "{}", "{}", "{}", "{}", "{}");\n""" \
                .format(data["name"], paper["paper_title"], paper["paper_keywords"],
                        paper["paper_link"], paper["paper_type"], paper["paper_time"])
            for author in paper["paper_authors"]:
                try:
                    given_name, family_name = author.rsplit(' ', 1)
                except ValueError:
                    family_name = author
                    given_name = None
                ps += """INSERT IGNORE INTO Author (family_name, given_name) VALUES ("{}", "{}");\n""" \
                    .format(family_name, given_name)
                ps += """INSERT INTO Paper_Author_Rel (PK_author_fn, PK_author_gn, PK_paper) VALUES ("{}", "{}", "{}");\n""" \
                    .format(family_name, given_name, paper["paper_title"])
            rs += ps.replace("\"None\"", "NULL")
        # Insert tutorials
        for tutorial in data["tutorials"]:
            try:
                tutorial_author = ", ".join(tutorial["tutorial_author"])
            except TypeError:
                tutorial_author = None
            ts = ""
            ts += """INSERT INTO Tutorial (PK_conf, tutorial_author, tutorial_name, tutorial_abstract, tutorial_location, tutorial_time, tutorial_link) VALUES ("{}", "{}", "{}", "{}", "{}", "{}", "{}");\n""" \
                .format(data["name"], tutorial_author, tutorial["tutorial_name"],
                        tutorial["tutorial_abstract"], tutorial["tutorial_location"],
                        tutorial["tutorial_time"], tutorial["tutorial_link"])
            rs += ts.replace("\"None\"", "NULL")
        # Insert workshops
        for workshop in data["workshops"]:
            try:
                workshop_organizer = ", ".join(workshop["workshop_organizer"])
            except TypeError:
                workshop_organizer = None
            ws = ""
            ws += """INSERT INTO Workshop (PK_conf, workshop_name, workshop_organizer, workshop_description, workshop_day, workshop_location, workshop_link) VALUES ("{}", "{}", "{}", "{}", "{}", "{}", "{}");\n""" \
                .format(data["name"], workshop["workshop_name"], workshop_organizer,
                        workshop["workshop_description"], workshop["workshop_day"],
                        workshop["workshop_location"], workshop["workshop_link"])
            rs += ws.replace("\"None\"", "NULL")
        # Insert keynotes
        for keynote in data["keynotes"]:
            ks = ""
            ks += """INSERT INTO Keynote (PK_conf, keynote_title, keynote_speaker, keynote_speaker_bio, keynote_abstract, keynote_time, keynote_location, keynote_link) VALUES ("{}", "{}", "{}", "{}", "{}", "{}", "{}", "{}");\n""" \
                .format(data["name"], keynote["keynote_title"], keynote["keynote_speaker"],
                        keynote["keynote_speaker_bio"], keynote["keynote_abstract"],
                        keynote["keynote_time"], keynote["keynote_location"],
                        keynote["keynote_link"])
            rs += ks.replace("\"None\"", "NULL")
    return rs
bf5db1e737278182bbddae836bbfc2f3fe8dc013
7,188
def set_callback(ar, cb):
    """
    Runs when confirmation is required; checks if an answer is present in the
    request data and, if so, runs the corresponding function.
    Otherwise returns with data that will pop up a dialog.
    """
    cb_id = cb.uid
    answer = ar.xcallback_answers.get("xcallback__" + cb_id, None)
    if answer is None:
        buttons = []
        xcallback = dict(id=cb_id, title=cb.title, buttons=buttons)
        rq_data = {
            k: v[0] if len(v) == 1 else v
            for k, v in (ar.rqdata.lists()
                         if getattr(ar, "rqdata", None) is not None else {})}
        rq_data.pop("_dc", None)
        for c in cb.choices:
            buttons.append([c.name, c.label])
            xcallback[c.name + "_resendEvalJs"] = ar.renderer.ar2js(
                ar, ar.selected_rows, rqdata=rq_data,
                xcallback={"xcallback_id": cb_id, "choice": c.name})
        # print(buttons)
        return ar.success(cb.message, xcallback=xcallback)
    else:
        cb.run(answer)
90acf651ad324b6e4958ed90f4bf8d76f37a6073
7,189
def _descend_namespace(caller_globals, name):
    """
    Given a globals dictionary, and a name of the form "a.b.c.d", recursively
    walk the globals expanding caller_globals['a']['b']['c']['d'] returning
    the result. Raises an exception (KeyError or AttributeError) on failure.
    """
    names = name.split('.')
    cur = caller_globals
    for i in names:
        if type(cur) is dict:
            cur = cur[i]
        else:
            cur = getattr(cur, i)
    return cur
111a5277907b60d3f27f037868ac8c84e390e159
7,191
def _get_extension_point_url_from_name(domain, category, pluggable_name):
    """Get the extension point URL based on a pluggable method name"""
    return '{}/{}/{}'.format(domain, category, pluggable_name).replace('//', '/')
a4a34409dac26e42d123c4fda66ddc0947134e00
7,192
def get_feature(feature_name, example):
    """Gets Tensorflow feature by name.

    Args:
        feature_name: The name of the feature.
        example: A Tensorflow example.

    Returns:
        The Tensorflow feature with the given feature name in the example.

    Raises:
        ValueError: If the given feature name is not in the Tensorflow example.
    """
    if feature_name in example.features.feature:
        return example.features.feature[feature_name]
    else:
        raise ValueError('Feature name {} is not in the example {}'.format(
            feature_name, example))
80e35d7e1fe15e7a455123cbd139139dd977f216
7,193
def esc(code: int) -> str:
    """
    Converts the integer code to an ANSI escape sequence

    :param code: code
    :return: escape sequence
    """
    return f"\033[{code}m"
6bdc0679ba9b480220bc088bd09d6356dd539f1f
7,196
def get_paper_data(paper_name):
    """Gets paper code and paper type based on paper name"""
    split = paper_name.split("_")
    code = split[-1]
    paper_type = split[-2]
    if len(split) == 5:
        code = split[-2] + split[-3]
        paper_type = split[-1]
    elif len(split) == 3:
        code = "00"
        paper_type = split[-1]
    return code, paper_type
7f5804241a3f97b2c0ea534a4f030841e7c86e3b
7,199
import os


def toLongPathSafe(path):
    """Converts the specified path string to a form suitable for passing to
    API calls if it exceeds the maximum path length on this OS.

    @param path: A path. Can be None/empty. Can contain ".." sequences.

    @return: The passed-in path, absolutized, and possibly with a "\\?\" prefix
    added, forward slashes converted to backslashes on Windows, and converted
    to a unicode string.
    """
    if not path:
        return path
    path = os.path.abspath(path)  # for consistency, always absolutize it
    if (os.name != 'nt'):
        return path

    if path[0] != path[0].upper():
        path = path[0].upper() + path[1:]
    if path.startswith('\\\\?\\'):
        return path
    inputpath = path
    # ".." is not permitted in \\?\ paths; normpath is expensive so don't do
    # this unless we have to
    if '.' in path:
        path = os.path.normpath(path)
    else:
        # path is most likely to contain / so more efficient to conditionalize this
        path = path.replace('/', '\\')
        if '\\\\' in path:
            # consecutive \ separators are not permitted in \\?\ paths
            path = path.replace('\\\\', '\\')
    if path.startswith(u'\\\\'):
        path = u'\\\\?\\UNC\\' + path.lstrip('\\')  # \\?\UNC\server\share
    else:
        path = u'\\\\?\\' + path
    return path
dbbb39e9f5c1ad3fc02883af771c26b532901f28
7,200
from typing import List


def str_class(s: str) -> str:
    """Expand a string of character ranges.

    Example: str_class("a-cx0-9") = "abcx0123456789"
    """
    i = 0
    n = len(s)
    ret: List[str] = []
    while i < n:
        if i + 2 < n and s[i + 1] == '-':
            start = ord(s[i])
            end = ord(s[i + 2])
            if start <= end:
                ret.extend(chr(c) for c in range(start, end + 1))
            else:
                ret.extend(chr(c) for c in range(start, end - 1, -1))
            i += 3
        else:
            ret.append(s[i])
            i += 1
    return ''.join(ret)
21bbd7b46a964f20377b4f98b203b6bf0ad210c2
7,202
from typing import List
from typing import Tuple


def filter_edges(nodes: List[int],
                 edges: List[Tuple[int, int, int]]) -> List[Tuple[int, int, int]]:
    """Filter the edges of a subgraph.

    Parameters
    ----------
    nodes: list containing the nodes
    edges: list containing the edges of the original graph

    Returns
    -------
    filtered edges (only those whose endpoints are both in `nodes`)
    """
    ns = {v: k for k, v in enumerate(nodes)}
    new_edges = []
    for x, y, v in edges:
        if x in ns and y in ns:
            new_edges.append((x, y, v))
    return new_edges
50cf2986a5afa4748cf9b8505fc992f7b7f97db1
7,205
import math


def is_primes(n):
    """Return True if n is a prime number, False otherwise"""
    if n == 1:
        return False  # 1 is not a prime
    if n == 2:
        return True
    if n > 2 and n % 2 == 0:
        return False
    max_divisor = math.floor(math.sqrt(n))
    for i in range(3, 1 + max_divisor, 2):
        if n % i == 0:
            return False
    return True
8054efd19b2e6a3b0e1de896865ae7e36e1d9125
7,206
def GetStatus(issue):
    """Get the status of an issue, whether it is explicit or derived."""
    return issue.status or issue.derived_status or ''
4f51142dc4e55adaa27eaf3f3e7e748a49d45df2
7,207
def set_safe_attr(instance, attr, val):
    """Sets the attribute in a thread safe manner.

    Returns True if the new value was set on the attribute,
    False if the attribute already had that value.
    """
    if not instance or not attr:
        return False
    old_val = getattr(instance, attr, None)
    if val is None and old_val is None:
        return False
    elif val == old_val:
        return False
    else:
        setattr(instance, attr, val)
        return True
92f657a8e8919b47db6f38a31d4c1cad5bea4c93
7,208
def get_xml_subelement(xml_elem, subelement, attribute=None, multi=False,
                       convert=None, default=None, quiet=False):
    """
    Return the text or attribute of the specified subelement

    === PARAMETERS ===
    xml_elem  : search the children nodes of this element
    subelement: name of the subelement whose text will be retrieved
    attribute : if not None, name of the attribute to retrieve instead of
                the text (can also be a list of attribute names)
    convert   : if not None, a callable used to perform the conversion of
                the text to a certain object type
    default   : default value if subelement is not found
    quiet     : if True, don't raise exceptions from conversions, instead
                use the default value

    === RETURN ===
    The text associated with the sub-element or ``None`` in case of error

    === EXAMPLE ===
    <xml_elem>
        <subelement value="THIS1">text1</subelement>
        <subelement value="THIS2">text2</subelement>
    </xml_elem>

    >>> get_xml_subelement(xml_elem, 'subelement')
    text1
    >>> get_xml_subelement(xml_elem, 'subelement', 'value')
    THIS1
    >>> get_xml_subelement(xml_elem, 'subelement', 'value', True)
    [THIS1, THIS2]
    """
    if xml_elem is None or not subelement:
        return None

    subel = xml_elem.findall(subelement)
    if len(subel) == 0:
        return [default] if multi else default

    result = []
    for el in subel:
        text = None
        if attribute is not None:
            if type(attribute) is list:
                text = []
                for a in attribute:
                    text += [el.attrib.get(a)]
            else:
                text = el.attrib.get(attribute, default)
        else:
            text = el.text if el.text is not None else default
        if convert:
            try:
                text = convert(text)
            except:
                if quiet:
                    text = default
                else:
                    raise
        result += [text]
    return result if multi else result[0]
574880a2bfb7a6203b5079ec38ebc0d9af41cb07
7,209
def parse_mem_str_to_gbsize(strmem):
    """
    Convert a string like "845 MB" or "677.8 MB" to a GB size
    :param strmem:
    :return:
    """
    strmem = strmem.strip()
    if strmem.endswith('MB'):
        memgb = float(strmem[:-2]) / 1024
    elif strmem.endswith('GB'):
        memgb = float(strmem[:-2])
    elif strmem.endswith('KB'):
        memgb = float(strmem[:-2]) / 1024 / 1024
    else:
        raise Exception(f'Not recognized strmem={strmem}')
    return memgb
fd7d96c25b446a1c5ff94694908a9c355e340780
7,210
def civic_vid258():
    """Create a test fixture for CIViC VID258."""
    return {
        "id": "civic.vid:258",
        "type": "VariationDescriptor",
        "label": "A222V",
        "value_id": "ga4gh:VA.V5IUMLhaM8Oo-oAClUZqb-gDPaIzIi-A",
        "value": {
            "location": {
                "interval": {"end": 222, "start": 221, "type": "SimpleInterval"},
                "sequence_id": "ga4gh:SQ.4RSETawLfMkNpQBPepa7Uf9ItHAEJUde",
                "type": "SequenceLocation"
            },
            "state": {"sequence": "V", "type": "SequenceState"},
            "type": "Allele"
        },
        "xrefs": ["clinvar:3520", "caid:CA170990", "dbsnp:1801133"],
        "alternate_labels": ["C677T", "ALA222VAL"],
        "extensions": [
            {
                "name": "civic_representative_coordinate",
                "value": {
                    "chromosome": "1",
                    "start": 11856378,
                    "stop": 11856378,
                    "reference_bases": "G",
                    "variant_bases": "A",
                    "representative_transcript": "ENST00000376592.1",
                    "ensembl_version": 75,
                    "reference_build": "GRCh37"
                },
                "type": "Extension"
            },
            {
                "name": "civic_actionability_score",
                "value": "55",
                "type": "Extension"
            }
        ],
        "structural_type": "SO:0001583",
        "expressions": [
            {"syntax": "hgvs:transcript", "value": "NM_005957.4:c.665C>T",
             "type": "Expression"},
            {"syntax": "hgvs:protein", "value": "NP_005948.3:p.Ala222Val",
             "type": "Expression"},
            {"syntax": "hgvs:transcript", "value": "ENST00000376592.1:c.665G>A",
             "type": "Expression"},
            {"syntax": "hgvs:genomic", "value": "NC_000001.10:g.11856378G>A",
             "type": "Expression"}
        ],
        "gene_context": "civic.gid:3672"
    }
ee725d2c4601ce400abf1cf9da3d5eaaa7ba5bda
7,212
import collections


def ParseIoStatsLine(line):
    """Parses a line of io stats into a IoStats named tuple."""
    # Field definitions: http://www.kernel.org/doc/Documentation/iostats.txt
    IoStats = collections.namedtuple('IoStats',
                                     ['device',
                                      'num_reads_issued',
                                      'num_reads_merged',
                                      'num_sectors_read',
                                      'ms_spent_reading',
                                      'num_writes_completed',
                                      'num_writes_merged',
                                      'num_sectors_written',
                                      'ms_spent_writing',
                                      'num_ios_in_progress',
                                      'ms_spent_doing_io',
                                      'ms_spent_doing_io_weighted',
                                      ])
    fields = line.split()
    return IoStats._make([fields[2]] + [int(f) for f in fields[3:]])
2764dc96f0720359f906f1b27763738331f63e19
7,213
def get_pixel_neighbors(height, width):
    """
    Estimate the 4 neighbors of every pixel in an image

    :param height: image height
    :param width: image width
    :return: pixel index - neighbor index lists
    """
    pix_id = []
    neighbor_id = []
    for i in range(height):
        for j in range(width):
            n = []
            if i == 0:
                n = n + [(i + 1) * width + j]
            elif i == height - 1:
                n = n + [(i - 1) * width + j]
            else:
                n = n + [(i + 1) * width + j, (i - 1) * width + j]

            if j == 0:
                n = n + [i * width + j + 1]
            elif j == width - 1:
                n = n + [i * width + j - 1]
            else:
                n = n + [i * width + j + 1, i * width + j - 1]

            for k in n:
                pix_id.append(i * width + j)
                neighbor_id.append(k)
    return pix_id, neighbor_id
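On a 2x2 image the pixel/neighbor index lists come out as follows (added example; pixel 0 borders pixels 2 and 1, and so on):

>>> get_pixel_neighbors(2, 2)
([0, 0, 1, 1, 2, 2, 3, 3], [2, 1, 3, 0, 0, 3, 1, 2])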
3e49081fdc59ff2b0df54b84e0cf8c5983ec7b2c
7,214
from typing import Counter


def check_valid_solution(solution, graph):
    """Check that the solution is valid: every path is visited exactly once."""
    expected = Counter(
        i for (i, _) in graph.iter_starts_with_index()
        if i < graph.get_disjoint(i)
    )
    actual = Counter(
        min(i, graph.get_disjoint(i))
        for i in solution
    )
    difference = Counter(expected)
    difference.subtract(actual)
    difference = {k: v for k, v in difference.items() if v != 0}
    if difference:
        print('Solution is not valid! '
              'Difference in node counts (expected - actual): {}'.format(difference))
        return False
    return True
ec22134973153605b3a9b7a2ac7f180ffe55f97e
7,216
def _memoized_fibonacci_aux(n: int, memo: dict) -> int:
    """Auxiliary function of memoized_fibonacci."""
    if n == 0 or n == 1:
        return n
    if n not in memo:
        memo[n] = _memoized_fibonacci_aux(n - 1, memo) + \
            _memoized_fibonacci_aux(n - 2, memo)
    return memo[n]
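The record ships only the auxiliary function; a public entry point would seed the memo dict. A minimal sketch (the wrapper name memoized_fibonacci is an assumption here, not part of the record):

def memoized_fibonacci(n: int) -> int:
    # Hypothetical wrapper: start each top-level call with a fresh memo.
    return _memoized_fibonacci_aux(n, {})

>>> memoized_fibonacci(10)
55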
9a6d8646139d6ae9f6f63d2e990545fd088407eb
7,218
from pathlib import Path
import os


def module_dir_path(request):
    """Assure a directory exists in this module.

    The directory name is given by the environment variable combining the
    module name and "_DIR". For example, module test_foo.py will use the
    environment variable named TEST_FOO_DIR.

    Return the path to the directory or None if the environment variable
    is not set."""
    module_file_path = Path(request.module.__file__)
    env_var_name = "{}_{}".format(module_file_path.stem, "DIR").upper()
    module_dir_name = os.environ.get(env_var_name)
    if not module_dir_name:
        return None
    test_dir = module_file_path.parent
    module_dir = test_dir / module_dir_name
    module_dir.mkdir(mode=0o775, exist_ok=True)
    return module_dir
f1debd9ee663c1be2165e229e76aa360e9dd03dc
7,219
import os
import json


def get_sagemaker_resource_config():
    """
    Returns JSON for config if training job is running on SageMaker,
    else None
    """
    cluster_config = None
    sm_config_path = '/opt/ml/input/config/resourceconfig.json'
    if os.path.exists(sm_config_path):
        with open(sm_config_path) as file:
            cluster_config = json.load(file)
    return cluster_config
5faf1d0ca27c0fe0fe6bbdfdbf731c68cede7e06
7,220
def set_lm_labels(dataset, vocab, stm_lex, stm_win=3):
    """
    set labels of bi-directional language modeling and sentiment-aware
    language modeling

    :param dataset: dataset
    :param vocab: vocabulary
    :param stm_lex: sentiment lexicon
    :param stm_win: window size (i.e., length) of sentiment context
    :return:
    """
    n_records = len(dataset)
    for i in range(n_records):
        words = dataset[i]['words']
        # labels of language modeling and sentiment-aware language modeling
        lm_labels_f, lm_labels_b = [], []
        n_w = len(words)
        # language modeling in forward direction
        for j in range(n_w):
            if j == n_w - 1:
                next_word = -1
            else:
                if words[j + 1] in stm_lex:
                    next_word = stm_lex[words[j + 1]]
                else:
                    next_word = -1
            next_word = -1 * next_word + 1
            lm_labels_f.append(next_word)
        # language modeling in backward direction
        for j in range(n_w - 1, -1, -1):
            if j == 0:
                next_word = -1
            else:
                if words[j - 1] in stm_lex:
                    next_word = stm_lex[words[j - 1]]
                else:
                    next_word = -1
            next_word = -1 * next_word + 1
            lm_labels_b.append(next_word)
        dataset[i]['lm_labels_f'] = list(lm_labels_f)
        dataset[i]['lm_labels_b'] = list(lm_labels_b)[::-1]
        # sentiment-aware language modeling
        stm_lm_labels = []
        opn_labels = []
        for j in range(n_w):
            if words[j] in stm_lex:
                opn_labels.append(1)
            else:
                opn_labels.append(0)
            # left boundary of sentimental context
            stm_ctx_lb = j - stm_win
            if stm_ctx_lb < 0:
                stm_ctx_lb = 0
            stm_ctx_rb = j + stm_win + 1
            left_ctx = words[stm_ctx_lb:j]
            right_ctx = words[j + 1:stm_ctx_rb]
            stm_ctx = left_ctx + right_ctx
            flag = False
            for w in stm_ctx:
                if w in stm_lex:
                    flag = True
                    break
            if flag:
                stm_lm_labels.append(1)
            else:
                stm_lm_labels.append(0)
        dataset[i]['stm_lm_labels'] = list(stm_lm_labels)
        dataset[i]['opn_labels'] = list(opn_labels)
    return dataset
ec87003f9f427c5de5ea969f7040f7f10d49c3ea
7,221
def pad_sequences(sequences, pad_func, maxlen=None):
    """
    Similar to keras.preprocessing.sequence.pad_sequences but using Sample as
    a higher-level abstraction. pad_func is a pad class generator.
    """
    ret = []

    # Determine the maxlen
    max_value = max(map(len, sequences))
    if maxlen is None:
        maxlen = max_value

    # Pad / truncate (done this way to deal with np.array)
    for sequence in sequences:
        cur_seq = list(sequence[:maxlen])
        cur_seq.extend([pad_func()] * (maxlen - len(sequence)))
        ret.append(cur_seq)
    return ret
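A usage sketch with a pad generator that produces zeros (added example):

>>> pad_sequences([[1, 2], [3]], lambda: 0)
[[1, 2], [3, 0]]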
5879ab8f8df7477b9d73c87b6c8065fcc43a66df
7,222
import re


def clean_text(text: str, remove_punctuation=False) -> str:
    """Cleans the inputted text based on the rules given in the comments.
    Code taken from: https://github.com/kk7nc/Text_Classification/

    Args:
        text (str): the text to be cleaned
        remove_punctuation (bool): whether to remove punctuation or not

    Returns:
        the cleaned text
    """
    rules = [
        {r">\s+": u">"},  # remove spaces after a tag opens or closes
        {r"\s+": u" "},  # replace consecutive spaces
        {r"\s*<br\s*/?>\s*": u"\n"},  # newline after a <br>
        {r"</(div)\s*>\s*": u"\n"},  # newline after </div>
        {r"</(p|h\d)\s*>\s*": u"\n\n"},  # newline after </p> and <h1/>...
        {r"<head>.*<\s*(/head|body)[^>]*>": u""},  # remove <head> to </head>
        {r'<a\s+href="([^"]+)"[^>]*>.*</a>': r"\1"},  # show links instead of texts
        {r"[ \t]*<[^<]*?/?>": u""},  # remove remaining tags
        {r"^\s+": u""},  # remove spaces at the beginning
    ]
    if remove_punctuation:
        rules.append({r"[.,\/#!$%\^&\*;:{}=\-_`~()]": u""})
    for rule in rules:
        for (k, v) in rule.items():
            regex = re.compile(k)
            text = regex.sub(v, text)
    text = text.rstrip()
    return text.lower()
0f666041724315696924808c335c0110b7ffc158
7,224
import webbrowser


def openURL(url):
    """Opens a URL."""
    webbrowser.open_new(url)
    return True
b2e843a49ddfb4b90e556f4edbaa4e20823f3097
7,225
def lisp_parens_with_stack(parens):
    """Output with stack to see whether parens are open(1), broken(-1),
    or balanced(0)."""
    open_stack = []
    for par in parens:
        if par == '(':
            open_stack.append(par)
        if par == ')':
            try:
                open_stack.pop()
            except IndexError:
                return -1
    if open_stack:
        return 1
    else:
        return 0
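The three return values, doctest-style (added example):

>>> lisp_parens_with_stack('(()')
1
>>> lisp_parens_with_stack('())')
-1
>>> lisp_parens_with_stack('()')
0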
f3cd34611fc8ff8cf33a7f62bcddf245c77ff473
7,226
def largest_nonadjacent_sum(arr):
    """ Find the largest sum of non-adjacent numbers """
    before_last = 0
    last = 0
    for elt in arr:
        cur = before_last + elt
        before_last = max(before_last, last)
        last = max(cur, last)
    return last
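For instance, 2 + 6 + 5 = 13 is the best non-adjacent pick here (added example):

>>> largest_nonadjacent_sum([2, 4, 6, 2, 5])
13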
39d4ab307e978f4ab9d1bd5a2983292dde3f6933
7,227
from typing import Any


def eval_with_import(path: str) -> Any:
    """Evaluate the string as Python script.

    Args:
        path (str): The path to evaluate.

    Returns:
        Any: The result of evaluation.
    """
    split_path = path.split('.')
    for i in range(len(split_path), 0, -1):
        try:
            exec('import {}'.format('.'.join(split_path[:i])))
            break
        except Exception:
            continue
    return eval(path)
a9d614251f088c9105504aa0f9f99bbc1d8f1712
7,228
import os
import codecs


def readoptions(fname):
    """ Read `markowik` options from a file, one per line. """
    if os.path.exists(fname):
        with codecs.open(fname, 'r', 'UTF8') as fp:
            cfg = fp.read()
        options = [x.strip() for x in cfg.split("\n") if x.strip()]
    else:
        options = []
    return options
2a0149be62a7f57d1c0cb9fa1351d892dc1517c1
7,230
import six


def get_train_random_forest_pai_cmd(model_name, data_table, model_attrs,
                                    feature_column_names, label_name):
    """Get a command to submit a random forest training task to PAI

    Args:
        model_name: model name on PAI
        data_table: input data table name
        model_attrs: model attributes for the random forest
        feature_column_names: names of feature columns
        label_name: name of the label column

    Returns:
        A string which is a PAI cmd
    """
    # default use numTrees = 1
    tree_num = model_attrs.get("tree_num", 1)
    assert isinstance(tree_num, six.integer_types), \
        "tree_num must be an integer"
    feature_cols = ",".join(feature_column_names)
    return '''pai -name randomforests -DinputTableName="%s" -DmodelName="%s" -DlabelColName="%s" -DfeatureColNames="%s" -DtreeNum="%d"''' % (
        data_table, model_name, label_name, feature_cols, tree_num)
f826e0b24613b2ea8794524c3a5f982131f9a048
7,231
def diff_all_filter(trail, key=lambda x: x['pid']):
    """ Filter out trails whose last key appeared earlier in the trail """
    # compare against the earlier entries only; including the last element
    # itself (as the original did) would make the test always fail
    return trail if key(trail[-1]) not in set(key(c) for c in trail[:-1]) else None
6c7b6e7c64c4fcf097b5ac743e3c2720911c0248
7,232
import math


def xy(n, image_size):
    """Returns position of pixel n in 2D array"""
    x = int(n % image_size)
    y = int(math.floor(n / image_size))
    return (x, y)
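Added example: pixel 5 in a 4-pixel-wide image sits at column 1, row 1:

>>> xy(5, 4)
(1, 1)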
def36d60055e5084b42d73833c4baeeab9723085
7,233
def format_labels(label):
    """
    Assumes that the label can be split by the '_' character.
    :param label:
    :return:
    """
    side, anatomy = label.split("_")
    if side.lower() in ["l", "left"]:
        side = "left"
    elif side.lower() in ["r", "right"]:
        side = "right"
    else:
        raise ValueError(
            "Label %s is not recognized: cannot determine side %s"
            % (label, side)
        )
    label = "_".join([side, anatomy])
    return label
9b0add955822713eb579168c9bae7a9ae6908fe6
7,234
import os


def is_root(directory):
    """Check if the directory is the root directory.

    Args:
        directory: The directory to check.

    Return:
        Whether the directory is a root directory or not.
    """
    # If you're curious as to why this works:
    # dirname('/') = '/'
    # dirname('/home') = '/'
    # dirname('/home/') = '/home'
    # dirname('/home/foo') = '/home'
    # basically, for files (no trailing slash) it removes the file part, and
    # for directories, it removes the trailing slash, so the only way for this
    # to be equal is if the directory is the root directory.
    return os.path.dirname(directory) == directory
ccbdcce26cd3b8b0826ce2514bbd3612c5381a21
7,235
import requests
import os


def run_md5sum(cwl_input):
    """Pass a local md5sum cwl to the wes-service server, and return the path
    of the output file that was created."""
    endpoint = 'http://localhost:8080/ga4gh/wes/v1/workflows'
    params = {'output_file': {'path': '/tmp/md5sum.txt', 'class': 'File'},
              'input_file': {'path': '../../testdata/md5sum.input', 'class': 'File'}}
    body = {'workflow_url': cwl_input,
            'workflow_params': params,
            'workflow_type': 'CWL',
            'workflow_type_version': 'v1.0'}
    response = requests.post(endpoint, json=body).json()
    output_dir = os.path.abspath(os.path.join('workflows',
                                              response['workflow_id'], 'outdir'))
    return os.path.join(output_dir, 'md5sum.txt')
5f4c79953c691e705f66803269f83dd609be1e56
7,236
def get_app_name():
    """ returns the resource name """
    return 'resource'
580a2cf59b84f28f3efc95b00ffd051b53fa0ffa
7,237
import math


def degree_to_radian(degree: float) -> float:
    """
    This function converts degrees to radians.

    Formula: degrees * (pi / 180 degrees)

    >>> degree_to_radian(60)
    1.0471975511965976
    """
    return degree * (math.pi / 180)
5935b99621192edae5360b2066397997d2dc34f5
7,239
from typing import Type
from typing import TypeVar
from typing import get_args


def _contains_unbound_typevar(t: Type) -> bool:
    """Recursively check if `t` or any types contained by `t` is a `TypeVar`.

    Examples where we return `True`: `T`, `Optional[T]`,
    `Tuple[Optional[T], ...]`, ...
    Examples where we return `False`: `int`, `Optional[str]`, ...

    :param t: Type to evaluate.
    :return: `True` if the input type contains an unbound `TypeVar`,
        `False` otherwise.
    """
    # Check self
    if isinstance(t, TypeVar):
        return True

    # Check children
    for arg in get_args(t):
        if _contains_unbound_typevar(arg):
            return True

    return False
97e85b3aafea1d9dc86f69078ff39c93b6bd7c19
7,240
import os


def _get_output_dir(output_dir, chunk_arr, name_arr):
    """Get the directory for the output cutout data."""
    # Check the output directory
    if not os.path.isdir(output_dir):
        raise ValueError("Output directory '{:s}' does not exist".format(output_dir))
    return [os.path.join(output_dir, str(chunk), str(name))
            for (chunk, name) in zip(chunk_arr, name_arr)]
4aff96f5d26c8073f64277a23bbc3eacc9024ea3
7,241
def create_sample_db(ntables):
    """ Create a python description of a sample database """
    rv = {}
    for i in range(1, ntables + 1):
        rv["table%s" % i] = {
            "columns": [
                {
                    "name": "id",
                    "type": "integer",
                    "use_sequence": "table%s_id_seq" % i,
                },
                {"name": "data", "type": "text"},
            ],
        }
    return rv
c49f583b4e7f58f1bbb7ad05902c1fc9010bd35c
7,242
import os
import glob


def listflat(path, ext=None):
    """ List files without recursion """
    if os.path.isdir(path):
        if ext:
            if ext == 'tif' or ext == 'tiff':
                files = glob.glob(os.path.join(path, '*.tif'))
                files = files + glob.glob(os.path.join(path, '*.tiff'))
            else:
                files = glob.glob(os.path.join(path, '*.' + ext))
        else:
            files = [os.path.join(path, fname) for fname in os.listdir(path)]
    else:
        files = glob.glob(path)
    # filter out directories
    files = [fpath for fpath in files
             if not isinstance(fpath, list) and not os.path.isdir(fpath)]
    return sorted(files)
227a12683472dfbaf298cb54b730049a270187f3
7,243
import argparse
import os
import torch


def get_args():
    """Expose different behaviors of the server to the user.

    Raises:
        AssertionError: If the combination of arguments are unsupported

    Returns:
        parsed arguments for the server, cached
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--m1", default=None, type=str,
                        help="Request this as one model in the interface")
    parser.add_argument("--m2", default=None, type=str,
                        help="Request this as another model in the interface")
    parser.add_argument("--address", default="127.0.0.1")  # 0.0.0.0 for nonlocal use
    parser.add_argument(
        "--port", type=int, default=8000, help="Port on which to run the app."
    )
    parser.add_argument("--dir", type=str, default=os.path.abspath("data"))
    parser.add_argument("--suggestions", type=str, default=os.path.abspath("data"))
    parser.add_argument(
        "--config", "-c", type=str, default=None,
        help="Use a custom analysis folder rather than the default paths to compare models",
    )
    parser.add_argument(
        "--tokenization-type", "-t", type=str, default="gpt",
        help="One of {'gpt', 'bert'}. Tells the frontend how to visualize the tokens used by the model.",
    )
    parser.add_argument(
        "--gpu-device", type=int, default=0,
        help="One of {0, 1, ..., n_gpus}. Will use this device as the main device."
    )
    args, _ = parser.parse_known_args()

    # Checking
    config_provided = args.config is not None
    both_m_provided = args.m1 is not None and args.m2 is not None
    zero_m_provided = args.m1 is None and args.m2 is None
    only_one_m_provided = not both_m_provided and not zero_m_provided
    OneModelError = AssertionError("Please provide two models to compare against")
    TooMuchInfoError = AssertionError(
        "Please provide EITHER the config directory OR two comparable models")

    if both_m_provided:
        if config_provided:
            raise TooMuchInfoError
    elif config_provided:
        if both_m_provided or only_one_m_provided:
            raise TooMuchInfoError
    elif only_one_m_provided:
        raise OneModelError

    if not torch.cuda.is_available():
        args.gpu_device = "cpu"

    return args
8febdd4cc0962dce57c8d50682dac5a35d8a189f
7,244
import uuid
import os


def _homogenize_linesep(line):
    """Enforce line separators to be the right one depending on platform."""
    token = str(uuid.uuid4())
    line = line.replace(os.linesep, token).replace("\n", "").replace("\r", "")
    return line.replace(token, os.linesep)
f1808c46c44024d5d6adf419ebfefb31a14a64cb
7,246
def check_intra_node_overlap(node_a, node_b):
    """
    Check if alignments of the same read hit overlapping parts of the same
    node (or whether node_a and node_b are different nodes).
    """
    same_node = True
    if node_a.name != node_b.name:
        same_node = False
        same_orientation = False
        overlap_bp = 0
        overlap_pct = 0
    else:
        overlap_bp = min(node_a.support_end, node_b.support_end) - \
            max(node_a.support_start, node_b.support_start)
        # if overlap is negative (= alignments are apart), percent overlap should be 0
        overlap_pct = max(0, round(overlap_bp / node_a.length * 100, 2))
        same_orientation = node_a.orientation == node_b.orientation
    return same_node, same_orientation, overlap_bp, overlap_pct
13f019b552576cf57cf08363ff42f7e3067f659d
7,247
def square(root):
    """This function calculates the square of the argument value"""
    return root * root
54049a92a0383c756911a4604161e092d496ce62
7,248
import string
from typing import Counter


def letter_frequency(seq):
    """Returns a dictionary with the frequencies of letters in the sequence"""
    freq = filter(lambda x: x in string.ascii_letters, seq.lower())
    freq = dict(Counter(freq).most_common())
    freq.update(dict((x, 0) for x in
                     filter(lambda x: x not in freq, string.ascii_lowercase)))
    return freq
bcbf61526c395bc8df36bf7b3a7a37b25dbe1aba
7,249
def process_map_input(input_lines):
    """
    Find the dimensions of a map using the lines of input

    :param input_lines: List of strings representing the map
    :return: (x: width, y: height) Tuple
    """
    height = len(input_lines) - 1
    width = len(input_lines[0])
    return width, height
30662e1466d6d553ddee797c6252453c6f6f7f47
7,252
import random


def weighted_choice(lst):
    """Choose an element in an integer list according to its value. E.g., in
    [1, 10], the second element will be chosen 10 times as often as the first
    one. Returns the index of the chosen element.

    :ivar [int] lst: List of integers.
    :rtype: int
    """
    total = sum(c for c in lst)
    r = random.uniform(0, total)
    upto = 0
    # Loop over list and pick one element.
    for i in range(len(lst)):
        c = lst[i]
        if upto + c >= r:
            return i
        upto += c
    assert False, "Shouldn't get here"
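A rough sanity check (added sketch, not from the record): with weights [1, 10], index 1 should be drawn about ten times as often as index 0.

import random
from collections import Counter

random.seed(0)  # fixed seed so the demonstration is reproducible
counts = Counter(weighted_choice([1, 10]) for _ in range(11000))
# counts[1] is expected to land near 10 * counts[0]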
513ca734a7b96f08993bd03c47df9c60288ebe50
7,253
def isfirefox(session):
    """ bool: Whether the session is using Firefox. """
    return getattr(session.driver, "_firefox", False)
ca7c5c217de308642e4ec0687c42a0dc71cfeecd
7,255
from datetime import datetime


def get_es_index_name(project, meta):
    """
    Get the name for the output ES index

    :param project: seqr project identifier
    :param meta: index metadata
    :return: index name
    """
    return '{project}__structural_variants__{sample_type}__grch{genome_version}__{datestamp}'.format(
        project=project,
        sample_type=meta['sampleType'],
        genome_version=meta['genomeVersion'],
        datestamp=datetime.today().strftime('%Y%m%d'),
    ).lower()
fc1245287aed07ddd8d90f33cadc095c22f944a3
7,256
def RegexCheck(re, line_number, line, regex, msg):
    """Searches for |regex| in |line| to check for a particular style
    violation, returning a message like the one below if the regex matches.
    The |regex| must have exactly one capturing group so that the relevant
    part of |line| can be highlighted. If more groups are needed, use
    "(?:...)" to make a non-capturing group.

    Sample message:

    line 6: Use var instead of const.
        const foo = bar();
        ^^^^^
    """
    def _highlight(match):
        """Takes a start position and a length, and produces a row of '^'s
        to highlight the corresponding part of a string.
        """
        return match.start(1) * ' ' + (match.end(1) - match.start(1)) * '^'

    match = re.search(regex, line)
    if match:
        assert len(match.groups()) == 1
        return '  line %d: %s\n%s\n%s' % (line_number, msg, line, _highlight(match))
    return ''
31e979570eb4e0b251f445555f24fadef8e6879d
7,258
def SymDatToPath(symtext):
    """Convert a Cygwin style symlink data to a relative path."""
    return ''.join([ch for ch in symtext[12:] if ch != '\x00'])
0d5104819678ca12d95b5f987a88726c5aef3f18
7,262
from typing import Union
from typing import List


def count(obj: Union[int, List]) -> int:
    """Return the number of integers in obj.

    >>> count(27)
    1
    >>> count([4, 1, 8])
    3
    >>> count([4])
    1
    >>> count([])
    0
    >>> count([4, [1, 2, 3], 8])
    5
    >>> count([1, [2, 3], [4, 5, [6, 7], 8]])
    8
    """
    if isinstance(obj, int):
        return 1
    else:
        return sum(count(i) for i in obj)

    # Equivalent explicit-loop version:
    # if isinstance(obj, int):
    #     return 1
    # else:
    #     s = 0
    #     for lst_i in obj:
    #         s += count(lst_i)
    #     return s
d982b3096dc9c7776b32bab765e2a17769d128e9
7,263
def _single_quote_string(name: str) -> str:  # pragma: no cover
    """Single quote a string to inject it into f-strings, since backslashes
    cannot appear inside f-string expressions."""
    return f"'{name}'"
39868168862f3bd60d8da6168503cbc51fcbda84
7,264
def convert_str_version_number(version_str):
    """
    Convert the version number string to a tuple of integers for easy
    comparisons

    :param version_str: str of the version number, e.g. '0.33'
    :returns: tuple of ints representing the version str
    """
    version_numbers = version_str.split('.')
    if len(version_numbers) != 2:
        raise ValueError(f"Version number is malformed: '{version_str}'")
    return tuple(int(part) for part in version_numbers)
b550b7d07d9b226800de261f792bf0995ac21738
7,265
import torch


def valid_simulation(_state, _params):
    """
    AW - valid_simulation - return a binary variable per simulation indicating
    whether or not that simulation satisfies the desired policy outcome.

    :param _state: tensor (N x D): tensor of the state trajectory.
    :param _params: parameters object carrying the policy settings.
    :return: tensor (N, ), bool: tensor of whether each simulation was valid.
    """
    _n_infected = _state[:, :, 2] + _state[:, :, 3] + _state[:, :, 4]
    _valid = torch.logical_not(
        torch.any(_n_infected > _params.policy['infection_threshold'], dim=0))
    return _valid
6eb82ec744cbab353f649e63d54c0d95236f1699
7,266
import copy


def process_dataset(dataset, func):
    """Calculate things here using the dataset, then return a dictionary
    containing names and values for each calculated quantity."""
    new_dataset = copy.copy(dataset)
    del new_dataset["val"]
    new_dataset.update(func(dataset))
    return new_dataset
b38b2edd1abe9a990d8f524b78b38e8abadc7c55
7,268
def _infinity(name):
    """ Create a single instance of a class with a given name. """
    return type(name + "Type", (object,), {
        '__repr__': lambda self: "{0}.{1}".format(self.__module__, name),
        '__str__': lambda self: name,
        '__unicode__': lambda self: name,
    })()
bd6cbda227e9d97938ab9ed7c2d2e97b95dc5080
7,269