content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def max_size(resize_info):
    """Compute and return the image size needed for the merge destination, based on the resize info.

    :param resize_info: resize information
    :return: width, height
    """
    max_w, max_h = 0, 0
    for name, info in resize_info.items():
        pos = info['pos']
        size = info['size']
        max_w = max(max_w, pos[0] + size[0])
        max_h = max(max_h, pos[1] + size[1])
    return max_w, max_h
1e28f993b3b0fac077f234b6388a2d9042396f6b
5,879
def quicksort(lyst):
    """This is a quicksort."""
    def partition_helper(lyst, first, last):
        pivot = lyst[first]
        left = first + 1
        right = last
        done = False
        while not done:
            while left <= right and lyst[left] <= pivot:
                left += 1
            while right >= left and lyst[right] >= pivot:
                right -= 1
            if right < left:
                done = True
            else:
                lyst[left], lyst[right] = lyst[right], lyst[left]
        lyst[first], lyst[right] = lyst[right], lyst[first]
        return right

    def quicksort_helper(lyst, first, last):
        if first < last:
            splitpoint = partition_helper(lyst, first, last)
            quicksort_helper(lyst, first, splitpoint - 1)
            quicksort_helper(lyst, splitpoint + 1, last)
        return lyst

    quicksort_helper(lyst, 0, len(lyst) - 1)
    return lyst
33385c01b877a86a2970f33dc4d0bd9d456dc983
5,880
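A quick sanity check for the quicksort entry above (the input list is illustrative, not part of the dataset row):

data = [5, 3, 8, 1, 9, 2]
assert quicksort(data) == [1, 2, 3, 5, 8, 9]  # sorts in place and returns the list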
def _validate_time_mode(mode, **kwargs):
    """Validate time mode."""
    return mode
e30fd9071bde102b4986fe9ef846a812f7c08ff7
5,881
async def root():
    """Dependency is "static".

    The value of Depends doesn't get passed into the function;
    we still get redirected half the time, though.
    """
    return {"message": "Hello World"}
6d3b634444240275f56d30aa0c1fe3b3bb84ce24
5,883
def getLatest(df):
    """Get the data of the last day from the dataframe and append it to the details."""
    df_info = df.iloc[:, 0:5].copy()  # copy to avoid pandas SettingWithCopyWarning
    df_last = df.iloc[:, -1]
    df_info['latest'] = df_last
    return df_info
f42cae0552a4ac791d3499fa2ca1417a80a970ac
5,884
def strip_long_text(text, max_len, append=u'…'):
    """Return text whose length is less than or equal to max_len.

    If the text is stripped, then `append` is added, but the resulting text
    will have `max_len` length anyway.
    """
    if len(text) <= max_len:
        return text
    return text[:max_len - len(append)] + append
02ce128f1de1dbeb2a2dcef5bc2b6eb8745322d3
5,886
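A short usage sketch for strip_long_text, assuming the corrected length check above (inputs are made up):

assert strip_long_text(u'hello', 10) == u'hello'          # short enough, returned as-is
assert strip_long_text(u'hello world', 8) == u'hello w…'  # truncated to exactly max_len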
def jsonify_query_result(conn, query):
    """deprecated"""
    res = query.all()
    # res = conn.execute(query)
    # return [dict(r) for r in res]
    return [r._asdict() for r in res]
ca11226c6f6fc731089f1d257db02a6cb83bd145
5,888
def tuplify2d(x):
    """Convert ``x`` to a tuple of length two.

    It performs the following conversion:

    .. code-block:: python

        x => x       if isinstance(x, tuple) and len(x) == 2
        x => (x, x)  if not isinstance(x, tuple)

    Args:
        x (any): the object to be converted

    Returns:
        tuple:
    """
    if isinstance(x, tuple):
        assert len(x) == 2
        return x
    return (x, x)
64170b14dbe7eb8885d21f45acff6b43979f1219
5,894
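Illustrative calls for tuplify2d (values chosen for demonstration):

assert tuplify2d(3) == (3, 3)       # scalar is duplicated
assert tuplify2d((4, 5)) == (4, 5)  # 2-tuple passes through unchanged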
import os

def get_scene_info(path):
    """Extract information about the landsat scene from the file name."""
    fname = os.path.basename(path)
    parts = fname.split('_')
    output = {}
    output['sensor'] = parts[0]
    output['lpath'] = parts[2][0:3]
    output['lrow'] = parts[2][3:6]
    output['date'] = parts[3]
    return output
4a1bbad4d8b9b2b1ad21ca78ca7a046d92232699
5,895
def earlyon(time, duration, *args):
    """Some lights have a slight delay before they turn on (capacitors that
    need to be charged up?). This takes the current time and subtracts that
    delay, so the code looks like they turn on at the right time, but we
    really send the command a little bit early to give the illusion that
    they're all in sync.
    """
    duration = int(duration, 10)
    cmd = '"' + ('" "'.join(args)) + '"'
    if args[-1] == "on":
        return [(time - duration, cmd)]
    else:
        return [(time, cmd)]
5671d46ffe42bd456689cffc3ce3e1f6731101c8
5,896
def parse_id_as_interval(id_string, regex):
    """The fasta ids contain the locus information."""
    match = regex.match(id_string)
    genome = match.group("genome")
    seqid = match.group("seqid")
    start_tmp = int(match.group("start"))
    end_tmp = int(match.group("end"))
    start = min([start_tmp, end_tmp])
    end = max([start_tmp, end_tmp])
    return (genome, seqid, start, end)
7d35bdd7b4418d1edcd433cd39b9defc9050c6f6
5,898
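A usage sketch for parse_id_as_interval; the id layout and regex below are assumptions for illustration, since the row does not include the actual pattern:

import re

# hypothetical id layout: <genome>_<seqid>:<start>-<end>
id_regex = re.compile(r"(?P<genome>[^_]+)_(?P<seqid>[^:]+):(?P<start>\d+)-(?P<end>\d+)")
assert parse_id_as_interval("hg38_chr1:200-100", id_regex) == ("hg38", "chr1", 100, 200)  # start/end reordered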
def map_sentences_to_indices_of_vectors(sentences, word_to_index_glove, unknown_token):
    """Map sentences to integers that represent the index of each word in the glove vocabulary."""
    # the list to be returned
    mapped_sentences = []
    # get the index of the unknown token
    unknown_token_index = word_to_index_glove[unknown_token]
    # iterate for each sentence
    for sentence in sentences:
        split_sentence = sentence.split()
        # map each word to its corresponding index, falling back to the unknown token
        mapped_sentence = [word_to_index_glove.get(word, unknown_token_index)
                           for word in split_sentence]
        mapped_sentences.append(mapped_sentence)
    return mapped_sentences
04a27bd4ccd5ac9d0366218107ee36b61d4a7655
5,899
def updateShaderState(self):
    """Updates all shader program variables."""
    if not self.ready():
        return

    opts = self.opts
    self.shader.load()

    voxValXform = self.imageTexture.voxValXform
    voxValXform = [voxValXform[0, 0], voxValXform[0, 3], 0, 0]
    invNumLabels = 1.0 / (opts.lut.max() + 1)

    self.shader.setFragParam('voxValXform', voxValXform)
    self.shader.setFragParam('invNumLabels', [invNumLabels, 0, 0, 0])
    self.shader.unload()

    return True
611b093ce51e99e5c7c1e3da5dcc7cd1a8c07b01
5,900
def get_request_fixture_names(request):
    """Get list of fixture names for the given FixtureRequest.

    Get the internal and mutable list of fixture names in the enclosing scope
    of the given request object.

    Compatibility with pytest 3.0.
    """
    return request._pyfuncitem._fixtureinfo.names_closure
665fff4538f3817b6eb882f9a873683d69003bfd
5,901
def check_version(stdout):
    """Check version of Ensembl-VEP.

    Example of the first part of an output from the command `vep --help`:

        #----------------------------------#
        # ENSEMBL VARIANT EFFECT PREDICTOR #
        #----------------------------------#

        Versions:
          ensembl              : 104.1af1dce
          ensembl-funcgen      : 104.59ae779
          ensembl-io           : 104.1d3bb6e
          ensembl-variation    : 104.6154f8b
          ensembl-vep          : 104.3

        Help: [email protected] , [email protected]
        Twitter: @ensembl
    """
    vep_version = int(
        float(
            next(
                (line for line in stdout.split("\n") if "ensembl-vep" in line)
            ).split()[2]
        )
    )
    return vep_version
5c3b716db7016f1b612f764fb54e3b25d970b0f2
5,902
import argparse

def parse_cli_args():
    """These flags are the ones required by the Stream Deck SDK's registration
    procedure. They'll be set by the Stream Deck desktop software when it
    launches our plugin.
    """
    parser = argparse.ArgumentParser(description='Stream Deck Google Meet Plugin')
    parser.add_argument('-port', type=int, required=True)
    parser.add_argument('-pluginUUID', dest='plugin_uuid', type=str, required=True)
    parser.add_argument('-registerEvent', dest='register_event', type=str, required=True)
    parser.add_argument('-info', type=str, required=True)
    # Ignore unknown args in case a Stream Deck update adds optional flags later.
    (known_args, _) = parser.parse_known_args()
    return known_args
adcad9860336482b2072ef8aecd398a2f4ce3b45
5,903
def left_join(ht1, ht2):
    """
    :param ht1: left hash table
    :param ht2: right hash table
    :return: list of joined values from both hash tables
    """
    results = []
    for item in ht1.table:
        while item is not None:
            key = item.val[0]
            joined = [key, ht1.get(key), ht2.get(key)]
            results.append(joined)
            item = item.next
    return results
8f34e03d055a32ea337b27cd800eeb393d136dfa
5,904
import os
import glob
import warnings

def get_geos_install_prefix():
    """Return GEOS installation prefix or None if not found."""
    env_candidate = os.environ.get("GEOS_DIR", None)
    if env_candidate is not None:
        candidates = [env_candidate]
    else:
        candidates = [os.path.expanduser("~/local"), os.path.expanduser("~"),
                      "/usr/local", "/usr", "/opt/local", "/opt", "/sw"]

    for prefix in candidates:
        libfiles = []
        libdirs = ["bin", "lib", "lib64"]
        libext = "dll" if os.name == "nt" else "so"
        libcode = "{0}geos_c".format("" if os.name == "nt" else "lib")
        libname = "{0}*.{1}*".format(libcode, libext)
        for libdir in libdirs:
            libfiles.extend(glob.glob(os.path.join(prefix, libdir, libname)))
        hfile = os.path.join(prefix, "include", "geos_c.h")
        if os.path.isfile(hfile) and libfiles:
            return prefix

    warnings.warn(" ".join([
        "Cannot find GEOS library and/or headers in standard locations",
        "('{0}'). Please install the corresponding packages using your",
        "software management system or set the environment variable",
        "GEOS_DIR to point to the location where GEOS is installed",
        "(for example, if 'geos_c.h' is in '/usr/local/include'",
        "and 'libgeos_c' is in '/usr/local/lib', then you need to",
        "set GEOS_DIR to '/usr/local'",
    ]).format("', '".join(candidates)), RuntimeWarning)
    return None
79193b7a515f961dacdb666a6c25d038b3a14e0c
5,906
def _get_trip_from_id(trip_obj_list, trip_id):
    """Get a trip from a list, based on a trip id."""
    found_trip_obj = None
    for trip_obj in trip_obj_list:
        if trip_obj.id == trip_id:
            found_trip_obj = trip_obj
            break
    return found_trip_obj
f2bbacfccda1e4ff778ba793ad238f744400f020
5,907
import os

def countOriginals(subfolderPath):
    """Return count of original vids."""
    items = os.listdir(subfolderPath)
    count = 0
    for file in items:
        if file.startswith("Original_") and file.endswith(".description"):
            count = count + 1
    return count
116ffa4fecf911d0dec436c5003acb2c9f42a673
5,908
def to_camel_case(string: str) -> str:
    """Converts a ``snake_case`` string to ``camelCase``.

    :param string: A ``snake_case`` string.
    :return: A ``camelCase`` version of the input.
    """
    components = string.split("_")
    return components[0] + "".join(x.capitalize() for x in components[1:])
ae0d82efd9a5a65ef16cc401a0fe302b4f04d524
5,909
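An example conversion with to_camel_case (input is illustrative):

assert to_camel_case("snake_case_string") == "snakeCaseString"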
def parse_qsub_defaults(parsed):
    """Unpack QSUB_DEFAULTS."""
    d = parsed.split() if type(parsed) == str else parsed

    options = {}
    for arg in d:
        if "=" in arg:
            k, v = arg.split("=", 1)  # split once so values may themselves contain '='
            options[k.strip("-")] = v.strip()
        else:
            options[arg.strip("-")] = ""
    return options
a5c50aef405d88bcb018af48904a384b090d22a2
5,910
import collections

def count_tweet_shed_words_freq(tweet_text, ind_shed_word_dict, shed_word_ind_dict, shed_words_set):
    """Count the frequency of selected Hedonometer words in tweet text.

    param tweet_text: String of text field of tweet
    return: dict of shed_word_ind to shed_word_freq mapping
    """
    # Tokenize and count words in tweet text.
    # Ref:
    # - 'We defined a word as any contiguous set of characters bounded by
    #   white space and/or a small set of punctuation characters.'
    # - 'We therefore included all misspellings, words from any language used
    #   on Twitter, hyperlinks, etc.'
    # - 'All pattern matches we made were case-insensitive, and we did not
    #   perform stemming.'
    tweet_text_words = tweet_text.lower().split()
    counter = collections.Counter(tweet_text_words)
    tweet_shed_words_freq_dict = {
        int(shed_word_ind_dict[tweet_text_word]): int(tweet_text_word_freq)
        for tweet_text_word, tweet_text_word_freq in list(counter.items())
        if tweet_text_word in shed_words_set
    }
    return tweet_shed_words_freq_dict
129130f5b9def7320c6e3dd2d8ef82493d21eb8a
5,914
def posts(parsed):
    """Calculates number of every type of post."""
    num_t_post = 0
    num_corner_post = 0
    num_line_post = 0
    num_end_post = 0
    num_gate_posts = 0
    for post in parsed.posts():
        if not post.isRemoval:
            if post.postType == 'tPost':
                num_t_post += 1
            if post.postType == 'cornerPost':
                num_corner_post += 1
            if post.postType == 'endPost':
                num_end_post += 1
            if post.postType == 'gatePost':
                num_gate_posts += 1
    for fence in parsed.fences:
        if not fence.isRemoval:
            if (fence.length / 12) % 8 == 0:
                num_line_post += (fence.length / 12) // 8 - 1
            else:
                num_line_post += (fence.length / 12) // 8
    num_steel_post = (num_t_post + num_corner_post + num_line_post +
                      num_end_post + num_gate_posts)
    return (num_t_post, num_corner_post, num_line_post, num_end_post,
            num_gate_posts, num_steel_post)
e8c5905a38ab560f0dba595eecf67865efc27121
5,915
def fasta(file_allname: str):
    """Read the file at the given path and split it into FASTA records.

    :param file_allname: path to the file, including the extension
    :return: list of sequences in FASTA format
    """
    try:
        # file_allname = input("Enter the file to analyze, including the extension\n")
        f = open(file_allname).read()
        fasts = f.split(">")
        fast_seq = []
        for fast in fasts:
            if fast:
                fast = ">" + fast
                fast_seq.append(fast)
        return fast_seq
    except Exception:
        print("Please enter a valid file name.")
bbd03531a7d311c322fdbd66e401788fb6526120
5,917
def format_size(size):
    """
    :param float size:
    :rtype: str
    """
    size = float(size)
    unit = 'TB'
    for current_unit in ['bytes', 'KB', 'MB', 'GB']:
        if size < 1024:
            unit = current_unit
            break
        size /= 1024
    return '{0:.2f}'.format(size).rstrip('0').rstrip('.') + ' ' + unit
95470360fcc34df5a51a7cf354138413b41940aa
5,918
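A couple of spot checks for format_size (values chosen for illustration):

assert format_size(0) == '0 bytes'
assert format_size(1536) == '1.5 KB'  # 1536 / 1024 = 1.5, trailing zeros stripped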
def rc_seq(seq=""):
    """Returns the reverse complement sequence."""
    rc_dict = {
        "a": "t", "c": "g", "t": "a", "g": "c", "n": "n",
        "A": "T", "C": "G", "T": "A", "G": "C", "N": "N",
    }
    rc_nt_ls = [rc_dict[seq[i]] for i in range(len(seq) - 1, -1, -1)]
    rc_seq_ = "".join(rc_nt_ls)
    return rc_seq_
827877a76d4ffbe61e40e4f00641afa4277f3ff5
5,919
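A minimal check for rc_seq (the sequence is made up):

assert rc_seq("ATGC") == "GCAT"  # reverse "CGTA", then complement each base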
import six

def get_rotation(rotation):
    """Return the text angle as float. The returned angle is between 0 and 360 deg.

    *rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
    """
    try:
        angle = float(rotation)
    except (ValueError, TypeError):
        isString = isinstance(rotation, six.string_types)
        if (isString and rotation == 'horizontal') or rotation is None:
            angle = 0.
        elif isString and rotation == 'vertical':
            angle = 90.
        else:
            raise ValueError("rotation is {0!r}; expected 'horizontal', "
                             "'vertical', a numeric value, or "
                             "None".format(rotation))
    return angle % 360
7ed0fd31f9a90ddb5743faa8e45e46f0d5cc08bd
5,920
def insert_with_key_enumeration(agent, agent_data: list, results: dict):
    """Checks if an agent with the same name has stored data already in the
    given dict and enumerates in that case.

    :param agent: agent that produced data
    :param agent_data: simulated data
    :param results: dict to store data into
    :return: dict with inserted data/name pair
    """
    # add to results dict and don't double agent names
    if agent.get_name() not in results:
        results[agent.get_name()] = agent_data
    else:
        # add index to agent name if another agent of same type was simulated before
        new_name = agent.get_name() + "_" + str(
            sum([agent.get_name() in s for s in list(results.keys())]))
        results[new_name] = agent_data
    return results
d2d653dcff20836c4eaf8cf55b31b1a1209a4ddd
5,922
import argparse

def get_args():
    """Get command-line arguments."""
    parser = argparse.ArgumentParser(
        description='First Bank of Change',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('value', metavar='int', type=int, help='Sum')

    args = parser.parse_args()

    if not 0 < args.value <= 100:
        parser.error('value "{}" must be > 0 and <= 100'.format(args.value))

    return args
117ae5596c95154ac0dc10fd9b1793d89d471da8
5,923
def min__to__s():
    """Convert minute to second."""
    return '6.0E+1{kind}*{var}'
2730af2cc79a6c4af6d1b18f79326623c0fd0289
5,924
def score_per_term(base_t, mis_t, special_t, metric):
    """Computes three distinct similarity scores for each list of terms.

    Parameters
    ----------
    base_t, mis_t, special_t: list of str
        Lists of toponym terms identified as base, mismatch or frequent
        (special) respectively.
    metric: str
        Indicates the metric to utilize in order to calculate the similarity
        score by comparing individually the three lists.

    Returns
    -------
    tuple of (float, float, float)
        A similarity score for every list of terms. Each score is normalized
        in range [0, 1].
    """
    scores = [0, 0, 0]  # base, mis, special
    for idx, (term_a, term_b) in enumerate(zip(
            [base_t['a'], mis_t['a'], special_t['a']],
            [base_t['b'], mis_t['b'], special_t['b']]
    )):
        if term_a or term_b:
            scores[idx] = globals()[metric](u' '.join(term_a), u' '.join(term_b))
    return scores[0], scores[1], scores[2]
55e5b9b0d9feaa359ab0907b399eb37514dcfacd
5,925
def do_simple_math(number1, number2, operator):
    """Does simple math between two numbers and an operator.

    :param number1: The first number
    :param number2: The second number
    :param operator: The operator (string)
    :return: Float
    """
    ans = 0
    # compare string values with == rather than identity ('is'), which is unreliable for literals
    if operator == "*":
        ans = number1 * number2
    elif operator == "/":
        ans = number1 / number2
    elif operator == "+":
        ans = number1 + number2
    elif operator == "-":
        ans = number1 - number2
    elif operator == "^":
        ans = number1 ** number2
    elif operator == "%":
        ans = number1 % number2
    return ans
eb745f9c3f3c1e18de30cbe6c564d68c29e39ff4
5,926
def snake(string):
    """snake_case"""
    return "_".join(string.split())
6bf99dede918937ad59ec9be14ffade8fadb5794
5,928
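One illustrative call for snake; note it joins on whitespace but does not lowercase:

assert snake("Hello World Again") == "Hello_World_Again"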
def generateKeys():
    """Generates and returns a dictionary containing the original column names
    from the LIDAR file as values and the currently used column names as
    corresponding keys.

    ws_1  : Speed Value.1
    dir_1 : Direction Value.1
    h_1   : Node RT01 Lidar Height
    """
    keys = {"ws_0": "Speed Value",
            "dir_0": "Direction Value",
            "h_0": "Node RT00 Lidar Height"}
    for i in range(1, 11):
        keys.update({"ws_{}".format(i): "Speed Value.{}".format(i),
                     "dir_{}".format(i): "Direction Value.{}".format(i),
                     "h_{}".format(i): "Node RT{:02d} Lidar Height".format(i + 1),
                     })
    return keys
9d0d55c3fdc32ddda46da4a9e876d4ce1ecde25d
5,929
def get_uniprot_homologs(rev=False):
    """As above, but exclusively uniprot => mouse uniprot."""
    homologs = {}
    with open('data/corum_mouse_homologs.txt') as infile:
        data = [line.strip().split('\t') for line in infile]
    for line in data:
        original = line[1].split('|')[1]
        uniprot = line[0]
        # Picks first, and subsequently best. Seqid must be in desc order!
        if original not in homologs:
            homologs[original] = uniprot
    if rev:
        homologs = {value: key for key, value in homologs.items()}
    return homologs
969085375265b90b5501b4b86eaaed3e1c48795f
5,930
def is_intersection(g, n):
    """Determine if a node is an intersection.

    graph: 1 -->-- 2 -->-- 3

    >>> is_intersection(g, 2)
    False

    graph:
        1 -- 2 -- 3
             |
             4

    >>> is_intersection(g, 2)
    True

    Parameters
    ----------
    g : networkx DiGraph
    n : node id

    Returns
    -------
    bool
    """
    # list() so this also works on networkx 2.x, where these return iterators
    return len(set(list(g.predecessors(n)) + list(g.successors(n)))) > 2
415e5154095cd78112ef029b6c4d62c36da0b3b8
5,932
import typing

def tokenize(data: typing.Union[str, typing.Sequence[str]]) -> list[str]:
    """Break up a string into tokens; tokens can be separated by commas or spaces.

    Creates separate tokens for:
    - "(" or "[" at beginning
    - ")" or "]" at end
    """
    # break into tokens
    if isinstance(data, str):
        data = [data]
    tokens = []
    for datum in data:
        datum = datum.replace(',', ' ')
        subtokens = datum.split(' ')
        for token in subtokens:
            if len(token) == 0:
                continue
            elif len(token) == 1:
                tokens.append(token)
            else:
                start_interval = token[0] in ['(', '[']
                end_interval = token[-1] in [')', ']']
                # convert token based on contained intervals
                if start_interval and end_interval:
                    tokens.append(token[0])
                    if len(token) > 2:
                        tokens.append(token[1:-1])
                    tokens.append(token[-1])
                elif start_interval:
                    tokens.append(token[0])
                    tokens.append(token[1:])
                elif end_interval:
                    tokens.append(token[:-1])
                    tokens.append(token[-1])
                else:
                    tokens.append(token)
    return tokens
832343067c8777aa386c0c87c2c4e8202a7cb88f
5,934
def de_comma(string):
    """Remove any trailing commas.

    >>> de_comma(',fred,,') == ',fred'
    True
    """
    return string.rstrip(',')
453d615c1fbbef5139d05d6e4510731c969d6a86
5,935
import pickle

def get_actual_data(base, n_run, log_path, subfolders):
    """
    :param base: the sub folder name right before the _DATE_InstanceNumber
    :param n_run: the INSTANCE number in the subfolder name
    :param log_path: path to the main log folder containing all the runs of an
        experiment (e.g. ../data/CH6-14S1G1TNSV/)
    :param subfolders: the list of all the sub folders contained in log_folder
    :return: the unpickled data of the matching run
    """
    for subfolder in subfolders:
        splitted = subfolder.split('_')
        # get basename, compare to base; compare n_run with experiment instance
        if splitted[0] == base and str(n_run).zfill(3) == splitted[2]:
            filepath = log_path + '/' + subfolder + '/global_save.txt'
            try:
                data = pickle.load(open(filepath, "rb"))
            except Exception:
                print('Make sure your parameters are right!')
                exit()
            return data
b9f76b14b90e3c187e19bcd0b8bbbfe865518fe7
5,936
def secs_to_str(secs):
    """Given number of seconds returns, e.g., `02h 29m 39s`."""
    units = (('s', 60), ('m', 60), ('h', 24), ('d', 7))
    out = []
    rem = secs
    for (unit, cycle) in units:
        out.append((rem % cycle, unit))
        rem = int(rem / cycle)
        if not rem:
            break
    if rem:  # leftover = weeks
        out.append((rem, 'w'))
    return ' '.join(["%02d%s" % tup for tup in out[::-1]])
0918fd72fbaaa0adf8fe75bcb1ef39b4e9aba75b
5,937
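Checking secs_to_str against the docstring's own example (2 hours, 29 minutes, 39 seconds):

assert secs_to_str(2 * 3600 + 29 * 60 + 39) == '02h 29m 39s'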
def get_computed_response_text_value(response):
    """Extract the text message from the Dialogflow response; fallback: None."""
    try:
        if len(response.query_result.fulfillment_text):
            return response.query_result.fulfillment_text
        elif len(response.query_result.fulfillment_messages[0].text.text[0]):
            return response.query_result.fulfillment_messages[0].text.text[0]
        else:
            return None
    except Exception:
        return None
fa7410ac4b0ef2c0dea59b0e9d001a7893a56479
5,938
def stairmaster_mets(setting):
    """For use in submaximal tests on the StairMaster 4000 PT step ergometer.

    Howley, Edward T., Dennis L. Colacino, and Thomas C. Swensen. "Factors
    Affecting the Oxygen Cost of Stepping on an Electronic Stepping
    Ergometer." Medicine & Science in Sports & Exercise 24.9 (1992): n. pag.
    NCBI. Web. 10 Nov. 2016.

    args:
        setting (int): the setting of the step ergometer

    Returns:
        float: VO2max in kcal/kg*hour
    """
    return 0.556 * 7.45 * setting
1d6cc9fc846773cfe82dfacb8a34fb6f46d69903
5,940
def clean_cancer_dataset(df_training):
    """Checks and cleans the dataset of any potentially impossible values:

    - bi_rads, the 1st column, only allows values in the range of 1-5, ordinal
    - age, the 2nd column, cannot be negative, integer
    - shape, the 3rd column, only allows values between 1 and 4, nominal
    - margin only allows a range of 1 to 5, nominal
    - density only allows values between 1-4, ordinal

    All deletions will be performed in place.

    :return: cleaned up dataframe, count of removed points
    """
    rows_pre_cleaning = df_training.shape[0]
    df_training.drop(df_training.index[df_training['bi_rads'] > 5], inplace=True)
    df_training.drop(df_training.index[df_training['shape'] > 4], inplace=True)
    df_training.drop(df_training.index[df_training['margin'] > 5], inplace=True)
    df_training.drop(df_training.index[df_training['density'] > 4], inplace=True)
    rows_removed = rows_pre_cleaning - df_training.shape[0]
    return df_training, rows_removed
a30f377b48bb665f42f3efa58b15d289f7e7f9b3
5,941
def isvalid(gridstr, x, y, test_value):
    """Check if it would be legal to place a value in pos x, y."""
    sq_indexes = ((0, 1, 2), (3, 4, 5), (6, 7, 8))
    group_indexes = [(x_ind, y_ind)
                     for x_ind in sq_indexes[x // 3]
                     for y_ind in sq_indexes[y // 3]]
    for index in range(9):
        # Check squares in the same column
        if gridstr[x + 9 * index] == test_value:
            return False
        # Check the row
        if gridstr[index + 9 * y] == test_value:
            return False
        # Check the group
        x_index, y_index = group_indexes[index]
        if gridstr[x_index + 9 * y_index] == test_value:
            return False
    return True
a8481bbb18409814e54ad669bbb14b71e32b1139
5,943
def findPowerPlant(mirror, name):
    """Return power plant agent, if it exists."""
    if name in mirror.ppDict:
        return mirror.ppDict[name]
    else:
        print("*** Power Plant '%s' not found." % name)
        return None
35e432c7ab6dbe57488e2d7f84c3b6d077f2079a
5,944
def check_threshold(service, config_high_threshold, config_low_threshold, curr_util):
    """Checks whether utilization crossed a discrete threshold.

    Args:
        service: Name of the micro/macroservice
        config_high_threshold: Upper limit threshold to utilization set in config file
        config_low_threshold: Lower limit threshold to utilization set in config file
        curr_util: value of the current utilization

    Returns:
        String "High" if upper limit crossed
        String "Low" if lower limit crossed
        String "Normal" if none crossed
    """
    if float(curr_util) > float(config_high_threshold):
        return "High"
    elif float(curr_util) < float(config_low_threshold):
        return "Low"
    else:
        return "Normal"
80bf8ab4f5b2bbac35df7c48764114e213fba580
5,947
def ols_data():
    """Draper and Smith p.8."""
    xs = [35.3, 29.7, 30.8, 58.8, 61.4, 71.3, 74.4, 76.7, 70.7, 57.5,
          46.4, 28.9, 28.1, 39.1, 46.8, 48.5, 59.3, 70, 70, 74.5,
          72.1, 58.1, 44.6, 33.4, 28.6]
    ys = [10.98, 11.13, 12.51, 8.4, 9.27, 8.73, 6.36, 8.50, 7.82, 9.14,
          8.24, 12.19, 11.88, 9.57, 10.94, 9.58, 10.09, 8.11, 6.83, 8.88,
          7.68, 8.47, 8.86, 10.36, 11.08]
    # self.Xk = 28.6
    # self.ypred_k = 0.3091
    solution = {'slope': -0.0798,
                'y_intercept': 13.623,
                'n': len(xs),
                'pred_x': 28.6,
                'pred_error': 0.309}
    return xs, ys, solution
d741195075a51d1485c9f98031ca405cadf1db93
5,948
def valid_field(obj, field):
    """Returns ``True`` if given object (BaseDocument subclass or an instance
    thereof) has given field defined."""
    return object.__getattribute__(obj, 'nanomongo').has_field(field)
32e662c5c0e666b7455aacdd6809e31cd20017fe
5,949
def double_bin_pharmacophore_graph(distance, bins, delta):
    """Assign two bin values to the distance between pharmacophoric points.

    Parameters
    ----------
    distance : float
        The distance that will be binned.
    bins : np.ndarray
        Array of bins. It has to be one dimensional and monotonic.
    delta : float
        The tolerance from which a distance value is considered to belong to
        the lower and upper bin. It has to be a value between 0 and 0.5.

    Returns
    -------
    2-tuple of int
        The two bins assigned to the distance.
    """
    for ii in range(bins.shape[0] - 1):
        if distance == bins[ii]:
            return (bins[ii], bins[ii])
        elif bins[ii] < distance < bins[ii + 1]:
            if distance - bins[ii] > delta:
                return (bins[ii], bins[ii + 1])
            else:
                return (bins[ii], bins[ii])
b7dedf4f31b5cd08c9875139df837a57a8117001
5,950
import os

def is_empty_dir(target_dir):
    """Return whether the directory contains no files.

    :param str target_dir: target dir
    """
    for root, _, files in os.walk(target_dir):
        for f in files:
            if os.path.isfile(os.path.join(root, f)):
                return False
    return True
8de606b422703b7602d62df0fae5c0b341761236
5,951
def is_absolute_url(parsed_url):
    """Check if it is an absolute url."""
    return all([parsed_url.scheme, parsed_url.netloc])
578c1443ec18f9b741cd205763604cba2242ac48
5,952
def get_levelized_cost(solution, cost_class='monetary', carrier='power',
                       group=None, locations=None, unit_multiplier=1.0):
    """Get the levelized cost per unit of energy produced for the given
    ``cost_class`` and ``carrier``, optionally for a subset of technologies
    given by ``group`` and a subset of ``locations``.

    Parameters
    ----------
    solution : solution container
    cost_class : str, default 'monetary'
    carrier : str, default 'power'
    group : str, default None
        Limit the computation to members of the given group (see the groups
        table in the solution for valid groups).
    locations : str or iterable, default None
        Limit the computation to the given location or locations.
    unit_multiplier : float or int, default 1.0
        Adjust unit of the returned cost value. For example, if model units
        are kW and kWh, ``unit_multiplier=1.0`` will return cost per kWh, and
        ``unit_multiplier=0.001`` will return cost per MWh.
    """
    if group is None:
        group = 'supply'

    members = solution.groups.to_pandas().at[group, 'members'].split('|')

    if locations is None:
        locations_slice = slice(None)
    elif isinstance(locations, (str, float, int)):
        # Make sure that locations is a list if it's a single value
        locations_slice = [locations]
    else:
        locations_slice = locations

    cost = solution['costs'].loc[dict(k=cost_class, x=locations_slice, y=members)]
    ec_prod = solution['ec_prod'].loc[dict(c=carrier, x=locations_slice, y=members)]

    if locations is None:
        cost = cost.sum(dim='x').to_pandas()
        ec_prod = ec_prod.sum(dim='x').to_pandas()
    else:
        cost = cost.to_pandas()
        ec_prod = ec_prod.to_pandas()

    return (cost / ec_prod) * unit_multiplier
96b8f9a9fceaa932bcee72033e73ad8b9551759d
5,954
def reduce_range_overlaps(ranges):
    """Given a list whose elements are 2-tuples of min & max, return a similar
    list, simplified if possible.
    """
    ranges = [ea for ea in ranges if ea]
    if len(ranges) < 2:
        return ranges
    first, *ranges_ordered = list(reversed(sorted(ranges, key=lambda ea: ea[1] - ea[0])))
    r_min = first[0]
    r_max = first[1]
    disjointed_ranges = []
    for r in ranges_ordered:
        if r_min <= r[0] <= r_max:
            r_max = max(r[1], r_max)
        elif r_min <= r[1] <= r_max:
            r_min = min(r[0], r_min)
        # Since we already looked at 'first' sorted by max range, not
        # possible: r[0] < r_min and r[1] > r_max
        else:
            # range is possibly disjointed from other ranges. There may be a gap.
            disjointed_ranges.append(r)
    big_range = (r_min, r_max)
    clean_ranges = [big_range, *disjointed_ranges]
    return clean_ranges
fe62dd8bbb1fd0a985757cc417c9c230659294c5
5,956
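An illustrative reduction with reduce_range_overlaps (the ranges are made up):

assert reduce_range_overlaps([(1, 4), (3, 7), (10, 12)]) == [(1, 7), (10, 12)]  # (1,4) and (3,7) merge; (10,12) stays disjoint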
import sys

def exception_in_stack():
    """Return true if we are currently in the process of handling an
    exception, i.e. one has been caught in a try block.

    https://docs.python.org/3/library/sys.html#sys.exc_info
    """
    return sys.exc_info()[0] is not None
71f2076c956fa3bb92751778c29537df7bceac35
5,959
def meta_body():
    """Ugoira page data."""
    return '{"error":false,"message":"","body":{"src":"https:\/\/i.pximg.net\/img-zip-ugoira\/img\/2019\/04\/29\/16\/09\/38\/74442143_ugoira600x600.zip","originalSrc":"https:\/\/i.pximg.net\/img-zip-ugoira\/img\/2019\/04\/29\/16\/09\/38\/74442143_ugoira1920x1080.zip","mime_type":"image\/jpeg","frames":[{"file":"000000.jpg","delay":70},{"file":"000001.jpg","delay":70},{"file":"000002.jpg","delay":70},{"file":"000003.jpg","delay":70},{"file":"000004.jpg","delay":70},{"file":"000005.jpg","delay":70},{"file":"000006.jpg","delay":70},{"file":"000007.jpg","delay":70},{"file":"000008.jpg","delay":70},{"file":"000009.jpg","delay":70},{"file":"000010.jpg","delay":70},{"file":"000011.jpg","delay":70},{"file":"000012.jpg","delay":70},{"file":"000013.jpg","delay":70},{"file":"000014.jpg","delay":70},{"file":"000015.jpg","delay":70},{"file":"000016.jpg","delay":70},{"file":"000017.jpg","delay":70},{"file":"000018.jpg","delay":70},{"file":"000019.jpg","delay":70},{"file":"000020.jpg","delay":70},{"file":"000021.jpg","delay":70},{"file":"000022.jpg","delay":70},{"file":"000023.jpg","delay":70},{"file":"000024.jpg","delay":70},{"file":"000025.jpg","delay":70},{"file":"000026.jpg","delay":70},{"file":"000027.jpg","delay":70},{"file":"000028.jpg","delay":70},{"file":"000029.jpg","delay":70},{"file":"000030.jpg","delay":70},{"file":"000031.jpg","delay":70},{"file":"000032.jpg","delay":70},{"file":"000033.jpg","delay":70},{"file":"000034.jpg","delay":70},{"file":"000035.jpg","delay":70},{"file":"000036.jpg","delay":70},{"file":"000037.jpg","delay":70},{"file":"000038.jpg","delay":70},{"file":"000039.jpg","delay":70},{"file":"000040.jpg","delay":70},{"file":"000041.jpg","delay":70},{"file":"000042.jpg","delay":70},{"file":"000043.jpg","delay":70},{"file":"000044.jpg","delay":70},{"file":"000045.jpg","delay":70},{"file":"000046.jpg","delay":70},{"file":"000047.jpg","delay":70},{"file":"000048.jpg","delay":70},{"file":"000049.jpg","delay":70},{"file":"000050.jpg","delay":70},{"file":"000051.jpg","delay":70},{"file":"000052.jpg","delay":70},{"file":"000053.jpg","delay":70},{"file":"000054.jpg","delay":70},{"file":"000055.jpg","delay":70},{"file":"000056.jpg","delay":70},{"file":"000057.jpg","delay":70},{"file":"000058.jpg","delay":70},{"file":"000059.jpg","delay":70},{"file":"000060.jpg","delay":70},{"file":"000061.jpg","delay":70},{"file":"000062.jpg","delay":70},{"file":"000063.jpg","delay":70},{"file":"000064.jpg","delay":70},{"file":"000065.jpg","delay":70},{"file":"000066.jpg","delay":70},{"file":"000067.jpg","delay":70},{"file":"000068.jpg","delay":70},{"file":"000069.jpg","delay":70},{"file":"000070.jpg","delay":70},{"file":"000071.jpg","delay":70},{"file":"000072.jpg","delay":70},{"file":"000073.jpg","delay":70},{"file":"000074.jpg","delay":70},{"file":"000075.jpg","delay":70},{"file":"000076.jpg","delay":70}]}}'
abf9e01371938467b12721373a0e5fc8fb926016
5,960
import re

def format_comments(text="default", line_size=90):
    """Takes a string of text and formats it based on rule 1 (see docs)."""
    # rule to detect fancy comments with no text
    regex1 = r"^ *?####*$"
    # rule to detect fancy comments with text
    regex2 = r"^ *?####*([^#\n\r]+)#*"
    # if pattern 1 is detected, replace with this
    subst1 = "#" * line_size

    # if pattern 2 is detected, replace with this
    def subst2(match_obj):
        fix_pad = 4 + 2  # 4 hashes on left plus two spaces
        cap_group = match_obj.group(1).strip()
        return '#### ' + cap_group + ' ' + '#' * (line_size - fix_pad - len(cap_group))

    text = re.sub(regex1, subst1, text, 0, re.MULTILINE)
    text = re.sub(regex2, subst2, text, 0, re.MULTILINE)
    # formatted text to return
    return text
6eba4539aa7128d5654ddab7fe08a2e9df6dc738
5,961
def config_to_dict(plato_config):
    """Convert the plato config (can be a nested one) instance to a dict."""
    # convert the whole config to a dict - OrderedDict
    plato_config_dict = plato_config._asdict()

    def to_dict(elem):
        for key, value in elem.items():
            try:
                value = value._asdict()
                elem[key] = to_dict(value)
            except AttributeError:
                pass
            if isinstance(value, list):
                for idx, value_item in enumerate(value):
                    try:
                        value_item = value_item._asdict()
                        value[idx] = to_dict(value_item)
                    except AttributeError:
                        pass
                elem[key] = value
        return elem

    plato_config_dict = to_dict(plato_config_dict)
    return plato_config_dict
9e68c2859dc33370554f8015f96bd501f827c1b2
5,963
def get_parameter(model, name):
    """Finds the named parameter within the given model."""
    for n, p in model.named_parameters():
        if n == name:
            return p
    raise LookupError(name)
ba35b743d9189c94da0dcce27630bba311ea8a46
5,964
import os

def _demo_home(options):
    """For convenience, demo home is in the same folder as the jar file."""
    bp, fn = os.path.split(options.jar_file)
    demo_home = os.path.join(bp, 'demo')
    assert os.path.isdir(demo_home), 'Folder does not exist: "%s"' % demo_home
    return demo_home
97cdb9a36e56539719cf3aab1e7a5d0c95d87a1d
5,966
def valid_tetrodes(tetrode_ids, tetrode_units):
    """Only keep valid tetrodes with neuron units so that there is
    corresponding spike train data.

    :param tetrode_ids: (list) of tetrode ids in the order of LFP data
    :param tetrode_units: (dict) number of neuron units on each tetrode
    :return: (list) of tetrode ids with neuron units
    """
    return [x for x in tetrode_ids if tetrode_units[x] > 0]
c887f5e5c29d841da63fe0cd56c41eda5ddde891
5,967
import os
import json

async def get_config(guildid):
    """
    :param guildid:
    :return: Guild-Config as Json
    """
    path = os.path.join("data", "configs", f"{guildid}.json")
    with open(path, "r") as f:
        data = json.load(f)
    return data
4057569c71ac546a504cabe1ec19d6778f6ab6fa
5,969
def rewrite_metadata(content, dic):
    """From `content`, which is the old text with the metadata, and `dic`,
    which has the new data, return `new_txt` with the data replaced by the
    dic's content and relevant headers added.
    """
    # Splitting into headers and body. Technically, body is a list of
    # paragraphs where the first one is the headers.
    new_headers = ""
    body = content.split("\n\n")
    headers = body[0]

    # Replacing data in headers
    for line in headers.split("\n"):
        has_match = False
        # Replace data in a preexisting line
        for key in list(dic.keys()):
            if line.startswith(key):
                new_headers = new_headers + key + ": " + str(dic[key]) + "\n"
                del dic[key]
                has_match = True
        # Copies an existing header that is not overwritten by dic
        if not has_match:
            new_headers = new_headers + line + "\n"

    # In case we forgot to add a line manually
    for left in list(dic.keys()):
        new_headers = new_headers + left + ": " + str(dic[left]) + "\n"

    # Formatting, joining new text
    body[0] = new_headers
    new_txt = "\n\n".join(body)
    return new_txt
14f7da66f19c24d073f1fdee4b56d49d28320e71
5,970
def reverse_dict_old(dikt):
    """Takes a dict and returns a new dict with the old values as keys and the
    old keys as values (collected in a list).

    For example, reverse_dict_old({'AB04a': 'b', 'AB04b': 'b', 'AB04c': 'b', 'CC04x': 'c'})
    will return {'b': ['AB04a', 'AB04b', 'AB04c'], 'c': ['CC04x']}
    """
    new_dikt = {}
    for k, v in dikt.items():
        if v in new_dikt:
            new_dikt[v].append(k)
        else:
            new_dikt[v] = [k]
    return new_dikt
50155858fbbe52dc8daae66e6a94c8885b80ba05
5,971
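A worked example for reverse_dict_old (the data is illustrative):

assert reverse_dict_old({'AB04a': 'b', 'CC04x': 'c'}) == {'b': ['AB04a'], 'c': ['CC04x']}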
def Q(lambda_0, lambda_, eps_c, Delta, norm_zeta2, nu):
    """Quadratic upper bound of the duality gap function initialized at lambda_0."""
    lmd = lambda_ / lambda_0
    Q_lambda = (lmd * eps_c + Delta * (1. - lmd) +
                0.5 * nu * norm_zeta2 * (1. - lmd) ** 2)
    return Q_lambda
e7c624d822713efd9a63e92d40ecb9c13d5ee8d6
5,972
from typing import List
import subprocess

def import_template_ids() -> List[str]:
    """Return a list of all the supported template IDs."""
    return subprocess.check_output(["meme", "-list-templates"]).decode("utf-8").splitlines()
89914e20965e87e9d9589955000854dc8f8d743b
5,974
def _scale_pot(pot, scale_coeff, numtors):
    """Scale the potential."""
    print('scale_coeff test 0:', scale_coeff, numtors)
    scale_factor = scale_coeff ** (2.0 / numtors)
    print('scale_coeff test:', scale_coeff, numtors, scale_factor)

    new_pot = {}
    for idx, val in pot.items():
        new_pot[idx] = val * scale_factor

    return new_pot
0e634b7766a5822d3b2e80fffa0b56dccee125ab
5,975
import pkg_resources

def get_substation_file():
    """Return the default substation file for the CONUS."""
    return pkg_resources.resource_filename('cerf', 'data/hifld_substations_conus_albers.zip')
7628c7981dd9f82b4210a451ad62fffa72222fe8
5,976
def get_tracer(request):
    """Utility function to retrieve the tracer from the given ``request``.

    It is meant to be used only for testing purposes.
    """
    return request['__datadog_request_span']._tracer
facd1ff0922dcc7743814cfd738d022316ba5d6d
5,977
def build_permissions_response():
    """Build a response asking the user to grant device address permissions."""
    output = "I'm sorry, I was not able to lookup your home town. " \
             "With your permission, I can provide you with this information. " \
             "Please check your companion app for details"
    return {
        'outputSpeech': {
            'type': 'PlainText',
            'text': output
        },
        'card': {
            'type': 'AskForPermissionsConsent',
            'permissions': [
                'read::alexa:device:all:address'
            ]
        },
        'shouldEndSession': True
    }
4b01a0fa32958127f7c373b0cedf5d518074e29e
5,979
def get_card_names(cards):
    """
    :param cards: List of card JSONs
    :return: List of card names (str)
    """
    names = []
    for card in cards:
        name = card.get("name")
        names.append(name)
    return names
a30ad1ef7d8beaab0451d6f498254b0b5df3cf6d
5,980
import platform

def pyversion(ref=None):
    """Determine the Python version and optionally compare to a reference."""
    ver = platform.python_version()
    if ref:
        return [int(x) for x in ver.split(".")[:2]] >= [int(x) for x in ref.split(".")[:2]]
    else:
        return ver
2e31c7710b171ad67e56f9dbc1181685e0f32de1
5,981
import os

def _escape_space(program):
    """Escape spaces in a program path for Windows."""
    if os.name == "nt" and ' ' in program:
        return '"' + program + '"'
    else:
        return program
67a8fa1544f524e9a2591c3221f48c2c130ef86b
5,982
import re

def clean_value(value, suffix):
    """Strip out the copy suffix from a string value.

    :param value: Current value e.g. "Test Copy" or "test-copy" for slug fields.
    :type value: `str`
    :param suffix: The suffix value to be replaced with an empty string.
    :type suffix: `str`
    :return: Stripped string without the suffix.
    """
    # type: (str, str) -> str
    return re.sub(r"([\s-]?){}[\s-][\d]$".format(suffix), "", value, flags=re.I)
d2ec3b3affbf71411039f234c05935132205ae16
5,983
def list_devices_to_string(list_item):
    """Convert cfg devices into comma-split format.

    Args:
        list_item (list): list of devices, e.g. [], [1], ["1"], [1, 2], ...

    Returns:
        devices (string): comma-split devices
    """
    return ",".join(str(i) for i in list_item)
717f40d3fd0c24b93d5859491d3f9f16a2b0a069
5,984
def config_split(config):
    """Split a config dict into smaller chunks.

    This helps to avoid sending big config files.
    """
    split = []
    if "actuator" in config:
        for name in config["actuator"]:
            split.append({"actuator": {name: config["actuator"][name]}})
        del config["actuator"]
    split.append(config)
    return split
2006534ece382c55f1ba3914300f5b6960323e53
5,985
def find_next_square2(sq: int) -> int:
    """This version is just more compact."""
    sqrt_of_sq = sq ** (1 / 2)
    return -1 if sqrt_of_sq % 1 != 0 else int((sqrt_of_sq + 1) ** 2)
62246b78cc065b629961a7283671e776481a8659
5,986
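Spot checks for find_next_square2 (values chosen for illustration):

assert find_next_square2(121) == 144  # 121 = 11**2, so the next square is 12**2
assert find_next_square2(114) == -1   # 114 is not a perfect square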
import colorsys

def hex_2_hsv(hex_col):
    """Convert hex code to colorsys-style hsv.

    >>> hex_2_hsv('#f77f00')
    (0.08569500674763834, 1.0, 0.9686274509803922)
    """
    hex_col = hex_col.lstrip('#')
    r, g, b = tuple(int(hex_col[i:i + 2], 16) for i in (0, 2, 4))
    return colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
a80e9c5470dfc64c61d12bb4b823411c4a781bef
5,987
from pathlib import Path

def _drivers_dir() -> str:
    """Return the path of the directory where the drivers are stored.

    :return: path of the drivers directory
    """
    return str(Path(__file__).absolute().parent.parent.joinpath('drivers'))
45b173099f6df24398791ec33332072a7651fa4f
5,988
def tousLesIndices(stat):
    """Returns the indices of all the elements of the graph."""
    return stat.node2com.keys()
    # Unreachable legacy code, preserved as comments:
    # s = stat.node2com.values()
    # global globAuthorIndex
    # global globTfIdfTab
    # return [globAuthorIndex[x] for x in stat.node2com]  # glob node->index
    # return stat.node2com.values()

# def varianceGroupe():
# def distanceListePointsCentre(indexsCommunaute, centre):
fa847ee3913d521778ee3462c8e946f0ff001c76
5,989
import os

def list_files(root_dir, mindepth=1, maxdepth=float('inf'), filter_ext=[],
               return_relative_path=False):
    """
    Usage:

        d = get_all_files(rootdir, mindepth=1, maxdepth=2)

    This returns a list of all files of a directory, including all files in
    subdirectories. Full paths are returned.

    WARNING: this may create a very large list if many files exist in the
    directory and subdirectories. Make sure you set the maxdepth appropriately.

    root_dir = existing directory to start
    mindepth = int: the level to start; 1 starts at the root dir, 2 starts at
        the subdirectories of the root dir, and so on and so forth.
    maxdepth = int: the level at which to stop. For example, if you only want
        the files of the subdirectories of the root dir, set mindepth = 2 and
        maxdepth = 2. If you only want the files of the root dir itself, set
        mindepth = 1 and maxdepth = 1.
    filter_ext (list, optional): filter files, e.g. ['.jpg', '.png']
    return_relative_path (bool): Default False. If True, return the relative
        path, else return the absolute path.
    """
    root_dir = os.path.normcase(root_dir)
    file_paths = []
    root_depth = root_dir.rstrip(os.path.sep).count(os.path.sep) - 1
    lowered_filter_ext = tuple([ext.lower() for ext in filter_ext])
    for abs_dir, dirs, files in sorted(os.walk(root_dir)):
        depth = abs_dir.count(os.path.sep) - root_depth
        if mindepth <= depth <= maxdepth:
            for filename in files:
                if filter_ext:
                    if not filename.lower().endswith(lowered_filter_ext):
                        continue
                if return_relative_path:
                    rel_dir = os.path.relpath(abs_dir, root_dir)
                    if rel_dir == ".":
                        file_paths.append(filename)
                    else:
                        file_paths.append(os.path.join(rel_dir, filename))
                else:
                    # append full absolute path
                    file_paths.append(os.path.join(abs_dir, filename))
        elif depth > maxdepth:
            # del dirs[:]
            pass
    return file_paths
8df0e009f40e77ef7ed86ef870a9f1e508d876d5
5,990
def location_descriptors():
    """Provide possible templated_sequence input."""
    return [
        {
            "id": "NC_000001.11:15455",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ncbi:NC_000001.11",
                "interval": {
                    "start": {"type": "Number", "value": 15455},
                    "end": {"type": "Number", "value": 15456},
                },
                "type": "SequenceLocation",
            },
            "label": "NC_000001.11:15455",
        },
        {
            "id": "NC_000001.11:15566",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ncbi:NC_000001.11",
                "interval": {
                    "start": {"type": "Number", "value": 15565},
                    "end": {"type": "Number", "value": 15566},
                },
                "type": "SequenceLocation",
            },
            "label": "NC_000001.11:15566",
        },
        {
            "id": "chr12:p12.1",
            "type": "LocationDescriptor",
            "location": {
                "species_id": "taxonomy:9606",
                "chr": "12",
                "interval": {"start": "p12.1", "end": "p12.1"},
            },
            "label": "chr12:p12.1",
        },
        {
            "id": "chr12:p12.2",
            "type": "LocationDescriptor",
            "location": {
                "species_id": "taxonomy:9606",
                "chr": "12",
                "interval": {"start": "p12.2", "end": "p12.2"},
            },
            "label": "chr12:p12.2",
        },
        {
            "id": "NC_000001.11:15455-15566",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ncbi:NC_000001.11",
                "interval": {
                    "start": {"type": "Number", "value": 15455},
                    "end": {"type": "Number", "value": 15566},
                },
                "type": "SequenceLocation",
            },
            "label": "NC_000001.11:15455-15566",
        },
        {
            "id": "chr12:p12.1-p12.2",
            "type": "LocationDescriptor",
            "location": {
                "species_id": "taxonomy:9606",
                "chr": "12",
                "interval": {"start": "p12.1", "end": "p12.2"},
            },
            "label": "chr12:p12.1-p12.2",
        },
        {
            "id": "fusor.location_descriptor:NP_001123617.1",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ga4gh:SQ.sv5egNzqN5koJQH6w0M4tIK9tEDEfJl7",
                "type": "SequenceLocation",
                "interval": {
                    "start": {"type": "Number", "value": 171},
                    "end": {"type": "Number", "value": 204},
                },
            },
        },
        {
            "id": "fusor.location_descriptor:NP_002520.2",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ga4gh:SQ.vJvm06Wl5J7DXHynR9ksW7IK3_3jlFK6",
                "type": "SequenceLocation",
                "interval": {
                    "start": {"type": "Number", "value": 510},
                    "end": {"type": "Number", "value": 781},
                },
            },
        },
    ]
da13824ff6f91caa635700759a29fb1f36aae1be
5,991
def calculate_gc(x):
    """Calculates the GC content of DNA sequence x.

    x: a string composed only of A's, T's, G's, and C's.
    """
    x = x.upper()
    return float(x.count('G') + x.count('C')) / (
        x.count('G') + x.count('C') + x.count('A') + x.count('T'))
aae64ff550ef26e75518bdad8a12b7cda9e060d2
5,992
def no_float_zeros(v):
    """If a float is equivalent to an integer, return the int instead."""
    if v % 1 == 0:
        return int(v)
    else:
        return v
a33321408c43d164a8ca2c7f1d1bc6270e5708ec
5,993
import torch

def quat_mult(q_1, q_2):
    """Multiplication in the space of quaternions."""
    a_1, b_1, c_1, d_1 = q_1[:, 0], q_1[:, 1], q_1[:, 2], q_1[:, 3]
    a_2, b_2, c_2, d_2 = q_2[:, 0], q_2[:, 1], q_2[:, 2], q_2[:, 3]
    q_1_q_2 = torch.stack(
        (
            a_1 * a_2 - b_1 * b_2 - c_1 * c_2 - d_1 * d_2,
            a_1 * b_2 + b_1 * a_2 + c_1 * d_2 - d_1 * c_2,
            a_1 * c_2 - b_1 * d_2 + c_1 * a_2 + d_1 * b_2,
            a_1 * d_2 + b_1 * c_2 - c_1 * b_2 + d_1 * a_2,
        ),
        dim=1,
    )
    return q_1_q_2
dac82e246221f9af552f44ca26089443b8eaadd7
5,994
def _flip_dict_keys_and_values(d):
    """Switch the keys and values of a dictionary.

    The input dictionary is not modified.

    Output: dict
    """
    output = {}
    for key, value in d.items():
        output[value] = key
    return output
b861fc3bd194d26ee05b9a56faad3394939064bf
5,995
def hasattrs(object, *names):
    """Takes in an object and a variable-length amount of named attributes,
    and checks to see if the object has each property. If any of the
    attributes are missing, this returns False.

    :param object: an object that may or may not contain the listed attributes
    :param names: a variable amount of attribute names to check for
    :return: True if the object contains each named attribute, False otherwise
    """
    for name in names:
        if not hasattr(object, name):
            return False
    return True
f3a2fc308d041ed0de79e3389e30e02660a1d535
5,997
import json

def try_parse_json(json_):
    """Converts the string representation of JSON to JSON.

    :param str json_: JSON in str representation.
    :rtype: :class:`dict` if converted successfully, otherwise False.
    """
    if not json_:
        return False
    try:
        return json.loads(json_)
    except ValueError:
        return False
077819cf82e307aacf3e56b11fbba26a79559968
5,999
def field_paths(h5, key='externalFieldPath'):
    """Looks for the external fields."""
    if key not in h5.attrs:
        return []
    fpath = h5.attrs[key].decode('utf-8')
    if '%T' not in fpath:
        return [fpath]
    path1 = fpath.split('%T')[0]
    tlist = list(h5[path1])
    paths = [path1 + t for t in tlist]
    return paths
578e1a2d0971a94afa665f368e9b72c8f6e449d3
6,001
def get_quantifier(ch, input_iter):
    """Parse a quantifier from the input, where "ch" is the first character
    in the quantifier.

    Return the minimum number of occurrences permitted by the quantifier and
    either None or the next character from the input_iter if the next
    character is not part of the quantifier.
    """
    if ch in '*?+':
        try:
            ch2, escaped = next(input_iter)
        except StopIteration:
            ch2 = None
        if ch2 == '?':
            ch2 = None
        if ch == '+':
            return 1, ch2
        return 0, ch2

    quant = []
    while ch != '}':
        ch, escaped = next(input_iter)
        quant.append(ch)
    quant = quant[:-1]
    values = ''.join(quant).split(',')

    # Consume the trailing '?', if necessary.
    try:
        ch, escaped = next(input_iter)
    except StopIteration:
        ch = None
    if ch == '?':
        ch = None
    return int(values[0]), ch
36dea445aa416be79e86bb1e7c6f9dbe454c6c2a
6,002
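A usage sketch for get_quantifier; it assumes input_iter yields (char, escaped) pairs, which is how this kind of helper is typically driven (an assumption, not stated in the row itself):

chars = iter([(c, False) for c in '3,7}'])
assert get_quantifier('{', chars) == (3, None)  # '{3,7}' permits a minimum of 3 occurrences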
def setup_config(quiz_name):
    """Update the config.toml index and dataset fields with the formatted
    quiz_name. This directs metapy to use the correct files.

    Keyword arguments:
    quiz_name -- the name of the quiz

    Returns:
    True on success, False if it fails to open the file
    """
    try:
        conf_file = open("config.toml", 'r')
        lines = conf_file.readlines()
        conf_file.close()

        for i in range(len(lines)):
            if lines[i].startswith("index"):
                lines[i] = "index = 'idx-{0}'\n".format(quiz_name.replace(" ", "_"))
            if lines[i].startswith("dataset"):
                lines[i] = "dataset = '{0}'\n".format(quiz_name.replace(" ", "_"))

        conf_file = open("config.toml", 'w')
        with conf_file:
            conf_file.writelines(lines)
    except Exception as e:
        print(e)
        return False
    return True
28aba9399926f27da89953c8b0c6b41d95a12d96
6,003
def compute_protien_mass(protien_string):
    """Test case:

    >>> compute_protien_mass('SKADYEK')
    821.392
    """
    p = {'A': '71.03711', 'C': '103.00919', 'D': '115.02694', 'E': '129.04259',
         'F': '147.06841', 'G': '57.02146', 'H': '137.05891', 'I': '113.08406',
         'K': '128.09496', 'L': '113.08406', 'M': '131.04049', 'N': '114.04293',
         'P': '97.05276', 'Q': '128.05858', 'R': '156.10111', 'S': '87.03203',
         'T': '101.04768', 'V': '99.06841', 'W': '186.07931', 'Y': '163.06333'}
    mass = 0
    for x in protien_string:
        mass = mass + float(p[x])
    # round to 3 decimal places
    mass = round(mass, 3)
    return mass
86a3ffd0ce3e95fcdf6d510d2865b35aeb93d779
6,004
import logging

def find_duration(data):
    """Find the duration of the ECG data sequence.

    Finds the duration by looking at the last time value, as the first value
    is always at time = 0 seconds.

    :param data: 2D array of time sequences and voltage sequences
    :return: Time duration of data sequence
    """
    logging.info("Detecting Duration of Data Stream...\n")
    return data[:, 0][-1]
e65135457e23886c402e0671d720fe9c5ed257a1
6,005
from typing import Dict

def _average_latency(row: Dict):
    """Calculate average latency for a single Performance Analyzer test."""
    avg_sum_fields = [
        "Client Send",
        "Network+Server Send/Recv",
        "Server Queue",
        "Server Compute",
        "Server Compute Input",
        "Server Compute Infer",
        "Server Compute Output",
        "Client Recv",
    ]
    avg_latency = sum(int(row.get(f, 0)) for f in avg_sum_fields)
    return avg_latency
f321cb4d55af605298225f2f0146a9a71ee7895b
6,006
def to_vsizip(zipfn, relpth):
    """Create a /vsizip/ path (GDAL virtual file system) from a zip file and a relative path."""
    return "/vsizip/{}/{}".format(zipfn, relpth)
6f5baf380bd7ab8a4ea92111efbc0f660b10f6f8
6,007
def compute_F1(TP, TN, FP, FN):
    """Return the F1 score and the accuracy (in percent)."""
    numer = 2 * TP
    denom = 2 * TP + FN + FP
    F1 = numer / denom
    Acc = 100. * (TP + TN) / (TP + TN + FP + FN)
    return F1, Acc
6f012246337534af37ff233ad78d9645907739e3
6,009
def name_full_data():
    """Full name data."""
    return {
        "name": "Doe, John",
        "given_name": "John",
        "family_name": "Doe",
        "identifiers": [
            {"identifier": "0000-0001-8135-3489", "scheme": "orcid"},
            {"identifier": "gnd:4079154-3", "scheme": "gnd"},
        ],
        "affiliations": [
            {"id": "cern"},
            {"name": "CustomORG"},
        ],
    }
ac590635dbe33e68dc88acd890d16dd3137befb2
6,010
def requires_moderation(page):
    """Return True if the page requires moderation."""
    return bool(page.get_moderator_queryset().count())
8f1cfa852cbeccfae6157e94b7ddf61d9597936e
6,011
from bs4 import BeautifulSoup

def get_html_text(html):
    """Return the raw text of an ad."""
    if html:
        doc = BeautifulSoup(html, "html.parser")
        return doc.get_text(" ")
    return ""
14353f368078ea6b1673d1066b0a529cc3e257d9
6,012
def valid_parentheses(string):
    """Take a string of parentheses and determine if the order of the
    parentheses is valid.

    :param string: a string of parentheses and characters.
    :return: True if the string is valid, and False if it's invalid.
    """
    stack = []
    for x in string:
        if x == "(":
            stack.append(x)
        elif x == ")":
            if len(stack) > 0:
                stack.pop()
            else:
                return False
    return not stack
e8438404c461b7a113bbbab6417190dcd1056871
6,013
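Quick checks for valid_parentheses (the strings are illustrative):

assert valid_parentheses("(hi)()")   # balanced, so truthy
assert not valid_parentheses(")(")   # closes before opening
assert not valid_parentheses("((")   # left unclosed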