Columns: content (string, lengths 35 to 416k) · sha1 (string, length 40) · id (int64, 0 to 710k)
def ToHex(data):
    """Return a string representing data in hexadecimal format."""
    s = ""
    for c in data:
        s += ("%02x" % ord(c))
    return s
1566962a89967ae0d812ef416d56867a85d824fb
9,978
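A quick usage sketch for ToHex above; the input is a made-up example, and str input is assumed (on Python 3, iterating bytes yields ints, so ord() would fail there).

assert ToHex("AB") == "4142"   # 'A' -> 0x41, 'B' -> 0x42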
def _parse_common(xml, the_dict):
    """
    Parse things in common for both variables and functions.

    This should be run after a more specific function like _parse_func
    or _parse_variable because it needs a member dictionary as an input.

    Parameters
    ----------
    xml : etree.Element
        The xml representation for the member you would like to parse
    the_dict : dict
        The dictionary that has already been filled with more specific
        data. This dictionary is modified in-place and an updated version
        is returned.

    Returns
    -------
    the_dict : dict
        The member dictionary that has been updated with the
        briefdescription and definition keys.
    """
    # Find brief description
    mem_bd = xml.find('briefdescription')
    try:
        mem_bdstr = mem_bd.find('para').text
        mem_bdstr = mem_bdstr if mem_bdstr is not None else ''
    except AttributeError:
        mem_bdstr = ''
    the_dict['briefdescription'] = mem_bdstr

    # add member definition
    the_dict['definition'] = xml.find('definition').text

    return the_dict
a6606366ce9b0e4d2c848b16bc868532684b4abe
9,979
def elementWise(A, B, operation):
    """
    Apply an operation element-wise and return the result.

    A and B are lists of lists (all lists of the same lengths).
    operation is a function of two arguments with one return value.
    """
    return [[operation(x, y) for x, y in zip(rowA, rowB)]
            for rowA, rowB in zip(A, B)]
39e78ca7730bf8367daf3a55aeb617b2c0707a44
9,981
def find_spaces(string_to_check):
    """Returns a list of string indexes, one for each space found.

    Args:
        string_to_check (string): The string to scan.

    Returns:
        A list of string indexes.
    """
    spaces = list()
    for index, character in enumerate(string_to_check):
        if character == ' ':
            spaces.append(index)
    return spaces
8bcd1d9911efab3c65e08524293b11afd449efa0
9,982
import re


def CombineLogFiles(list_of_lists, logger):
    """Splices together multiple logcats from the same device.

    Args:
        list_of_lists: list of pairs (filename, list of timestamped lines)
        logger: handler to log events

    Returns:
        list of lines with duplicates removed
    """
    cur_device_log = ['']
    for cur_file, cur_file_lines in list_of_lists:
        # Ignore files with just the logcat header
        if len(cur_file_lines) < 2:
            continue
        common_index = 0
        # Skip this step if list just has empty string
        if len(cur_device_log) > 1:
            try:
                line = cur_device_log[-1]
                # Used to make sure we only splice on a timestamped line
                if re.match(r'^\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} ', line):
                    common_index = cur_file_lines.index(line)
                else:
                    logger.warning('splice error - no timestamp in "%s"?', line.strip())
            except ValueError:
                # The last line was valid but wasn't found in the next file
                cur_device_log += ['***** POSSIBLE INCOMPLETE LOGCAT *****']
                logger.info('Unable to splice %s. Incomplete logcat?', cur_file)

        cur_device_log += ['*' * 30 + ' %s' % cur_file]
        cur_device_log.extend(cur_file_lines[common_index:])

    return cur_device_log
a749e34e63163c8493e0d4923e78367a6396327f
9,983
def lcs_naive(first: str, index_f: int, second: str, index_s: int) -> int:
    """
    Time Complexity: O(2^n)
    """
    if index_f < 0 or index_s < 0:
        return 0

    if first[index_f] == second[index_s]:
        return 1 + lcs_naive(first, index_f - 1, second, index_s - 1)

    return max(
        lcs_naive(first, index_f - 1, second, index_s),
        lcs_naive(first, index_f, second, index_s - 1),
    )
be2ef4013732d75ab14c69e8f93d38cc7f011e7c
9,984
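A small sanity check for lcs_naive, assuming the indices passed in are len(s) - 1 for each string:

# "ace" is the longest common subsequence of "abcde" and "ace" -> length 3
assert lcs_naive("abcde", 4, "ace", 2) == 3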
from typing import Dict
from typing import List
from typing import Tuple
from pathlib import Path
import os
import subprocess


def verify(signify: Dict[str, str], snapshot: Path,
           filename: str = "") -> Tuple[bool, List[str]]:
    """Verify the integrity of a given snapshot with signify.

    signify -- a dict with signify key and signify signed SHA256 checksums file
    snapshot -- the directory where the snapshot is stored
    filename -- the name of a file for a single file verification
    """
    os.chdir(snapshot)
    # Derive the release number from e.g. "base70.tgz". removesuffix/
    # removeprefix (Python 3.9+) strip exact affixes, unlike rstrip/lstrip
    # which strip character sets and can eat too many characters.
    snapshot_release = list(
        Path('.').glob('base*.tgz')
    )[0].as_posix().removesuffix('.tgz').removeprefix('base')
    # Append only the key file name: joining with an absolute second
    # component would silently discard key_dir.
    signify_key = Path(
        signify['key_dir'], f"openbsd-{snapshot_release}-base.pub"
    ).as_posix()
    command = f"signify -Cp {signify_key} -x {signify['file']} {filename}"
    status = subprocess.getstatusoutput(command)
    failed = [
        i.split(":")[0] for i in status[1].split(os.linesep)
        if i.endswith("FAIL")
    ]
    return status[0] == 0, failed
785b476967f15203362ea99cea06d8e2d94b3522
9,985
import shelve
import contextlib


def readMirror(fileLocation):
    """Returns saved Model (mirror) from Pickled file at 'fileLocation'

    Needed to transfer model from IPY to PY3
    """
    with contextlib.closing(shelve.open(fileLocation, 'r')) as shelf:
        mir = shelf['mir']
    print("*** Mirror read from %s" % fileLocation.split('\\')[-1])
    return mir
f6d6c56dbe99d544ccdc041f35e48e87883166c8
9,986
def insertionsort(x, count=False):
    """
    For each element e of x, move through the array until you come to a
    value that is less than e, or the end of the array, then place e at
    the new location.
    """
    assignments, conditionals = 0, 0
    for i in range(1, len(x)):
        element = x[i]
        j = i - 1
        assignments += 2
        while j > -1 and x[j] > element:
            conditionals += 2
            x[j + 1] = x[j]
            j -= 1
            assignments += 2
        x[j + 1] = element
        assignments += 1
    if not count:
        return x
    else:
        return assignments, conditionals
4a8ae9bda1dfee0cb41ae7544cb45d089e11f703
9,988
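A usage sketch for insertionsort above; the input list is a made-up example.

data = [3, 1, 2]
assert insertionsort(list(data)) == [1, 2, 3]
# With count=True the operation counters are returned instead of the list.
assignments, conditionals = insertionsort(list(data), count=True)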
import requests


def get_assimilator_data(mode, assimilator, text, link):
    """Get a parsed web/local document from a Tika service.

    mode: get metadata or content body
    assimilator: api address of the text assimilator, running apache tika
    text: piece of raw text
    link: web url to a html page or binary file
    """
    payload = {'mode': mode}
    # Prepare data: if it is text, pass it as is, else a dictionary with link
    if text:
        data = text
    else:
        data = {'link': link}
    # Prepare url for assimilator
    url = "%s/parse/" % assimilator
    request = requests.post(url, params=payload, data=data)
    return request.content
4af3970f8fd51e565c0d6bc5c2e92bf29eac98f5
9,989
import requests
import logging


def execute_dax_query(credential, dataset_id, daxQuery):
    """Execute DAX query"""
    url = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}/executeQueries"
    try:
        token = credential.get_token("https://analysis.windows.net/powerbi/api/.default").token
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {token}"}
        r = requests.post(url, headers=headers, data=daxQuery)
        r.encoding = 'utf-8-sig'
        return r
    except Exception as e:
        logging.exception(e)
        raise
9adcd81d0bcfc642b0c0336d351f5ad9b71aad7a
9,990
def get_items(wiki, only_undone=False) -> list:
    """ all items page list """
    items = []
    for page in wiki.pages():
        content: str = wiki.page_info(page["pageid"])["*"]
        if content.startswith("{{面包屑|物品信息"):
            if only_undone and content.find("{{施工中}}") == -1:
                continue
            page["undone"] = content.find("{{施工中}}") > -1
            page["category"] = content[11:13]
            items.append(page)
    return items
1637c04aa61f5efe882af6c7f1bed80c3cde3185
9,992
from typing import Any


def _attempt_cast_as_float(value: Any) -> float:
    """
    :param value: a value
    :return: the value as a float if casting is possible, otherwise 1.0
    """
    try:
        return float(value)
    except (ValueError, TypeError):
        return 1.0
8616bf4d59a08f8e98d26231f7aa51d419d52550
9,993
def addr_generator(start_ip, port, count):
    """Generate a list of (ip, port) pairs."""
    def tostr(ip):
        return '.'.join([str(_) for _ in ip])

    ip = [int(_) for _ in start_ip.split('.')]
    addr_list = [(tostr(ip), port)]
    for i in range(count - 1):
        ip[-1] += 1
        addr_list.append((tostr(ip), port))
    return addr_list
100288629f35d9108e0b364266242da0110dd8f7
9,994
def filter_threshold(delta_f, threshold_value):
    """
    Return a boolean mask that is True where the data values are above
    the acceptable level
    """
    return delta_f >= threshold_value
a0c34eba86a0fb4539174d75f2cde19994aaa8cf
9,995
import random
import time


def retry(func, *args, **kwargs):
    """Repeats a function until it completes successfully or fails too often.

    Args:
        func: The function call to repeat.
        args: The arguments which are passed to the function.
        kwargs: Key-word arguments which are passed to the function.

    Returns:
        What func returns.

    Exceptions:
        RuntimeError when number of retries has been exceeded.
    """
    # config
    backoff = 1. + random.random() * 0.1
    max_backoff = 32
    max_retries = 5

    # try to make the request
    for i in range(max_retries):
        try:
            # return on success
            return func(*args, **kwargs)
        except Exception:
            # sleep on failure
            time.sleep(backoff)
            backoff = 2 * backoff if backoff < max_backoff else backoff

    # max retries exceeded
    raise RuntimeError('The connection to the server timed out.')
cf6f7d3e434b54cf178b2867f7565f338e985968
9,996
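A usage sketch for retry with a hypothetical flaky callable (note that this really sleeps for a few seconds because of the backoff):

attempts = {"n": 0}

def flaky():
    # Deliberately fails twice, then succeeds.
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise ValueError("transient failure")
    return "ok"

assert retry(flaky) == "ok"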
def test_custom_timeout(monkeypatch, fake_response, aqhttp):
    """Timeout should override from the aqhttp.post method."""

    class FakeRequest:
        def __init__(self, status_code=200, url="myfakeurl.com", method="post"):
            self.status_code = status_code
            self.url = url
            self.method = method
            self.body = {}

    # A fake requests sessions object
    class mock_request:
        @staticmethod
        def request(method, path, timeout=None, **kwargs):
            assert timeout == 0.1
            return fake_response(method, path, {}, 200)

    monkeypatch.setattr("pydent.aqhttp.requests", mock_request)
    aqhttp.post("someurl", timeout=0.1, json_data={})
6b03e8f6124557119ef8b30191ab6d35f3d3e002
9,997
import pytz


def utc_to_local(utc_dt, local_tz):
    """Accepts a datetime object in UTC time, and returns the same time,
    in the timezone that was also passed"""
    local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)
    return local_tz.normalize(local_dt)
15ade0c4d1b732b4fd9ef5c2f7de0eb32a3c6936
9,999
def _ScatterAddNdimShape(unused_op):
    """Shape function for ScatterAddNdim Op."""
    return []
7c59fb40e177fea1bf1cd3970c364f4583fc37f9
10,000
def find_delimiter_in(value):
    """Find a good delimiter to split the value by"""
    for d in [';', ':', ',']:
        if d in value:
            return d
    return ';'
4a60fbe6294fff048645a6fbc397ec96fd748d67
10,002
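A quick check of find_delimiter_in; delimiters are tried in priority order ';', ':', ',' before falling back to ';':

assert find_delimiter_in("a;b;c") == ';'
assert find_delimiter_in("a,b,c") == ','
assert find_delimiter_in("abc") == ';'   # no delimiter present -> default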
import argparse


def parse_args():
    """Parse command-line arguments."""
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument('-v', '--verbose', action='count', help='increase verbosity')
    p.add_argument('-f', '--featlist', default='feats.txt')
    p.add_argument('-s', '--samplelist', default='all')
    p.add_argument('-o', '--outdir', default='figs')
    return p.parse_args()
1995f18e1c9195f88912d9f0b5f1b660ff302739
10,003
def drift_stability_ind(
    missing_recs_drift, drift_tab, missing_recs_stability, stability_tab
):
    """
    This function helps to produce the drift & stability indicators for
    further processing. Ideally, data with both drift & stability should
    produce the indicators (1, 1).

    Parameters
    ----------
    missing_recs_drift
        Missing files from the drift tab
    drift_tab
        "drift_statistics"
    missing_recs_stability
        Missing files from the stability tab
    stability_tab
        "stability_index, stabilityIndex_metrics"

    Returns
    -------
    Tuple of (drift_ind, stability_ind)
    """
    if len(missing_recs_drift) == len(drift_tab):
        drift_ind = 0
    else:
        drift_ind = 1

    if len(missing_recs_stability) == len(stability_tab):
        stability_ind = 0
    elif ("stabilityIndex_metrics" in missing_recs_stability) and (
        "stability_index" not in missing_recs_stability
    ):
        stability_ind = 0.5
    else:
        stability_ind = 1

    return drift_ind, stability_ind
6964f0b258d0c531e242b47ccc864b54c72d4dd0
10,004
import time


def sample_function(integer1: int, integer2: int):
    """
    This function takes as input two integers (integer1 and integer2),
    accumulates the sum of i + j over a nested loop of their ranges,
    and times the computation.

    Parameters:
    -----------
    integer1 (int)
        a number greater than or equal to zero
    integer2 (int)
        a number greater than or equal to zero

    Returns:
    --------
    iter_sum (int)
        the result of the nested iterative sum
    total_time (float)
        total time in seconds it took to complete the computation
    """
    # Time operation
    start_time = time.time()

    # Initialize sum
    iter_sum = 0

    # Iterate over integers in a nested for loop
    for i in range(integer1):
        for j in range(integer2):
            # Perform sum
            iter_sum = iter_sum + i + j

    # Time how long it took
    total_time = time.time() - start_time

    return [iter_sum, total_time]
2e3ff92a398adda8cc558c8a20f939614b0e0b94
10,005
import functools


def with_color(color_code: str):
    """Coloring decorator

    Arguments:
        color_code {str} -- e.g.: '\033[91m'
    """
    def wrapper(func):
        @functools.wraps(func)
        def inner(args):
            result = func(f'{color_code}{args}\033[0m')
            return result
        return inner
    return wrapper
3f5ecd79b3d4579ba4348b2492eaaa3688201907
10,006
def over(funcs):
    """Creates a function that invokes all functions in `funcs` with the
    arguments it receives and returns their results.

    Args:
        funcs (list): List of functions to be invoked.

    Returns:
        function: Returns the new pass-thru function.

    Example:

        >>> func = over([max, min])
        >>> func(1, 2, 3, 4)
        [4, 1]

    .. versionadded:: 4.0.0
    """
    def _over(*args):
        return [func(*args) for func in funcs]

    return _over
6cd1a966366ee372c18dd35d71adf91028a04b1c
10,008
def load_labels_map(labels_map_path):
    """Loads the labels map from the given path.

    The labels map must be in the following plain text format::

        1:label1
        2:label2
        3:label3
        ...

    The indexes are irrelevant to this function, they can be in any order
    and can start from zero, one, or another number.

    Args:
        labels_map_path: the path to a labels map file

    Returns:
        a dictionary mapping indexes to label strings
    """
    labels_map = {}
    with open(labels_map_path, "r") as f:
        for line in f:
            # Split on the first ":" only, so labels may contain colons
            idx, label = line.split(":", 1)
            labels_map[int(idx)] = label.strip()
    return labels_map
8ff1e41b87fedffa053981299c48488add754ff9
10,009
import math


def bbox_to_integer_coords(t, l, b, r, image_h, image_w):
    """
    t, l, b, r: float
        Bbox coordinates in a space where image takes [0; 1] x [0; 1].
    image_h, image_w: int

    return: t, l, b, r
        int
        Bbox coordinates in given image's pixel space.
        C-style indices (i.e. `b` and `r` are exclusive).
    """
    # Vertical coordinates scale with height, horizontal with width.
    t *= image_h
    b *= image_h
    l *= image_w
    r *= image_w

    l, t = map(math.floor, (l, t))
    r, b = map(math.ceil, (r, b))

    # After rounding, make *exactly* square again
    b += (r - l) - (b - t)
    assert b - t == r - l

    # Make `r` and `b` C-style (=exclusive) indices
    r += 1
    b += 1
    return t, l, b, r
90fb198e2d6cd170a2e7a2b648f15554a9997389
10,010
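A usage sketch for bbox_to_integer_coords; the bbox spans the central quarter of a hypothetical 200x200 image:

t, l, b, r = bbox_to_integer_coords(0.25, 0.25, 0.75, 0.75, image_h=200, image_w=200)
assert (t, l) == (50, 50) and (b - t) == (r - l)   # exclusive, exactly square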
def get_attrib_recursive(element, *attribs):
    """Find the first attribute in attribs in element or its closest ancestor
    that has any of the attributes in attribs.

    Usage examples:
        get_attrib_recursive(el, "fallback-langs")
        get_attrib_recursive(el, "xml:lang", "lang")

    Args:
        element: an etree element where to search for attributes in attribs
        attribs: one or more attribute label(s) to search for

    Returns:
        the value of the first attribute in attribs found in element or the
        closest ancestor that has any of the attributes in attribs, or None
    """
    for attrib in attribs:
        # We could also use element.attrib[attrib] instead of xpath, but that
        # only works for attributes without a namespace, like attrib="lang",
        # while xpath also works for attributes with a namespace, like
        # attrib="xml:lang".
        path = element.xpath("./@" + attrib)
        if path:
            return path[0]
    if element.getparent() is not None:
        return get_attrib_recursive(element.getparent(), *attribs)
    else:
        return None
d04ba71a280bd1697cc61b79af21df46023473d2
10,012
def find_extra_inferred_properties(spec_dict: dict) -> list:
    """Finds if there are any inferred properties which are used.

    Args:
        spec_dict: Dict obj containing configurations for the import.

    Returns:
        List of properties that appear in inferredSpec but are not part of
        'pvs' section.
    """
    ret_list = []
    if 'inferredSpec' in spec_dict:
        for property_name in spec_dict['inferredSpec']:
            if property_name not in spec_dict['pvs']:
                ret_list.append(property_name)
    return ret_list
3be2950a227cfca8d0ab4c4413322f8aa8b22cc0
10,013
import numpy


def calculate_idf(list_books):
    """
    IDF = ln((total number of docs) / (number of docs in which the word is present))

    :param list_books: list of all the books
    :return: IDF of all the books
    """
    total_documents = len(list_books)  # total number of documents
    master_map = {}
    for book in list_books:
        for word in book.tf:
            if word in master_map:
                master_map[word] += 1
            else:
                master_map[word] = 1
    for word in master_map:
        word_count = master_map[word]
        master_map[word] = numpy.log(total_documents / word_count)
    return master_map
294d4a437b2899c601f6269b4a6837595c73f83c
10,014
def find_corresponding_basins(pfaf_id_level6, gdf_level7):
    """
    Using a pfaf_id from level 6, find all hydrobasins in level 7
    that make up the hydrobasin level 6 polygon.
    """
    pfaf_id_level7_min = pfaf_id_level6 * 10
    pfaf_id_level7_max = pfaf_id_level7_min + 9

    gdf_level7_selection = gdf_level7.loc[
        (gdf_level7["PFAF_ID"] >= pfaf_id_level7_min)
        & (gdf_level7["PFAF_ID"] <= pfaf_id_level7_max)
    ]
    return gdf_level7_selection
d8952abfb681fc2b1c33d53b930fff6c56f6bc0a
10,015
def get_content_function_ratio(content, function):
    """ Calculate the content-function word ratio. """
    ratio = float(len(content)) / float(len(function)) if len(function) != 0 else 0
    return round(ratio, 4)
e82109ed1fd2a3f3136945c4598358efdc0985e9
10,016
import subprocess


def query():
    """
    Query the attached TEMPer device via the temper_query command and
    parse the result.
    """
    p = subprocess.run(['/usr/local/bin/temper_query'], stdout=subprocess.PIPE)
    p.check_returncode()
    temp = float(p.stdout.decode(encoding='ascii').strip())
    return temp
c1b00f79b78b73f46f65fa7f4a98695e4501710e
10,017
def test_decorator(f):
    """Decorator that does nothing"""
    return f
b60b815e336a3f1ca3f12712a2d1d207a5fe110c
10,018
def find_machine_id(agents, host):
    """
    :param agents: Array of mesos agents properties (machine_id + additional infos)
    :type: list of dict
    :param host: Host to find. Can be ip, hostname or agent ID
    :type: string
    :returns: a machine_id
    :rtype: dict
    """
    for agent in agents:
        # down agent
        if 'state' in agent:
            if agent['state'] == "DOWN":
                agent['id'] = ""
        if host in [agent['hostname'], agent['ip'], agent['id']]:
            return {"hostname": agent['hostname'], "ip": agent['ip']}
    return None
706bb16d02aec1fe6844f3ed49b13e6fe54a08a6
10,019
def render_report(jobs_with_error):
    """Build a text report for the jobs with errors
    """
    output = []
    for job in jobs_with_error:
        errors_count = job.info.get('errors_count', 0)
        close_reason = job.info.get('close_reason')
        job_id = job.info["id"].split('/')
        url = 'https://app.scrapinghub.com/p/{0}/job/{1}/{2}'.format(
            job_id[0], job_id[1], job_id[2])

        error_message = ['Errors found for job "{0}" ({1}):'.format(
            job.info['spider'], url)]

        if errors_count > 0:
            error_message.append('    There were {} error{}.'.format(
                errors_count, '' if errors_count == 1 else 's'))

        success_reasons = ('no_reason', 'finished')
        if close_reason not in success_reasons:
            error_message.append('    Close reason should not be "{}".'.format(
                close_reason))

        output.append('\n'.join(error_message))

    return '\n\n'.join(output)
42c0ef405b3b684830433aa5ddd38d4283c7472c
10,021
def in_range(target, bounds):
    """
    Check whether target integer x lies within the closed interval [a, b]
    where bounds (a, b) are given as a tuple of integers.

    Returns boolean value of the expression a <= x <= b
    """
    lower, upper = bounds
    return lower <= target <= upper
ac9dee9092388d150611ab5e1a4a800b72cf8f83
10,022
def valid_chrom():
    """
    Valid chromosomes should be 1 - 22, X, Y, and MT.
    Validity is not checked or enforced
    """
    return '1'
050baa71f61eaa1f8160953aacfe3f1cd0193899
10,024
def annotateTree(bT, fn):
    """
    annotate a tree in an external array using the given function
    """
    l = [None] * bT.traversalID.midEnd

    def fn2(bT):
        l[bT.traversalID.mid] = fn(bT)
        if bT.internal:
            fn2(bT.left)
            fn2(bT.right)

    fn2(bT)
    return l
c254a1258e0bc4b0bbbe17c9a2830b955f6a7a55
10,025
import json
import requests


def save_request(url, output_file):
    """
    Attempts to read from file. If there's no file, then it will
    save the contents of a url in json/html
    """
    # check for cached version
    try:
        with open(output_file) as fp:
            data = fp.read()
        try:
            data = json.loads(data)
        except ValueError:
            pass
    except OSError:
        # otherwise redownload and parse
        page = requests.get(url)
        with open(output_file, 'w') as fp:
            try:
                data = page.json()
                fp.write(json.dumps(data))
            except Exception:
                data = page.text
                fp.write(data)
    return data
79d20b101b0fe811ef9129a701c6f5487e2c8a84
10,026
def flatten_array(grid):
    """
    Takes a multi-dimensional array and returns a 1 dimensional array with
    the same contents.
    """
    grid = [grid[i][j] for i in range(len(grid)) for j in range(len(grid[i]))]
    while type(grid[0]) is list:
        grid = flatten_array(grid)
    return grid
4c0361cf8e63d7608b4213ddd8f8a4c498282dcf
10,027
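A quick check of flatten_array on uniformly nested inputs (the while-loop only inspects grid[0], so uniform nesting is assumed):

assert flatten_array([[1, 2], [3, 4]]) == [1, 2, 3, 4]
assert flatten_array([[[1], [2]], [[3], [4]]]) == [1, 2, 3, 4]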
import torch


def cosine_sim(x1, x2, dim=1, eps=1e-8):
    """Returns cosine similarity between x1 and x2, computed along dim."""
    # as_tensor accepts both tensors and array-likes without copying tensors
    x1 = torch.as_tensor(x1)
    x2 = torch.as_tensor(x2)
    w12 = torch.sum(x1 * x2, dim)
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
a4992b3f3a4a483c96a5b18bbc3402df70a8b44d
10,028
import os


def get_tile_num(fname, key='tile'):
    """
    Given 'key' extract 'num' from 'key_num' in string.
    """
    l = os.path.splitext(fname)[0].split('_')  # fname -> list
    i = l.index(key)
    return int(l[i + 1])
f0bd167bef38834609cda377aa7f28b06be79cac
10,032
def astype(value, types=None):
    """Return argument as one of types if possible."""
    if value[0] in '\'"':
        return value[1:-1]
    if types is None:
        types = int, float, str
    for typ in types:
        try:
            return typ(value)
        except (ValueError, TypeError, UnicodeEncodeError):
            pass
    return value
47d066d9d4bb5b0b96216cc722c6896d6fdcf1a4
10,033
def _minmax(*args):
    """
    Return the min and max of the input arguments
    """
    min_ = min(*args)
    max_ = max(*args)
    return min_, max_
9985ebbffd3ee0b03dc751a3c90db00e922ab489
10,034
def distance0(cell1, cell2):
    """Return 0 distance for A* to behave like Dijkstra's algorithm."""
    return 0
7850364b245afd304e3aa3ad443d7af74b36df79
10,036
def adjacent(g, node, n):
    """
    find all adjacent nodes of input node in g

    g: 2D array of numbers, the adjacency matrix
    node: int, the node whose neighbors you want to find
    n: int, the number of nodes in g

    return: a list of ints
    """
    result = []
    for i in range(n):
        if g[node][i] != 0:
            result.append(i)
    return result
630160dbee314ed85980ac8bd825e96cd33765f4
10,037
import os


def dirname(string):
    """
    Return the dir name from a full path file.
    """
    return os.path.dirname(string)
f60da77df69431b05ef59f035a3306b63c2a6deb
10,038
def split_integer(num, parts):
    """
    Split a number into the given number of parts that are as equal as possible.

    https://stackoverflow.com/questions/55465884/how-to-divide-an-unknown-integer-into-a-given-number-of-even-parts-using-python
    """
    quotient, remainder = divmod(num, parts)
    lower_elements = [quotient for i in range(parts - remainder)]
    higher_elements = [quotient + 1 for j in range(remainder)]
    return lower_elements + higher_elements
bd6a74dc409cef3f6e397579eeb15e6c9cde69a6
10,039
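A quick sanity check for split_integer above (the values are made-up examples):

assert split_integer(10, 3) == [3, 3, 4]   # parts sum back to 10
assert split_integer(9, 3) == [3, 3, 3]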
def complete_sulci_name(sulci_list, side):
    """Function gathering sulci and side to obtain full names of sulci

    It reads sulci prefixes from a list and adds a suffix depending on
    a given side.

    Args:
        sulci_list: a list of sulci
        side: a string corresponding to the hemisphere, whether 'L' or 'R'

    Returns:
        full_sulci_list: a list with full sulci names, ie with side included
    """
    if any("right" in s for s in sulci_list) or any("left" in s for s in sulci_list):
        return sulci_list
    else:
        side = 'right' if side == 'R' else 'left'
        suffix = '_' + side
        if isinstance(sulci_list, list):
            full_sulci_list = []
            for sulcus in sulci_list:
                sulcus += suffix
                full_sulci_list.append(sulcus)
            return full_sulci_list
        else:
            return sulci_list + suffix
5a0e969976458b81bac01ee33cfa24bbc92659c4
10,042
def libsvm_convert_sparse_tensor(array_ids, array_values):
    """Transform the contents into TF understandable formats, which is
    similar to sparse_tensor_to_train_batch().

    Args:
        array_ids: Sparse indices.
        array_values: Sparse values.

    Returns:
        List of the transformed (ids, values).
    """
    indice_flatten_v = []
    values_flatten_v = []
    index = 0
    for i in range(0, array_ids.shape[0]):
        for j in range(0, len(array_ids[i])):
            indice_flatten_v.append([index, array_ids[i][j]])
            values_flatten_v.append(array_values[i][j])
        index += 1
    return indice_flatten_v, values_flatten_v
321625a2ecc6424b600d312dc968c67667098131
10,043
def get_unique_candidate_list(csv_data):
    """
    Returns the unique list of candidates in a dictionary, where the key is
    the candidate name and the value holds the number of votes and the vote
    percentage. The votes will all be 0 to begin.

    :param csv_data:
    :return:
    """
    candidate_dict = {}
    for row in csv_data:
        candidate_name = row[2]
        if candidate_name not in candidate_dict:
            candidate_dict[candidate_name] = {
                "votes": 0,
                "vote_percentage": ""
            }
    return candidate_dict
41dd1a0780609e0e7185e9787e0534d62d88b0af
10,047
import math


def Vabrms_calc(va, vb):
    """Inverter terminal voltage - line to line RMS"""
    return abs(va - vb) / math.sqrt(2)
aeb9b30990513f88d4a671c7de035d0a5cd64296
10,048
from typing import List
from typing import Union


def get_common_movies(your_list: List) -> Union[str, List]:
    """
    Checks if two characters were detected on the SWAPI database; if the
    result is positive, then it returns the common movies where both
    characters appeared.
    """
    if len(your_list) in [0, 1]:
        result_nochar = "| RESULT | -> One or both characters weren't found on the database."
        return result_nochar
    elif len(your_list) > 2:
        raise SystemExit("ERROR: There are more than two sublists on list.")
    else:
        films_char_1 = your_list[0]
        films_char_2 = your_list[1]
        set_results = sorted(list(set(films_char_1) & set(films_char_2)))
        if len(set_results) == 0:
            result_nomatch = "| RESULT | -> No matches were found."
            return result_nomatch
        else:
            return set_results
04969de96e1d31e5490d184e2977ba74761f2b0c
10,049
import re


def get_value(text, regex, value_type=float):
    """Dump a value from a file based on a regex passed in."""
    pattern = re.compile(regex, re.MULTILINE)
    results = pattern.search(text)
    if results:
        return value_type(results.group(1))
    else:
        print("Could not find the value {}, in the text provided".format(regex))
        return value_type(0.0)
6c4b9990dab2d8fe9f55c7a8f8c97011d3857b01
10,050
def numberofdupes(string, idx):
    """return the number of times in a row the letter at index idx is duplicated"""
    # "abccdefgh", 2 returns 1
    initial_idx = idx
    last = string[idx]
    while idx + 1 < len(string) and string[idx + 1] == last:
        idx += 1
    return idx - initial_idx
e5b9aa310c821683632dbec1ce3ab9a8bf8a08b7
10,051
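Two quick checks for numberofdupes; the strings are made-up examples:

assert numberofdupes("abccdefgh", 2) == 1   # the 'c' at index 2 repeats once more
assert numberofdupes("aaab", 0) == 2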
import functools


def ensure_cls(cls, *containers, key_converter=None):
    """If the attribute is an instance of cls, pass, else try constructing."""

    def converter(val):
        val = val if isinstance(val, cls) else cls(**val)
        return val

    def converter_list(converter, val):
        return [converter(item) for item in val]

    def converter_dict(converter, val):
        return {key_converter(key) if key_converter else key: converter(value)
                for key, value in val.items()}

    if containers:
        for container in reversed(containers):
            if container is list:
                converter = functools.partial(converter_list, converter)
            if container is dict:
                converter = functools.partial(converter_dict, converter)

    return converter
e243b19ff0d673c4792d6d234313da9373cad065
10,052
def linear_curve(t, a, b):
    """ fit data to linear model """
    return a * t + b
da224450cffde96780268ad8d4abe4c754d55b46
10,055
def _get_channel_setting() -> dict:
    """Default config format for each channel

    :return dict channel_setting : Default config format for each channel
    """
    channel_setting = {
        "block_versions": {
            "0.1a": 0
        },
        "hash_versions": {
            "genesis": 1,
            "0x2": 1,
            "0x3": 1
        },
        "load_cert": False,
        "consensus_cert_use": False,
        "tx_cert_use": False,
        "key_load_type": 0,
    }
    return channel_setting
94f19f0647b08db188aefc5e4899fc6d329f0e42
10,056
def _decline(di, t, qoi, b):
    """Arps' equation for general decline in a well

    - qoi: initial rate of production
    - di: initial decline rate
    - b: curvature (b=0 exponential)

    Note: the hyperbolic form below divides by b, so the exponential
    (b=0) case must be handled by the caller.
    """
    return qoi / ((1 + b * di * t) ** (1 / b))
86368cef4d7d24bff738d5fe4acde2a1a9fb8922
10,057
import hashlib


def gen_robohash(s, size=200):
    """Return URL for robohash pic for sha1 hash of string ``s``"""
    # hashlib requires bytes, so encode the string first
    h = hashlib.sha1(s.encode('utf-8')).hexdigest()
    return ('http://robohash.org/{0}.png?size={1}x{1}&bgset=bg2&set=set1'
            .format(h, size))
f4ac71b54801a3b86e3ec4e0cf60f25a9cae5990
10,058
import re


def start():
    """\\A: Match the start of the text."""
    regex = re.compile(r'\Aa', flags=re.MULTILINE)
    match_start = regex.search("album")
    match_starting_newline = regex.search("\nart")
    return match_start and not match_starting_newline and match_start.string
9aa89978b122cd5dc3d3c00d3217e731636c2f71
10,061
import os

import pandas


def calculate_wer_per_cat(df, category='category', id='', kind=False):
    """
    Calculates the WER for every unique value of a certain column.

    Args:
        df: the pandas dataframe
        category: name of a column in the pandas data frame; for each unique
            value in this column we return the wer.
        kind: name of the asr-tool

    Returns:
        df_out: a pandas dataframe with the word error rates for each value
            in the category

    >>> from minimock import mock
    >>> mock('pandas.DataFrame.to_csv')
    >>> DUMMY_DF = pandas.DataFrame({'wer':[0.2,0.4,0.1],'ref_words':[10,2,5],
    ...                              'product':[2,0.8,0.5],'category':['aap','banaan','aap']})
    >>> calculate_wer_per_cat(DUMMY_DF)
    Called pandas.DataFrame.to_csv(
        '\\\\input\\\\results\\\\results_category_.csv',
        index=False)
      category  ref_words   WER   kind
    0      aap         15  0.17  False
    1   banaan          2  0.40  False
    """
    # Group on the column named by the `category` parameter
    df_out = df.groupby(category, as_index=False).agg({'ref_words': 'sum',
                                                       'product': 'sum'})
    df_out['WER'] = (df_out['product'] / df_out['ref_words']).round(2)
    df_out = df_out.drop('product', axis=1)
    df_out['kind'] = kind
    df_out.to_csv(os.path.join(os.path.sep, 'input', 'results',
                               f'results_{category}_{id}.csv'), index=False)
    return df_out
fc4c3cddb3f3db80eb0a9021105f03f0fa248dc3
10,062
from itertools import chain


def flatten_list(lst):
    """
    :param lst: original list
    :return: flattened list
    """
    return list(chain(*lst))
5f0f796a543f0364cdb238ede40b6f021e698ba6
10,063
def function_with_cpp_args_kwargs(a, b, c=None, d=3, e=(None, "test")):
    """this is a docstring
    """
    __cpp__ = """
    return Py_BuildValue("(O,O,O,O,O)", a, b, c, d, e);
    """
    return None
caf7622a4031bfee704f4ed3d0bcaa2a0591b52d
10,064
from typing import Any

import attr


def validated() -> Any:
    """Decorate an entity to handle validation.

    This will let ``attrs`` manage the class, using slots for fields, and
    forcing attributes to be passed as named arguments (this allows to not
    have to define all required fields first, then optional ones, and
    resolves problems with inheritance where we can't handle the order)

    Returns
    -------
    type
        The decorated class.

    Examples
    --------
    >>> from isshub.domain.utils.entity import required_field, validated, BaseEntity
    >>>
    >>> @validated()
    ... class MyEntity(BaseEntity):
    ...     my_field: str = required_field(str)
    >>>
    >>> MyEntity.__slots__
    ('my_field',)
    >>>
    >>> instance = MyEntity()
    Traceback (most recent call last):
        ...
    TypeError: __init__() missing 1 required keyword-only argument: 'my_field'
    >>> instance = MyEntity(my_field='foo')
    >>> instance.my_field
    'foo'
    >>> instance.validate()
    >>> instance.my_field = None
    >>> instance.validate()
    Traceback (most recent call last):
        ...
    TypeError: ("'my_field' must be <class 'str'> (got None that is a <class 'NoneType'>)...
    """
    return attr.s(slots=True, kw_only=True, eq=False)
6903d654e74b6bd6fc2d062755823043125c922a
10,065
import uuid
import requests
import json


def create_vendor(config):
    """
    POST request to create a Vendor in QBO

    Refer here for other Vendor fields:
    https://developer.intuit.com/docs/api/accounting/vendor
    """
    url = config['qbo_base_url'] + '/v3/company/' + config['realm_id'] + '/vendor?minorversion=12'

    vendor = {
        "DisplayName": "Vendor_demo_" + str(uuid.uuid4()),
        "CompanyName": "ABC Designing Firm",
        "PrimaryPhone": {
            "FreeFormNumber": "123-445-6789"
        },
        "PrimaryEmailAddr": {
            "Address": "[email protected]"
        },
        "BillAddr": {
            "Line1": "123 Mary Ave",
            "City": "Sunnyvale",
            "CountrySubDivisionCode": "CA",
            "Country": "USA",
            "PostalCode": "1111"
        }
    }

    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": "Bearer " + config['access_token']
    }

    r = requests.post(url, headers=headers, data=json.dumps(vendor))
    print(r.status_code)
    print(r.content)

    try:
        response = r.json()["Vendor"]
    except Exception:
        response = r.content

    return r.status_code, response
c41a2bb5d589a5fa31eed3dda8506d49bcf42c57
10,066
def guess_game_name_from_clone_arg(clone_arg: str) -> str:
    """Guess the name of the game from the --clone argument.

    :arg clone_arg: The str given by the user for the clone argument.
        Usually a git link such as `https://github.com/ScienceGamez/world-3.git`.
    """
    if "/" in clone_arg:
        # If url, probably the last value is used as game name
        last_value = clone_arg.split("/")[-1]
        # The last value might have a . inside
        return last_value.split('.')[0]
    else:
        return clone_arg
6a1203f966beb566195ae51960d727376b099cc1
10,067
def partition(arr: list, start: int, end: int) -> int:
    """
    Choosing last element as pivot
    """
    i, j = start - 1, start
    while j < end:
        if arr[j] < arr[end]:
            i += 1
            arr[i], arr[j] = arr[j], arr[i]
        j += 1
    i += 1
    arr[i], arr[end] = arr[end], arr[i]
    return i
766c8596aa434ede7ee269e75530859b06e1b59f
10,069
def keep_type(value):
    """
    Keep hook returned value type

    :param value: Value to be returned
    :return: Any
    """
    return value
f5048e2863aca6eced8c3be76a3623fd5f3a808e
10,071
def varType(row):
    """Variants should be decomposed, and normalized using vt before
    applying this function"""
    c1 = len(row['REF']) == len(row['ALT'])
    c2 = ',' in row['REF']
    c3 = ',' in row['ALT']
    if c2 or c3:
        return 'other'
    if c1:
        return 'snp'
    else:
        return 'indel'
5870eb1c1fb13beb7e8e9756fa99de19df1f93ee
10,072
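A quick usage sketch for varType; the row here is a hypothetical mapping with REF/ALT keys:

assert varType({'REF': 'A', 'ALT': 'T'}) == 'snp'
assert varType({'REF': 'A', 'ALT': 'AT'}) == 'indel'
assert varType({'REF': 'A', 'ALT': 'T,C'}) == 'other'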
from functools import reduce


def shekel(x):
    """
    Shekel: The modified fifth De Jong function, Equation (21) of [2]

    minimum is f(x)=0.0 at x=(-32,-32)
    """
    A = [-32., -16., 0., 16., 32.]
    a1 = A * 5
    a2 = reduce(lambda x1, x2: x1 + x2, [[c] * 5 for c in A])

    x1, x2 = x
    r = 0.0
    for i in range(25):
        r += 1.0 / (1.0 * i + pow(x1 - a1[i], 6) + pow(x2 - a2[i], 6) + 1e-15)
    return 1.0 / (0.002 + r)
c2dd86b40072cc40dc4bba1a7f5d300d174c4ad8
10,073
def desitarget_nside():
    """Default HEALPix Nside for all target selection algorithms."""
    nside = 64
    return nside
326338bcc9310725f92b74a29416812cebbd7db1
10,074
def get_table_9():
    """Table 9: Equivalent air change rate NV_l for the use of natural
    ventilation, by ventilation-use category l

    Args:

    Returns:
        list: Table 9 values of the equivalent air change rate NV_l for
        the use of natural ventilation, by ventilation-use category l
    """
    table_9 = (0.0, 5.0, 20.0)
    return table_9
ed095f6ffd6fb146caed2628d089784ef91d6217
10,075
def find_nodes(graph, query, data=False):
    """
    Iterator over all nodes matching the data query.
    """
    return ((v, graph.nodes[v]) if data else v
            for v in graph if query(graph.nodes[v]))
e8f2e25f843133446c32ca7f012ac2aa1b80d2b9
10,076
def _SigninUIState(oobe):
    """Returns the signin ui state of the oobe.

    HIDDEN: 0, GAIA_SIGNIN: 1, ACCOUNT_PICKER: 2, WRONG_HWID_WARNING: 3,
    MANAGED_USER_CREATION_FLOW: 4.

    These values are in chrome/browser/resources/chromeos/login/display_manager.js
    """
    return oobe.EvaluateJavaScript('''
        loginHeader = document.getElementById('login-header-bar')
        if (loginHeader) {
            loginHeader.signinUIState_;
        }
    ''')
355b7ebbaf0010165109c27162ca70f5168abe05
10,077
def ccd_to_list(file_path: str) -> list:
    """Converts a .ccd file into a list"""
    file = open(file_path, 'r')
    lines = file.readlines()
    file.close()
    data = []
    for index, line in enumerate(lines):
        try:
            if line.startswith(' '):
                data.append(float(line.split(' ')[-1].strip()))
            elif isinstance(int(line[0]), int):
                split_line = line.split('\t')
                if len(split_line) > 1:
                    data.append(float(split_line[-1].strip()))
                    if split_line[1].startswith('23'):
                        if int(split_line[0]) + 1 != int(lines[index + 1].split('\t')[0]):
                            print('DAY\tHOUR\tVALUE')
                            print(line, lines[index + 1])
                    else:
                        if int(split_line[1][:2]) + 1 != int(lines[index + 1].split('\t')[1][:2]):
                            print('\nDAY\tHOUR\t\tVALUE')
                            print(line.strip())
                            print('--- MISSING DATA UNTIL ---')
                            print(lines[index + 1].strip())
                else:
                    split_line = line.strip().split(' ')
                    data.append(float(split_line[-1]))
                    if split_line[1].startswith('23'):
                        if int(split_line[0]) + 1 != int(lines[index + 1].split(' ')[0]):
                            print('DAY\tHOUR\tVALUE')
                            print(line, lines[index + 1])
                    else:
                        if int(split_line[1][:2]) + 1 != int(lines[index + 1].split(' ')[1][:2]):
                            print('\nDAY\tHOUR\t\tVALUE')
                            print(line.strip())
                            print('--- MISSING DATA UNTIL ---')
                            print(lines[index + 1].strip())
        except ValueError:
            pass
    return data
12955459def48c12f0b7e6b0b889fd47df690b9d
10,078
import hmac
import hashlib


def generate_dendrite_mac(shared_secret: str, username: str, password: str,
                          admin: bool) -> str:
    """
    Generate a MAC for using in registering users with Dendrite.
    """
    # From: https://github.com/matrix-org/dendrite/blob/master/clientapi/routing/register.go
    mac = hmac.new(
        key=shared_secret.encode('utf8'),
        digestmod=hashlib.sha1,
    )

    mac.update(username.encode('utf8'))
    mac.update(b"\x00")
    mac.update(password.encode('utf8'))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")

    return mac.hexdigest()
bed29168d88db0d9b2bd0bf95f493a9d4b5e7a3f
10,080
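A usage sketch for generate_dendrite_mac; the shared secret and credentials below are placeholders, not real values:

mac = generate_dendrite_mac("registration-shared-secret", "alice", "hunter2", admin=False)
assert len(mac) == 40   # SHA-1 digests are 20 bytes -> 40 hex chars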
def _cmpPair2(a, b):
    """Auxiliary comparison for sorting lists of pairs.
    Sorting on the second member of the pair."""
    (x, y), (z, w) = a, b
    if y < w:
        return -1
    elif y > w:
        return 1
    elif x < z:
        return -1
    elif x > z:
        return 1
    else:
        return 0
5567ddc006756e224dde545503c427a3524c3c90
10,084
def toExport8F8(op):
    """Converts number to exportable 8.8 signed fixed point number."""
    return int(round(op * 256.0))
3364703913e1c87223ffb11bdaf3b622db7eef1c
10,085
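Two quick checks for toExport8F8; in 8.8 fixed point the fractional step is 1/256:

assert toExport8F8(1.5) == 384    # 1.5 * 256
assert toExport8F8(-0.5) == -128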
import math


def LogRegressNomalize(value):
    """Uses the logistic (sigmoid) function to normalize (-inf, inf) to (0, 1)."""
    value_exp = math.exp(value)
    return value_exp / (value_exp + 1)
a5d30077df70b3795754d62c6534781e61482f13
10,086
def configuration_path():
    """File created by bin/combine."""
    return 'config/swagger-combine.yml'
3d75a3630bf56dcb51cf776a609fe3d21c111ead
10,087
import argparse


def parse_command_line_arguments():
    """Parse command line arguments, checking their values."""
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--model', default="./results/bert",
                        help="the name of the model/checkpoint to be used for "
                             "the classifier (e.g. ./results/checkpoint)")
    parser.add_argument('--dataset', default="squad",
                        choices=["squad", "duorc"],
                        help="the name of the dataset to be used for testing")
    parser.add_argument('--subversion', default="",
                        choices=["", "SelfRC", "ParaphraseRC"],
                        help="the name of the subversion of the dataset, in case "
                             "'duorc' dataset is selected")
    parser.add_argument('--device', default="cpu",
                        choices=["cpu", "cuda:0", "cuda:1"],
                        help="device selected for performing the evaluation")
    parsed_arguments = parser.parse_args()
    return parsed_arguments
df9962306fac84261353fc90c61c6d2584078ff5
10,090
def print_summary(targets):
    """
    Return 0 if all tests passed
    Return 1 if all tests completed but one or more failed
    Return 2 if one or more tests did not complete or was not detected
    """
    passed = 0
    failed = 0
    tested = 0
    expected = 0
    return_code = 3

    print("-----------------------------------------------------------------------------------------------------------")

    # Find all passed and failed
    for target in targets:
        for test in target["tests"]:
            expected += 1
            if target[test]["tested"]:
                tested += 1
            else:
                print("ERROR: Test {} for target {} not found".format(test, target["name"]))
            if target[test]["pass"]:
                passed += 1
            else:
                failed += 1

    if tested != expected:
        print("ERROR: Not all tests found!")
        print("Expected: {} Actual: {}".format(expected, tested))
        return_code = 2
    elif tested == passed:
        return_code = 0
    else:
        return_code = 1

    print("Summary: {} tests in total passed on {} targets ({})".
          format(passed, len(targets), ', '.join([t['name'] for t in targets])))

    # Print those that failed
    if failed > 0:
        print()
        for target in targets:
            for test in target["tests"]:
                if not target[test]["pass"]:
                    print("{}: {} failed".format(target["name"], test))

    if passed > 0:
        print("{:.0f}% tests passed, {} tests failed out of {}".format(passed / expected * 100, failed, expected))
    else:
        print("0% tests passed, {} tests failed out of {}".format(failed, tested))

    return return_code
47d38ba3dd9fb91b1384aa6e7fcbdfe71d560261
10,093
def clean_input(text):
    """
    Text cleaning function

    :param text: text entered by the user
    :return: cleaned text with non-ASCII characters removed
    """
    # To keep things simple to start with, only ASCII characters are used
    return str(text.encode().decode("ascii", errors="ignore"))
a06f49cc5cc43516865a2b3a0b769e0802775e24
10,094
def make_node2ancestors_recursively(node, node2ancestors):
    """
    Build a dictionary whose keys are nodes and whose values are the sets
    of all ancestor nodes of each key.

    Args:
        node: the node whose ancestors you want to find
        node2ancestors: dict with key=node, value=set of all ancestors of key

    Return:
        If node has no targets: a set containing only node.
        If node is in node2ancestors.keys(): node2ancestors[node].
        Otherwise: the set of all ancestor nodes of node.
    """
    if node in node2ancestors:
        return node2ancestors[node]
    if not node.targets:
        node2ancestors[node] = set()
        return {node}
    ancestors = set()
    for target in node.targets:
        ancestors |= {target}
        ancestors |= make_node2ancestors_recursively(target, node2ancestors)
    node2ancestors[node] = ancestors
    return ancestors
cc48caf9af05bf906ac463af704c1522867ee986
10,095
def to_dict(conf):
    """Converts a Config object to a dictionary.

    Args:
        conf (Config): A Config object

    Returns:
        dict: A multi-level dictionary containing a representation of the
        configuration.
    """
    return conf._xpipe_to_dict()
6c709e04f1ffb88b5b5563defc2a03fabfafbb1e
10,096
from typing import MutableMapping


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
35b338a40bbf00079813037776f93a9a822264db
10,098
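A quick check of flatten_dict with the default "." delimiter; the nested dict is a made-up example:

nested = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
assert flatten_dict(nested) == {"a.b": 1, "a.c.d": 2, "e": 3}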
def valid_hex(value):
    """Check if the string is a valid hex number representation."""
    try:
        int(value, 16)
    except Exception:
        return False
    return True
4037b9103cd3308253f929d0a4cbbe3f37c1c219
10,099
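Two quick checks for valid_hex; note that int(value, 16) also accepts an optional "0x" prefix:

assert valid_hex("ff") and valid_hex("0x1A")
assert not valid_hex("xyz")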
def get_mapvars(mapfile):
    """Read the sugar .map file and return a dictionary where the key is the
    variable name and the value is the (start, r0, r1) where r0 is the first
    ordinal and r1 is the last. Sugar uses unary encoding."""
    mapvars = {}
    with open(mapfile, "r") as f:
        for line in f:
            (var, name, start, _) = line.strip().split(" ")
            if var == "int":
                if ".." in _:
                    (r0, r1) = _.split("..")
                else:
                    (r0, r1) = int(_), int(_)
            else:
                raise RuntimeError("Only variables of type {} are supported".format(var))
            start = int(start)
            r0 = int(r0)
            r1 = int(r1)
            mapvars[name] = (start, r0, r1)
    return mapvars
c149270a4f1a86707b7ee5e2ed437b5736a2a986
10,100
import codecs


def _windows_31j(codec_name):
    """
    Lookup function for Windows-31J.
    Maps it to Shift_JIS, which is considered the closest match.

    :param codec_name: codec name
    :type codec_name: str
    :return: Shift_JIS if Windows-31J is specified, otherwise None
    :rtype: codecs.CodecInfo
    """
    # Map Windows-31J to Shift_JIS
    if codec_name.lower() == 'windows-31j':
        return codecs.lookup('shift_jis')
    else:
        return None
42b93b2b54c6bc3522a88402adc926212ad4bebb
10,101
import re


def clean_tracklisting(string):
    """Clean tracklisting in track name: 'A1.', 'A2.', etc
    """
    result = re.search(r'^([ABCD]{1}[1234]{0,1}\.\s?).*$', string, re.IGNORECASE)
    if result is not None:
        tracklisting = result.groups()[0]
        string = string.replace(tracklisting, '')
    return string
553f59de12950dc3e86f451cf263a6646938059f
10,103
def num_to_emoji(x: int) -> str:
    """Convert int to emoji."""
    if x <= 20:
        return {
            -1: "💣",
            0: "🟦",
            1: "1️⃣",
            2: "2️⃣",
            3: "3️⃣",
            4: "4️⃣",
            5: "5️⃣",
            6: "6️⃣",
            7: "7️⃣",
            8: "8️⃣",
            9: "9️⃣",
            10: "🔟",
            11: "<:11:803632726509879346>",
            12: "<:12:803633006790049806>",
            13: "<:13:803633045742682173>",
            14: "<:14:803633082330644492>",
            15: "<:15:803633109945155664>",
            16: "<:16:803633136763142175>",
            17: "<:17:803633168640245790>",
            18: "<:18:803633195106172958>",
            19: "<:19:803633223913177089>",
            20: "<:20:803633257358163968>",
        }[x]
    return f"{x} "
a9b14e94f56b33b7f449ee46108303b99612244d
10,104
def is_in_period(month, period):
    """Return which months fall within a specified group of calendar months.

    Parameters
    ----------
    month : int or iterable of ints
        One or a series of calendar month numbers [1..12].
    period : tuple of ints
        Group of calendar month numbers to match against. These are usually
        consecutive, e.g., [3, 4, 5] for MAM, but they don't have to be.

    Returns
    -------
    return : bool or iterable of bools
        True for month numbers that are in the group.
    """
    try:
        return [m in period for m in month]
    except TypeError:
        return month in period
e973f1ec11ea4dc6b87834c75d6374bbbb152635
10,105
def once(f):
    """Decorator. Defer to f once and only once, caching the result forever.

    Users with a functional background may recognize the concept of a
    `thunk`.
    """
    unset = val = object()

    def _helper(*args, **kwargs):
        nonlocal val
        if val is unset:
            val = f(*args, **kwargs)
        return val

    return _helper
39e900779a6665155fe83770964e509ff88a12c4
10,106
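A usage sketch for once; the decorated function here is a made-up example that records how many times it actually ran:

calls = []

@once
def expensive():
    calls.append(1)
    return 42

assert expensive() == 42 and expensive() == 42
assert len(calls) == 1   # the wrapped function ran exactly once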
import re


def normalize_number(text):
    """Replace every digit character with 0 (the number of digits is unchanged)"""
    return re.sub(r"\d", "0", text)
386a5ca07bedd0d52b9e06e9cbd8342295b6d77c
10,107
import hashlib


def _hash_file(fpath, chunk_size=65535):
    """
    Calculates the md5 hash of a file

    :param fpath: path to the file being validated
    :param chunk_size: Bytes to read at a time (leave unless large files)
    :return: The file hash
    """
    hasher = hashlib.md5()
    with open(fpath, 'rb') as fpath_file:
        for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
9f17f13f487f95620ad705a6f2b23d57407e640e
10,109
def midpoint(start, end):
    """Find the mid-point between two points."""
    x0, y0 = start
    x1, y1 = end
    return (x0 + x1) / 2, (y0 + y1) / 2
dab5a66b8af759998a295d64ba3711b6e99e4c27
10,110
import re


def replace_pair(pair, content):
    """
    Uses regex to locate a pair.

    First element of the tuple is the original error token.
    Second element of the tuple is the replacement token.
    """
    return re.sub(pair[0], ' {} '.format(pair[1]), content)
022633d417393959a94629117a1e664709770571
10,113
from datetime import datetime


def extract_sitename_date(directory_path, sitename_location, datetime_location):
    """Extract sitename and datetime from directory path name.

    Parameters
    -----------
    directory_path : string
        A path to the directory name
    sitename_location : index list
        Index of sitename location in directory path name
    datetime_location : index list
        Index of datetime location in directory path name

    Returns
    -----------
    list : list of site names and datetime information
    """
    # Create an empty list to append sitename and date information
    site_name_date_list = []

    # Assign datetime location to an object
    date_location = directory_path[datetime_location[0]: datetime_location[1]]

    # Specify datetime format
    format = "%Y%m%d"

    # Use datetime and format to create date variable
    date = datetime.strptime(date_location, format)

    # Assign sitename information to a variable
    site = directory_path[sitename_location[0]: sitename_location[1]]

    # Append site variable to list
    site_name_date_list.append(site)

    # Append date variable to list
    site_name_date_list.append(date)

    return site_name_date_list
86a2085ba68b234585ef9855da64fa1fdd5459ce
10,115