content · string · lengths 35 to 416k
sha1 · string · lengths 40 to 40
id · int64 · values 0 to 710k
import argparse


def get_args():
    """Read the command line arguments and parse them."""
    parser = argparse.ArgumentParser(
        description="Split a playlist into multiple playlists"
    )
    # Required arguments for the program
    parser.add_argument("-p", "--playlist_id", required=True, help="Playlist ID")
    # `--limit` has a default, so it need not be required (a default is never
    # used for a required argument).
    parser.add_argument(
        "-l", "--limit", type=int, default=20, help="Size of each small playlist"
    )
    return parser.parse_args()
3648cd847a760182029d39d858b06a0dd7bc1545
9,323
import copy


def difference(df, cols, lag=1):
    """
    Perform differencing on some columns in a dataframe.

    Input:
    ------
    df:   pandas dataframe containing the timeseries data.
    cols: list of strings indicating which columns to difference.
    lag:  number of rows to look back when differencing (default 1).
    """
    df2 = copy.deepcopy(df)
    # Difference based on the lag provided; start at `lag` so the
    # look-back index never goes below zero.
    for i in range(lag, len(df2["block_end"])):
        for col in cols:
            curr = df2.loc[i, col]
            prev = df2.loc[i - lag, col]
            df2.loc[i, "d_{}_{}".format(lag, col)] = curr - prev
    return df2
00a7edf9fc49f60736a15099b0ab2db178671dc0
9,324
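A minimal usage sketch for difference above; the toy dataframe (with the `block_end` column the function expects) is an illustrative assumption, not part of the dataset:

import pandas as pd

# Hypothetical timeseries data.
df = pd.DataFrame({"block_end": [10, 20, 30, 40],
                   "txcount": [5, 7, 12, 20]})
print(difference(df, ["txcount"], lag=1))
#    block_end  txcount  d_1_txcount
# 0         10        5          NaN
# 1         20        7          2.0
# 2         30       12          5.0
# 3         40       20          8.0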
def parse_amount_string(amount: str) -> float:
    """
    Parse strings like

    - '1.000,00-'
    - '1.000,00 S'
    - '23,23+'
    - '23,23 H'

    to the correct amount as a float.

    :param amount: The string of an amount
    :return: The parsed amount as a float
    """
    # Replace the German bookkeeping suffixes: H (Haben, credit) -> +,
    # S (Soll, debit) -> -
    amount = amount.replace("H", "+").replace("S", "-")
    # Remove the German thousands separator
    amount = amount.replace(".", "")
    # Replace the German decimal separator with the English one
    amount = amount.replace(",", ".")
    # Move the trailing sign to the front and trim whitespace
    amount = amount[-1] + amount[:-1].strip()
    return float(amount)
1307f753289b53bcd3f895e2582903064c9125ad
9,326
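A quick usage sketch for parse_amount_string above (illustrative values, not from the dataset):

print(parse_amount_string('1.000,00-'))  # -1000.0
print(parse_amount_string('23,23 H'))    # 23.23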
def unique_chunks(lst, n):
    """Return the set of unique chunks of length n from lst (the final
    chunk may be shorter when len(lst) is not a multiple of n)."""
    if n < 1:
        return set()
    return {tuple(lst[i:i + n]) for i in range(0, len(lst), n)}
e06758a4cb13e42394560e3fe2b4889a8e321af9
9,327
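A quick usage sketch for unique_chunks above (the sample list is an illustrative assumption):

# Hypothetical demo input, not part of the dataset.
print(unique_chunks([1, 2, 1, 2, 3, 4], 2))  # {(1, 2), (3, 4)}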
from pathlib import Path


def fixture_vcf_dir(fixtures_dir: Path) -> Path:
    """Return the path to the vcf fixtures directory."""
    return fixtures_dir / "vcfs"
7a77d40a34fc05b7acb20cc60c0e7343ffd4bfa8
9,329
def calcularPVoto(tVoto, vJogador):
    """
    -> Calculates the vote percentage of each player who received votes.

    :param tVoto: Total number of votes counted
    :param vJogador: Number of votes for the player whose percentage is calculated

    Function created by Jcvendrame
    """
    return (vJogador / tVoto) * 100
d21dfc72d70d6a9bdea7d5420cea3a63a57204cf
9,330
def equalRankin(ranking, other):
    """Compare two rankings.

    Args:
        ranking (List<CurrencyUsers>)
        other (List<CurrencyUsers>)

    Return:
        Boolean. True if both rankings are equal.
    """
    if not other or not ranking:
        return False
    if len(ranking) != len(other):
        return False
    for user in ranking:
        user_other = next(
            (item for item in other if item.UserId == user.UserId), None)
        if not user_other or user_other.TimeWatched != user.TimeWatched:
            return False
    return True
351fc60ad18450179d7395f7f88698acfdb2c8d4
9,331
def get_game_range_row_names(game_begin, game_end):
    """Get the row range containing the given games.

    Sample row name: g_0000000001_m001

    To capture the range of all moves in the two given games, the end row
    will need to go up to g_00..(N+1).

    Args:
        game_begin: an integer of the beginning game number.
        game_end: an integer of the ending game number, inclusive.

    Returns:
        The two string row numbers to pass to Bigtable as the row range.
    """
    row_fmt = 'g_{:0>10}_'
    return row_fmt.format(game_begin), row_fmt.format(game_end + 1)
700740131dbd497af8b80832a7ad11960ccc710f
9,332
import math


def score_word_count(count: int) -> float:
    """Score word frequency as the log of the count, with a minimum of 1.0."""
    return max(1.0, math.log(count))
e49febcac36653a3a188c0ec3edd9cca0c18b81a
9,333
import jinja2


def render(filename, variables):
    """
    Load the jinja2 template file and render it.

    :param filename: the jinja2 file to render
    :param variables: dict of variables to substitute into the template
    :return: the rendered text
    """
    with open(filename, 'rt') as f:
        template = jinja2.Template(f.read())
    return template.render(**variables)
09fcf7a6966276e2a362f64bfac84dfb5fb1dd0c
9,334
def reduce(l):
    """
    Args:
        l: generic list of tuples (can be either float or int tuples)

    Return:
        Reduced list based on the average (midpoint) value of every tuple
        in the input tuple list
    """
    result = []
    for s in l:
        # Midpoint (average) of the tuple's two endpoints, per the docstring.
        mid_val = (float(s[0]) + float(s[1])) / 2.0
        result.append(mid_val)
    return result
5db6214bc439dcc149d0d4cef1b66930f40694d7
9,335
import math


def choose_team_levels(num_teams, hackathon_level):
    """Calculate the average experience level per team and distribute any
    remaining difference among some of the teams evenly.

    Returns a list of team levels.
    """
    avg_team_level = math.floor(hackathon_level / num_teams)
    team_levels = []
    remainder = hackathon_level % num_teams
    remainder_per_team = math.ceil(remainder / num_teams)
    # Hand out the remainder one share at a time until it is used up.
    while remainder > 0:
        team_levels.append(avg_team_level + remainder_per_team)
        remainder -= remainder_per_team
    num_team_with_avg_level = num_teams - len(team_levels)
    teams_at_normal_level = [avg_team_level
                             for _ in range(num_team_with_avg_level)]
    return team_levels + teams_at_normal_level
aaf372f00969da62b966a2a09aff64a188fbce82
9,336
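A small worked example for choose_team_levels above (hypothetical numbers): with 4 teams and a combined level of 23, the average is 5 and the remainder 3, so three teams get level 6 and one stays at 5.

# Hypothetical demo, not part of the dataset.
print(choose_team_levels(4, 23))  # [6, 6, 6, 5]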
def infer_time_unit(time_seconds_arr):
    """
    Determine the most appropriate time unit for an array of time durations
    specified in seconds.

    e.g. 5400 seconds => 'minutes', 36000 seconds => 'hours'
    """
    if len(time_seconds_arr) == 0:
        return 'hours'
    max_time_seconds = max(time_seconds_arr)
    if max_time_seconds <= 60 * 2:
        return 'seconds'
    elif max_time_seconds <= 60 * 60 * 2:
        return 'minutes'
    elif max_time_seconds <= 24 * 60 * 60 * 2:
        return 'hours'
    else:
        return 'days'
11f25a712d8d66e8546fea2f7e36309dcebbcc74
9,338
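A quick usage sketch for infer_time_unit above (values are illustrative):

print(infer_time_unit([90]))      # 'seconds'
print(infer_time_unit([5400]))    # 'minutes'
print(infer_time_unit([36000]))   # 'hours'
print(infer_time_unit([300000]))  # 'days'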
def fprint(prompt: str, question: bool = False, returnstr: bool = False):
    """Fancy print function: expands a leading tag like '[ok]' or '[e]' into
    a colored ANSI marker, then prints, returns, or prompts."""
    # Map every accepted tag spelling to its colored replacement.
    tag_groups = {
        ('[o]', '[ok]', '[+]'): '[\033[01;32m+\033[0m]',
        ('[e]', '[er]', '[error]', '[-]'): '[\033[01;31m-\033[0m]',
        ('[i]', '[if]', '[ifo]', '[info]'): '[\033[01;36mi\033[0m]',
        ('[q]', '[qu]', '[question]', '[?]'): '[\033[01;33m?\033[0m]',
        ('[c]', '[cr]', '[critical]', '[!]'): '[\033[01;41m!\033[0m]',
    }
    tags = {tag: colored
            for group, colored in tag_groups.items()
            for tag in group}
    for tag in tags:
        if prompt.startswith(tag):
            prompt = prompt.replace(tag, tags[tag], 1)
            break
    if returnstr:
        return prompt
    elif question:
        return input(prompt)
    else:
        print(prompt)
27be27f40a112b5f14bbbf728deb319dccb77e7d
9,340
def flip_y(im):
    """Mirrors an image over the x axis (flips it vertically)."""
    source_pix = im.load()
    im = im.copy()
    dest_pix = im.load()
    width, height = im.size
    for i in range(width):
        for j in range(height):
            dest_pix[i, j] = source_pix[i, height - j - 1]
    return im
9ad00b2de3e628cc6dd441884103b9d2e3492333
9,343
import torch


def lower_matrix_to_vector(lower: torch.Tensor) -> torch.Tensor:
    """Convert a lower triangular matrix to a vector.

    Parameters
    ----------
    lower : torch.Tensor
        The lower triangular matrix; the last two dimensions must be square.

    Returns
    -------
    torch.Tensor
        The flattened lower triangle.
    """
    shape = lower.shape
    assert shape[-1] == shape[-2]
    lower_idx = torch.tril_indices(shape[-1], shape[-1])
    lower_flat = lower[..., lower_idx[0], lower_idx[1]]
    return lower_flat
e4fe825caf5926ce3219c4dd7720d1b7f180b998
9,344
def blockify(sudoku):
    """
    Converts a 9x9 sudoku list into a list containing lists of the values
    in the given sudoku's 3x3 blocks.

    args:
        -sudoku - 9x9 sudoku list
    returns:
        List with lists of values of sudoku blocks
    """
    block_row = []
    # Walk the grid in 3x3 steps; each (i, j) is a block's top-left corner.
    for i in range(0, len(sudoku), 3):
        for j in range(0, 9, 3):
            blocked = []
            for k in range(i, i + 3):
                blocked.extend(sudoku[k][j:j + 3])
            block_row.append(blocked)
    return block_row
4f24aa3c3f8eb7132ab512bd74c03d8fd1947db0
9,345
def map_coords_to_scaled_float(coords, orig_size, new_size):
    """
    Maps coordinates relative to the original 3-D image to coordinates
    corresponding to the re-scaled 3-D image, given the coordinates and the
    shapes of the original and "new" scaled images.

    Returns a floating-point coordinate center where the pixel at array
    coordinates (0, 0) has its center at (0.5, 0.5). Take the floor of the
    return value from this function to get back to indices.
    """
    if not all(
        isinstance(arg, (tuple, list, set))
        for arg in (coords, orig_size, new_size)
    ):
        raise TypeError(
            "`coords`, `orig_size` and `new_size` must be tuples "
            "corresponding to the image shape."
        )
    if not all(len(arg) == len(coords) for arg in (orig_size, new_size)):
        raise TypeError(
            "Number of dimensions in `coords` ({}), `orig_size` ({}), and "
            "`new_size` ({}) did not match.".format(
                len(coords), len(orig_size), len(new_size)
            )
        )
    ratio = lambda dim: float(new_size[dim]) / orig_size[dim]
    center = lambda s, dim: s[dim] / 2.0
    offset = lambda dim: (coords[dim] + 0.5) - center(orig_size, dim)
    new_index = lambda dim: (center(new_size, dim) + ratio(dim) * offset(dim))
    return tuple(new_index(dim) for dim in range(len(orig_size)))
f5e1e1523366a9e1e37f9d1a304d9deea8d53e00
9,346
def _structure_summary(structure):
    """
    Extract messages from the structure.

    Args:
        structure: a Pymatgen Structure object

    Returns:
        dict of the following messages:
            n_sites (int): number of sites in the structure.
            is_ordered (bool): whether the structure is ordered or not.
            ...to be continued
    """
    return {"n_sites": len(structure.sites),
            "is_ordered": structure.is_ordered}
65fe88a01d53df7ab487ae1d1ab24a4c2c746477
9,347
import torch

# Assumed import: in mmdetection 1.x the nms/soft_nms ops live in this module.
from mmdet.ops.nms import nms_wrapper


def multiclass_nms(multi_bboxes,
                   multi_scores,
                   score_thr,
                   nms_cfg,
                   max_num=-1,
                   score_factors=None):
    """NMS for multi-class bboxes.

    Args:
        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class), where the 0th column
            contains scores of the background class, but this will be ignored.
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        nms_cfg (dict): NMS config, e.g. {'type': 'nms', 'iou_thr': 0.5}
        max_num (int): if there are more than max_num bboxes after NMS,
            only top max_num will be kept.
        score_factors (Tensor): The factors multiplied to scores before
            applying NMS

    Returns:
        tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels
            are 0-based.
    """
    num_classes = multi_scores.shape[1]
    bboxes, labels = [], []
    nms_cfg_ = nms_cfg.copy()
    nms_type = nms_cfg_.pop('type', 'nms')
    # Resolve the NMS operator by name instead of calling the bare string.
    nms_op = getattr(nms_wrapper, nms_type)
    for i in range(1, num_classes):
        cls_inds = multi_scores[:, i] > score_thr
        if not cls_inds.any():
            continue
        # get bboxes and scores of this class
        if multi_bboxes.shape[1] == 4:
            _bboxes = multi_bboxes[cls_inds, :]
        else:
            _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]
        _scores = multi_scores[cls_inds, i]
        if score_factors is not None:
            _scores *= score_factors[cls_inds]
        cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)
        cls_dets, _ = nms_op(cls_dets, **nms_cfg_)
        cls_labels = multi_bboxes.new_full((cls_dets.shape[0], ),
                                           i - 1,
                                           dtype=torch.long)
        bboxes.append(cls_dets)
        labels.append(cls_labels)
    if bboxes:
        bboxes = torch.cat(bboxes)
        labels = torch.cat(labels)
        # Only truncate when a positive cap was requested.
        if max_num > 0 and bboxes.shape[0] > max_num:
            _, inds = bboxes[:, -1].sort(descending=True)
            inds = inds[:max_num]
            bboxes = bboxes[inds]
            labels = labels[inds]
    else:
        bboxes = multi_bboxes.new_zeros((0, 5))
        labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)
    return bboxes, labels
f3152e30eda4286ecfedc1b3fa3cf922470e0ada
9,350
def borders(det):
    """
    Calculates the borders for an image.

    Parameters
    --------------
    det: detected face (e.g. a dlib rectangle)

    Returns
    ----------------------
    l: int
        Coordinate for the left bound of the border
    r: int
        Coordinate for the right bound of the border
    t: int
        Coordinate for the top bound of the border
    b: int
        Coordinate for the bottom bound of the border
    """
    l, r, t, b = det.left(), det.right(), det.top(), det.bottom()
    return l, r, t, b
d6358c88ee26e64b7b209d2f5f9725a5b3fad9ba
9,351
def get_line_row(lines, row):
    """
    - lines: (Array string), array of lines
    - row: int, >= 0, the row index to grab

    RETURN: string; if row is greater than or equal to the length of lines,
    returns ''
    """
    if row < len(lines):
        return lines[row]
    return ''
f03f230b677fabb3c488c496dad7e35f875023fe
9,352
def filter_dict_null(d):
    """
    Recursively filter null values from a dictionary.
    """
    if isinstance(d, dict):
        return dict(
            (k, filter_dict_null(v))
            for k, v in list(d.items())
            if filter_dict_null(v) is not None
        )
    elif isinstance(d, list):
        if len(d) > 0:
            return list(map(filter_dict_null, d))
        return None
    return d
13b0288f2e032d0e6ca115d02d6540bb8f8739b5
9,353
def get_1obj_gt_scenario():
    """Egovehicle stationary at (x, y, z) = (0, 0, 0) (represented by `o`).
    Sequence of 4 nanosecond timestamps; a single box-shaped object starts
    at (x, y, z) = (-3, 2, 0) and moves 2 m in +x per timestamp.
    """
    centers = []
    # timestamp 0
    cx, cy, cz = -3, 2, 0
    centers += [(cx, cy, cz)]
    # timestamp 1
    cx, cy, cz = -1, 2, 0
    centers += [(cx, cy, cz)]
    # timestamp 2
    cx, cy, cz = 1, 2, 0
    centers += [(cx, cy, cz)]
    # timestamp 3
    cx, cy, cz = 3, 2, 0
    centers += [(cx, cy, cz)]
    yaw_angles = [0, 0, 0, 0]
    return centers, yaw_angles
5f528802f8b7f131fdba344e7bcf195e9414bd0b
9,357
import os
import subprocess


def list_folders(root):
    """
    List the folders under a root path.

    :param root: the directory to search
    :return: list of absolute folder paths, excluding the root itself
    """
    commands = ["find", os.path.realpath(root), "-type", "d"]
    results = subprocess.check_output(commands)
    # Drop the first entry (the root itself) and any trailing empty string.
    return [line for line in results.decode().split("\n")[1:] if line]
04aced7c8ab264e0babe87cc1f3caa6f243de29f
9,358
def fuel(i: int) -> int:
    """
    >>> [fuel(i) for i in [12, 14, 1969, 100756]]
    [2, 2, 654, 33583]
    """
    return i // 3 - 2
ea738361f4dc7081c5adeaf628c5424d9919e1bc
9,359
import psutil


def get_proc_name_from_pid(pid):
    """
    Use psutil to obtain the process name from a pid.

    :param pid: the process id
    :return: the process name
    """
    return psutil.Process(pid).name()
300ce9bcb945fce90a08b8dc2326e899180546fc
9,360
import os


def is_git_repo(path):
    """
    Rudimentary test for whether a path is a git repo: simply look for a
    .git directory.

    **Positional Arguments:**

    path:
        - The path that we are assessing
    """
    git_dir = os.path.join(path, ".git")
    return os.path.exists(git_dir) and os.path.isdir(git_dir)
00c113010b4aa9a946e50ad3788b4cd110907e79
9,361
import os


def gcp_application_default_creds_exist():
    """
    Return True if the application default credentials file exists.

    :return: True if we can find the app default creds file, otherwise False.
    """
    cred_file = os.path.expanduser(
        '~/.config/gcloud/application_default_credentials.json')
    return os.path.exists(cred_file)
0ec98a81f74fe4d20bdf2c6e1764338871c303cc
9,362
import os


def get_extension(filename):
    """
    Gets the extension of a file.

    Parameters
    ----------
    str filename: the filename to extract the extension from
    """
    try:
        return os.path.splitext(filename)[1].replace(".", "")
    except (AttributeError, TypeError):
        return ""
6cc4a9eb2755db54801b3314961b32d1104cbe7b
9,363
import os


def lookup_env(names):
    """
    Look up names in the environment. Returns the first value found.
    """
    for name in names:
        value = os.environ.get(name)
        if value:
            return value
0db95875d4dba3eafc659b9bb17e6a01526b599b
9,364
def get_bag_count(list_of_bags: list, colour: str) -> int:
    """
    Recursive function to loop through the list of bag rules.

    Gather the rule for the given colour; while there are still contained
    bags to be processed, get each colour and recursively call the function
    again, appending to the total.

    :return: Total number of bags, including the `colour` bag itself.
    :rtype: int
    """
    # Figure out the current rule on line.
    rule = ""
    for line in list_of_bags:
        if line[: line.index(" bags")] == colour:
            rule = line
    # If it contains no bags, count just this bag.
    if "no" in rule:
        return 1
    # Get the rest of the string.
    rule = rule[rule.index("contain") + 8:].split()
    # Iterate over bags in the rule, recursively searching each bag.
    total = 0
    i = 0
    while i < len(rule):
        count = int(rule[i])
        colour = rule[i + 1] + " " + rule[i + 2]
        total += count * get_bag_count(list_of_bags, colour)
        i += 4
    return total + 1
593bb1d826cd996ca4725fe7e2c18043c8853021
9,366
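A small usage sketch for get_bag_count above, using two made-up rules in the Advent-of-Code day 7 format (illustrative, not from the dataset):

rules = [
    "shiny gold bags contain 2 dark red bags.",
    "dark red bags contain no other bags.",
]
# 1 shiny gold + 2 dark red = 3 bags in total.
print(get_bag_count(rules, "shiny gold"))  # 3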
def connection_str():
    """SQLAlchemy connection string to the test database."""
    return "postgresql://nebulo_user:password@localhost:4442/nebulo_db"
b5220a9ce7e44acde4154686af00164d2f065a83
9,367
from typing import Dict

import yaml


def load_yaml_into_dict(file_path: str) -> Dict:
    """
    This loads yaml files into a dictionary to be used in API calls.
    """
    with open(file_path, "r") as yaml_file:
        loaded = yaml.safe_load(yaml_file)
        if isinstance(loaded, dict):
            return loaded
    print(f"Failed to parse invalid manifest: {file_path.split('/')[-1]}. Skipping.")
    return {}
891439af7cdd0e83f360b7398c98890419e8232f
9,369
from typing import Union


def _get_reception_time_from_scene_dir_second(scene_dir_second: str) -> Union[str, None]:
    """If there is a time datum inside the `scene_dir_second` string, then
    return it. Otherwise, return None."""
    # second part of scene dir; it can be: `13_53_00`,
    # `13_53_00_ETC2`, `14_35_23_CB11_SIR18`, etc.
    second_part = scene_dir_second.split('_')
    # I need at least three elements to create the time datum
    if len(second_part) < 3:
        return None
    # get the first three elements from the list
    second_part = second_part[0:3]
    # iterate over the list to check that all elements are numbers.
    # if there is an element that is not a number, then return None
    for part in second_part:
        if not part.isdigit():
            return None
    # if all parts are numbers, then join them to create the time datum
    return ':'.join(second_part)
89cf95ed1f110c6641de4eae6ac8230d78a7b802
9,370
import json

import requests


def get_arc_servicedict(url):
    """Returns a dict of service information for an ArcGIS REST service URL.

    Arguments
        url (String): An ArcGIS REST service URL, e.g.
            'http://services.slip.wa.gov.au/arcgis/rest/services/QC/MRWA_Public_Services/MapServer'
    """
    res = json.loads(requests.get(url + "?f=pjson").content)
    d = dict()
    d["layer_ids"] = [str(x['id']) for x in res["layers"]]
    d["supportedExtensions"] = res["supportedExtensions"]
    return d
80a1775d809c63ea34729c02ddcf98b8488fd825
9,371
import json


def dumps(obj):
    """
    Serialize ``obj`` to a JSON formatted ``str``.
    """
    return json.dumps(obj)
8fa77ad5615531eea0e2190abf9eaf27196a2337
9,374
def from_grid_range(x):
    """Map x from [-1, 1] to [0, 1]."""
    return (x + 1) / 2.0
a36e3ccace6fe385eeef1f4b5bf64c00f7b971ba
9,375
import re


def escaped_split(inp_str, split_char):
    """
    Split inp_str on character split_char, ignoring escaped occurrences.

    Since the return value is used to write back to the intermediate data
    file, any escape characters in the input are retained in the output.

    :param inp_str: String to split
    :param split_char: Split character
    :return: List of splits
    """
    if len(split_char) > 1:
        raise ValueError('Expected split character. Found string!')
    # Replace unescaped split chars with '\n', keep escaped pairs intact,
    # then split on the sentinel.
    out = re.sub(r'(\\.)|' + split_char,
                 lambda m: m.group(1) or '\n',
                 inp_str,
                 len(inp_str)).split('\n')
    out = [x for x in out if x]
    return out
13eaf77ffff52fdd6cfaa83ee08fc773f241be17
9,376
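A quick usage sketch for escaped_split above (hypothetical input):

# The backslash-escaped ':' is kept intact; the others split the string.
print(escaped_split(r'a:b\:c:d', ':'))  # ['a', 'b\\:c', 'd']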
def extract_result(log):
    """Extracts the name of each test condition run."""
    module_name = log['testInfo']["testName"]
    result = ""
    for d in log['results']:
        src = d['src']
        if src == 'WebRunner' or src == 'BROWSER' or src == module_name:
            # these are asynchronous and the order isn't predictable
            continue
        result += src + "\n"
    return result
4f4b5311c0dc27b7b178488bee81abeabf4e434d
9,378
def trace_fn(current_state, kernel_results, summary_freq=10, callbacks=()):
    """
    Can be passed to the HMC kernel to obtain a trace of intermediate
    kernel results and histograms of the network parameters in Tensorboard.
    """
    # step = kernel_results.step
    # with tf.summary.record_if(tf.equal(step % summary_freq, 0)):
    return kernel_results.is_accepted, [cb(*current_state) for cb in callbacks]
b129e3487304bc7dd6f36446841bfa28e5d4c699
9,379
import collections


def insert(container, key_path, item):
    """
    >>> insert({}, ['a', '1', '2', 'world'], 'hello')
    {'a': {'1': {'2': {'world': 'hello'}}}}
    """
    if isinstance(container, collections.OrderedDict):
        gen = collections.OrderedDict
        update = lambda i, k, v: i.update({k: v})
    else:
        gen = dict
        update = lambda i, k, v: i.__setitem__(k, v)
    sub_container = container
    for key in key_path[:-1]:
        if isinstance(key, int):
            raise ValueError('No int keys allowed in deep insert')
        if key not in sub_container:
            update(sub_container, key, gen())
        sub_container = sub_container[key]
    update(sub_container, key_path[-1], item)
    return container
656c6a69f3f261d7598daca8bda37908ddf1527b
9,380
import torch


def construct_edge_feature_gather(feature, knn_inds):
    """Construct an edge feature for each point (regarded as a node) using
    torch.gather.

    Args:
        feature (torch.Tensor): point features, (batch_size, channels, num_nodes)
        knn_inds (torch.Tensor): indices of k-nearest neighbours,
            (batch_size, num_nodes, k)

    Returns:
        edge_feature: (batch_size, 2*channels, num_nodes, k)

    Notes:
        PyTorch gather is 50x faster than advanced indexing, but needs 2x
        more memory, because it allocates a tensor as large as the expanded
        features during backward.
    """
    batch_size, channels, num_nodes = feature.shape
    k = knn_inds.size(-1)
    # CAUTION: torch.expand
    feature_central = feature.unsqueeze(3).expand(batch_size, channels, num_nodes, k)
    feature_expand = feature.unsqueeze(2).expand(batch_size, channels, num_nodes, num_nodes)
    knn_inds_expand = knn_inds.unsqueeze(1).expand(batch_size, channels, num_nodes, k)
    feature_neighbour = torch.gather(feature_expand, 3, knn_inds_expand)
    # (batch_size, 2 * channels, num_nodes, k)
    edge_feature = torch.cat((feature_central, feature_neighbour - feature_central), 1)
    return edge_feature
b49d26e0e7cee13952ff85f8f1f8075658fc391a
9,382
import os
import re


def find_version(*file_paths):
    """Find version information in a file."""
    path = os.path.join(os.path.dirname(__file__), *file_paths)
    with open(path) as f:
        version_file = f.read()
    version_pattern = r"^__version__ = ['\"]([^'\"]*)['\"]"
    version_match = re.search(version_pattern, version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
47c84af5fa2578fbbf28d6ac72fe5ab88ac2db8d
9,383
def canonicalize_name(name: str) -> str:
    """
    Normalize the name strings from certificates and emails so that they
    hopefully match.
    """
    name = name.upper()
    for c in "-.,<> ":
        name = name.replace(c, "")
    return name
3cfee0a655c876c037bb915098e564376f9b8cf5
9,384
import timeit


def get_exec_time(total_execs=1, _repeat=1):
    """
    Calculate the average time it takes to run a function or block of code.
    """
    def inner_wrapper(_function, *args, **kwargs):
        computational_times = timeit.repeat(
            lambda: _function(*args, **kwargs),
            number=total_execs,
            repeat=_repeat
        )
        return sum(computational_times) / len(computational_times)
    return inner_wrapper
d0826d3fb047736c5d4a5baa4d440bb7d2af2373
9,385
def negate_value(func):
    """Negate-value decorator."""
    # Note: the wrapped func itself is never called; the replacement simply
    # negates the `value` argument it receives.
    def do_negation(name, value):
        print("decorate: we can change return values by negating value")
        return -value
    return do_negation
276981a7c668308c97ca9e54066163036cb55528
9,386
import json
import os


def get_pmc(uid, metadata_df, directory='data/cord-19/'):
    """
    In:
        uid [str]: cord-uid of required file
        metadata_df: DataFrame containing metadata for file

    Returns:
        json of required file
    """
    uid_df = metadata_df[metadata_df.cord_uid == uid]
    pmc = uid_df.iloc[0].pmc_json_files
    if pmc == 'none':
        return 'none'
    if ';' in pmc:
        pmc = pmc.split(';')[0].strip()
    pmc = os.path.join(directory, pmc)
    with open(pmc, 'r') as file:
        pmc_json = json.load(file)
    return pmc_json
da8d0825272493dceb0ce26d98410f9a5481acf6
9,387
import re


def normalize_summary(summary):
    """Return normalized docstring summary."""
    # Remove newlines
    summary = re.sub(r'\s*\n\s*', ' ', summary.rstrip())
    # Add a period at the end of the sentence
    if (
        summary
        and (summary[-1].isalnum() or summary[-1] in ['"', "'"])
        and (not summary.startswith('#'))
    ):
        summary += '.'
    return summary
002e72668e87d668c2d6df678092ac57fc2b1d37
9,388
import hashlib


def md5_key(string):
    """Use this to generate filename keys."""
    m = hashlib.md5()
    m.update(string.encode('utf-8'))
    return m.hexdigest()
ffa2d26933b5a18f43d2c8ed696e880a38039ece
9,390
def name_to_hash(name: str) -> int:
    """
    Given a name, generate a unique-ish number. We cannot simply use hash(),
    since that differs each time we re-run the process...
    """
    hash_v = sum([ord(c) for c in name])
    print('hash_v', hash_v)
    return hash_v
e707d401911d7ca41b019e73d1afcd0c66fe045e
9,391
def query_introspection() -> str:
    """Retrieve available queries."""
    return """query {
  __type(name: "Query") {
    kind
    name
    fields {
      name
      description
      args {
        name
        description
        defaultValue
      }
    }
    possibleTypes {
      name
      kind
      description
    }
  }
}
"""
f01c4a79517b60a5c130805a673665b9bfae858e
9,392
def re_exp_matching_backward(s, p):
    """
    :type s: str to match
    :type p: pattern str
    :rtype: bool, match or not
    """
    def is_match(chr_for_match, match_pattern):
        return match_pattern == '.' or match_pattern == chr_for_match

    def match_core(s_idx, p_idx):
        # Both indices exhausted together means a full match.
        if p_idx < 0:
            return s_idx < 0
        if s_idx < 0:
            # The pattern can still match an empty string via trailing 'x*' pairs.
            if p[p_idx] != '*':
                return False
            return match_core(s_idx, p_idx - 2)
        if p[p_idx] == '*':
            if is_match(s[s_idx], p[p_idx - 1]):
                if match_core(s_idx - 1, p_idx):
                    return True
            return match_core(s_idx, p_idx - 2)
        if is_match(s[s_idx], p[p_idx]):
            return match_core(s_idx - 1, p_idx - 1)
        return False

    return match_core(len(s) - 1, len(p) - 1)
03fb3bb85123435779b46086b1c2ef1705b686f3
9,393
def date_list(start, end):
    """
    :param start: start year; format: 2017, int
    :param end: end year; format: 2019, int
    :return: a list including every month as a YYYYMM integer
    """
    # Both bounds must be 4-digit integer years.
    assert type(start) is int and 1000 <= start <= 9999, 'start error'
    assert type(end) is int and 1000 <= end <= 9999, 'end error'
    result = []
    years = end - start + 1
    for year in range(years):
        for i in range(12):
            result.append((start + year) * 100 + i + 1)
    return result
4af977d47e611013ead4dd6538e7bdcbb87bf5be
9,394
import re


def count_characters(text, whites=False):
    """
    Get the character count of a text.

    Args:
        whites: If True, whitespace characters are included in the count.
    """
    if whites:
        return len(text)
    else:
        return len(re.sub(r"\s", "", text))
e4db9e873e800282cf7f2398272a8b4546fe171e
9,395
import re


def remove_html(raw_text):
    """Remove html tags."""
    text = str(raw_text)
    cleaner = re.compile('<.*?>')
    text = re.sub(cleaner, '', text)
    return text
397b49c052e055a71876d9883ab259f871b5015e
9,397
def pull_words(words_file, word_length):
    """Compile the set of words, converted to lower case and matching the
    length of the start and end words.

    Args:
        words_file: str, name of the file containing all words
        word_length: int, length of the start/end words

    Returns:
        words_set: set, all possible interesting words
    """
    words_set = set()
    with open(words_file) as words:
        for word in words:
            word_ = word.strip().lower()
            if len(word_) == word_length and word_ not in words_set:
                words_set.add(word_)
    return words_set
cbecb29bd93177cb14a208e7e3a7bcee14f7c010
9,398
def vertices_vector_to_matrix(vertices):
    """vertices_vector_to_matrix(vertices) -> List[List[float]]

    PyPRT outputs the GeneratedModel vertex coordinates as a list. The list
    contains the x, y, z coordinates of all the vertices. This function
    converts the vertex list into a list of N vertex coordinate lists
    (with N the number of geometry vertices).

    Parameters:
        vertices: List[float]

    Returns:
        List[List[float]]

    Example:
        ``[[-10.0, 0.0, 10.0], [-10.0, 0.0, 0.0], [10.0, 0.0, 0.0],
        [10.0, 0.0, 10.0]] = vertices_vector_to_matrix([-10.0, 0.0, 10.0,
        -10.0, 0.0, 0.0, 10.0, 0.0, 0.0, 10.0, 0.0, 10.0])``
    """
    vertices_as_matrix = []
    for count in range(0, int(len(vertices) / 3)):
        vector_per_pt = [vertices[count * 3],
                         vertices[count * 3 + 1],
                         vertices[count * 3 + 2]]
        vertices_as_matrix.append(vector_per_pt)
    return vertices_as_matrix
0d03a60f32ed722d089500840e1a2a2e645c20b4
9,399
import torch


def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
    """
    Returns a random rectangular matrix (batch of matrices) with singular
    values sampled from a Gaussian with mean `mean` and standard deviation
    `sigma`. The smaller the `sigma`, the better conditioned the output
    matrix is.
    """
    primitive_dtype = {
        torch.float: torch.float,
        torch.double: torch.double,
        torch.cfloat: torch.float,
        torch.cdouble: torch.double
    }
    x = torch.rand(shape, dtype=dtype, device=device)
    m = x.size(-2)
    n = x.size(-1)
    u, _, vh = torch.linalg.svd(x, full_matrices=False)
    s = (torch.randn(*(shape[:-2] + (min(m, n),)),
                     dtype=primitive_dtype[dtype],
                     device=device) * sigma + mean) \
        .sort(-1, descending=True).values.to(dtype)
    return (u * s.unsqueeze(-2)) @ vh
bd2d7e232ffcd2848b836e9187d32a00339477de
9,400
def get_index_str(n, i):
    """
    Convert an int `i` to a zero-padded string.

    Parameters
    ----------
    n : int
        Order used to decide how much zero padding is needed.
    i : int
        The number to convert.

    Returns
    -------
    res : str
        The number as a string.

    Examples
    --------
    ```python
    get_index_str(100, 15)
    ```
    Out:
    ```
    '015'
    ```
    """
    if i < 0 or i > n:
        raise ValueError("0 <= i <= n is required")
    lm = len(str(n))
    res = str(i)
    while lm > len(res):
        res = "0" + res
    return res
e7b3561a49b447d1edec22da8cc86d2a702ec039
9,401
def get_ipsec_udp_key_status(
    self,
) -> dict:
    """Get IPSEC UDP key status for all appliances.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - ikeless
          - GET
          - /ikeless/seedStatus

    :return: Returns dictionary of ikeless key status \n
        * keyword **<ne_pk>** (`dict`): Appliance key status object \n
            * keyword **hasActiveSeed** (`bool`): If appliance has the
              active key material
            * keyword **hasNewSeed** (`bool`): If appliance has the new
              key material
            * keyword **detail** (`str`): Detail of appliance seed status,
              including date stamp ID of active and new seed
    :rtype: dict
    """
    return self._get("/ikeless/seedStatus")
db5ac6fee37574987a023183f8416d40234ac4e4
9,402
import typing


def describe_services(
    ecs, cluster: str, services: typing.Set[str]
) -> typing.List[typing.Dict[str, typing.Any]]:
    """Wrap `ECS.Client.describe_services` to allow more than 10 services in
    one call.
    """
    result: typing.List[typing.Dict[str, typing.Any]] = []
    services_list = list(services)
    # The API accepts at most 10 services per call, so batch the requests.
    for i in range(0, len(services_list), 10):
        response = ecs.describe_services(
            cluster=cluster, services=services_list[i: i + 10]
        )
        result.extend(response["services"])
    return result
f585610480aa7c657974b6f3163888fe7e9b6a32
9,403
import re


def extract_page_nr(some_string):
    """
    Extracts the page number from a string like `Seite 21`.

    :param some_string: e.g. `Seite 21`
    :type some_string: str
    :return: The page number, e.g. `21`
    :rtype: str
    """
    page_nr = re.findall(r'\d+', some_string)
    if len(page_nr) > 0:
        return "-".join(page_nr)
    else:
        return some_string
6d39314de89c8f4bf4d931f2dc329fe394a10091
9,404
def decode_lookup(key, dataset, description):
    """Convert a reference to a description to be used in data files."""
    if key in dataset:
        return dataset[key]
    else:
        decoded = input("Please enter {desc} for {key}: ".format(
            desc=description, key=key))
        dataset[key] = decoded
        return decoded
4df44c411ef4d1ffe76e489611c4a65888b0a3cd
9,405
def create_explicit_child_condition(parentage_tuple_list):
    """States, for a parent node, what its explicit children are."""
    def explicit_child_condition(G):
        return all(
            [sorted(G.out_edges(y[0])) == sorted([(y[0], x) for x in y[1]])
             for y in parentage_tuple_list])
    return explicit_child_condition
81860f24e7538feb84e9205dc233d2bf7d1dd1b3
9,407
def intDictToStringDict(dictionary):
    """
    Converts dictionary keys into strings.

    :param dictionary: the dictionary whose keys should be stringified
    :return: a new dictionary with string keys
    """
    result = {}
    for k in dictionary:
        result[str(k)] = dictionary[k]
    return result
65e519f04433a5dfcb4d7ace9bad91d8e06db4e5
9,408
def calculate_displacement(src_grammar, tgt_grammar):
    """Calculate the displacement between 2 grammars.

    E.g: S -> A B C to S -> B C A has displacement of [1 2 0]."""
    src_grammar_lst = src_grammar.split()
    tgt_grammar_lst = tgt_grammar.split()
    src_grammar_lst = src_grammar_lst[src_grammar_lst.index("->") + 1:]
    tgt_grammar_lst = tgt_grammar_lst[tgt_grammar_lst.index("->") + 1:]
    displacement = []
    new_words = []
    for word in tgt_grammar_lst:
        try:
            displacement.append(src_grammar_lst.index(word))
        except ValueError:
            # A ValueError ("substring not found") indicates a new word.
            displacement.append(-1)
            new_words.append(word)
    return displacement, new_words
0d14b5757d26c2b8398fe6ecbd94f53d0df70375
9,409
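A quick usage sketch for calculate_displacement above, reusing the docstring's own example:

print(calculate_displacement("S -> A B C", "S -> B C A"))
# ([1, 2, 0], [])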
def create_confusion_matrix(actual, predicted, category):
    """
    Calculates the confusion matrix for a given category.

    :param actual: The actual labels of the data
    :param predicted: The predicted labels of the data
    :param category: The category of the confusion matrix
    :return: dictionary, with the values of the confusion matrix
    """
    conf_matrix = dict()
    conf_matrix['TP'], conf_matrix['FP'], conf_matrix['TN'], conf_matrix['FN'] = 0, 0, 0, 0
    print('The category is: {}'.format(category))
    for sentence in predicted:
        if sentence in actual[predicted[sentence]] and predicted[sentence] == category:
            print('TP: Actual: {}, Predicted: {}'.format(category, category))
            conf_matrix['TP'] += 1
        elif sentence in actual[predicted[sentence]]:
            print('TN: Actual: not category, Predicted: not category')
            conf_matrix['TN'] += 1
        elif sentence not in actual[predicted[sentence]] and predicted[sentence] == category:
            print('FP: Actual: not category, Predicted: {}'.format(category))
            conf_matrix['FP'] += 1
        else:
            print('FN: Actual: {}, Predicted: {}'.format(category, predicted[sentence]))
            conf_matrix['FN'] += 1
    return conf_matrix
34ae6608a2d0293e651a627a21220ec70a54004f
9,410
import time


def clock(func):
    """
    Decorator that, besides performing the original function's work, also
    reports how long the call took.

    A decorator is a function: it receives a function (the original) and
    returns a function (the new one).

    :param func: the function being decorated
    :return: the decorated function
    """
    def decorator(*args, **kwargs):
        t0 = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - t0
        print("elapsed: {:.8f}s".format(elapsed))
        return result
    return decorator
540250a4dfef4c385f208b834e3fea623a00db71
9,411
def get_page(paginated_list):
    """
    Take a github.PaginatedList.PaginatedList and iterate through its pages
    to get all of its entries.

    Args:
        paginated_list (github.PaginatedList.PaginatedList): PyGithub
            paginated list object

    Returns:
        `list`: All entries in the paginated list
    """
    idx = 0
    _page_entries = paginated_list.get_page(idx)
    page_entries = []
    while _page_entries:
        page_entries.extend(_page_entries)
        idx += 1
        _page_entries = paginated_list.get_page(idx)
    return page_entries
0510537b20c18b6b1be5b10ca014e13be7a19a1f
9,413
import math


def prime(n):
    """Primality test by trial division."""
    if n == 2:
        return True
    elif n < 2 or n % 2 == 0:
        return False
    else:
        return not any(n % x == 0
                       for x in range(3, math.ceil(math.sqrt(n)) + 1, 2))
3504217a7e8149867ec16ddf9c54f4fac736d592
9,414
def average(numbers):
    """
    :param list[float] numbers: a list of numbers
    :returns: the average of the given number sequence; an empty list
        returns 0.
    :rtype: float
    """
    return float(sum(numbers)) / max(len(numbers), 1)
d86c6f24733d3032b82cb6c64c02eba37cc34a04
9,415
import six


def _IsIdentityTypeMapping(type_mappings):
    """\
    An identity type mapping is a special case where each of the input types
    matches the output type.
    """
    for input_type, output_types in six.iteritems(type_mappings):
        if output_types != [input_type]:
            return False
    return True
27f13266dc23c5d4bd78a27c240664208d4f8c8f
9,416
def general_pool_fn(x):
    """
    x[0]: function to call
    x[1] to x[n]: arguments of the function
    """
    return x[0](*x[1:])
d398378d3d1671f0e58bff2bc8737ff07da0c3e3
9,417
def dt_hms(d, n=3):
    """
    Format a duration `d`, given in hours, as '+HHh MMm SS.sss s',
    allowing negative times.
    """
    try:
        sign = '+' if d > 0. else '-'
        x = abs(d)
        h = int(x)
        x = (x - h) * 60.
        m = int(x)
        s = (x - m) * 60.
        w = n + 3
        return f"{sign}{h:02d}h {m:02d}m {s:0{w}.{n}f}s"
    except (TypeError, ValueError):
        return None
30c04b23ba304a1d8dffcdbe38035bdde24b1848
9,418
def _EraseTombstone(device, tombstone_file):
    """Deletes a tombstone from the device.

    Args:
        device: An instance of DeviceUtils.
        tombstone_file: the tombstone to delete.
    """
    return device.RunShellCommand(
        'rm /data/tombstones/' + tombstone_file, root=True)
00e6f316062785d7465f501ea743a2dc94864aef
9,419
def shorten_class(class_name: str) -> str:
    """Returns a shortened version of the fully qualified class name."""
    return class_name.replace('org.chromium.',
                              '.').replace('chrome.browser.', 'c.b.')
2064e6e0dc159bc130f84ce4a830857455d12ba4
9,421
def weight_function(run_params, displacement_norm):
    """Determine the motion-dependent prediction weight of a given supporter
    point.

    This method determines the weight to apply to each supporter point when
    using it for prediction of a target point, based on the norm of its
    displacement vector. The larger the displacement, the higher the weight
    the supporter point receives; the weight is a linear function of the
    displacement norm, with an offset of 1.

    Displacement-based weighting is used to give less importance to
    supporter points that are part of the "background" and have little
    correlation with the movement of the muscle fascia.

    Args:
        run_params (ParamValues): values of parameters used in tracking,
            including scalar alpha
        displacement_norm (float): L2 norm of the relevant supporter point's
            displacement vector

    Returns:
        float weighting applied to supporter point when tracking target point
    """
    alpha = run_params.displacement_weight
    return 1 + (alpha * displacement_norm)
2fdea32511ae8b4cedd47e79d7f8517a08a6b457
9,422
def _algorithm_kwargs(request):
    """Auto-parametrizes `_rl_algorithm_cls` for the `trainer` fixture."""
    return dict(request.param)
fddb2a376449973f49d5a27cef04b3596e9cc3dd
9,423
def add_api_config_to_queries(generated_query_strings, search_engines):
    """
    Merges the two parameters and returns a list of dicts that include the
    api config.

    If only 1 API key is provided, it is assumed this is valid for many
    searches and is used for all queries. If more than 1 is provided, then
    the number of keys provided needs to match the number of queries.

    Args:
        generated_query_strings: The output from the
            generate_query_strings function.
        search_engines: The search engines list that is found in the
            api_config file. See the documentation for usage guidelines
            (http://coast_search.readthedocs.io/).

    Returns:
        result_list: Updated list of query data, now including search
            engine/api info
    """
    if len(search_engines) == 1:
        se = search_engines[0]
        for query_object in generated_query_strings:
            query_object["se_name"] = se["name"]
            query_object["api_key"] = se["api_key"]
            query_object["search_engine_id"] = se["search_engine_id"]
    elif len(search_engines) == len(generated_query_strings):
        for i in range(0, len(search_engines)):
            query_object = generated_query_strings[i]
            se = search_engines[i]
            query_object["se_name"] = se["name"]
            query_object["api_key"] = se["api_key"]
            query_object["search_engine_id"] = se["search_engine_id"]
    else:
        raise Exception("Invalid number of API keys.")
    return generated_query_strings
209b14e98c2cb339f958fc7dfe456a4a40876c8c
9,424
def convert_entity_schema(entity_schema):
    """Convert an entity schema to a record schema."""
    spots = list()
    asocs = list()
    spot_asoc_map = dict()
    for entity in entity_schema:
        spots += [entity]
        spot_asoc_map[entity] = list()
    return spots, asocs, spot_asoc_map
6e3cc2bbecbbd88312c1a486142d9e8a50a5e39a
9,425
from typing import Any
from typing import Dict

import requests


def pull_astronaut_list(url: str = 'http://api.open-notify.org/astros.json') -> Dict[str, Any]:
    """
    Pull a list of astronauts via API. Defaults to open-notify's API.

    Args:
        url: the URL to pull data from.

    Returns:
        A dict containing the astronaut count and names.
    """
    data = requests.get(url).json()
    return data
d008cd1d62a435086dbd8dc08baaa5323298f11c
9,428
def get_widget_for_attr(traits_ui, attr_name):
    """Return the Qt widget in the UI which displays the attribute specified."""
    x_editor = traits_ui.get_editors(attr_name)[0]
    qt_widget = x_editor.control
    return qt_widget
2bb2959963734bee48d067f41425808412bd2421
9,429
def all(*args, span=None):
    """Create a new expression of the intersection of all conditions in the
    arguments.

    Parameters
    ----------
    args : list
        List of symbolic boolean expressions

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    expr: Expr
        Expression
    """
    # `_ffi_api` is provided by the enclosing module this snippet was taken from.
    if not args:
        raise ValueError("all must take at least 1 argument")
    if len(args) == 1:
        return args[0]
    val = _ffi_api._OpAnd(args[0], args[1], span)  # type: ignore
    for i in range(2, len(args)):
        val = _ffi_api._OpAnd(val, args[i], span)  # type: ignore
    return val
f0cebfb241c10c2d53c58a8b4fb186e9d65a1b7a
9,430
from datetime import datetime


def initialise_library(members, items, item_copies, library):
    """Takes in items that need to be populated into the library, and
    conducts a series of pre-defined events by members (loan, renewal,
    return).

    The Library object after conducting the events is used to initialise the
    LibraryApplication object that presents a menu for members to interact
    with subsequently.

    Args:
        members (list): A list of members that have registered with the library
        items (list): A list of unique items available in the library
        item_copies (dict): A dictionary of the number of copies available
            for each item in the library
        library (Library): An empty Library object to be initialised with
            the `members`, `items` and `item_copies`.

    Returns:
        library (Library): An initialised library object with all `members`,
            `items`, `item_copies`, and pre-defined events initialised for
            usage within the LibraryApplication.
    """
    print("*** Start initialising library ***\n")
    # Adding the items, item_copies, members into the library
    # Registering the members
    for member in members:
        library.register_member(member)
    # Adding the items to the library
    for item in items:
        library.add_item(item)
    # Adding the copies of the items to the library
    for item in items:
        for _ in range(item_copies[item.title]):
            library.add_copy_item(item)
    # Look up the actual member objects by their member ids
    john = library.search_member("S123")
    mary = library.search_member("J111")
    print(
        "John borrows 4 items on 1 March 2021 and Mary borrows 1 item on 3 March 2021."
    )
    item_copies_loaned = [
        (john, datetime(2021, 3, 1), [1, 3, 6, 8]),
        (mary, datetime(2021, 3, 3), [9]),
    ]
    for record in item_copies_loaned:
        member = record[0]
        date_borrowed = record[1]
        copy_id_borrowed = record[2]
        for copy_id in copy_id_borrowed:
            item_copy = library.search_copy_item(copy_id)
            member.borrow_item(item_copy, date_borrowed)
    print("Showing copy items\n")
    print(library.copy_item_str() + "\n")
    print("Showing members")
    print(library.member_str() + "\n")
    print(
        "Member data after Mary renews loan on 5 March 2021 and John returns "
        "Copy Item 6, Dark Knight on 17 March 2021"
    )
    # John returns 'Dark Knight' on 17 March 2021
    john.return_item("Dark Knight", datetime(2021, 3, 17))
    # Mary renews loan on 5 March 2021
    mary.renew("Powerpoint Presentation Tips", datetime(2021, 3, 5))
    print(library.member_str() + "\n")
    print("John data after paying fines and he receives change $1.50")
    john.pay(2.00)
    print(f"{john}\n")
    print("John data after borrowing copy item 6, Dark Knight again on 22 March 2021")
    john.borrow_item(library.search_copy_item(6), datetime(2021, 3, 22))
    print(f"{john}\n")
    print("Mary data after returning Powerpoint Presentation Tips on 17 March 2021")
    mary.return_item("Powerpoint Presentation Tips", datetime(2021, 3, 17))
    print(f"{mary}")
    print("*** Done initialising library ***\n")
    print("******* Library Test Data *******")
    print(library)
    print("******* Ends Library Test Data *******\n")
    return library
0f5021358dd701790be75140673ede4634de1a41
9,431
def update_cache_bykey(cache_list, new_list, key='id'):
    """
    Given a cache list of dicts, update the cache with a 2nd list of dicts
    by a specific key in the dict.

    :param cache_list: List of dicts
    :param new_list: New list of dicts to update by
    :param key: Optional, key to use as the identifier to update new entries with
    :return: Updated list of dicts
    """
    # create a cache dict keyed by id.
    cache_bykey = {entry[key]: entry for entry in cache_list if entry.get(key)}
    # create a new dict keyed by id.
    new_bykey = {entry[key]: entry for entry in new_list if entry.get(key)}
    # combine and update cache into a 3rd dict
    combined_bykey = {**cache_bykey, **new_bykey}
    # return a list of the updated dict.
    return [value for key, value in combined_bykey.items()]
b077a1c40cbf0a8848ff9e017a644c20e1d25199
9,432
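A quick usage sketch for update_cache_bykey above (made-up dicts, not from the dataset):

cache = [{"id": 1, "v": "old"}, {"id": 2, "v": "keep"}]
new = [{"id": 1, "v": "new"}, {"id": 3, "v": "added"}]
print(update_cache_bykey(cache, new))
# [{'id': 1, 'v': 'new'}, {'id': 2, 'v': 'keep'}, {'id': 3, 'v': 'added'}]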
def calc_theor_avg_mass(dictionary, cfg, prec=6, reducing_end=None) -> float:
    """Returns the theoretical average mass for a glycan in dictionary form."""
    reducing_end_tag_mass = 0.0
    if reducing_end is not None:
        if reducing_end in cfg["reducing_end_tag_avg"].keys():
            reducing_end_tag_mass = cfg["reducing_end_tag_avg"][reducing_end]
    return round(
        sum([
            dictionary[key] * cfg["avg_masses_underivatized"][key]
            for key in dictionary.keys()
        ]) + cfg["avg_masses_underivatized"]["H3O+"] + reducing_end_tag_mass,
        prec)
32a5aab08463366e8b43d13f2891489d94ab075a
9,433
import inspect
import sys


def is_builtin(key):
    """Test builtin using inspect (some modules not seen as builtin in
    sys.builtin_module_names may look builtin anyway to inspect, and in this
    case we want to filter them out)."""
    try:
        inspect.getfile(sys.modules[key])
    except TypeError:
        return True
    return False
1834b871b8d4f8d55f6de61052568dffdd2b8474
9,434
def Eliminar_Columnas(df, Dic):
    """
    Receives the dataframe and the list of columns to be dropped.
    """
    df = df.drop(columns=Dic)
    return df
96c049508f196406807cc9102b87cb4aa4884650
9,435
def parse_tpl_file(tpl_file):
    """
    Parse a pest template file to get the parameter names.

    Parameters
    ----------
    tpl_file : str
        template file name

    Returns
    -------
    par_names : list
        list of parameter names
    """
    par_names = []
    with open(tpl_file, 'r') as f:
        try:
            header = f.readline().strip().split()
            assert header[0].lower() in ["ptf", "jtf"], \
                "template file error: must start with [ptf,jtf], not:" + \
                str(header[0])
            assert len(header) == 2, \
                "template file error: header line must have two entries: " + \
                str(header)
            marker = header[1]
            assert len(marker) == 1, \
                "template file error: marker must be a single character, not:" + \
                str(marker)
            for line in f:
                par_line = line.strip().split(marker)[1::2]
                for p in par_line:
                    if p not in par_names:
                        par_names.append(p)
        except Exception as e:
            raise Exception("error processing template file " +
                            tpl_file + " :\n" + str(e))
    par_names = [pn.strip().lower() for pn in par_names]
    return par_names
0797cfedbef07dcd118e13440691c287f952a740
9,436
def find_factors(n):
    """Finds the sorted list of factors of a number."""
    fact_list = {1, n}
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            fact_list.add(i)
            fact_list.add(n // i)
    return sorted(fact_list)
0b8992bfe81bfd49c738b49380ceb0c8e7155b3f
9,437
def unique(a):
    """
    Return the list with duplicate elements removed.

    Args:
        a (list): A list.

    Returns (list):
        The list with duplicate elements removed.
    """
    # NOTES:
    # 1. Built-in 'set()' converts a list (ordered) into a set (unordered).
    # 2. Built-in 'list()' converts a set (unordered) into a list (ordered).
    return list(set(a))
1aeac608e53ebc91cb0709b69fc8731f0ad39562
9,438
def _log_commit_progress(table_size, no_chunks):
    """Shim to avoid sgr spamming output with commit progress for small images."""
    return table_size > 500000 or no_chunks > 100
82394d325bb755045ca7057ebbe53520024edeab
9,439
import subprocess


def rearm_windows():
    """Rearm the Windows license."""
    rearm_cmd = r'cscript c:\Windows\System32\slmgr.vbs -rearm //nologo'
    return subprocess.check_call(rearm_cmd) == 0
d7c468005f2504b0210568efc588919cdb6e1568
9,440
def _warn(warn_message, *args, **kwargs):
    """
    Inputs:
        warn_message - the warning message

    Used to override "warnings.formatwarning" to output only the warning
    message.
    """
    return f'{warn_message}\n\n'
cf88c86af6492142c6f3d364f8cdf1f5cb39da1d
9,441
def max_key(dictionary):
    """
    Returns the maximum key in an integer-keyed dictionary.

    Args:
        dictionary (dict): The integer-keyed dictionary.

    Returns:
        int: The maximum key.
    """
    # Note: the 0 starting value assumes the keys are non-negative.
    output = 0
    for key, value in dictionary.items():
        output = max(output, int(key))
    return output
059a26fa690aaca2df2b0a7e251c206aa5e7276b
9,442
def seperator(digits):
    """Separate thousands into a list container,
    e.g. ['1', '000'] for 1000."""
    strdigits = str(digits)
    sep = []
    while len(strdigits) > 3:
        sep.insert(0, strdigits[-3:len(strdigits)])
        strdigits = strdigits[0:-3]
    # if strdigits is not empty at the end of the loop
    if strdigits:
        sep.insert(0, strdigits)
    return sep
5a75e73521900a5a600d4bac8496811b9a3a371d
9,443
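A quick usage sketch for seperator above (illustrative number):

# Hypothetical demo, not part of the dataset.
print(seperator(1234567))  # ['1', '234', '567']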
import argparse


def get_arguments():
    """
    Parse the command line arguments.

    :return: the parsed arguments
    """
    parser = argparse.ArgumentParser(
        description='Analyse the estimator_status and ekf2_innovation message data for the '
                    '.ulg files in the specified directory')
    parser.add_argument("directory_path")
    parser.add_argument(
        '-o', '--overwrite', action='store_true',
        help='Whether to overwrite an already analysed file. If a file with .pdf extension '
             'exists for a .ulg file, the log file will be skipped from analysis unless this '
             'flag has been set.')
    parser.add_argument(
        '--no-plots', action='store_true',
        help='Whether to only analyse and not plot the summaries for developers.')
    return parser.parse_args()
678d1069f5d290a4f45cb0c1df42608c2d8764ab
9,444
def polaritySanitizer(polarity):
    """Sanitize input polarity values.

    Renames the, case-insensitive, values 'positive', 'pos', or '+' to
    'positive' and 'negative', 'neg', or '-' to 'negative'. Errors on an
    unrecognized polarity value.

    Arguments:
        polarity (str): unsanitized polarity type
    """
    if polarity.lower() in ["positive", "pos", "+"]:
        polarity = "positive"
    elif polarity.lower() in ["negative", "neg", "-"]:
        polarity = "negative"
    else:
        raise ValueError(f"{polarity} is not recognized as a polarity type.")
    return polarity
e328345ea48a9441f9ab323fd6a3ff5ca06f07d5
9,447
def hash_table_size(item, tablesize):
    """
    A hashing technique that involves:

    1. Converting the characters in a string to a list of ordinal values
    2. Getting the sum of the list
    3. Getting the remainder by doing a modulo using tablesize

    item - string
    tablesize - size of the hash table
    """
    ordinal_list = [ord(i) for i in item]
    return sum(ordinal_list) % tablesize
cf47a023c35693681485331878dfd3eb9164a7bf
9,448
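A quick usage sketch for hash_table_size above (illustrative values):

# ord('c') + ord('a') + ord('t') = 99 + 97 + 116 = 312; 312 % 11 = 4.
print(hash_table_size("cat", 11))  # 4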