Columns: content (string, lengths 35-416k) | sha1 (string, length 40) | id (int64, 0-710k)
import time


def generate_features_for_all_nodes(n_feature, features_filename):
    """Generates node-list with features for RiWalk-NA."""
    # generate node-features-df
    print("\tFeatures generation starts.")
    start_time = time.time()
    nodes_all_features_df = n_feature.gen_features_all_nodes()
    nodes_all_features_df.to_csv(features_filename, index=False)
    print("\tFeature generation lasted {} seconds.".format(time.time() - start_time))
    return nodes_all_features_df
e1f65c523a3140aa718ff867751657e04945fc38
6,991
import requests
from bs4 import BeautifulSoup


def query_spikeins(accession):
    """Query spikein IDs from the ENCODE website."""
    query = f'https://www.encodeproject.org/experiments/{accession}/'
    page = requests.get(query)
    soup = BeautifulSoup(page.content, 'html.parser')
    for div in soup.find_all('div'):
        try:
            if div['data-test'] == 'spikeins':
                return div.find('a').get_text()
        except KeyError:
            continue
    return None
c05d9480bba0b052a44b4a683da4ef16fa01e6fc
6,992
def test_method_nesting(server):
    """Test that we correctly nest namespaces"""
    def handler(message):
        return {
            "jsonrpc": "2.0",
            "result": message.params[0] == message.method,
            "id": 1,
        }
    server._handler = handler
    assert server.nest.testmethod("nest.testmethod")
    assert server.nest.testmethod.some.other.method(
        "nest.testmethod.some.other.method")
9c8264f357e0e958b94669ef82ee70b3efff7e8c
6,994
def create_log_group_arn(logs_client, hosted_zone_test_name):
    """Return ARN of a newly created CloudWatch log group."""
    log_group_name = f"/aws/route53/{hosted_zone_test_name}"
    response = logs_client.create_log_group(logGroupName=log_group_name)
    assert response["ResponseMetadata"]["HTTPStatusCode"] == 200

    log_group_arn = None
    response = logs_client.describe_log_groups()
    for entry in response["logGroups"]:
        if entry["logGroupName"] == log_group_name:
            log_group_arn = entry["arn"]
            break
    return log_group_arn
79113cc7c9ac844c4d38cd1780b38d14a112b40d
6,995
def _make_filetags(attributes, default_filetag = None):
    """Helper function for rendering RPM spec file tags, like

    ```
    %attr(0755, root, root) %dir
    ```
    """
    template = "%attr({mode}, {user}, {group}) {supplied_filetag}"

    mode = attributes.get("mode", "-")
    user = attributes.get("user", "-")
    group = attributes.get("group", "-")

    supplied_filetag = attributes.get("rpm_filetag", default_filetag)

    return template.format(
        mode = mode,
        user = user,
        group = group,
        supplied_filetag = supplied_filetag or "",
    )
56898ec1fc974721150b7e1055be3ab3782754a2
6,996
def split_game_path(path):
    """Split a game path into individual components."""
    # filter out empty parts that are caused by double slashes
    return [p for p in path.split('/') if p]
9b939058aa7f8b3371d3e37b0252a5a01dba4e7b
6,998
import re


def find_kwic(item, regexes, shell_nouns):
    """Takes the location of a vertically annotated text of COCA/COHA and
    transforms it to the form
    [['word', 'lemma', 'POS'], ['word2', 'lemma2', 'POS2'], ...].

    As a second argument, the regex objects to be used in the search are passed
    (they are built outside this function to avoid building them again and
    again). As a third argument, a list of shell nouns is passed, taken from
    'settings.py'.

    It first filters only the contexts around shell nouns to speed up the
    subsequent regex. Afterwards, matching results are extracted and converted
    into a human-readable form.
    """
    f = open(item, "r")
    text = [x.split() for x in f.readlines()]  # read the file, transform to list of lists
    f.close()
    shell_noun_locations = [id for id in range(len(text)) if text[id][0] in shell_nouns]  # find where in the text the shell nouns occur
    shell_noun_locations = [[x - 7, x + 7] for x in shell_noun_locations]  # expand the context around the shell nouns to allow the regex to work
    shell_noun_locations = [[x, y] if x >= 0 else [0, y] for x, y in shell_noun_locations]  # make sure the range does not fall off the list (left side)
    shell_noun_locations = [[x, y] if y <= len(text) else [x, len(text)] for x, y in shell_noun_locations]  # make sure the range does not fall off the list (right side)
    contexts = [text[x:y] for x, y in shell_noun_locations]  # extract the relevant contexts from the text
    contexts = [x for x in contexts if x[2] != "y"]  # remove punctuation
    horizontal = [["_".join(x) for x in item] for item in contexts]  # convert to horizontal markup to allow the regex search
    horizontal = [x + (5 * ["0_0_0"]) for x in horizontal]  # add the dummy 0_0_0 to prevent overlap
    horizontal = " ".join([" ".join(context) for context in horizontal])  # transform to plain text
    del shell_noun_locations, contexts, text  # remove shell_noun_locations, text and contexts from memory
    entries = [regex.findall(horizontal) for regex in regexes]  # for each shell noun find the fitting contexts
    entries = [item for sublist in entries for item in sublist]  # flatten from list of lists to list
    entries = [re.sub(r"_\S+|0_0_0", "", x) for x in entries]  # remove tags
    return entries
65a76b7be4037f2e142b20378ac7a8c68dc7f4c2
7,000
def is_numeric(value: str):
    """Return True if the given value consists only of digits."""
    return value.isdigit()
fe61469ab388534a17d079590591378f87078cd3
7,001
import torch


def nb_Genes(w, device="cpu"):
    """Return the number of selected genes from the matrix w.

    INPUT:
        w : (Tensor) weight matrix
    OUTPUT:
        nbG : (Scalar) the number of genes
        indGene_w : (array) the indices of the genes
    """
    d = w.shape[0]
    ind_genes = torch.zeros((d, 1), device=device)  # honor the device argument instead of hardcoding "cpu"
    for i in range(d):
        if torch.norm(w[i, :]) > 0:
            ind_genes[i] = 1
    indGene_w = (ind_genes == 1).nonzero()[:, 0]
    nbG = ind_genes.sum().int()
    return nbG, indGene_w.cpu().numpy()  # move to CPU before .numpy() in case a GPU device was used
ba9d7f150e177799c1fdf4c521859b6879b09997
7,002
def map_mix_query_attr_to_ch(mixing_query):
    """Map the mixing query attributes (tip_angle and phase) to the channel index.
    If the attribute is defined for the channel, use the defined value; else set it to 0.

    Args:
        mixing_query: The mixing query object whose per-channel (ch1..ch3)
            attributes are read.
    """
    attributes = ["tip_angle", "phase"]
    return {
        item: {
            i: (getattr(getattr(mixing_query, f"ch{i+1}"), item) or 0)
            if getattr(mixing_query, f"ch{i+1}") is not None
            else 0
            for i in range(3)
        }
        for item in attributes
    }
9192b9abbc2710b8ebdf172ad34c3dfadf8048ef
7,008
import sys


def interpolate(x, y, x0):
    """Interpolates array y (y = f(x)) at point x0, returning y0.

    x must be in increasing order.
    """
    x_a = 0
    i_a = -1
    x_b = 0
    i_b = -1
    i = 0
    for point in x:
        if point <= x0:
            x_a = point
            i_a = i
        if point > x0:
            x_b = point
            i_b = i
            break
        i = i + 1
    if i_a == -1 or i_b == -1:
        sys.exit("Error interpolating")
    y0 = y[i_a] + (y[i_b] - y[i_a]) * (x0 - x_a) / (x_b - x_a)
    return y0
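A minimal usage sketch (values assumed for illustration):

interpolate([0, 1, 2], [0, 10, 20], 1.5)  # linear interpolation between (1, 10) and (2, 20) -> 15.0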
fc2c7bbbb3b5994fdad7c623b6fb24fcf2f08794
7,011
def generate_file_path(package_path, file_name):
    """
    Dynamically generate full path to file, including filename and extension.

    :param package_path: (array) ordered list of packages in path to test file
    :param file_name: (string) name of the file w/ test, including the extension
    :return: (string) full path to file, including filename and extension
    """
    file_path = ""
    for package in package_path:
        file_path += package + "/"
    return file_path + file_name
a6d2ac12cdc726c4727e23301971e921cab9455b
7,012
def _fullname(attr):
    """Fully qualified name of an attribute."""
    fullname = ""
    if hasattr(attr, "__module__"):
        fullname += attr.__module__
    if hasattr(attr, "__name__"):
        if fullname:
            fullname += "."
        fullname += attr.__name__
    if not fullname:
        fullname = str(attr)
    return fullname
672120f7b16175b9fed091fbdd93456ba5d89004
7,013
import sys


def GetGClientCommand(platform=None):
    """Returns the executable command name, depending on the platform."""
    if not platform:
        platform = sys.platform
    if platform.startswith('win'):
        # Windows doesn't want to depend on bash.
        return 'gclient.bat'
    else:
        return 'gclient'
3f4fee1065f18f8420bba3d79469ade8a1520dc6
7,014
def static(**kwargs):
    """Return a predefined ``dict`` when the given regex matches."""
    return lambda values: kwargs
ac12595cc1b70dd5f9cccd8ae043f650f0ef59c5
7,015
import hashlib


def sha1(string):
    """Compute the sha1 hexdigest of the string."""
    return hashlib.sha1(string.encode('utf-8')).hexdigest()
b663fc501e24a2331f69847024756b97dabc0cd4
7,016
import pytz
from datetime import datetime


def now(timezone):
    """Get the current time in the given timezone.

    Args:
        timezone: The desired timezone as a string, e.g. 'US/Eastern'
    """
    utc = pytz.timezone('UTC').localize(datetime.utcnow())
    return utc.astimezone(pytz.timezone(timezone))
ebd89601ebcb945f01c3e68fbe0f5350e4fc2d0a
7,017
def ReadBlackList(path):
    """Read a blacklist of forbidden directories and files.

    Ignore lines starting with a # so we can comment the datafile.

    Args:
        path: file to load the blacklist from.

    Returns:
        list of blacklisted path entries
    """
    blacklist_file = open(path, 'r')
    catalog = []
    for entry in blacklist_file:
        entry = entry.strip()  # strip first so the empty-line check actually catches lines that only held a newline
        if not entry or entry[:1] == '#':
            pass  # ignore comment and empty lines in blacklist file
        else:
            catalog.append(entry)
    blacklist_file.close()
    return catalog
694b9bd8c09385677d49e8563ac8f08b923cadb0
7,018
import os


def _append_build_id(base_name):
    """Returns base_name with BQ-friendly `GITHUB_SHA` appended."""
    build_id = os.environ.get("GITHUB_SHA", None)
    if not build_id:
        raise Exception("Unable to get build id; env var GITHUB_SHA not set")
    # valid BQ table names only allow underscores and alphanumeric chars
    # https://cloud.google.com/bigquery/docs/tables#table_naming
    table_name = "{}_{}".format(base_name, build_id.replace("-", "_"))
    return table_name
5a9a7c3cd5412a1ea27c1fe25a656eaea2e4bbcf
7,019
def batchify(X, size):
    """
    Splits X into separate batches of the size specified by `size`.

    Args:
        X (list): elements
        size (int): batch size

    Returns:
        list of evenly sized batches, with the last batch holding the
        remaining elements
    """
    return [X[x : x + size] for x in range(0, len(X), size)]
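A quick usage sketch (assumed input):

batchify(list(range(7)), 3)  # -> [[0, 1, 2], [3, 4, 5], [6]]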
d3e4ad015eb3b8bb4cdbaa6bf87a2bc1989c4614
7,020
def listtoslides(data):
    """Checks that the format is correct and adds img and duration elements."""
    slides = []
    for slide in data:
        slide = slide[:2]
        slide[0] = slide[0][:25]
        slide[1] = slide[1][:180]
        slide.append("imgpath")
        slide.append(0)
        slides.append(slide)
    return slides
b4b7180fc5755eff6a32ff8b448f1dfd65ad6f75
7,021
def _scalePoints(points, scale=1, convertToInteger=True):
    """
    Scale points and optionally convert them to integers.
    """
    if convertToInteger:
        points = [
            (int(round(x * scale)), int(round(y * scale)))
            for (x, y) in points
        ]
    else:
        points = [(x * scale, y * scale) for (x, y) in points]
    return points
3ce3fedfbf7c428386af1571cc1a770bd9f66018
7,022
def album_sticker_get(client, album, sticker):
    """Gets a sticker associated with an album."""
    # I am pretty sure that MPD only implements stickers for songs, so
    # the sticker gets attached to the first song in the album.
    tracks = client.find("album", album)
    if len(tracks) == 0:
        return
    return client.sticker_get("song", tracks[0]["file"], "album_" + sticker)
4fd02292c1d7be672de9ccc926f5880c7b831503
7,023
def merge_schemas(a, b, path=None):
    """Recursively zip schemas together."""
    path = path if path is not None else []
    for key in b:
        if key in a:
            if isinstance(a[key], dict) and isinstance(b[key], dict):
                merge_schemas(a[key], b[key], path + [str(key)])
            elif a[key] == b[key]:
                pass
            else:
                print("Overriding '{}':\n\t- {}\n\t+ {}".format(
                    '.'.join(path + [str(key)]), a[key], b[key]))
                a[key] = b[key]
        else:
            print("Adding '{}':\n\t+ {}".format(
                '.'.join(path + [str(key)]), b[key]))
            a[key] = b[key]
    return a
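A small usage sketch (assumed schemas); note the function mutates and returns `a`, printing a line per added or overridden key:

a = {"x": 1, "nested": {"y": 2}}
b = {"nested": {"z": 3}}
merge_schemas(a, b)  # -> {'x': 1, 'nested': {'y': 2, 'z': 3}}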
8915f5e6fa0c352379852b088a9fe111fc27a719
7,025
import argparse


def parse_arguments():
    """
    Basic argument parsing using python's argparse.

    return: Argparse parser object
    """
    parser = argparse.ArgumentParser("Rfam fasta file generation handler")
    parser.add_argument('--seq-db', help="Sequence database in fasta format",
                        action="store", default=None)
    parser.add_argument('--outdir', help="Output directory", action="store")
    parser.add_argument('-f', help='A file with a list of Rfam family accessions',
                        action="store", default=None)
    return parser
60facf1af6361c867938ba567749294b6a02e9f4
7,026
def an_capabilities(b: bytes) -> list:
    """Decode autonegotiation capabilities.

    Args:
        b: coded ***

    Returns:
        human readable ***
    """
    cap: list = []
    i: int = (b[0] << 8) + b[1]

    cap_list = ['1000BASE-T (full duplex mode)',
                '1000BASE-T (half duplex mode)',
                '1000BASE-X (-LX, -SX, -CX full duplex mode)',
                '1000BASE-X (-LX, -SX, -CX half duplex mode)',
                'Asymmetric and Symmetric PAUSE (for full-duplex links)',
                'Symmetric PAUSE (for full-duplex links)',
                'Asymmetric PAUSE (for full-duplex links)',
                'PAUSE (for full-duplex links)',
                '100BASE-T2 (full duplex mode)',
                '100BASE-T2 (half duplex mode)',
                '100BASE-TX (full duplex mode)',
                '100BASE-TX (half duplex mode)',
                '100BASE-T4',
                '10BASE-T (full duplex mode)',
                '10BASE-T (half duplex mode)',
                'Other or unknown']

    for bit in range(len(cap_list)):
        if (i & (2 ** bit)) > 0:
            cap.append(cap_list[bit])

    return cap
1e8b60582ad27ab6c1feafaac3992c4dc0550bbf
7,027
import os


def get_source_feed_from_folder_name(dir_path):
    """
    Get source feed from a TFRecord folder name.

    :param dir_path: TFRecord folder name
    :return: Source feed name
    """
    if os.path.isdir(dir_path):
        dir_name = os.path.basename(dir_path)
        source_feed = dir_name.split("_")[0]
        return source_feed
    raise ValueError(f"The specified path is not a directory: {dir_path}")
aba61f7363fe82a5b2a70fd9c8d83651e2f82706
7,028
import math


def math_logsumexp(data):
    """
    Compute logsumexp with the math module.

    Args:
        data: float array

    Returns:
        Float
    """
    res = []
    for i in data:
        res.append(math.exp(i))
    return math.log(sum(res))
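An illustrative call (assumed values); unlike a max-shifted implementation, this naive form can overflow for large inputs:

math_logsumexp([1.0, 2.0, 3.0])  # log(e**1 + e**2 + e**3), approximately 3.4076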
44a056d2aaa0298c62cc21ae2e224a974956ed8b
7,030
import torch


def stableSoftMax(x):
    """
    stableSoftMax computes a normalized softmax.

    :param x: Tensor List
    :return: Tensor List
    """
    x = torch.exp(x - torch.max(x))
    return x / torch.sum(x)
fa1e017812b7fd0c4e964eafb3fd59eae141203b
7,031
def _get_port(config):
    """Get the server's port from configuration."""
    if not config.has_option("server", "port"):
        return None
    port = config.getint("server", "port")
    return port
bee579fcfc82ea80c593dc7bd93ff3d39e63ef7b
7,033
def superkeyword_presence(document, superkeywords):
    """Return True if document contains any superkeywords, False if not."""
    for word in superkeywords:
        if word in document.split():
            return True
    return False
4b3223190651873d27562cc475ff623aa4cb5b47
7,034
def CleanUserUrl(user_url: str) -> str:
    """Clean the user_url: strip whitespace and remove useless query parameters."""
    user_url = user_url.strip()
    return user_url.split("?")[0]
7c5c2cf5879d4ddfdbd1a60a679a747f162ebe35
7,036
def delete_nonestimator_parameters(parameters):
    """Delete non-estimator parameters.

    Delete all parameters in a parameter dictionary that are not used for the
    actual estimator.
    """
    if 'Number' in parameters.keys():
        del parameters['Number']

    if 'UsePCA' in parameters.keys():
        del parameters['UsePCA']
        del parameters['PCAType']

    if 'ReliefUse' in parameters.keys():
        del parameters['ReliefUse']
        del parameters['ReliefNN']
        del parameters['ReliefSampleSize']
        del parameters['ReliefDistanceP']
        del parameters['ReliefNumFeatures']

    if 'OneHotEncoding' in parameters.keys():
        del parameters['OneHotEncoding']
        del parameters['OneHotEncoding_feature_labels_tofit']

    if 'Imputation' in parameters.keys():
        del parameters['Imputation']
        del parameters['ImputationMethod']
        del parameters['ImputationNeighbours']

    if 'SelectFromModel' in parameters.keys():
        del parameters['SelectFromModel']
        del parameters['SelectFromModel_lasso_alpha']
        del parameters['SelectFromModel_estimator']
        del parameters['SelectFromModel_n_trees']

    if 'Featsel_Variance' in parameters.keys():
        del parameters['Featsel_Variance']

    if 'FeatPreProcess' in parameters.keys():
        del parameters['FeatPreProcess']

    if 'FeatureScaling' in parameters.keys():
        del parameters['FeatureScaling']

    if 'StatisticalTestUse' in parameters.keys():
        del parameters['StatisticalTestUse']
        del parameters['StatisticalTestMetric']
        del parameters['StatisticalTestThreshold']

    if 'Resampling_Use' in parameters.keys():
        del parameters['Resampling_Use']
        del parameters['Resampling_Method']
        del parameters['Resampling_sampling_strategy']
        del parameters['Resampling_n_neighbors']
        del parameters['Resampling_k_neighbors']
        del parameters['Resampling_threshold_cleaning']
        del parameters['Resampling_n_cores']

    if 'random_seed' in parameters.keys():
        del parameters['random_seed']

    return parameters
d84984d182a5945167b8e7880e493aa8fad832b7
7,037
def GetMaxIndex(tree):
    """Get the maximum node number."""
    return tree.id
f443a9006765dface834aa9120c3ab38cd1d4369
7,038
def get_coord_limits(coord):
    """Get coordinate limits."""
    lower_limit = float('.'.join([str(coord).split('.')[0], str(coord).split('.')[1][:2]]))
    if lower_limit > 0:
        upper_limit = lower_limit + 0.01
    else:
        tmp = lower_limit - 0.01
        upper_limit = lower_limit
        lower_limit = tmp
    return lower_limit, upper_limit
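A usage sketch with an assumed coordinate (the 0.01 step may carry floating-point noise):

get_coord_limits(12.3456)  # -> (12.34, ~12.35)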
803c0804e34a97d46a9555b4566be72949f55e8d
7,039
import glob
import os


def get_current_phase(path):
    """Returns the current phase of the current iteration."""
    files = glob.glob(path + "/phase_*.sh")
    phases = [0]
    for file in files:
        file = os.path.basename(file)
        phase = file.split(".")[0]  # -> phase_x
        phase_num = int(phase.split("_")[-1])  # -> x
        phases.append(phase_num)
    return max(phases)
4f8f74fa4d71551c27f7757c28990a7b00fea55b
7,041
def robust_scale(df):
    """Return copy of `df` scaled by (df - df.median()) / MAD(df),
    where MAD is a function returning the median absolute deviation."""
    median_subtracted = df - df.median()
    mad = median_subtracted.abs().median()
    return median_subtracted / mad
ba9ce747612c99997d890930e7ac7c582ba1af70
7,043
def _jsarr(x):
    """Return a string that would work for a javascript array."""
    return "[" + ", ".join(['"{}"'.format(i) for i in x]) + "]"
9c9b6df65bf4c01fa1c321445bbb7c86c6d28c5a
7,044
def join_kwargs(**kwargs) -> str:
    """
    Joins keyword arguments and their values in braces.

    Example: key1{value1}_key2{value2}
    """
    return "_".join(key + "{" + value + "}" for key, value in kwargs.items())
3054573ec51676bb8d93e2fcabd4cb5097e4b897
7,046
def byte_builtin():
    """byte: Immutable bytes array."""
    return bytes("\xd0\xd2NUT", "utf-8").decode()
62e0d556d20ece651adb0a13b11d07670e0ea4f6
7,047
def extract_column_names(row_list):
    """
    Extract names of columns from the row list obtained from the table csv.
    The first row contains all column names.

    :param row_list: List of all rows in the csv used for table creation
    :return: List of names present in the table csv
    """
    return row_list[0]
2adef82a7f583c262922ad28aa0de47b8b9b5e51
7,049
def index_to_point(index, origin, spacing):
    """Transform voxel indices to image data point coordinates."""
    x = origin[0] + index[0] * spacing[0]
    y = origin[1] + index[1] * spacing[1]
    z = origin[2] + index[2] * spacing[2]
    return (x, y, z)
072f1ad5d1adc1e81d4771725475f6a07f32f3ce
7,050
def MakeEmptyTable(in_table=[[]], row_count=0, column_count=0):
    """
    1) Read in *in_table*.
    2) Create an empty table of '' values with the same number of rows and
       columns as the table (where columns are based on the first row).
    3) If the user has specified *row_count* and/or *column_count*, then these
       will be used in place of the dimensions of *in_table*. Therefore
       *in_table* can be omitted from input if BOTH row_count and column_count
       are set, e.g. MakeEmptyTable(row_count=1000, column_count=200).
       And if *in_table* is used, then the other 2 inputs are optional, e.g.
       MakeEmptyTable(CSVdata, row_count=1000) will use the column count of
       CSVdata but a row count of 1000.

    Args:
        in_table: <type 'list'> 2D table as list of lists, which may contain
            table data in rows and columns, e.g.
            [['abc', 'def', 'hij'],
             ['232', '965', 'TES'],
             ['235', '768', 'QWE']]
        row_count: <type 'int'> number of empty rows to create.
            If this is not set, then the in_table row count will be used.
        column_count: <type 'int'> number of empty columns to create in each
            row. If this is not set, then the column count of the *in_table*
            first row will be used.
    """
    if not row_count:
        row_count = len(in_table)  # length of table
    if not column_count:
        column_count = len(in_table[0])  # length of first row of table
    # build each row as its own list; repeating one row object with `*` would
    # alias every row, so editing one cell would change the whole column
    blank_table = [[""] * column_count for _ in range(row_count)]
    return blank_table
55b32c2914cb8e2194999e1b8ba2211373ef1bcb
7,051
import torch


def choose_pseudo_gt(boxes, cls_prob, im_labels):
    """Get proposals with highest score. Inputs are all variables."""
    num_images, num_classes = im_labels.size()
    boxes = boxes[:, 1:]
    assert num_images == 1, 'batch size should be equal to 1'
    im_labels_tmp = im_labels[0, :]

    gt_boxes = []
    gt_classes = []
    gt_scores = []
    for i in range(num_classes):
        if im_labels_tmp[i].data.cpu().numpy() == 1:
            max_value, max_index = cls_prob[:, i].max(0)
            gt_boxes.append(boxes[max_index])
            gt_classes.append(torch.ones(1, 1) * (i + 1))  # return idx=class+1 to include the background
            gt_scores.append(max_value.view(-1, 1))

    gt_boxes = torch.cat(gt_boxes)
    gt_classes = torch.cat(gt_classes)
    gt_scores = torch.cat(gt_scores)
    proposals = {'gt_boxes': gt_boxes,
                 'gt_classes': gt_classes,
                 'gt_scores': gt_scores}

    return torch.cat([gt_boxes, gt_classes], 1), proposals
7565a9a72052c839cad5cf12033fdf144bfcb4b0
7,052
import math, random


def split_list(data, splits={'train': 8, 'test': 2}, shuffle=True, seed=0):
    """
    Split the list according to a given ratio.

    Args:
        data (list): a list of data to split
        splits (dict): a dictionary specifying the ratio of the splits
        shuffle (bool): shuffle the list before splitting
        seed (int): random seed used for shuffling

    Returns:
        a dictionary of the split lists
    """
    data = data.copy()  # work on a copy of the original list
    n_tot = len(data)
    split_tot = float(sum([v for v in splits.values()]))
    n_split = {k: math.ceil(n_tot * v / split_tot) for k, v in splits.items()}

    if shuffle:
        random.seed(seed)
        random.shuffle(data)

    splitted = {}
    cnt = 0
    for k, v in n_split.items():
        splitted[k] = data[cnt:cnt + v]
        cnt += v
    return splitted
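A usage sketch with shuffling disabled so the output is deterministic (assumed inputs):

split_list(list(range(10)), splits={'train': 8, 'test': 2}, shuffle=False)
# -> {'train': [0, 1, 2, 3, 4, 5, 6, 7], 'test': [8, 9]}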
d9b25512e666a03ec2b589850c47a45231b279a0
7,053
import json


def load_json(filename, user_limit=0, badge_limit=0):
    """Loads data from JSON."""
    with open(filename) as f:
        data = json.loads(f.read())
    if user_limit:
        data['transactions'] = data['transactions'][:user_limit]
    if badge_limit:
        data['badges'] = data['badges'][:badge_limit]
        data['transactions'] = [[b for b in t if b < badge_limit]
                                for t in data['transactions']]
    return data
79890f86dcc0090f89b7c401959e3e2dfc86828c
7,055
def constraint_wrapper(fun, constraint):
    """
    Wrap a function such that its first argument is constrained.
    """
    def inner(x, *args, **kwargs):
        """the wrapped function"""
        return fun(constraint(x), *args, **kwargs)
    return inner
f9055fe2cd269e4586c545bfa951bdf2ba0677c1
7,056
def can_cast(value, class_type):
    """
    Check if the value can be cast to the class_type; used in the parse tcl
    string function for tcl expressions like [Ada inputs 0] or [Ada alias robotblur].

    Args:
        value (object): The object we're attempting to cast.
        class_type (class): The class we're attempting to cast to.

    Returns:
        bool: Whether the value can be successfully cast
    """
    try:
        class_type(value)
        return True
    except ValueError:
        return False
85c415d2eaadb16a532e209110c1fd0f778cb681
7,058
def build(argmap, data):
    """Builds an array of arguments from the provided map and data.

    The argmap must consist of a mapping of keys to argbuilder functions.
    Keys in the argmap are indexed into data, and if they are present, the
    corresponding values are passed to the corresponding argmap function.
    The argmap function returns one or more array elements to be added to
    the returned array.
    """
    args = []
    for name, fn in argmap.items():  # .items() replaces the Python 2-only .iteritems()
        if name in data:
            args += fn(data[name])
    return args
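A usage sketch (the argmap below is hypothetical, not from the source):

argmap = {"verbose": lambda v: ["-v"] if v else []}
build(argmap, {"verbose": True})  # -> ['-v']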
d9c6c9eede6d6a9ae36fea77dceb3c70cbfbdbbd
7,059
from typing import List


def getTestFragments() -> List[str]:
    """
    Returns a small list of testing fragments.

    >>> len(getTestFragments())
    5
    """
    return ['tttttt', 'ttttgg', 'tggaga', 'agacgc', 'cgcggg']
4bd66b0a0c90df0d20f3d66d9b789a31419e63f6
7,060
def powerlaw_dl(p, x):
    """p are the parameters of the model."""
    return p[0] * (x / 80.) ** (p[1] + 2)
d2d0a331b260514a3e5a6dd0d78d2a55e6d1eedd
7,061
def get_forecast_metadata_variables(ds):
    """
    Returns a list of variables that represent forecast reference time metadata.

    :param netCDF4.Dataset ds: An open netCDF4 Dataset.
    :rtype: list
    """
    forecast_metadata_standard_names = {
        "forecast_period",
        "forecast_reference_time",
    }
    forecast_metadata_variables = []
    for varname in ds.variables:
        standard_name = getattr(ds.variables[varname], "standard_name", None)
        if standard_name in forecast_metadata_standard_names:
            forecast_metadata_variables.append(varname)
    return forecast_metadata_variables
83b8fe0eb785c1a3129ec19df680ce135cd3fa82
7,062
def feet(i):
    """
    feet(i)

    Return i (in inches) converted to feet.
    """
    return i / 12
bff46c1399aabee1f589dea98c9c43ced30d0756
7,063
def get_16bytes_from_seed(n):
    """TI pseudo-random number generator."""
    mod1 = 2147483563
    mod2 = 2147483399
    mult1 = 40014
    mult2 = 40692

    if n:
        seed1 = (mult1 * n) % mod1
        seed2 = n % mod2
    else:
        seed1 = 12345
        seed2 = 67890

    result_arr = bytearray(16)
    for i in range(16):
        seed1 = (seed1 * mult1) % mod1
        seed2 = (seed2 * mult2) % mod2
        result = (seed1 - seed2) / mod1
        if result < 0:
            result = result + 1
        result_arr[i] = int(result * 256.)
    return result_arr
4e1d3aad238d0d6a13921052b12d18a474ad9b08
7,064
def sum_merge_tables(df1, df2, policy_id_column_name, df1_group_by, df2_group_by,
                     df1_kpis, df2_kpis, df_no_dupl=None):
    """
    Sums two dataframes separately and merges them.

    Arguments --> the two dataframes to sum and merge,
                  the policy id column name,
                  the variables to aggregate on in the two dfs,
                  the kpis to derive in the two dfs,
                  a df that contains the portfolio features and no policy
                  duplicates; this df is used when the full data for risk
                  prediction must be obtained.
    Returns --> A merged df with the kpis summed adequately
    """
    # Figures must be calculated on portfolio and claims separately, then merged
    # on the same intersection variables (i.e. the portfolio features) used for
    # the aggregation.
    # Here, the analysis is not done by features but only by claims attributes.
    # The portfolio kpis like exposure and premium stay the same: they don't
    # vary with claims attributes.
    if df1_group_by is None or len(df1_group_by) == 0:
        df1_group_by = ['Total']
        df1_sum = df1[df1_kpis].sum()
        # Convert the pandas series and transpose it so that the column Total
        # serves for the merge with the claims df
        df1_sum = df1_sum.to_frame().T
        df1_sum['Total'] = 'Total'
        df2_sum = df2.groupby(df2_group_by)[df2_kpis].sum().reset_index()
        df2_sum['Total'] = 'Total'
    else:
        df1_sum = df1.groupby(df1_group_by)[df1_kpis].sum().reset_index()
        df2_sum = df2.groupby(df1_group_by + df2_group_by)[df2_kpis].sum().reset_index()

    # Merge portfolio and claims data on the variables that served to aggregate the two dfs
    df_merged = df1_sum.merge(df2_sum, how='left', on=df1_group_by).set_index(keys=df1_group_by + df2_group_by)

    # The steps above have removed the features; we get them back via the df
    # passed in the argument, which corresponds to the portfolio data with
    # features and no duplicates
    if df_no_dupl is not None:
        df_merged = df_no_dupl.merge(df_merged, how='left', on=policy_id_column_name).reset_index(drop=True)

    return df_merged
955505efc0535f198c4ac557d62f05350e315b31
7,066
def thcf_partial1(x):
    """
    Partial derivative of the Three-Hump Camel Function with respect to x1.
    """
    partial = x[1] + 4 * x[0] - 4.2 * (x[0] ** 3) + (x[0] ** 5)
    return partial  # Gradient 1
cccc2505978cc49fffa0a445157ba5fdaf0abc30
7,067
def leftmostNonzeroEntries(M):
    """Returns the leftmost nonzero entries of M."""
    return [abs(M[l][M.nonzero_positions_in_row(l)[0]])
            for l in range(0, M.dimensions()[0])
            if M.nonzero_positions_in_row(l) != []]
9e42297dc3000a41dcdceebff10c4fc53e1709ac
7,068
def is_sequence(arg):
    """Returns True if the passed arg is a list or a tuple."""
    return isinstance(arg, (list, tuple))
12a0a0186695f8b79a48905a22c0c1c69cde219f
7,069
import torch


def pad_shift(x, shift, padv=0.0):
    """Shift 3D tensor forwards in time with padding."""
    if shift > 0:
        padding = torch.ones(x.size(0), shift, x.size(2)).to(x.device) * padv
        return torch.cat((padding, x[:, :-shift, :]), dim=1)
    elif shift < 0:
        padding = torch.ones(x.size(0), -shift, x.size(2)).to(x.device) * padv
        return torch.cat((x[:, -shift:, :], padding), dim=1)
    else:
        return x
95f883714222787eb5fd92f7a0c6f1777c989399
7,070
def super_reduced_string(s):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/reduced-string/problem

    Steve has a string of lowercase characters in range ascii['a'..'z']. He
    wants to reduce the string to its shortest length by doing a series of
    operations. In each operation he selects a pair of adjacent lowercase
    letters that match, and he deletes them. For instance, the string aab could
    be shortened to b in one operation. Steve's task is to delete as many
    characters as possible using this method and print the resulting string.
    If the final string is empty, print Empty String.

    Args:
        s (str): String to reduce

    Returns:
        str: the reduced string, or "Empty String" if it's empty
    """
    cur_string = s
    while True:
        found = False
        for i in range(1, len(cur_string)):
            if cur_string[i - 1] == cur_string[i]:
                found = True
                cur_string = cur_string.replace(cur_string[i] * 2, "", 1)
                break
        if not found:
            break
    if not cur_string:
        cur_string = "Empty String"
    return cur_string
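A usage sketch on the problem's sample input:

super_reduced_string("aaabccddd")  # -> 'abd'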
09c48f38a877ff9ae92b985bd793224bd81247c8
7,071
import os


def parsefile(settingfile):
    """Parse the input settingfile."""

    def func_remove_comments(string):
        if string.find('#') != -1:
            string = string[:string.find('#')]
        return string.strip()

    if not os.path.isfile(settingfile):
        print('not a file: {:}'.format(settingfile))
        raise FileNotFoundError('not a file')

    log = {'nice': True, 'info': ''}
    # this universal string is used for the error message on exit
    error_info = 'Error: the input setting file is wrong'

    infile = []
    with open(settingfile, mode='rt') as f:
        nm = 0
        while True:
            line = f.readline()
            nm += 1
            if len(line) == 0:
                break
            line = line.strip()
            if len(line) == 0 or line[0] == '#':
                continue
            info = str(nm) + ': ' + line
            # help append `='
            if line.find('=') == -1:
                line += ' ='
            lp = line.split('=', maxsplit=1)
            if len(lp[0].split()) != 1:
                log['nice'] = False
                log['info'] = 'Error: ' + info
                break
            key = lp[0].strip().lower().replace('-', '_')
            # take care of the 'basis_set' comment exception
            if key == 'basis_set':
                ndx_first = lp[1].find('#')
                if ndx_first == -1:
                    strtmp = ''
                    for i in lp[1].split():
                        strtmp += i + ' '
                    value = strtmp.strip()
                elif ndx_first == len(lp[1]):
                    # only `#' exists
                    value = '#'
                else:
                    ndx_second = lp[1][ndx_first + 1:].find('#')
                    if ndx_second == -1:
                        strtmp = ''
                        for i in lp[1].split():
                            strtmp += i + ' '
                        value = strtmp.strip()
                    else:
                        strtmp = ''
                        for i in lp[1][:ndx_first + ndx_second + 1].split():
                            strtmp += i + ' '
                        value = strtmp.strip()
            else:
                value = func_remove_comments(lp[1])

            # take care of special cases
            if key == 'atomtype_list':
                value = value.replace('"', ' ').replace("'", ' ')
                value = value.replace(';', ' ').replace(',', ' ').strip()
                if len(value) != 0 and value[0] == '[':
                    value = value[1:].strip()
                if len(value) != 0 and value[-1] == ']':
                    value = value[:len(value) - 1].strip()
                if len(value) == 0:
                    value = None
                else:
                    value = value.split()
            elif key in ['symmetry_list', 'counter_list', 'offset_list']:
                if len(value) == 0:
                    value = None
                else:
                    if value[0] != '[':
                        value = '[' + value
                    if value[-1] != ']':
                        value = value + ']'
                    try:
                        value = eval(value)
                    except:
                        log['nice'] = False
                        log['info'] = 'Error: ' + info
                        break
            elif key == 'charge_spin':
                lr = value.split()
                if len(lr) == 0:
                    value = '0 1'
                elif len(lr) == 2:
                    value = lr[0] + ' ' + lr[1]
                else:
                    log['nice'] = False
                    log['info'] = 'Error: ' + info
                    break
            else:
                if len(value) == 0:
                    value = None
                elif len(value.split()) != 1:
                    log['nice'] = False
                    log['info'] = 'Error: ' + info
                    break

            infile.append([key, value, info])

    if not log['nice']:
        return log, []

    # process infile into different blocks
    i = 0
    profile = []
    while i < len(infile):
        ls = []
        if infile[i][0] == 'command':
            ls.append(infile[i])
            j = i + 1
            while j < len(infile) and infile[j][0] != 'command':
                ls.append(infile[j])
                j += 1
            profile.append(ls)
            i = j
        else:
            i += 1

    if len(profile) == 0:
        log['nice'] = False
        log['info'] = 'Error: no command is found'
        return log, []

    return log, profile
7a4596f3204d9e84c8e6d507b8fd47cd8497e4fa
7,072
import torch


def batch_img(data, vocab):
    """Pad and batch a sequence of images."""
    c = data[0].size(0)
    h = max([t.size(1) for t in data])
    w = max([t.size(2) for t in data])
    imgs = torch.zeros(len(data), c, h, w).fill_(1)
    for i, img in enumerate(data):
        imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
    return imgs
e123813b14e7e35bea4786a7fc1772bdfb0673a1
7,074
def rotate_tour(tour, start=0):
    """
    Rotate a tour so that it starts at the given ``start`` index. This is
    equivalent to rotating the input list to the left.

    Parameters
    ----------
    tour: list
        The input tour

    start: int, optional (default=0)
        New start index for the tour

    Returns
    -------
    rotated: list
        The rotated tour
    """
    idx = tour.index(start)
    if idx != 0:
        rotated = tour[idx:] + tour[:idx]
    else:
        rotated = tour
    return rotated
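A usage sketch (assumed tour):

rotate_tour([2, 0, 1, 3], start=0)  # -> [0, 1, 3, 2]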
b2356aaecb00dc993e88b5f7105c0b4aed495521
7,075
def no_space(original_str, keyslist, replace_str):
    """
    :param original_str: A string found in the keyslist
    :param keyslist: A list of keys found in each of the two knowledge graph
        dictionaries (nodes and edges)
    :param replace_str: A string to replace the original_str in the keyslist
    """
    # Find the location of the original string in the list
    index_str = keyslist.index(original_str)
    # Remove the original string from the list
    keyslist.remove(original_str)
    # Insert the new string where the original string used to be
    keyslist.insert(index_str, replace_str)
    return keyslist
1184effc9dc301b234e01c469839783f2c16e25b
7,076
def format_data(value):
    """Return the value converted to a percentage, rounded to one decimal place."""
    return round(value * 100, 1)
add5b16641f3cd0fef69f2cb54ce9448a2e1ad1b
7,077
import torch


def infer_mask(seq, eos_ix, batch_first=True, include_eos=True, dtype=torch.float):
    """
    Compute a validity mask given output indices and the eos code.

    :param seq: matrix [batch, time] if batch_first else [time, batch]
    :param eos_ix: integer index of the end-of-sentence token
    :param include_eos: if True, the time-step where eos first occurs has mask = 1
    :returns: mask of the given dtype, same shape as seq
    """
    assert seq.dim() == 2
    is_eos = (seq == eos_ix).to(dtype=torch.float)
    if include_eos:
        if batch_first:
            is_eos = torch.cat((is_eos[:, :1] * 0, is_eos[:, :-1]), dim=1)
        else:
            is_eos = torch.cat((is_eos[:1, :] * 0, is_eos[:-1, :]), dim=0)
    count_eos = torch.cumsum(is_eos, dim=1 if batch_first else 0)
    mask = count_eos == 0
    return mask.to(dtype=dtype)
6038291eecc96898491e685c63f69187dca7e8fc
7,078
def filter_polygons(state, header):
    """
    Removes any non-polygon sources from the state file.

    We are only interested in parsing parcel data, which is marked as
    Polygon in the state file.
    """
    filtered_state = []
    for source in state:
        if 'Polygon' in source[header.index('geometry type')]:
            filtered_state.append(source)
    return filtered_state
d100e6a4e87dccdc42c7217dc1e793e4353237e2
7,081
import unicodedata


def unicode_normalize(text):
    """Return the given text normalized to Unicode NFKC."""
    normalized_text = unicodedata.normalize('NFKC', text)
    return normalized_text
1b6defacd09665412a1b31dd48f5291c4984d044
7,083
import importlib


def import_obj(clsname, default_module=None):
    """
    Import the object given by clsname.
    If default_module is specified, import from this module.
    """
    if default_module is not None:
        if not clsname.startswith(default_module + '.'):
            clsname = '{0}.{1}'.format(default_module, clsname)
    mod, clsname = clsname.rsplit('.', 1)
    mod = importlib.import_module(mod)
    try:
        obj = getattr(mod, clsname)
    except AttributeError:
        raise ImportError('Cannot import {0} from {1}'.format(clsname, mod))
    return obj
8cb064348b7b38e1e3f659240f4bc9677237d3fd
7,084
def _evaluateMockedResponse(table, cmd_dict, success=True, is_save=True):
    """
    Evaluates if the response from a mock is as expected.

    :param Table table:
    :param dict cmd_dict:
    :param bool success: value of success
    :param bool is_save: value of is_save
    """
    response, returned_is_save = table.processCommand(cmd_dict)
    return (response['success'] == success) and returned_is_save == is_save
20b5a3ff800e012cffc65fd0876656ced9e45f89
7,085
def cap_rating(flight_phase, short_period_cap, short_period_damping):
    """
    Give a rating of the short period mode flight quality level (Level 1, 2, 3,
    or 4) according to MIL-STD-1797A.

    Args:
        flight_phase (string): 'A', 'B' or 'C'
        short_period_cap (float): Control Anticipation Parameter [1/(g*s^2)]
        short_period_damping (float): short period damping [-]

    Returns:
        cap_rate (int): 1, 2, 3, or 4 if the damping is outside the Level 3
            limit, corresponding to the flight quality rating
    """
    if flight_phase == 'A':
        if 0.35 <= short_period_damping <= 1.3:
            if 0.28 <= short_period_cap <= 3.6:
                cap_rate = 1
            elif 0.16 <= short_period_cap <= 310:
                cap_rate = 2
            else:
                cap_rate = 3
        elif 0.25 <= short_period_damping <= 2:
            if 0.16 <= short_period_cap <= 10:
                cap_rate = 2
            else:
                cap_rate = 3
        elif 0.15 <= short_period_damping:
            cap_rate = 3
        else:
            cap_rate = None
    elif flight_phase == 'B':
        if 0.3 <= short_period_damping <= 2:
            if 0.085 <= short_period_cap <= 3.6:
                cap_rate = 1
            elif 0.038 <= short_period_cap <= 10:
                cap_rate = 2
            else:
                cap_rate = 3
        elif 0.2 <= short_period_damping <= 0.3:
            if 0.038 <= short_period_cap <= 10:
                cap_rate = 2
            else:
                cap_rate = 3
        else:
            cap_rate = 3
    else:  # flight_phase == 'C'
        if 0.35 <= short_period_damping <= 1.3:
            if 0.16 <= short_period_cap <= 3.6:
                cap_rate = 1
            elif 0.05 <= short_period_cap <= 10:
                cap_rate = 2
            else:
                cap_rate = 3
        elif 0.25 <= short_period_damping <= 2:
            if 0.05 <= short_period_cap <= 10:
                cap_rate = 2
            else:
                cap_rate = 3
        elif 0.15 <= short_period_damping:
            cap_rate = 3
        else:
            cap_rate = None

    return cap_rate
2ecebb417f32320213a916ca1366f36dd473fc62
7,087
from datetime import datetime


def create_directory_content_output(share_name: str, raw_response: dict, directory_path: str = "") -> dict:
    """
    Create XSOAR context output for the list directory command.

    Args:
        share_name (str): Share name.
        raw_response (dict): Request raw response.
        directory_path (str): Source directory path.

    Returns:
        dict: XSOAR command context output.
    """
    xml_path = ['Directory', 'File']
    outputs = {"Name": share_name,
               "Content": {"Path": directory_path, "DirectoryId": raw_response['DirectoryId']}}

    time_headers = ['CreationTime', 'LastAccessTime', 'LastWriteTime', 'ChangeTime']

    for path in xml_path:
        for element in raw_response.get(path):  # type: ignore
            for header in time_headers:
                str_time = element['Properties'].get(header)  # type: ignore
                str_time = str_time[:-2] + 'Z'
                element['Properties'][header] = FormatIso8601(  # type: ignore
                    datetime.strptime(str_time, GENERAL_DATE_FORMAT))  # type: ignore

            element['Properties']['Last-Modified'] = FormatIso8601(  # type: ignore
                datetime.strptime(element['Properties']['Last-Modified'], DATE_FORMAT))  # type: ignore

            element['Property'] = element.pop('Properties')  # type: ignore

    outputs["Content"].update(raw_response)  # type: ignore

    return outputs
a23b3ce74f4274c2ee59823a3f9a976943b94a5a
7,089
import json


def loads_json(fid, **kwargs):
    """
    Parses a JSON string and returns it as a dict.

    :param fid: String (or another object accepted by json.loads)
    :param kwargs: See ``json.loads()``.
    :return: Content of the JSON string.
    """
    assert isinstance(fid, str), fid
    return json.loads(fid, **kwargs)
ccdb62568982f6a0862dd5c8167b2e1d177137ea
7,090
import subprocess


def shutdown_pi():
    """Shuts down the Pi system."""
    subprocess.Popen(["sudo", "shutdown", "-h", "now"])
    return True
b0f80afecd4682529daed2a14497c07c36598c64
7,091
def weighting(distance):
    """Weighting function for pyresample."""
    weight = 1 / distance**2
    return weight
2af699d6daef7a3d375fe80ee79f7414047921c8
7,093
def average_precision_at_k(targets, ranked_predictions, k=None):
    """Computes AP@k given targets and ranked predictions."""
    if k:
        ranked_predictions = ranked_predictions[:k]
    score = 0.0
    hits = 0.0
    for i, pred in enumerate(ranked_predictions):
        if pred in targets and pred not in ranked_predictions[:i]:
            hits += 1.0
            score += hits / (i + 1.0)
    divisor = min(len(targets), k) if k else len(targets)
    return score / divisor
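A usage sketch with assumed targets and ranking:

average_precision_at_k({1, 2}, [1, 3, 2], k=3)  # (1/1 + 2/3) / 2, approximately 0.8333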
57e21a1ca8b8f7fccc0b5c59dbfc799d8618fc93
7,095
def _PureShape(shape):
    """Make sure shape does not contain int tensors by calling int()."""
    return [int(x) for x in shape]
1cebacd516cbf223833342ecffb6f8d9fe22ff2d
7,096
def char_to_number(value: str, required_type: str) -> str:
    """Converts the string representation of a number (int or float) to a number.

    Args:
        value (str): String representation of a number
        required_type (str): Output type desired (bigint or double)

    Raises:
        Exception: The conversion to a python integer (using the int built-in method) failed
        Exception: The conversion to a python float (using the float built-in method) failed
        NotImplementedError: required_type was neither bigint nor double

    Returns:
        str: Numerical representation of the input string
    """
    value = value.strip("'").strip('"')  # Remove potential character markers
    if required_type == "bigint":
        try:
            assert f"{int(value)}" == value  # Make sure the str representation does not change
            return value
        except (TypeError, AssertionError, ValueError):
            msg = (
                f"A 'bigint' type is expected by Presto for this function, but {value} "
                "was provided which either cannot be casted or does not seem to represent an integer."
            )
            raise Exception(msg)
    elif required_type == "double":
        try:
            assert f"{float(value)}" == value
            return value
        except (TypeError, AssertionError, ValueError):
            msg = (
                f"A 'double' type is expected by Presto for this function, but {value} "
                "was provided which either cannot be casted or does not seem to represent a float."
            )
            raise Exception(msg)
    else:
        raise NotImplementedError
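Usage sketches (assumed inputs):

char_to_number("'42'", "bigint")   # -> '42'
char_to_number('"1.5"', "double")  # -> '1.5'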
cb58f24100f961d4f260473cee4fa82621eb5887
7,097
def _validate_index_string(index_string: str) -> bool:
    """
    Handle validation of an index string from the argument.
    The string should be in the format 't{%d}', 'r{%d}' or 'd{%d}'.

    Parameters:
        index_string: the string we want to validate

    Returns:
        a boolean indicating whether the index string is valid
    """
    if len(index_string) < 2:
        print("Invalid index string length!")
        return False
    elif index_string[0] != 't' and index_string[0] != 'r' and index_string[0] != 'd':
        print("Invalid index string prefix!")
        return False
    elif not index_string[1:].isnumeric():
        print("Index needs to have a number suffix!")
        return False
    else:
        return True
aa6d7d0aba2a0378d2e1c836b78b0ec943cc1bdb
7,098
def get_r_GU(r_HPU):
    """Hourly average heating output share of the gas unit (15).

    Args:
        r_HPU (ndarray): Hourly average heating output share of the heat pump unit (-)

    Returns:
        ndarray: Hourly average heating output share of the gas unit
    """
    return 1 - r_HPU
dc4c7a913ff7402a10d9aad08bab4f3646cff904
7,099
def onset_by_rain(date, df, window=5, rain_threshold=5):
    """
    Finds true storm onset by finding the first date around the landfall on
    which rain exceeds a threshold.

    Args:
        date: the date to look around
        df: df with a date and rain column
        window: number of days around date to find max (total window size is window*2)
        rain_threshold: mm of rain to consider a storm to have started

    Returns:
        a datetime object and the corresponding index
    """
    mask = df['Date'] == date
    storm_row = df[mask]
    storm_ind = int(storm_row.index[0])

    sub_df = df.iloc[(storm_ind - window):(storm_ind + window)]
    if sub_df.Rain.dropna().empty:  # if there's no rain data
        return date, storm_ind

    ind = sub_df.Rain.idxmax()
    val = df.Rain.iloc[ind]
    while val > rain_threshold:
        ind -= 1
        val = df.Rain.iloc[ind]
    # ind += 1
    return df['Date'].iloc[ind], ind
87e4d1f35114974a004c5b923aea05ed835cf9a7
7,100
def get_company():
    """Gets the company for the contact entered."""
    while True:
        company = input("Please input a company\n")  # prompt inside the loop so an empty answer re-prompts instead of looping forever
        if company == "":
            print("Please input a company")
        else:
            break
    return company
324d49384ab80aad3c8c09a3707ad9fd01929166
7,101
def get_odds_labels(nfunc, adfam=False):
    """Labels used for odds in results_df."""
    if adfam:
        col_names = [r'$P(T={},N={})$'.format(1, i + 1) for i in range(nfunc)]
        col_names += [r'$P(T={},N={})$'.format(2, i + 1) for i in range(nfunc)]
        col_names += [r'$P(T=1)$']
    else:
        col_names = [r'$P(N={})$'.format(i + 1) for i in range(nfunc)]
    return col_names
ca874d9da52dfe49625305cde62ef7489439eb00
7,104
def get_validation_context(self):
    """
    Retrieves the validation context.

    :rtype: String
    :return: The validation context.
    """
    return self.validation_context
b025b742a6fd5a537752f897eb8ed88ed56e5a21
7,105
def _cache_deserialize(func):
    """Simple caching decorator."""
    def cache_decorator(self, *args, **kwargs):
        if self._deserialization_done:
            return self._deserialization_result
        self._deserialization_result = func(self, *args, **kwargs)
        self._deserialization_done = True
        return self._deserialization_result
    return cache_decorator
787e4cc382f08bffc00c6811cae8272d8d759b0f
7,107
def correct_directory_path(directory_path):
    """
    Attempts to convert the directory path to a proper one by removing any
    double slashes next to one another.

    Args:
        directory_path: String of a potential directory path.

    Returns:
        Returns the fixed path.
    """
    last_char = None
    new_string = ""
    for char in directory_path:
        if last_char and (last_char == "/" and char == "/"):
            pass
        else:
            new_string += char
        last_char = char

    if new_string[-1] != "/":
        new_string += "/"

    return new_string
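A usage sketch (assumed path):

correct_directory_path("data//raw/images")  # -> 'data/raw/images/'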
b403ebaa93765a7df4e6033f5bc5f924f2fe312a
7,108
def fused_laplacian_pyramid(gauss_pyramid_mod1, gauss_pyramid_mod2,
                            lap_pyramid_mod1, lap_pyramid_mod2):
    """
    A function that builds a fused Laplacian pyramid from two modalities of
    the same image.

    :param gauss_pyramid_mod1: The Gaussian pyramid of modality 1, a list of
        grayscale images, the first one in highest resolution
    :param gauss_pyramid_mod2: The Gaussian pyramid of modality 2, a list of
        grayscale images, the first one in highest resolution
    :param lap_pyramid_mod1: The Laplacian pyramid of modality 1, a list of
        grayscale images, the last one in highest resolution
    :param lap_pyramid_mod2: The Laplacian pyramid of modality 2, a list of
        grayscale images, the last one in highest resolution
    :return: The fused Laplacian pyramid of the two modalities, a list of
        grayscale images, the last one in highest resolution
    """
    fused_laplacian = []
    len_lap = len(lap_pyramid_mod1)
    for l in range(len_lap):
        fused_laplacian_temp = (gauss_pyramid_mod1[len_lap - l - 1] * lap_pyramid_mod1[l]
                                + gauss_pyramid_mod2[len_lap - l - 1] * lap_pyramid_mod2[l])
        fused_laplacian.append(fused_laplacian_temp)
    return fused_laplacian
59f600de9a56587146efe46692754d4f757e3645
7,109
def increase(value):
    """Test template tag that returns an increased value."""
    return value + 1
41e4a7efd8541bcadf1f2e4224cb3dc170dfb50b
7,110
def subfolders_in(whole_path):
    """
    Returns all subfolders in a path, in order.

    >>> subfolders_in('/')
    ['/']
    >>> subfolders_in('/this/is/a/path')
    ['/this', '/this/is', '/this/is/a', '/this/is/a/path']
    >>> subfolders_in('this/is/a/path')
    ['this', 'this/is', 'this/is/a', 'this/is/a/path']
    """
    path_fragments = whole_path.lstrip('/').split('/')
    if whole_path.startswith('/'):
        path_fragments[0] = '/' + path_fragments[0]
    path = path_fragments[0]
    subfolders = [path]
    for fragment in path_fragments[1:]:
        path += '/' + fragment
        subfolders.append(path)
    return subfolders
a7389811a8acacea87abd55ba47892203e0b95e5
7,112
def get_filter_set_field_tuples(filter_set):
    """Return a list of tuples of filter set filter names and values.

    This is used together with `join_filter_set_field_values` above to create
    a table of the filters used in an analysis view.

    NOTE:
     - This function and `join_filter_set_field_values` are kept separate so
       that we don't have to write HTML markup here. See
       `templates/website/view-editor-filterset.html`
     - It might be less of a hassle to prepare the data in the corresponding
       view, i.e. `website.views.deep_analysis`.
    """
    return [
        ("Report Sender Email", filter_set.reportsender_set.all, False),
        ("Report Receiver Domain", filter_set.reportreceiverdomain_set.all, False),
        ("Mail Sender Source IP", filter_set.sourceip_set.all, False),
        ("Aligned DKIM Result", filter_set.aligneddkimresult_set.all, True),
        ("Aligned SPF Result", filter_set.alignedspfresult_set.all, True),
        ("Disposition", filter_set.disposition_set.all, True),
        ("Raw SPF Domain", filter_set.rawspfdomain_set.all, False),
        ("Raw SPF Result", filter_set.rawspfresult_set.all, True),
        ("Raw DKIM Domain", filter_set.rawdkimdomain_set.all, False),
        ("Raw DKIM Result", filter_set.rawdkimresult_set.all, True),
        ("Multiple DKIM Only", filter_set.multipledkim_set.all, False),
    ]
928b313227972ed855aa6944f72f333fde10862a
7,113
import string
import random


def string_generator(size=6, chars=string.ascii_letters):
    """Generate a string based on the parameters passed.

    @param size: Size limit of the string. Default: 6
    @param chars: Characters to draw from. Default: string.ascii_letters
    """
    return ''.join(random.choice(chars) for _ in range(size))
696d6b9219f41dbdda7226d6571f2b89b344774b
7,115
def pytest_funcarg__tracker_config(request):
    """
    The tracker configuration as ``TrackerConfig`` object, or ``None``, if
    there is no tracker configuration.

    Tracker configuration is taken from the class this test is defined in. If
    there is a ``testname`` for this test, the tracker config is taken from
    the ``tracker_config`` map defined in the class, falling back to the
    ``default_tracker_config`` defined in the class. If there is no
    ``testname``, the ``default_tracker_config`` is used right away. If the
    test isn't defined in a class, ``None`` is returned.
    """
    cls = request.cls
    if cls is None:
        return None
    testname = getattr(request, 'param', None)
    if testname is None:
        return cls.default_tracker_config
    else:
        return cls.tracker_config.get(testname, cls.default_tracker_config)
8591a59b4f825ba7f7a034959a8c4ffe7a241013
7,118
def merge(nums1, m: int, nums2, n) -> None:
    """
    Do not return anything, modify nums1 in-place instead.
    """
    if len(nums1) >= m + n:
        for num in nums2:
            nums1[m] = num
            m += 1
        nums1.sort()
    return nums1
2c2e372e05b71981cab4796f62116e72e278c352
7,119
import re


def isChr(s, c):
    """
    :param s: String
    :param c: Chromosome, number or X or Y
    :return: Whether s matches c
    """
    if 'X' == c:
        return 'X' in s  # check the string, not the chromosome label itself
    if 'Y' == c:
        return 'Y' in s
    return str(c) in re.findall("[1-9][0-9]*", s)
b54666e3cf2c376bfbd687734eb40fc65f233c20
7,121
def patch_configuration(client, configuration_id, patches):
    """
    :param patches: List of patches in format
        [
            {"op": op, "path": path, "value": value},
            {"op": op, "path": path, "value": value}
        ]
    :return: requests Response
    """
    data = patches
    return client._patch("/company/configurations/{}/".format(configuration_id), json=data)
a229390fd8845238e1b5eeddb9922f79772b9840
7,122
from functools import reduce
from operator import add
from operator import sub
from operator import mul
from operator import truediv


def calc_apply(operator, args):
    """Apply the named operator to a list of args.

    >>> calc_apply('+', as_scheme_list(1, 2, 3))
    6
    >>> calc_apply('-', as_scheme_list(10, 1, 2, 3))
    4
    >>> calc_apply('-', as_scheme_list(10))
    -10
    >>> calc_apply('*', nil)
    1
    >>> calc_apply('*', as_scheme_list(1, 2, 3, 4, 5))
    120
    >>> calc_apply('/', as_scheme_list(40, 5))
    8.0
    >>> calc_apply('/', as_scheme_list(10))
    0.1
    """
    if not isinstance(operator, str):
        raise TypeError(str(operator) + ' is not a symbol')
    if operator == '+':
        return reduce(add, args, 0)
    elif operator == '-':
        if len(args) == 0:
            raise TypeError(operator + ' requires at least 1 argument')
        elif len(args) == 1:
            return -args.first
        else:
            return reduce(sub, args.second, args.first)
    elif operator == '*':
        return reduce(mul, args, 1)
    elif operator == '/':
        if len(args) == 0:
            raise TypeError(operator + ' requires at least 1 argument')
        elif len(args) == 1:
            return 1 / args.first
        else:
            return reduce(truediv, args.second, args.first)
    else:
        raise TypeError(operator + ' is an unknown operator')
09fdb5ec5606bcfcf84606b925666de836a2afb2
7,124
def to_str(bit_str):
    """Transform a bit string into a string of characters."""
    chars = []
    for i in range(int(len(bit_str) / 8)):
        byte = bit_str[i * 8:(i + 1) * 8]
        if byte == "11111111":
            break
        chars.append(chr(int(''.join([str(bit) for bit in byte]), 2)))
    return ''.join(chars)
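A usage sketch: the bits for 'Hi' followed by the 11111111 terminator (assumed input):

to_str("01001000" "01101001" "11111111")  # -> 'Hi'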
db75acd548805eb06a383c8443a0a85631d8e65d
7,125
import os


def get_scale_factor(value_dict, max_length=os.get_terminal_size().columns):
    """
    Gets the scale factor from a dict of keys with numerical values.
    """
    # note: the terminal-size default is evaluated once, at definition time
    max_value = max(value_dict.values(), key=abs)
    try:
        scale = max_length / abs(max_value)
    except ZeroDivisionError:
        scale = 1
    return scale
91aee90577a9ad9615128345276a98f83bc459d6
7,126
import platform


def identify_operating_system():
    """Identify the current operating system.

    Returns:
        (str): 'Windows', 'Linux', or 'Darwin' for mac
    """
    return platform.system()
05f4ec68aa535cdbef61c4cbf36d9cfc5a75fbcc
7,128
def _get_object(lst, _id):
    """
    Internal function to grab data referenced inside response['included'].
    """
    for item in lst:
        if item['id'] == _id:
            return item
f18c6f330750f72d9495d1283ef28ef41cf58a97
7,130