content (string, 35 to 416k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k)
---|---|---|
def has_form_encoded_header(header_lines):
"""Return if list includes form encoded header"""
for line in header_lines:
if ":" in line:
(header, value) = line.split(":", 1)
if header.lower() == "content-type" \
and "x-www-form-urlencoded" in value:
return True
return False | e4fe797e4884161d0d935853444634443e6e25bb | 6,014 |
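A quick sanity check of the header scan above, using made-up header lines:

headers = [
    "Host: example.com",
    "Content-Type: application/x-www-form-urlencoded",
]
assert has_form_encoded_header(headers) is True
assert has_form_encoded_header(["Accept: */*"]) is False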
def attr(*args, **kwargs):
"""Decorator that adds attributes to classes or functions
for use with unit tests runner.
"""
def wrapped(element):
for name in args:
setattr(element, name, True)
for name, value in kwargs.items():
setattr(element, name, value)
return element
return wrapped | 77d20af87cef526441aded99bd6e24e21e5f81f9 | 6,016 |
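A minimal usage sketch for the decorator above; the attribute names are illustrative:

@attr('slow', priority=2)
def test_large_upload():
    pass

assert test_large_upload.slow is True
assert test_large_upload.priority == 2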
def nn(value: int) -> int:
"""Casts value to closest non negative value"""
return 0 if value < 0 else value | 08672feaefa99881a110e3fc629d4a9256f630af | 6,017 |
def app_base(request):
"""
This should render the required HTML to start the Angular application. It is the only entry point for
the pyramid UI via Angular
:param request: A pyramid request object, default for a view
:return: A dictionary of variables to be rendered into the template
"""
dev_endpoints = ['localhost', '0.0.0.0', '127.0.', '192.168.', '10.19.', 'dev.squizzlezig.com']
is_dev = False
for point in dev_endpoints:
if request.host.split(':', 1)[0].startswith(point) or request.remote_addr.startswith(point):
is_dev = True
return { 'is_dev': is_dev, 'some_key': request.registry.settings['some_key']} | 3a097e920b33248b436e2eea00e05b5708b35779 | 6,018 |
import json
def get_droplet_ip():
"""get droplet ip from cache."""
cached_droplet_info_file = 'droplet_info.json'
with open(cached_droplet_info_file, 'r') as info_f:
droplet_info = json.load(info_f)
return droplet_info['networks']['v4'][0]['ip_address'] | 21d0bfbbe6aebd7e88cc6465d49b221da271753a | 6,019 |
def lat_long_to_idx(gt, lon, lat):
"""
Take a geotransform and calculate the array indexes for the given lat,long.
:param gt: GDAL geotransform (e.g. gdal.Open(x).GetGeoTransform()).
:type gt: GDAL Geotransform tuple.
:param lon: Longitude.
:type lon: float
:param lat: Latitude.
:type lat: float
"""
return (int((lat - gt[3]) / gt[5]),
int((lon - gt[0]) / gt[1])) | 3fafcc4750daa02beaedb330ab6273eab6abcd56 | 6,020 |
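For illustration, a hypothetical global 0.25-degree geotransform in GDAL order (origin_x, pixel_width, 0, origin_y, 0, pixel_height):

gt = (-180.0, 0.25, 0.0, 90.0, 0.0, -0.25)
row, col = lat_long_to_idx(gt, lon=2.35, lat=48.85)
print(row, col)  # 164 729: int((48.85 - 90.0) / -0.25), int((2.35 + 180.0) / 0.25)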
def BSMlambda(delta: float, S: float, V: float) -> float:
"""Not really a greek, but rather an expression of leverage.
Arguments
---------
delta : float
BSM delta of the option
V : float
Spot price of the option
S : float
Spot price of the underlying
Returns
-------
float
lambda
Note
----
Percentage change in the option price per percentage change in the underlying asset's price.
"""
return delta*(S / V) | ea9bf546a7cf46b3c2be01e722409663b05248e1 | 6,021 |
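A one-line numeric check with made-up option values:

lev = BSMlambda(delta=0.5, S=100.0, V=5.0)
print(lev)  # 0.5 * (100 / 5) = 10.0, i.e. a 1% move in the underlying moves the option roughly 10%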
import pwd
def uid_to_name(uid):
"""
Find the username associated with a user ID.
:param uid: The user ID (an integer).
:returns: The username (a string) or :data:`None` if :func:`pwd.getpwuid()`
fails to locate a user for the given ID.
"""
try:
return pwd.getpwuid(uid).pw_name
except Exception:
return None | f9054e4959a385d34c18d88704d376fb4b718e47 | 6,022 |
def table_parse(table):
"""
"""
data = []
rows = table.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
return data | 528008ada0ad7d594554ed5d577472a126df0cd1 | 6,023 |
import pandas
import numpy
def scan_mv_preprocessing_fill_pivot_nan(df):
"""
Value imputation.
Impute missing data in pivot table.
Parameters
----------
df : dataframe
Pivot table data with potentially missing values.
Returns
-------
df : dataframe
Pivot table data with no missing values.
"""
    df_new = pandas.DataFrame()
    for group in set(df.index.get_level_values('Group')):
        df_group = df.loc[df.index.get_level_values('Group') == group].copy()
        for analyte in df_group.columns[~df_group.columns.isin(['Component Name'])]:
            series_fill = df_group[analyte].copy()
            if not series_fill.isna().all():
                # Missing at random: impute with the rounded group mean
                series_fill[pandas.isna(series_fill)] = round(numpy.nanmean(series_fill))
            elif analyte in df_new.columns:
                # Missing not at random: the whole group is missing, so impute with
                # half the minimum observed in the groups processed so far
                series_fill[:] = numpy.nanmin(df_new[analyte]) / 2
            df_group[analyte] = series_fill
        # DataFrame.append was removed in pandas 2.0; use concat instead
        df_new = pandas.concat([df_new, df_group])
    df_filled = df_new.copy()
return df_filled | e88d1b2b0a3d4fc27afe29a10512116323046cef | 6,024 |
def expandingPrediction(input_list, multiple=5):
"""
:param input_list:
:param multiple:
:return:
"""
expanded_list = []
for prediction in input_list:
for i in range(multiple):
expanded_list.append(prediction)
return expanded_list | 9a502adb15160e656bd727748eb5dae73858d7f8 | 6,025 |
import numpy
def grab(sequence, random = numpy.random):
"""
Return a randomly-selected element from the sequence.
"""
return sequence[random.randint(len(sequence))] | 1760dc08b5971647f55248bd1b1f04d700dac38e | 6,026 |
def _bytes_chr_py2(i):
"""
Returns a byte string of length 1 whose ordinal value is i in Python 2.
Do not call directly, use bytes_chr instead.
"""
return chr(i) | de524d1ec303cc297d7981570ef30aa9ae6840ed | 6,027 |
def quadratic_formula(polynomial):
"""
input is single-variable polynomial of degree 2
returns zeros
"""
if len(polynomial.term_matrix) == 3:
if polynomial.term_matrix[2][1] == 1:
a, b = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return 0, -b/a
a, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return (-c/a)**.5, -(-c/a)**.5
if len(polynomial.term_matrix) == 2:
a, b, c, = polynomial.term_matrix[1][0], 0, 0
elif len(polynomial.term_matrix) == 3:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], 0
else:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], polynomial.term_matrix[3][0]
    ans1 = (-b + (b**2 - 4*a*c)**.5)/(2*a)
    ans2 = (-b - (b**2 - 4*a*c)**.5)/(2*a)
if ans1 == ans2:
return ans1
return ans1, ans2 | 5501abff2fadcd237e3cb0efc4bca615eef455da | 6,028 |
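A minimal check using a hypothetical stub for the polynomial object; the term_matrix layout ([header, [a, 2], [b, 1], [c, 0]] for a*x**2 + b*x + c) is inferred from the function body, not confirmed elsewhere:

from collections import namedtuple

Poly = namedtuple('Poly', 'term_matrix')
p = Poly(term_matrix=[['constant', 'x'], [1, 2], [-5, 1], [6, 0]])
print(quadratic_formula(p))  # (3.0, 2.0), the roots of x**2 - 5x + 6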
import argparse
def main_args_parser() -> argparse.Namespace:
""" Implements an easy user-friendly command-line interface.
It creates three main subparser (classify, train, eval) and add the appropriated arguments for each subparser.
Returns
-------
args: argparse.Namespace
input arguments provided by the user
"""
#region - MAIN parser
parser = argparse.ArgumentParser(description='Handwritten long number recognition')
subparsers = parser.add_subparsers(dest='mode',
help='<required> program execution mode: classify with a pre-trained model or re-train the model',
required=True)
#endregion
#region - CLASSIFY subparser
parser_classify = subparsers.add_parser('classify',
help='classify an input image using the pre-trained model',
description='CLASSIFY mode: classify an input image using a pre-trained model')
image_from = parser_classify.add_mutually_exclusive_group()
image_from.add_argument('-f', '--folder',
type=str,
help='input image from folder, if not specified from webcam',
metavar='PATH_TO_IMAGE',
default=None)
which_model = parser_classify.add_mutually_exclusive_group()
which_model.add_argument('-a', '--augmentation',
action='store_true',
help='use model trained WITH data augmentation')
which_model.add_argument('-m', '--model',
type=str,
help='user custom model from path',
metavar='PATH_TO_MODEL')
parser_classify.add_argument('-d', '--device',
type=str,
help='(default=cpu) device to be used for computations {cpu, cuda:0, cuda:1, ...}',
default='cpu')
#endregion
#region - TRAIN subparser
parser_train = subparsers.add_parser('train',
help='re-train the model in your machine and save it to reuse in classify phase',
description='TRAIN mode: re-train the model in your machine and save it to reuse in classify phase')
parser_train.add_argument('-a', '--augmentation',
action='store_true',
help='set data-augmentation procedure ON (RandomRotation and RandomResizedCrop)')
parser_train.add_argument('-s', '--splits',
nargs=2,
type=float,
help='(default=[0.7,0.3]) proportions for the dataset split into training and validation set',
default=[0.7,0.3],
metavar=('TRAIN', 'VAL'))
parser_train.add_argument('-b', '--batch_size',
type=int,
help='(default=64) mini-batch size',
default=64)
parser_train.add_argument('-e', '--epochs',
type=int,
help='(default=10) number of training epochs',
default=10)
parser_train.add_argument('-l', '--learning_rate',
type=float,
        help='(default=0.001) learning rate',
default=0.001)
parser_train.add_argument('-w', '--num_workers',
type=int,
help='(default=3) number of workers',
default=3)
parser_train.add_argument('-d', '--device',
type=str,
help='(default=cpu) device to be used for computations {cpu, cuda:0, cuda:1, ...}',
default='cpu')
#endregion
#region - EVAL subparser
parser_eval = subparsers.add_parser('eval',
help='evaluate the model accuracy on the test set of MNIST',
description='EVAL mode: evaluate the model accuracy on the test set of MNIST')
parser_eval.add_argument('model',
type=str,
help='<required> path to the model to be evaluated',
metavar='PATH_TO_MODEL')
parser_eval.add_argument('-d', '--device',
type=str,
help='(default=cpu) device to be used for computations {cpu, cuda:0, cuda:1, ...}',
default='cpu')
#endregion
args = parser.parse_args()
return args | ef2e079e6fd95576d34d3dc6ab3f07aac6dc6aad | 6,029 |
import sys
def is_windows() -> bool:
"""
Returns True if the current system is Windows. Returns False otherwise.
"""
return sys.platform == "win32" | bb4f78364bd2182b79b7517466eb251731826464 | 6,031 |
def adjacent_powerset(iterable):
"""
Returns every combination of elements in an iterable where elements remain ordered and adjacent.
For example, adjacent_powerset('ABCD') returns ['A', 'AB', 'ABC', 'ABCD', 'B', 'BC', 'BCD', 'C', 'CD', 'D']
Args:
        iterable: a sequence that supports len() and slicing (e.g. str, list, tuple)
Returns:
a list of element groupings
"""
return [iterable[a:b] for a in range(len(iterable)) for b in range(a + 1, len(iterable) + 1)] | 951418b30d541e1dcdd635937ae609d429e3cd70 | 6,032 |
import argparse
def getArguments():
"""
Gets the name of the gameFile.
:return: The arguments provided by the user
"""
parser = argparse.ArgumentParser()
parser.add_argument('gameFile', help='The ini formatted file with the game configuration')
return parser.parse_args() | b8f3d440e3cd2976e946e7745fb06ff86f179f8a | 6,033 |
def end_position(variant_obj):
"""Calculate end position for a variant."""
alt_bases = len(variant_obj['alternative'])
num_bases = max(len(variant_obj['reference']), alt_bases)
return variant_obj['position'] + (num_bases - 1) | e49110a1102ea2ca53053858597247799065f8e1 | 6,034 |
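A quick example with a hypothetical variant record:

variant = {'position': 100, 'reference': 'ATG', 'alternative': 'A'}
print(end_position(variant))  # 100 + (3 - 1) = 102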
def validate_tag_update(update):
"""
Property: ResourceUpdateConstraint.TagUpdateOnProvisionedProduct
"""
valid_tag_update_values = [
"ALLOWED",
"NOT_ALLOWED",
]
if update not in valid_tag_update_values:
raise ValueError("{} is not a valid tag update value".format(update))
return update | c2abd7af00be52cf8cfecb5790d88a04d3207253 | 6,036 |
import os
import argparse
def writable_prefix(prefix):
"""
Checks if this prefix is writable and exists.
:param prefix: str - prefix to check
:return: str - prefix
"""
directory = os.path.dirname(prefix)
if not os.path.exists(directory):
error = "Output directory %s does not exist (%s)" % (directory, prefix)
# report.log_str(error, priority=logging.ERROR)
raise argparse.ArgumentTypeError(error)
if not os.access(directory, os.W_OK):
error = "Output directory %s is not writable (%s)" % (directory, prefix)
# report.log_str(error, priority=logging.ERROR)
raise argparse.ArgumentTypeError(error)
return prefix | 89fd163c7d3bd9aaca3e26e4b72aef0c98236d8b | 6,037 |
from datetime import datetime
def tick_format(ticktime):
"""
Format the tick date/time
"""
datetime_object = datetime.strptime(ticktime, '%Y-%m-%dT%H:%M:%S.%fZ')
return datetime_object.strftime("%H:%M:%S UTC %A %d %B") | 6fa02f7627bc947646046a47ab7298aad68399d8 | 6,038 |
import functools
def decorator_with_keywords(func=None, **dkws):
# NOTE: ONLY ACCEPTS KW ARGS
"""
A decorator that can handle optional keyword arguments.
When the decorator is called with no optional arguments like this:
@decorator
def function ...
The function is passed as the first argument and decorate returns the decorated function, as expected.
If the decorator is called with one or more optional arguments like this:
@decorator(optional_argument1='some value')
def function ....
Then decorator is called with the function argument with value None, so a function that decorates
is returned, as expected.
"""
# print('WHOOP', func, dkws)
def _decorate(func):
@functools.wraps(func)
def wrapped_function(*args, **kws):
# print('!!')
return func(*args, **kws)
return wrapped_function
if func:
return _decorate(func)
return _decorate | 64c4ddd26cc04a43cbf559600652113db81b79ae | 6,041 |
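A short usage sketch covering both call forms handled above:

@decorator_with_keywords
def greet():
    return 'hi'

@decorator_with_keywords(verbose=True)  # keyword arguments are accepted (and ignored by this stub)
def shout():
    return 'HI'

assert greet() == 'hi' and shout() == 'HI'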
from datetime import datetime
def parse_line(line):
"""
Extract all the data we want from each line.
:param line: A line from our log files.
:return: The data we have extracted.
"""
time = line.split()[0].strip()
response = line.split(' :')
message = response[len(response) - 1].strip('\n')
channel = response[1].split('#')
username = channel[0].split('!')
username = username[0]
channel = channel[1]
time = datetime.strptime(time, '%Y-%m-%d_%H:%M:%S')
return time, channel, username, message | 72b4362b7628d31996075941be00e4ddcbd5edbc | 6,042 |
from collections import Counter
def reindex(labels):
"""
Given a list of labels, reindex them as integers from 1 to n_labels
Also orders them in nonincreasing order of prevalence
"""
old2new = {}
j = 1
for i, _ in Counter(labels).most_common():
old2new[i] = j
j += 1
old2newf = lambda x: old2new[x]
return [old2newf(a) for a in labels] | c12afd3b6431f10ccc43cce858e71bc504088a6e | 6,044 |
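For example:

print(reindex(['b', 'a', 'b', 'c', 'b', 'a']))  # [1, 2, 1, 3, 1, 2]; 'b' is most common, so it maps to 1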
def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
"""
Helper API to enable or disable pim on interfaces
Parameters
----------
* `tgen` : Topogen object
* `topo` : json file data
* `input_dict` : Input dict data, required when configuring from testcase
* `router` : router id to be configured.
* `build` : Only for initial setup phase this is set as True.
Returns
-------
list of config
"""
config_data = []
# Enable pim on interfaces
for destRouterLink, data in sorted(topo[router]["links"].items()):
if "pim" in data and data["pim"] == "enable":
# Loopback interfaces
if "type" in data and data["type"] == "loopback":
interface_name = destRouterLink
else:
interface_name = data["interface"]
cmd = "interface {}".format(interface_name)
config_data.append(cmd)
config_data.append("ip pim")
# pim global config
if "pim" in input_dict[router]:
pim_data = input_dict[router]["pim"]
del_action = pim_data.setdefault("delete", False)
for t in [
"join-prune-interval",
"keep-alive-timer",
"register-suppress-time",
]:
if t in pim_data:
cmd = "ip pim {} {}".format(t, pim_data[t])
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
return config_data | 4408ac212126895ba161f834e7e076a0c14d864f | 6,047 |
import os
def find_testclass(package, program, testclass, file_required=False):
"""Find the relative path of the test-class file"""
name = f'{program.lower()}.clas.testclasses.abap'
for root, _, files in os.walk('.'):
if name in files:
return os.path.join(root, name)[2:]
if file_required:
return None
return package + '/' + program + '=>' + testclass | 9da2aeaadb042da868b0c6d6e5069842cc014654 | 6,048 |
import csv
def get_score_sent_pairs_from_tsv(tsv_filepath, encoding="ISO-8859-1"):
"""expects tokenized sentences in tsv file!"""
with open(tsv_filepath, encoding=encoding) as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
score_sent_pairs = [[float(row[0]), row[1]] for row in reader]
return score_sent_pairs | 44f5c150d40b407b50a93cd0ad968658fd5ef431 | 6,050 |
def service_class(cls):
"""
A class decorator enabling the instances of the class to be used
as a ``services``-provider in `JSONRpc Objects`_
and `BSONRpc Objects`_.
Use decorators ``request``, ``notification``, ``rpc_request`` and
``rpc_notification`` to expose methods for the RPC peer node.
"""
cls._request_handlers = {}
cls._notification_handlers = {}
for name, method in cls.__dict__.items():
if hasattr(method, '_request_handler'):
cls._request_handlers[name] = method
if hasattr(method, '_notification_handler'):
cls._notification_handlers[name] = method
return cls | 7c146b1d04415cd494e62fb9ee310364c345c217 | 6,051 |
def locate_address(ip_list, ip_attack):
"""
for each line in the file pointer
define the ip ranges and country codes
if the attacking ip is in between the range then return country code
:param ip_list - list of ip address ranges and country code:
:param ip_attack - attacking ip as an integer:
:return country_code - country code as a string:
"""
for line in ip_list:
start_ip = line[0]
end_ip = line[1]
country_code = line[2]
if ip_attack >= start_ip and ip_attack <= end_ip:
return country_code
else:
pass | 82a8f9ed0cf79a2ba39d21348779687c1f8c19a8 | 6,052 |
import string
def is_valid_matlab_field_label(label):
""" Check that passed string is a valid MATLAB field label """
if not label.startswith(tuple(string.ascii_letters)):
return False
VALID_CHARS = set(string.ascii_letters + string.digits + "_")
return set(label).issubset(VALID_CHARS) | ea1358e94f4fc936cb12b9cad5d7285ee39dba55 | 6,053 |
def reading2celsius(self, reading):
""" Converts sensor reading to celsius """
celsius = reading / 50 - 273.15
return celsius | 72e6933002c9725165145451e10bbf98c162b625 | 6,054 |
def variantCombinations(items):
""" Calculates variant combinations for given list of options. Each item in the items list represents
unique value with it's variants.
:param list items: list of values to be combined
>>> c = variantCombinations([["1.1", "1.2"], ["2.1", "2.2"], ["3.1", "3.2"]])
>>> len(c)
8
    >>> for combination in c: print(combination)
['1.1', '2.1', '3.1']
['1.1', '2.1', '3.2']
['1.1', '2.2', '3.1']
['1.1', '2.2', '3.2']
['1.2', '2.1', '3.1']
['1.2', '2.1', '3.2']
['1.2', '2.2', '3.1']
['1.2', '2.2', '3.2']
"""
    assert isinstance(items, list) and items
if len(items) == 1:
result = items[0]
else:
result = []
subItems = variantCombinations(items[1:])
for masterItem in items[0]:
for subItem in subItems:
if isinstance(subItem, list):
item = [masterItem]
item.extend(subItem)
result.append(item)
else:
result.append([masterItem, subItem])
return result | 72bfdb19db3cf692e4260a5f75d10324e562f20e | 6,055 |
import ast
def get_lattice_points(strand):
    """
    Get the lattice point information.
    @param strand: string encoding the strand's pair of lattice points
    @return: the strand's start and end lattice points
    """
    # literal_eval is safer than eval for parsing a literal pair
    strand_list = ast.literal_eval(strand)
    strand_from = strand_list[0]
    strand_to = strand_list[1]
return strand_from, strand_to | a69902c15b9d8ce9f518891f4dea55d9aca186cf | 6,056 |
def mask(bigtiff,profile,out_mask):
    """
    Mask a 1- or 4-band image, band by band, to limit memory use.
    """
    n_bands = 4 if profile['count'] == 4 else 1
    for band in range(n_bands):
        band_data = bigtiff[band, :, :]
        band_data[out_mask == 1] = profile['nodata']
        bigtiff[band, :, :] = band_data
        del band_data
return bigtiff | 0e31612da8d80f5fb4d8f35a0664708294e98312 | 6,058 |
def quadratic_sum(n: int) -> int:
    """Calculate the sum of squares 1**2 + 2**2 + ... + n**2."""
    total = 0
    for i in range(1, n + 1):
        total += i ** 2
    return total | e47a3ee49888c85cc06c72c428d983885ed7009f | 6,059 |
import numpy as np
def nan_helpfcn(myarray):
"""
Helper function to return the locations of Nan values as a boolean array, plus a function to return the index of the array.
Code inspired by: http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
Input:
- myarray, 1d numpy array with possible NaNs, e.g. np.array([1,2,NaN,4,NaN,6])
Output:
- nans, logical indices of NaNs, e.g. for the example above [False,False,True,False,True,False]
- indf, this gives us the indices of the matrix - shifted. this is a lambda function. e.g. indes(nans) where nans was [False,False,True,False,True,False] gives [2,4]
This is functionally equivalent to np.array(range(len(myarray)))
Example:
>>> myarray = np.array=[1,2,np.NaN,4,np.NaN,6])
>>> nanlocs,indf=nan_helpfcn(myarray)
>>>nanlocs
[False,False,True,False,True,False]
>>> indf[nanlocs]
[2,4]
>>> indf[~nanlocs]
[0,1,3,5]
"""
return np.isnan(myarray), lambda z: z.nonzero()[0] | b5770e6bdfda85bc71fd954aacc4c31dbbd47f13 | 6,061 |
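The typical use of the returned pair is linear interpolation over the NaN positions:

import numpy as np

myarray = np.array([1.0, 2.0, np.nan, 4.0, np.nan, 6.0])
nanlocs, indf = nan_helpfcn(myarray)
myarray[nanlocs] = np.interp(indf(nanlocs), indf(~nanlocs), myarray[~nanlocs])
print(myarray)  # [1. 2. 3. 4. 5. 6.]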
import six
def all_strs_text(obj):
"""
PyYAML refuses to load strings as 'unicode' on Python 2 - recurse all over
obj and convert every string.
"""
if isinstance(obj, six.binary_type):
return obj.decode('utf-8')
elif isinstance(obj, list):
return [all_strs_text(x) for x in obj]
elif isinstance(obj, tuple):
return tuple(all_strs_text(x) for x in obj)
elif isinstance(obj, dict):
return {six.text_type(k): all_strs_text(v) for k, v in six.iteritems(obj)}
else:
return obj | 20b27cf809ed7fbf12b30a357d6aecfeeed88461 | 6,062 |
def fahrenheit2celsius(f: float) -> float:
"""Utility function to convert from Fahrenheit to Celsius."""
return (f - 32) * 5/9 | 5161b29998553ad6ff497e698058f330433d90b3 | 6,063 |
def pollard_brent_f(c, n, x):
"""Return f(x) = (x^2 + c)%n. Assume c < n.
"""
x1 = (x * x) % n + c
if x1 >= n:
x1 -= n
assert x1 >= 0 and x1 < n
return x1 | 5037b3feac2f131645fbe6ceb00f0d18417a7c04 | 6,064 |
def tpu_ordinal_fn(shard_index_in_host, replicas_per_worker):
"""Return the TPU ordinal associated with a shard."""
return shard_index_in_host % replicas_per_worker | 773313750ce78cf5d32776752cb75201450416ba | 6,065 |
def create_category_hiearchy(cats, categoryType):
"""A function that creates a dict of the root and subroot categories"""
dict_out = {}
for key in cats.keys():
name = cats[key]['name']
parent_name = cats[key]['parent']['name']
cat_type = cats[key]['categoryType']
if cat_type == categoryType:
# Check if parent name is Root and should be the key
if parent_name == 'Root':
# Check to see if key exists
if name not in dict_out.keys():
# If not, add key to dict and empty list
dict_out[name] = []
            else:
# Check if parent_name already key
if parent_name not in dict_out.keys():
# If not, add the key and empty list
dict_out[parent_name] = []
# Add the subcategory
dict_out[parent_name].append(name)
return dict_out | f0b19f2a6f56e49855a019a18d9357a31cfaeb2a | 6,066 |
def ordinal(n):
"""Converts an integer into its ordinal equivalent.
Args:
n: number to convert
Returns:
nth: ordinal respresentation of passed integer
"""
nth = "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10 :: 4])
return nth | 7f438c89a6b0f7adbc42f2eb1e619ca4bf862b4a | 6,068 |
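For instance:

print([ordinal(n) for n in (1, 2, 3, 4, 11, 12, 13, 21)])
# ['1st', '2nd', '3rd', '4th', '11th', '12th', '13th', '21st']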
def check_date(date):
"""check if date string has correct format.
Args:
date as a string mmddyyyy
Returns:
a boolean indicating if valid (True) or not (False)
"""
if len(date) != 8:
return False
if not date.isdigit():
return False
# months are between '01' ~ '12'
if (date[0] != '1' and date[0] != '0'):
return False
if date[0] == '1':
if (date[1] != '0') and (date[1] != '1') and (date[1] != '2'):
return False
    # day-of-month must start with '0', '1', '2' or '3'
if (date[2] != '0') and (date[2] != '1') \
and (date[2] != '2') and (date[2] != '3'):
return False
return True | 8972498d94d459ba48851049780e46b057855d9f | 6,069 |
def return_lines_as_list(file):
"""
:rtype: list of str
"""
# read lines
lines = file.readlines()
def strip(string):
"""
Removes whitespace from beginning and end of string
:type string: str
"""
return string.strip()
# Coverts our lines to list
return list(map(strip, lines)) | 69e3d45fa3df107a8852d10e104a543c014a6c79 | 6,070 |
def getObjectInfo(fluiddb, objectId):
"""
Get information about an object.
"""
return fluiddb.objects[objectId].get(showAbout=True) | baad59e6585e04a8c2a8cca1df305327b80f3768 | 6,071 |
import csv
def snp2dict(snpfile):
"""Get settings of dict from .snp file exported from save&restore app.
Parameters
----------
snpfile : str
Filename of snp file exported from save&restore app.
Returns
-------
r : dict
Dict of pairs of PV name and setpoint value.
"""
with open(snpfile, 'r') as fp:
csv_data = csv.reader(fp, delimiter=',', skipinitialspace=True)
next(csv_data)
header = next(csv_data)
ipv, ival = header.index('PV'), header.index('VALUE')
settings = {line[ipv]: line[ival] for line in csv_data if line}
return settings | cb902b3f8796685ed065bfeb8ed2d6d83c0fe80b | 6,073 |
import numpy
def _handle_zeros_in_scale(scale, copy=True, constant_mask=None):
"""
Set scales of near constant features to 1.
The goal is to avoid division by very small or zero values.
Near constant features are detected automatically by identifying
scales close to machine precision unless they are precomputed by
the caller and passed with the `constant_mask` kwarg.
Typically for standard scaling, the scales are the standard
deviation while near constant features are better detected on the
computed variances which are closer to machine precision by
construction.
Parameters
----------
scale : array
Scale to be corrected.
copy : bool
Create copy.
constant_mask : array
Masking array.
Returns
-------
scale : array
Corrected scale.
"""
# if we are fitting on 1D arrays, scale might be a scalar
if numpy.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, numpy.ndarray):
if constant_mask is None:
# Detect near constant values to avoid dividing by a very small
            # value that could lead to surprising results and numerical
# stability issues.
constant_mask = scale < 10 * numpy.finfo(scale.dtype).eps
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[constant_mask] = 1.0
return scale | 129f28dbf74f04929fcbccf8de42ee1c8dd5a09e | 6,074 |
def xy_potential(_):
"""
Potential for square XY model with periodic boundary conditions
"""
def potential(_, passive_rates):
pot = -passive_rates.sum(dim=(-1, -2, -3)) # sum over all sites and directions
return pot
return potential | b809e57464a9cd893cd33cd7ad38dffdb0d12a40 | 6,075 |
def _get_filter_syntax(_filter_info, _prefix=True):
"""This function retrieves the proper filter syntax for an API call."""
if type(_filter_info) != tuple and type(_filter_info) != list:
raise TypeError("Filter information must be provided as a tuple (element, criteria) or a list of tuples.")
elif type(_filter_info) == tuple:
_filter_info = [_filter_info]
_syntax = ""
if len(_filter_info[0]) > 0:
_define_prefix = {True: '&', False: ''}
_syntax_prefix = _define_prefix.get(_prefix)
for _filter_tuple in _filter_info:
_element, _criteria = _filter_tuple
_syntax = f"{_syntax_prefix}filter={_element}({_criteria})&"
_syntax = _syntax[:-1]
return _syntax | b1817a2a3f004ba2bd44a8f8f272ad685e4d5ebe | 6,076 |
import math
def pol2cart(r,theta):
"""
    Translate from polar to cartesian coordinates; theta is given in degrees.
"""
return (r*math.cos(float(theta)/180*math.pi), r*math.sin(float(theta)/180*math.pi)) | 69753e1cadd36ec70da1bf2cf94641d4c7f78179 | 6,077 |
def clean_title(title: str) -> str:
"""Strip unwanted additional text from title."""
for splitter in [" (", " [", " - ", " (", " [", "-"]:
if splitter in title:
title_parts = title.split(splitter)
for title_part in title_parts:
# look for the end splitter
for end_splitter in [")", "]"]:
if end_splitter in title_part:
title_part = title_part.split(end_splitter)[0]
for ignore_str in ["feat.", "featuring", "ft.", "with ", "explicit"]:
if ignore_str in title_part.lower():
return title.split(splitter + title_part)[0].strip()
return title.strip() | 5625c6c64b166560b1804b7048fd3d604536251a | 6,078 |
def steps_smoother(steps, resolution):
"""
:param delta_steps: array of delta positions of 2 joints for each of the 4 feet
:return: array of positions of 2 joints for each of the 4 feet
"""
smoothed_steps = []
for i in range(len(steps)):
step = steps[i]
next_step = steps[(i + 1) % len(steps)]
for j in range(resolution):
smoothed_step = []
for k in range(4):
positions = step[k]
next_positions = next_step[k]
pos0 = positions[0] + j * \
((next_positions[0] - positions[0]) / resolution)
pos1 = positions[1] + j * \
((next_positions[1] - positions[1]) / resolution)
smoothed_step.append([pos0, pos1])
smoothed_steps.append(smoothed_step)
return smoothed_steps | a27e09af169e79438895d0e15c0b536213962429 | 6,080 |
def leaper(x, y, int1, int2):
"""sepcifically for the rook, permutes the values needed around a position for no_conflict tests"""
return [(x+int1, y+int2), (x-int1, y+int2), (x+int1, y-int2), (x-int1, y-int2), (x+int2, y+int1), (x-int2, y+int1), (x+int2, y-int1), (x-int2, y-int1)] | 6f7afc071c8adbc72a6391179e2df522574e5197 | 6,081 |
def calc_offsets(obj):
"""
The search "hit" should have a 'fullsnip' annotation which is a the entire
text of the indexable resource, with <start_sel> and <end_sel> wrapping each
highlighted word.
Check if there's a selector on the indexable, and then if there's a box-selector
use this to generate a list of xywh coordinates by retrieving the selector by
its index from a list of lists
"""
if hasattr(obj, "fullsnip"):
words = obj.fullsnip.split(" ")
offsets = []
if words:
for i, word in enumerate(words):
if "<start_sel>" in word and "<end_sel>" in word:
offsets.append(i)
if offsets:
if obj.selector:
if (boxes := obj.selector.get("box-selector")) is not None:
box_list = []
for x in offsets:
try:
box_list.append(boxes[x])
except (IndexError, ValueError):
pass
if box_list:
return box_list # [boxes[x] for x in offsets if boxes[x]]
else:
return
return | 6af4827a57cf20f317ce2a40a669c14d3f6380f3 | 6,082 |
import argparse
import sys
def parse_args():
"""
Parse command-line arguments
Returns
-------
Parser argument namespace
"""
parser = argparse.ArgumentParser(description="Fibermorph")
parser.add_argument(
"--output_directory", default=None,
help="Required. Full path to and name of desired output directory. "
"Will be created if it doesn't exist.")
parser.add_argument(
"--input_directory", default=None,
help="Required. Full path to and name of desired directory containing "
"input files.")
parser.add_argument(
"--resolution_mm", type=int, default=132,
help="Integer. Number of pixels per mm.")
parser.add_argument(
"--resolution_mu", type=float, default=4.25,
help="Float. Number of pixels per micron.")
parser.add_argument(
"--file_extension", type=str, default=".RW2",
help="Optional. String. Extension of input files to use in input_directory when using raw2gray function. Default is .RW2.")
parser.add_argument(
"--jobs", type=int, default=1,
help="Integer. Number of parallel jobs to run. Default is 1.")
parser.add_argument(
"--window_size", type=float, default=1.0,
help="Float. Desired size for window of measurement in mm. Default is 1.0.")
parser.add_argument(
"--minsize", type=int, default=20,
help="Integer. Minimum diameter in microns for sections. Default is 20.")
parser.add_argument(
"--maxsize", type=int, default=150,
help="Integer. Maximum diameter in microns for sections. Default is 150.")
parser.add_argument(
"--save_image", type=bool, default=False,
help="Boolean. Default is False. Whether the curvature function should save images for intermediate image "
"processing steps.")
# Create mutually exclusive flags for each of fibermorph's modules
module_group = parser.add_mutually_exclusive_group(required=True)
module_group.add_argument(
"--raw2gray", action="store_true", default=False,
help="")
module_group.add_argument(
"--curvature", action="store_true", default=False,
help="")
module_group.add_argument(
"--section", action="store_true", default=False,
help="")
module_group.add_argument(
"--demo_real_curv", action="store_true", default=False,
help="")
module_group.add_argument(
"--demo_real_section", action="store_true", default=False,
help="")
module_group.add_argument(
"--demo_dummy_curv", action="store_true", default=False,
help="")
module_group.add_argument(
"--demo_dummy_section", action="store_true", default=False,
help="")
module_group.add_argument(
"--demo_teardown_data", action="store_true", default=False,
help="")
module_group.add_argument(
"--demo_delete_results_cache", action="store_true", default=False,
help="")
args = parser.parse_args()
# Validate arguments
demo_mods = [
args.demo_real_curv,
args.demo_real_section,
args.demo_dummy_curv,
args.demo_dummy_section,
args.demo_teardown_data,
args.demo_delete_results_cache]
if any(demo_mods) is False:
if args.input_directory is None and args.output_directory is None:
sys.exit("ExitError: need both --input_directory and --output_directory")
if args.input_directory is None:
sys.exit("ExitError: need --input_directory")
if args.output_directory is None:
sys.exit("ExitError: need --output_directory")
return args | e788d4519b539ba52e99989653c7aa3091cd9a39 | 6,083 |
def spread(self, value="", **kwargs):
"""Turns on a dashed tolerance curve for the subsequent curve plots.
APDL Command: SPREAD
Parameters
----------
value
Amount of tolerance. For example, 0.1 is ± 10%.
"""
return self.run("SPREAD,%s" % (str(value)), **kwargs) | a92c8e230eadd4e1fde498fa5650a403f419eaeb | 6,084 |
import re
def repair_attribute_name(attr):
"""
Remove "weird" characters from attribute names
"""
    return re.sub(r'[^a-zA-Z-_/0-9*]', '', attr)
def HasPositivePatterns(test_filter):
"""Returns True if test_filter contains a positive pattern, else False
Args:
test_filter: test-filter style string
"""
return bool(len(test_filter) > 0 and test_filter[0] != '-') | 9038bf799efbe4008a83d2da0aba89c0197c16a1 | 6,087 |
def highest_palindrome_product(digits):
"""Returns the highest palindrome number resulting from the
multiplication of two numbers with the given amount of digits.
"""
def is_palindrome(target):
"""Returns True if target (str or int) is a palindrome.
"""
string = str(target)
return list(string) == list(string)[::-1]
# Creating the two highest possible numbers with the given amount of
# digits:
highest_number1 = highest_number2 = int("9"*digits)
palindromes_list = []
while True:
result = highest_number1 * highest_number2
if is_palindrome(result):
palindromes_list.append(result)
# Finding the products between all two numbers with the given
# amount of digits:
if highest_number2 == int("1" + "0"*(digits-1)):
if highest_number1 == int("1" + "0"*(digits-1)):
break
else:
highest_number2 = highest_number1
highest_number1 -=1
else:
highest_number2 -= 1
return max(palindromes_list) | e509de1c977c6e4ecf9ab8304ef1afe65a447188 | 6,088 |
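For example (this is the digits=2 case of Project Euler problem 4):

print(highest_palindrome_product(2))  # 9009 == 91 * 99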
import os
def get_template_dir(format):
"""
Given a format string return the corresponding standard template
directory.
"""
return os.path.join(os.path.dirname(__file__), 'templates', format) | c204575b877c08700a7c236577016ed7e267f88b | 6,089 |
import os
def get_wildcard_dir(path):
"""If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory."""
ret = []
if os.path.isdir(path):
ret = [(os.path.join(path, "*"))]
elif os.path.exists(path):
ret = [path]
return ret | a2688463c02c9558140d52da567e8744ed775e99 | 6,091 |
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors | 33b9a4bfd44a70d93ae7b50df870d46765bf0cb7 | 6,092 |
def module_of_callable(c):
"""Find name of module where callable is defined
Arguments:
c {Callable} -- Callable to inspect
Returns:
str -- Module name (as for x.__module__ attribute)
"""
    # Ordinary function defined with def or lambda:
if type(c).__name__ == 'function':
return c.__module__
# Some callable, probably it's a class with __call_ method, so define module of declaration rather than a module of instantiation:
return c.__class__.__module__ | 116e46a3e75fcd138e271a3413c62425a9fcec3b | 6,093 |
def dot2string(dot):
"""Return a string repr of dots."""
return "*" * int(dot) | e2822bfe20dab5702ec4052445718398e66d993e | 6,095 |
import json
def invocation_parameter(s) :
"""argparse parameter conversion function for invocation request
parameters, basically these parameters are JSON expressions
"""
    try:
        expr = json.loads(s)
        return expr
    except (TypeError, ValueError):
return str(s) | cca1a9c3514def152295b10b17ef44480ccca5a9 | 6,097 |
def filter_functions(input_set, filter_set):
"""
Keeps only elements in the filter set
:param input_set:
:param filter_set:
:return:
"""
ns = {}
filter_low = {x.lower() for x in filter_set}
for x in input_set:
xl = x.lower()
if xl in filter_low:
ns[x] = input_set[x]
return ns | cabc321b6730df4a0c7987b83c6a2c3d6fb69c02 | 6,098 |
def dumpj(game):
"""Dump a game to json"""
return game.to_json() | bac5480ea2b3136cbd18d0690af27f94e4a2b6a3 | 6,100 |
import types
def _get_functions_names(module):
"""Get names of the functions in the current module"""
return [name for name in dir(module) if
isinstance(getattr(module, name, None), types.FunctionType)] | 581384740dc27c15ac9710d66e9b0f897c906b96 | 6,101 |
import ast
def ex_rvalue(name):
"""A variable store expression."""
return ast.Name(name, ast.Load()) | 4afff97283d96fd29740de5b7a97ef64aad66efe | 6,102 |
def stateDiff(start, end):
"""Calculate time difference between two states."""
consumed = (end.getTimestamp() - start.getTimestamp()).total_seconds()
return consumed | 1f76903e2486e2c378f338143461d1d15f7993a6 | 6,103 |
import copy
import random
def staticDepthLimit(max_depth):
"""Implement a static limit on the depth of a GP tree, as defined by Koza
in [Koza1989]. It may be used to decorate both crossover and mutation
operators. When an invalid (too high) child is generated, it is simply
replaced by one of its parents.
    This operator can be used to avoid memory errors occurring when the tree
gets higher than 90-95 levels (as Python puts a limit on the call stack
depth), because it ensures that no tree higher than *max_depth* will ever
be accepted in the population (except if it was generated at initialization
time).
:param max_depth: The maximum depth allowed for an individual.
:returns: A decorator that can be applied to a GP operator using \
:func:`~deap.base.Toolbox.decorate`
.. note::
If you want to reproduce the exact behavior intended by Koza, set
the *max_depth* param to 17.
.. [Koza1989] J.R. Koza, Genetic Programming - On the Programming of
Computers by Means of Natural Selection (MIT Press,
Cambridge, MA, 1992)
"""
def decorator(func):
def wrapper(*args, **kwargs):
keep_inds = [copy.deepcopy(ind) for ind in args]
new_inds = list(func(*args, **kwargs))
for i, ind in enumerate(new_inds):
if ind.height > max_depth:
new_inds[i] = random.choice(keep_inds)
return new_inds
return wrapper
return decorator | cdcb1e58a681b622ced58e9aa36562e1fedb6083 | 6,104 |
import re
def titlecase(string):
"""Turn string of words into titlecased words.
:type string: str
:param string: A string of words.
"""
return re.sub(
r"[A-Za-z]+('[A-Za-z]+)?",
lambda mo: mo.group(0)[0].upper() + mo.group(0)[1:].lower(),
string,
) | 77976d2ccad5b6b924b76d587a6883cf660497d0 | 6,105 |
def html_table_header():
"""Return the HTML row with header cells used in all tables."""
markup = ("<tr>" +
"<th>Column name</th>" +
"<th>DataType</th>" +
"<th><abbr title='Primary Key'>PK</abbr></th>" +
"<th><abbr title='Foreign Key'>FK</abbr></th>" +
"<th><abbr title='Not Null'>NN</abbr></th>" +
"<th><abbr title='Unique'>UQ</abbr></th>" +
"<th><abbr title='Binary'>BIN</abbr></th>" +
"<th><abbr title='Unsigned'>UN</abbr></th>" +
"<th><abbr title='Zero Fill'>ZF</abbr></th>" +
"<th><abbr title='Auto Increment'>AI</abbr></th>" +
"<th>Default</th>" +
"<th>Comment</th>" +
"</tr>")
return markup | 0fc65ca33cf23594dad007a3b0b16f1244ace62e | 6,106 |
def checkLoggedIn(session):
"""
checks if any player
has logged in yet
"""
try:
return session["roll"] is not None
except KeyError:
session["roll"] = None
return False | 436f51212abc9fe00abf11266cb90159a7f60bd4 | 6,107 |
def quantile(values, q):
"""
Returns q-th quantile.
"""
values = sorted(values)
size = len(values)
idx = int(round(size * q)) - 1
    if idx < 0:
raise ValueError("Sample size too small: %s" % len(values))
return values[idx] | 614f6d9dbdf586b802d6380e2880df3659faa0c2 | 6,108 |
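For example:

data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(quantile(data, 0.5))  # 5, the element at index round(10 * 0.5) - 1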
def replace_last(source_string, replace_what, replace_with):
""" Function that replaces the last ocurrence of a string in a word
:param source_string: the source string
:type source_string: str
:param replace_what: the substring to be replaced
:type replace_what: str
:param replace_with: the string to be inserted
:type replace_with: str
:returns: string with the replacement
:rtype: str
:Example:
>>> import chana.lemmatizer
>>> chana.lemmatizer.replace_last('piati','ti','ra')
'piara'
"""
    head, sep, tail = source_string.rpartition(replace_what)
    if not sep:  # replace_what not found; return the string unchanged
        return source_string
    return head + replace_with + tail | 6fbc36824b960fb125b722101f21b5de732194c5 | 6,109 |
def unescape(text):
"""Unescapes text
>>> unescape(u'abc')
u'abc'
>>> unescape(u'\\abc')
u'abc'
>>> unescape(u'\\\\abc')
u'\\abc'
"""
# Note: We can ditch this and do it in tokenizing if tokenizing
# returned typed tokens rather than a list of strings.
new_text = []
escape = False
for c in text:
if not escape and c == u'\\':
escape = True
continue
new_text.append(c)
escape = False
return u''.join(new_text) | 7db9fa5bb786ea5c1f988ee26eed07abe66a2942 | 6,110 |
def removeZeros(infile, outfile, prop=0.5, genecols=2):
"""Remove lines from `infile' in which the proportion of zeros is equal to or higher than `prop'. `genecols' is the number of columns containing gene identifiers at the beginning of each row. Writes filtered lines to `outfile'."""
nin = 0
nout = 0
with open(infile, "r") as f:
hdr = f.readline()
columns = hdr.split("\t")
ncols = len(columns)-genecols
maxzeros = ncols*prop
with open(outfile, "w") as out:
out.write(hdr)
while True:
line = f.readline()
if line == '':
break
nin += 1
pline = line.rstrip("\r\n").split("\t")
nzeros = 0
for v in pline[genecols:]:
if float(v) == 0:
nzeros += 1
if nzeros < maxzeros:
out.write(line)
nout += 1
return (nin, nout) | 43feba21513be4a8292c08918e16b3e34a73c341 | 6,112 |
def _NormalizeDiscoveryUrls(discovery_url):
"""Expands a few abbreviations into full discovery urls."""
if discovery_url.startswith('http'):
return [discovery_url]
elif '.' not in discovery_url:
        raise ValueError('Unrecognized value "%s" for discovery url' % discovery_url)
api_name, _, api_version = discovery_url.partition('.')
return [
'https://www.googleapis.com/discovery/v1/apis/%s/%s/rest' % (
api_name, api_version),
'https://%s.googleapis.com/$discovery/rest?version=%s' % (
api_name, api_version),
] | f361d01006a6e7f7487e06db375ae703ffde0021 | 6,114 |
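For instance:

print(_NormalizeDiscoveryUrls('drive.v3'))
# ['https://www.googleapis.com/discovery/v1/apis/drive/v3/rest',
#  'https://drive.googleapis.com/$discovery/rest?version=v3']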
def getPositionPdf(i):
"""Return the position of the square on the pdf page"""
return [int(i/5), i%5] | 859fd00c1475cfcb4cd93800299181b77fdd6e93 | 6,116 |
def dur_attributes_to_dur(d_half, d_semiqvr):
"""
Convert arrays of d_hlf and d_sqv to d.
- See eq. (2) of the paper.
"""
def d_hlf_dur_sqv_to_d(d_hlf, d_sqv):
return 8 * d_hlf + d_sqv
d = d_hlf_dur_sqv_to_d(d_half, d_semiqvr)
return d | aeea74f929ef94d94178444df66a30d0d017fd4e | 6,117 |
import shutil
import traceback
def copy_dir(source, destination):
"""
Copy a directory tree and returns destination path.
Parameters:
source (string): source containing root directory path
destination (string): target root directory path
Returns:
destination (string): copied destination path
"""
try:
shutil.copytree(
source, destination, ignore=shutil.ignore_patterns('.svn'))
return destination
except Exception:
print(traceback.format_exc()) | 5751da6232a64902f0030271671f3e74ecda97e0 | 6,118 |
def _scale_fct_fixed(*args, scale=0):
"""
This is a helper function that is necessary because multiprocessing requires
a picklable (i.e. top-level) object for parallel computation.
"""
return scale | 75eb728f37466aee8664d5fe435d379cf5d7c6f2 | 6,119 |
def filter_score_grouped_pair(post_pair):
"""
Filter posts with a positive score.
:param post_pair: pair of post_id, dict with score, text blocks, and comments
:return: boolean indicating whether post has a positive score
"""
_, post_dict = post_pair
post_score = post_dict['score']
return post_score and int(post_score) > 0 | c824eacd43b44c85fc7acf102fdde2413a7c4d0e | 6,120 |
def get_explicit_positions(parsed_str_format):
"""
>>> parsed = parse_str_format("all/{}/is/{2}/position/{except}{this}{0}")
>>> get_explicit_positions(parsed)
{0, 2}
"""
return set(
map(
int,
filter(
lambda x: isinstance(x, str) and str.isnumeric(x),
(x[1] for x in parsed_str_format),
),
)
) | f6f3720443385f5d514de15d3d63d45cd4ef3408 | 6,122 |
def add_upper_log_level(logger, method_name, event_dict):
"""
Add the log level to the event dict.
"""
event_dict["level"] = method_name.upper()
return event_dict | 36ccdf335473136fe8188ff99ed539920ee39fa7 | 6,126 |
def is_zero_dict(feature_dict):
    """
    Identifies empty feature vectors
    """
    has_any_features = False
    for key in feature_dict:
        has_any_features = has_any_features or feature_dict[key]
return not has_any_features | eefb3df1547917fbc11751bbf57212f95388e8b2 | 6,127 |
def getuniqueitems(userchoices):
"""return a list of unique items given a bunch of userchoices"""
items = []
for userchoice in userchoices:
if userchoice.item not in items:
items.append(userchoice.item)
return items | a7885556604153cf756fb6a29c2e870c27d47337 | 6,128 |
import binascii
import os
def image_url_salt():
"""Helper function that generates salt to append to an image URL to prevent browser caches from loading
old images"""
return binascii.b2a_hex(os.urandom(4)).decode('us-ascii') | 0b8c803682ff21b56c039a12da184d699ffc47e7 | 6,129 |
def zigzag2(i, curr=.45, upper=.48, lower=.13):
"""
Generalized version of the zig-zag function.
Returns points oscillating between two bounds
linearly.
"""
if abs(i) <= (upper-curr):
return curr + i
else:
i = i - (upper-curr)
i = i%(2*(upper-lower))
if i < (upper-lower):
return upper-i
else:
return 2*lower-upper+i | a51624af520121eb7285b2a8a5b4dc5ffa552147 | 6,131 |
def discriminateEvents(events, threshold):
"""
Discriminate triggers when different kind of events are on the same channel.
A time threshold is used to determine if two events are from the same trial.
Parameters
----------
events : instance of pandas.core.DataFrame
Dataframe containing the list of events obtained with
mne.find_events(raw).
threshold : float
Time threshold in milliseconds. Keeps an event if the time difference
with the next one is superior than threshold.
Returns:
newData : instance of pandas.series.Series
List of trial number filling the requirements.
"""
# calculate the rolling difference (between n and n+1)
events['diff'] = events[0].diff()
# replace the nan with the first value
events['diff'].iloc[0] = events.iloc[0, 0]
# select events with time distance superior to threshold
events = events[events['diff']>threshold]
events = events.reset_index(drop=True)
del events['diff']
return events | 0078548ea463c01d88b574185b3dcb5632e5cd13 | 6,133 |
import random
import string
def generate_room_number():
"""
Generates a room number composed of 10 digits.
"""
return "".join(random.sample(string.digits, 10)) | 133e7463106df89fb68de6a7dfa7c718bc1bc5ba | 6,134 |
def parse_movie(line, sep='::'):
"""
Parses a movie line
Returns: tuple of (movie_id, title)
"""
fields = line.strip().split(sep)
movie_id = int(fields[0]) # convert movie_id to int
title = fields[1]
return movie_id, title | 9d7a13ca3ddf823ff22582f648434d4b6df00207 | 6,136 |
import logging
def formatMessage(data):
"""
Format incoming message before passing to Discord
"""
logging.info("Formatting message payload...")
time = (data.occurredAt).split("T")
message = [":alarm_clock: __**Meraki Alert**__ :alarm_clock: "]
message.append(f"**Device:** {data.deviceName}")
message.append(f"**Message info:** {data.alertType}")
message.append(f"**Occurred at:** {time[0]} - {time[1][:8]}")
if len(data.alertData) > 0:
message.append(f"**Additional data:** ```fix\r\n{data.alertData}\r\n```")
sendmessage = ""
for each in message:
sendmessage += each + "\r\n"
return sendmessage | 21d7a50951aeecb6917479622f4131b7ddcfda00 | 6,137 |
import time
def twait(phrase, tn, tout=-1, logging='off', rcontent=False, screenprint=False):
""" telnetlib wait with optional timeout and optional logging"""
# Adding code to allow lists for phrase
finalcontent = ' '
    # Start time, in seconds since the epoch
startTime = int(time.time())
while True:
# This is the current time
currentTime = int(time.time())
if tout != -1:
# This is the time since the start of this loop
# if it exceeds the timeout value passed to it
# then exit with a return of 0
if (currentTime - startTime) > tout:
if logging == 'on':
#Adding the -e-e-> to differentiate from device output
if screenprint:
print('-e-e->It has been ' + str(tout) + ' seconds. Timeout!')
if not rcontent:
return 0
return 0, finalcontent
# Eager reading back from the device
content = (tn.read_very_eager().decode().strip())
if content.strip() != '':
finalcontent += content
# if the returned content isn't blank. This stops
# it from spamming new line characters
if content.strip() != '':
if screenprint:
print(content, end='')
# content was found! Return a 1 for success
if isinstance(phrase, str):
if phrase in content:
if not rcontent:
return 1
return 1, finalcontent
if isinstance(phrase, list):
count = 1
for p in phrase:
if p in content:
if not rcontent:
return count
return count, finalcontent
count += 1 | 9c4308e873321fd556d8eec2668981fc2843ae87 | 6,138 |
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
    for _ in range(nleading):
        stripped = s.lstrip(' \t')
        if stripped.startswith('\r\n'):
            s = stripped[2:]
        elif stripped.startswith('\n'):
            s = stripped[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s | cd9c55d4ac7828d9506567d879277a463d896c46 | 6,141 |
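A quick check of the bounded stripping:

s = '  \n\nbody\n\n'
print(repr(strip_newlines(s, nleading=1, ntrailing=1)))  # '\nbody\n'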
def xyz_to_lab(x_val, y_val, z_val):
"""
Convert XYZ color to CIE-Lab color.
:arg float x_val: XYZ value of X.
:arg float y_val: XYZ value of Y.
:arg float z_val: XYZ value of Z.
:returns: Tuple (L, a, b) representing CIE-Lab color
:rtype: tuple
D65/2° standard illuminant
"""
xyz = []
for val, ref in (x_val, 95.047), (y_val, 100.0), (z_val, 108.883):
val /= ref
val = pow(val, 1 / 3.0) if val > 0.008856 else 7.787 * val + 16 / 116.0
xyz.append(val)
x_val, y_val, z_val = xyz # pylint: disable=unbalanced-tuple-unpacking
cie_l = 116 * y_val - 16
cie_a = 500 * (x_val - y_val)
cie_b = 200 * (y_val - z_val)
return cie_l, cie_a, cie_b | c2478772659a5d925c4db0b6ba68ce98b6537a59 | 6,142 |
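Converting the D65 reference white itself should give L = 100 with zero chroma:

L, a, b = xyz_to_lab(95.047, 100.0, 108.883)
print(L, a, b)  # 100.0 0.0 0.0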
def find_largest_helper(n, maximum):
"""
:param n: int, the number to find the largest digit
:param maximum: int, the largest digit
:return: int, the largest digit
"""
if n % 10 > maximum:
maximum = n % 10
    if n // 10 == 0:
return maximum
else:
return find_largest_helper(n // 10, maximum) | ddd49839be6a3ab6ece7cabde41f3978df1ba6f3 | 6,144 |
import os
def get_namefiles(pth, exclude=None):
"""Search through a path (pth) for all .nam files.
Parameters
----------
pth : str
path to model files
exclude : str or lst
File or list of files to exclude from the search (default is None)
Returns
-------
namefiles : lst
List of namefiles with paths
"""
namefiles = []
for root, _, files in os.walk(pth):
namefiles += [
os.path.join(root, file) for file in files if file.endswith(".nam")
]
if exclude is not None:
if isinstance(exclude, str):
exclude = [exclude]
exclude = [e.lower() for e in exclude]
pop_list = []
for namefile in namefiles:
for e in exclude:
if e in namefile.lower():
pop_list.append(namefile)
for e in pop_list:
namefiles.remove(e)
return namefiles | c80da9dba1dbea8505d119ba61745c7c43b4f25b | 6,146 |