content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
from typing import Dict
import pkgutil
import io
def _load_categories(filepath: str) -> Dict[str, str]:
"""Load data for domain category matching.
Args:
filepath: relative path to csv file containing domains and categories
Returns:
Dictionary mapping domains to categories
"""
data = pkgutil.get_data(__name__, filepath)
if not data:
raise FileNotFoundError(f"Couldn't find file {filepath}")
content = io.TextIOWrapper(io.BytesIO(data), encoding='utf-8')
categories = {}
for line in content.readlines():
domain, category = line.strip().split(',')
categories[domain] = category
return categories | 25ec3c4808d4a9624112e277c0597c868a12572c | 8,922 |
import csv
def readTsv(fileLines, d="\t"):
    """Wrap TSV file lines in a csv.DictReader.

    fileLines: iterable of text lines (first line is the header)
    d: field delimiter, tab by default
    """
    return csv.DictReader(fileLines, delimiter=d)
import requests
import pprint
def get_location_data(location_list):
    """Query the Unlock places API for each location and collect the results.

    iterate through the list of locations and extract data from the API for
    each one. output the data in json format.

    Args:
        location_list: list of place-name strings to look up.

    Returns:
        dict mapping each location name to its decoded JSON API response.
    """
    location_data_json = {}
    address = 'http://unlock.edina.ac.uk/ws/search?name=' # address of API
    country_code = 'GB'
    # Report duplicate names up front; duplicates simply overwrite each other
    # in the result dict below.
    dups = set([x for x in location_list if location_list.count(x) > 1])
    print(dups)
    for location in location_list:
        url = address+location+"&countrycode="+country_code+"&format=json"
        print("Trying URL ",location_list.index(location)," of ",len(location_list),":", url)
        data = requests.get(url)
        data_json = data.json()
        print("DATA JSON FOR:", location)
        pprint.pprint(data_json)
        print('============')
        location_data_json[location] = data_json
    return location_data_json
def get_execution_platform(command, filename):
    """Guess the repy execution platform from a command and a file name.

    The command suffix takes precedence over the file extension; an
    unrecognized extension is assumed to be repyV1 unless it is '.r2py'.

    <Arguments>
        command: The command that should be parsed.
        filename: The file whose repy version should be returned.
    <Side Effects>
        None
    <Exceptions>
        None
    <Returns>
        Either 'repyV1' or 'repyV2'.
    """
    for suffix, platform in (('v2', 'repyV2'), ('v1', 'repyV1')):
        if command.endswith(suffix):
            return platform
    # Information on extensions for repy programs can be found on #1286.
    return 'repyV2' if filename.endswith('.r2py') else 'repyV1'
import random
import string
def get_random_string():
    """Return a random 32-character string of ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(32))
def text():
    """Return a template W3C Web Annotation (anno.jsonld) dict.

    All "string"/0 entries are placeholder values to be filled by callers.
    """
    return {
        "@context": "http://www.w3.org/ns/anno.jsonld",
        "type": "Annotation",
        # What the annotation says and who created it.
        "body": {
            "creator": "user",
            "type": "TextualBody",
            "value": "string"
        },
        # The software that produced the annotation.
        "generator": {
            "homepage": "http://mnemosyne.ml",
            "id": "string",
            "name": "Mnemosyne",
            "type": "Mnemosyne"
        },
        # The document fragment the annotation points at.
        "target": {
            "id": "string",
            "type": "TextQuoteSelector",
            "exact": "string",
            "format": "string",
            "source": "string",
            "prefix": 0,
            "suffix": 0,
            "refinedBy": {
                "type": "TextPositionSelector",
                "start": "/div[2]",
                "end": "/div[2]"
            },
        },
    }
import os
def _generate_bytes(size_bytes: int, times: int = 1):
"""
Generates a list of <times> random bytes objects,
each the size of <size_bytes>.
"""
return [os.urandom(size_bytes) for _ in range(times)] | 789f912354cd02d836f9ca1caa177cb8c8a1da37 | 8,934 |
import torch
def bucketize(tensor, bucket_boundaries):
    """Equivalent to numpy.digitize.

    Notes
    -----
    Torch does not have a built in equivalent yet. Snippet adapted from
    https://github.com/pytorch/pytorch/issues/7284
    """
    # Count, per element, how many boundaries it exceeds.
    counts = torch.zeros_like(tensor, dtype=torch.int32)
    for edge in bucket_boundaries:
        counts = counts + (tensor > edge).int()
    return counts
import re
import inspect
def _get_task_path(wrapped, instance) -> str:
"""Get the synthetic URL path for a task, based on the `wrapt` parameters."""
funcname = wrapped.__name__
if funcname.startswith("_") and not funcname.endswith("_"):
funcname = re.sub(r"^_+", repl="", string=funcname, count=1)
if instance is None:
return funcname
else:
if inspect.isclass(instance):
return "/".join([instance.__name__, funcname])
else:
return "/".join([instance.__class__.__name__, funcname]) | 16ca96d29abddfa104afc5a0ec466e0bd1d202dc | 8,936 |
import os
def collect_dmripreproc_output(dmriprep_dir, subject_id, session_id = None):
    """
    Collect the dmripreproc output files for a specific subject and session.

    Args:
        dmriprep_dir: Root directory of the dmripreproc derivatives.
        subject_id: Subject label without the "sub-" prefix.
        session_id: Optional session label without the "ses-" prefix.

    Returns:
        Dict with paths under keys 'eddy_file', 'bvec', 'bval',
        'eddy_avg_b0' and 'eddy_mask'.

    Raises:
        FileNotFoundError: If the subject/session dwi folder does not exist.
        StopIteration: If no .bval file is present in the folder.
    """
    # Note: the original listed dmriprep_dir first and immediately discarded
    # the result; that dead call has been removed.
    subject_name = "sub-" + subject_id
    # An empty session component is collapsed away by os.path.join.
    session_name = "ses-" + session_id if session_id else ""
    rel_sub_path = os.path.join(subject_name, session_name, "dwi")
    dmri_files_path = os.path.join(dmriprep_dir, rel_sub_path)
    dmri_files = os.listdir(dmri_files_path)
    dmri_output = dict()
    dmri_output['eddy_file'] = os.path.join(dmri_files_path, 'eddy_corrected.nii.gz')
    dmri_output['bvec'] = os.path.join(dmri_files_path, 'eddy_corrected.eddy_rotated_bvecs')
    # The .bval file keeps its original (subject-specific) name, so search for it.
    dmri_output['bval'] = os.path.join(dmri_files_path, next(f for f in dmri_files if f.endswith('.bval')))
    dmri_output['eddy_avg_b0'] = os.path.join(dmri_files_path, 'eddy_corrected_avg_b0.nii.gz')
    dmri_output['eddy_mask'] = os.path.join(dmri_files_path, 'eddy_corrected_roi_mask.nii.gz')
    return dmri_output
def make_email(slug):
    """Get the email address for the given slug"""
    return f'{slug}@djangogirls.org'
import os
import glob
def list_files_of_extensions(folder, extensions):
    """
    List files in the specified folder which have the specified extensions.
    Does not traverse subfolders.

    folder: string path to folder containing files
    extensions: only files with these extensions will be returned
    Return a list of paths to the files (grouped per extension)
    """
    matches = []
    for ext in extensions:
        matches.extend(glob.glob(os.path.join(folder, "*" + ext)))
    return matches
def s_input(prompt : str = ">", accepted_inputs : list = ["break"], case_sensitive : bool = False, fail_message : str = "") -> str:
    """Keeps asking for user input until the answer is acceptable.

    Args:
        prompt (str, optional): User is prompted with this each time. Defaults to ">".
        accepted_inputs (list, optional): Inputs that allow the user to continue. Defaults to ["break"].
        case_sensitive (bool, optional): Whether the input is case sensitive. Defaults to False.
        fail_message (str, optional): Printed after each invalid attempt; empty for no message.

    Returns:
        str: The valid user input. Will be lowercase if case_sensitive is False.
    """
    answer = ""
    attempted = False  # suppress the fail message before the first attempt
    while answer not in accepted_inputs:
        if attempted and fail_message != "":
            print(fail_message)
        answer = input(prompt)
        if not case_sensitive:
            answer = answer.lower()
        attempted = True
    return answer
import os
import sqlite3
def get_ports(db_name):
    """Extract all local_port values that are being used.

    Args:
        db_name: Path of the SQLite database to read.

    Return:
        List of local_port values, or None if the database is missing or
        the query fails.
    """
    if not os.path.isfile(db_name):
        print("DB %s DOES NOT EXIST!" % db_name)
        return None
    try:
        conn = sqlite3.connect(db_name)
        cursor = conn.cursor()
        rows = cursor.execute('SELECT local_port FROM jupyter_talon').fetchall()
        return [row[0] for row in rows]
    except Exception as e:
        print("DB READ FAILED!", e)
        return None
def WRAP(r, Hr, wrapname):
    """Wrapping
    (macro)
    e.g. WRAP(r, ['D', 'E', 'F'], 'X') (or r.wrap(['D', 'E', 'F'], 'X')
    (Tutorial D equivalent: r WRAP { D, E, .., F } AS X
    """
    # Adds attribute `wrapname` holding each tuple's projection onto Hr,
    # then removes the original Hr attributes from the heading.
    return r.extend([wrapname], lambda t:{wrapname:t.project(Hr)}).remove(Hr)
def is_empty(iterable):
    """
    This filter checks whether the given iterable is empty.

    :param iterable: The requested iterable
    :type iterable: ~collections.abc.Iterable

    :return: Whether or not the given iterable is empty
    :rtype: bool
    """
    if iterable:
        return False
    return True
import requests
from bs4 import BeautifulSoup
def priprav_bs(url, params):
    """Fetch a whole page and return it parsed as BeautifulSoup.

    url: str
    params: dict

    Returns: bs4.BeautifulSoup
    """
    # Present a desktop Firefox user agent to avoid trivial bot blocking.
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:80.0) Gecko/20100101 Firefox/80.0'}
    r = requests.get(url, params=params, headers=headers)
    # Force UTF-8 in case the server omits or mislabels the charset.
    r.encoding = 'utf-8'
    return BeautifulSoup(r.text, 'html.parser')
from functools import reduce
def solve(ar):
    """
    Given an array of 5 integers, return the minimal and maximal sum of 4 out of
    5 of the integers.
    """
    # Sorted ascending: the four smallest are ar[:4], the four largest ar[1:5].
    ar.sort()
    return (sum(ar[0:4]), sum(ar[1:5]))
def calculate_border(grid_dims, width, height):
    """Calculate each line in all borders.

    Args:
        grid_dims: tuple of the number of tiles in grid. In format `(row, column)`
        width: float width in pixels
        height: float height in pixels

    Returns:
        list: dicts with `x`/`y` endpoint pairs, vertical lines first then
        horizontal lines.
    """
    n_rows, n_cols = grid_dims
    vertical = [
        {'x': [col * width, col * width], 'y': [0, height * n_rows]}
        for col in range(n_cols + 1)
    ]
    horizontal = [
        {'x': [0, width * n_cols], 'y': [row * height, row * height]}
        for row in range(n_rows + 1)
    ]
    return vertical + horizontal
def _parse_face(face_row):
"""Parses a line in a PLY file which encodes a face of the mesh."""
face = [int(index) for index in face_row.strip().split()]
# Assert that number of vertices in a face is 3, i.e. it is a triangle
if len(face) != 4 or face[0] != 3:
raise ValueError(
'Only supports face representation as a string with 4 numbers.')
return face[1:] | c0cf7472705544c3089a6c1c82190bcb8bd5f463 | 8,948 |
def f(x):
    """return x*x"""
    return x ** 2
def _check_to_numpy(plugin, tensor):
"""Check the tensor and return a numpy.ndarray."""
np_value = tensor.asnumpy()
if plugin == 'scalar':
if np_value.size == 1:
return np_value
raise ValueError('The tensor holds more than one value, but the scalar plugin expects on value.')
if plugin == 'image':
if np_value.ndim == 4:
return np_value
raise ValueError('The tensor seems not to hold a valid image.')
if plugin in ('tensor', 'histogram'):
if np_value.ndim > 0:
return np_value
raise ValueError('The tensor should not be empty.')
return np_value | 919ff69a7eebf2d32d72f343ff51c9e41c915c5b | 8,952 |
import logging
import os
import sys
def setup_logger(name, level=logging.INFO):
    """Create (or fetch) a named logger that writes to stdout.

    Also ensures a local 'log' directory exists. Note that each call adds a
    fresh stream handler to the logger.
    """
    if not os.path.exists('log'):
        os.makedirs('log')  # pragma: no cover
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(stream_handler)
    return logger
def selection_sort(lst):
    """Sort `lst` in place with selection sort and return it.

    Args:
        lst: List of mutually comparable items.

    Returns:
        The same list object, sorted ascending (an empty list stays empty).
    """
    # Note: the original `return False` guard for an empty list was
    # unreachable (the loop body never runs when len(lst) == 0), so it has
    # been removed; behavior is unchanged.
    for i in range(len(lst)):
        min_position = i
        for j in range(i + 1, len(lst)):
            if lst[j] < lst[min_position]:
                min_position = j
        lst[i], lst[min_position] = lst[min_position], lst[i]
    return lst
def get_default_palette():
    """
    Return the default color palette, which slightly
    tweaks the Digital Standards for better flexibility on
    light/dark backgrounds.
    """
    return {
        # the standard set of colors
        "blue": "#2176d2",
        "green": "#58c04d",
        "yellow": "#f3c613",
        "orange": "#f99300",
        "red": "#f40000",
        "electric-blue": "#25cef7",
        "purple": "#d233ff",
        "dark-blue": "#2176d2",
        # grays
        "almost-gray": "#353d42",
        "dark-gray": "#2a3135",
        "medium-gray": "#666666",
        "light-gray": "#868b8e",
        "sidewalk": "#cfcfcf",
        # black/white
        "black": "#000000",
        "white": "#ffffff",
        "almost-black": "#121516",
        "almost-white": "#f5f5f5",
    }
def get_indent(text):
    """Get text indentation.

    Args:
        text: Text to analyze.

    Returns:
        Number of leading whitespace characters.

    Notes:
        Input text must be tab expanded, otherwise indent will be incorrect.
    """
    stripped = text.lstrip()
    return len(text) - len(stripped)
def get_mark(name, task):
    """Getting marks of students for certain student and task"""
    # Prompts on stdin; raises ValueError if the entry is not an integer.
    return int(input('Mark for {}, task {} > '.format(name, task)))
def _format_line(k, v):
"""
Format a readable line.
"""
return "0x%08x: %20s --> %s\n" % (v.start, str(k), str(v)) | 46f77e43a695933b89987e854cd6c6d91d05c1db | 8,961 |
def get_sample_sheet_text(p7_index_length, p5_index_length):
    """
    Gets the sample sheet text that will demux cells into one set of files
    """
    # Single fake sample whose indices are all-N, sized to the run's indices.
    data_line = ',fake,fake,%s,%s' % ('N' * p7_index_length, 'N' * p5_index_length)
    header = "[DATA]\nLane,Sample_ID,Sample_Name,index,index2\n"
    return header + data_line
def stacked_bar(tag, title, datalist):
    """Build a CanvasJS 100%-stacked-bar <script> snippet for an HTML report.

    data list should be a list of dictionary formatted as follows
    {"name": "A"
     "data": {
       "R1_mapped": 50,
       "R2_mapped": 50,
       "R1_unmapped": 50,
       "R2_unmapped": 50,
       }
    }

    Args:
        tag: suffix for the target "chartContainer<tag>" DOM element id.
        title: chart title text.
        datalist: per-series dicts as described above.

    Returns:
        str: a <script> block that renders the chart on window.onload.
    """
    # Render one CanvasJS series block per item in datalist.
    dataitems = ""
    for item in datalist:
        datatext = []
        for k,v in item['data'].items():
            datatext.append('{y:%s,label:"%s"}' % (v,k))
        datatext = ",\n ".join(datatext)
        params = {
            "name": item['name'],
            "datatext": datatext}
        dataitems += """
        {
            type: "stackedBar100",
            showInLegend: true,
            name: "%(name)s",
            dataPoints: [
            %(datatext)s
            ]
        },
        """ % params
    # Splice the series blocks into the page-level chart template.
    metadata = {
        'tag': tag,
        'title': title,
        'dataitems': dataitems}
    script = """
    <script type="text/javascript">
    window.onload = function () {
        var chart = new CanvasJS.Chart("chartContainer%(tag)s",
        {
            theme: "theme2",
            title:{
                text: "%(title)s"
            },
            animationEnabled: true,
            axisY:{
                title: "percent"
            },
            legend :{
                horizontalAlign: 'center',
                verticalAlign: 'bottom'
            },
            toolTip: {
                shared: true
            },
            data:[
            %(dataitems)s
            ]
        });
        chart.render();
    }
    </script>
    """
    return script % metadata
def cli(ctx, workflow_id, label):
    """Get a list of workflow input IDs that match the given label. If no input matches the given label, an empty list is returned.

    Output:

        list of workflow inputs matching the label query
    """
    # Delegates to the Galaxy workflows client held on the context object.
    return ctx.gi.workflows.get_workflow_inputs(workflow_id, label)
import os
def _get_rpg_files(path_to_files, level):
"""Returns list of RPG files for one day sorted by filename."""
files = os.listdir(path_to_files)
files = [f"{path_to_files}{file}" for file in files
if file.endswith(str(level))]
files.sort()
return files | 27ffcedb5c7f5fde6bf440c164327c81c40c0d73 | 8,966 |
def feh_calc(theta, V, EW, power):
    """Calculate the metallicity for a given magnitude and EW."""
    a, b, c, d, e = theta
    # Linear terms, one EW power term, and a V*EW cross term.
    return a + b * V + c * EW + d * EW ** power + e * V * EW
def metadata_parser(f):
    """
    Parses a metadata file into dictionary.

    The metadata file is expected to have the following format:
    id;name;dtype
    where:
    - id denotes packet id (unsigned char or 1 byte uint)
    - name is the data channel name (str)
    - dtype is expected datatype (str)

    :param f: A file object with the path to metadata.
    :type f: file object
    :return: metadata, a dict where id and name is one-to-one, and both
        are keywords.
    """
    metadata = {'ids': {}, 'names': {}}
    for raw in f:
        fields = raw.strip().split(';')
        packet_id, channel = fields[0], fields[1]
        if packet_id in metadata['ids'] or channel in metadata['names']:
            print('Warning: overlapping entry on id %s or name "%s"' % (packet_id, channel))
        entry = {
            'id': int(packet_id),
            'name': channel,
            'type': fields[2]
        }
        # The same entry object is reachable under both the id and the name.
        metadata['ids'][packet_id] = entry
        metadata['names'][channel] = entry
    return metadata
def fio_json_output_with_error(fio_json_output):
    """
    Example of fio --output-format=json output, with io_u error. Based on
    actual test run.
    """
    error_message = (
        "fio: io_u error on file /mnt/target/simple-write.0.0: "
        "No space left on device: write offset=90280222720, buflen=4096"
    )
    return "\n".join([error_message, fio_json_output])
def supprimeExtension(str):
    """Remove the file extension from the given string.

    Apply this function before supprimePonctuation.

    Bug fix: pieces are now re-joined with '.' so interior dots are kept
    ("a.b.c" -> "a.b"); the original joined with '' and returned "ab".

    :param str: string whose extension should be removed (the parameter
        keeps its original name, shadowing the builtin, to preserve the
        keyword interface)
    :return: the name without its final extension ('' when there is no dot)
    """
    return ".".join(str.split(".")[0:-1])
def has_equal_properties(obj, property_dict):
    """
    Returns True if the given object has the properties indicated by the keys of the given dict, and the values
    of those properties match the values of the dict
    """
    for attr_name, expected in property_dict.items():
        try:
            actual = getattr(obj, attr_name)
        except AttributeError:
            # Missing attribute counts as a mismatch.
            return False
        if actual != expected:
            return False
    return True
import os
def get_path_components(path):
    """Split a path into all of its components.

    Based on:
    http://stackoverflow.com/questions/3167154/how-to-split-a-dos-path-into-its-components-in-python
    """
    parts = []
    while True:
        path, tail = os.path.split(path)
        if tail:
            parts.append(tail)
        else:
            # A remaining head (e.g. the filesystem root) is kept too.
            if path:
                parts.append(path)
            break
    parts.reverse()
    return parts
def _GetCoveredBuilders(trybot_config):
"""Returns a dict mapping masters to lists of builders covered in config."""
covered_builders = {}
for master, builders in trybot_config.iteritems():
covered_builders[master] = builders.keys()
return covered_builders | e759be62c1c57045dca98e40f83beda6a7ddf7e5 | 8,974 |
import os
from typing import OrderedDict
def posix_times(space):
    """ posix_times - Get process times """
    # os.times() -> (user, system, children-user, children-system, elapsed).
    utime, stime, cu_time, cs_time, rtime = os.times()
    # Key names and ordering mirror PHP's posix_times() return array;
    # `space` is the interpreter object space that boxes the values.
    rdct_w = OrderedDict()
    rdct_w['ticks'] = space.newint(int(rtime))
    rdct_w['utime'] = space.newint(int(utime))
    rdct_w['stime'] = space.newint(int(stime))
    rdct_w['cutime'] = space.newint(int(cu_time))
    rdct_w['cstime'] = space.newint(int(cs_time))
    return space.new_array_from_rdict(rdct_w)
def decode_from_bioes(tags):
    """
    Decode from a sequence of BIOES tags, assuming default tag is 'O'.

    Args:
        tags: a list of BIOES tags

    Returns:
        A list of dict with start_idx, end_idx, and type values.
    """
    res = []
    ent_idxs = []
    cur_type = None
    def flush():
        # Emit the entity collected so far (if any); callers reset ent_idxs.
        if len(ent_idxs) > 0:
            res.append({
                'start': ent_idxs[0],
                'end': ent_idxs[-1],
                'type': cur_type})
    for idx, tag in enumerate(tags):
        if tag is None:
            tag = 'O'
        if tag == 'O':
            flush()
            ent_idxs = []
        elif tag.startswith('B-'): # start of new ent
            flush()
            ent_idxs = [idx]
            cur_type = tag[2:]
        elif tag.startswith('I-'): # continue last ent
            ent_idxs.append(idx)
            cur_type = tag[2:]
        elif tag.startswith('E-'): # end last ent
            ent_idxs.append(idx)
            cur_type = tag[2:]
            flush()
            ent_idxs = []
        elif tag.startswith('S-'): # start single word ent
            flush()
            ent_idxs = [idx]
            cur_type = tag[2:]
            flush()
            ent_idxs = []
    # flush after whole sentence
    flush()
    return res
def generate_body(du_dept_dict):
    """Return HTML that will be used for the body content of the box report BlogPage"""
    items = []
    for dept, usage in du_dept_dict.items():
        items.append('<li>{}: {} GB</li>'.format(dept, int(usage) / 1000000))  # Convert to GB
    return '<ul>' + ''.join(items) + '</ul>'
def lookup(cubeWithData, cubeWithMap, sharedIndex):
    """
    Returns the value of cubeWithData indexed by the index of cubeWithMap.
    cubeWithData must be indexed by sharedIndex and cubeWithData values must correspond to elements of sharedIndex.
    For example: Let's say you have a cube with an estimated inflation rate by Country ("inflation_rate" is the name of the cube; "country" is the name of the index) and you want to assign it to the corresponding Company depending on its location. On the other hand, there's a many-to-one map where each Company is allocated to a single Country ("country_to_company_allocation"). The sharedIndex, in this case, is Country ("country").
    As a result,
        cp.lookup( inflation_rate , country_to_company_allocation , country )
    will return the estimated inflation rate by Company.
    """
    # (cubeWithMap == sharedIndex) builds a one-hot selection mask over
    # sharedIndex; multiplying and summing over sharedIndex collapses it,
    # leaving cubeWithData re-indexed by cubeWithMap's own dimensions.
    _final_cube = ((cubeWithMap == sharedIndex) * cubeWithData).sum(sharedIndex)
    return _final_cube
def notimplemented(f):
    """Takes a function f with a docstring and replaces it with a function which
    raises NotImplementedError(f.__doc__). Useful to avoid having to retype
    docstrings on methods designed to be overridden elsewhere."""
    def stub(self, *args, **kws):
        raise NotImplementedError(f.__doc__)
    # Preserve the original identity so help() and tracebacks stay useful.
    stub.__doc__ = f.__doc__
    stub.__name__ = f.__name__
    return stub
def get_channel_index(image, label):
    """
    Get the channel index of a specific channel

    :param image: The image
    :param label: The channel name
    :return: The channel index (None if not found)
    """
    channel_labels = image.getChannelLabels()
    try:
        return channel_labels.index(label)
    except ValueError:
        return None
def error(v1, v2):
    """Returns the relative error with respect to the first value.

    Positive error if program output is greater than AREMA table.
    Negative error if program output is less than AREMA table.
    """
    return (v2 - v1) / v1
def parse_both_2(image_results):
    """Parse tags and repos from an `image_results` dict of the form:

    {
        'image': [{
            'pluginImage': {'ibmContainerRegistry': ..., 'publicRegistry': ...},
            'driverImage': {'ibmContainerRegistry': ..., 'publicRegistry': ...},
            'pluginBuild': 'X.X.X',
            'driverBuild': 'X.X.X',
            'pullPolicy': 'Always'
        }],
        'pluginImage': [{SAME CONTENTS}],
        'driverImage': [{SAME CONTENTS}]
    }

    Current known apps with this format: ibm-object-storage-plugin.

    Returns a (tags, repos) tuple of lists.
    """
    tags = []
    repos = []
    for key, value in image_results['image'][0].items():
        if "Build" in key:
            tags.append(value)
        elif "Image" in key:
            repos.append(value['publicRegistry'])
    return tags, repos
import os
def splitexts(path, exts=None):
    """
    Split each extension of a given file (.tar.gz, .tar, .pp, etc).

    Args:
        path: The file path to analyse.
        exts: Optional list that the found extensions are appended to; a
            fresh list is used when omitted. (The original implementation
            silently discarded this argument.)

    Returns:
        Tuple of (original path, list of extensions in left-to-right order).
    """
    found = []
    base, suffix = os.path.splitext(path)
    while suffix:
        found.append(suffix)
        base, suffix = os.path.splitext(base)
    # Extensions were collected right-to-left; restore file order.
    found.reverse()
    if exts is None:
        exts = found
    else:
        exts.extend(found)
    return (path, exts)
def get_bit(byte, bit_num):
    """ Return bit number bit_num from right in byte.

    @param int byte: a given byte
    @param int bit_num: a specific bit number within the byte
    @rtype: int

    >>> get_bit(0b00000101, 2)
    1
    >>> get_bit(0b00000101, 1)
    0
    """
    return (byte >> bit_num) & 1
def find_dominant_axis(vertices):
    """
    measures which axis has least variance (max-min range) and returns
    0, 1, or 2 (x, y, z) so the 3D coordinates can be reduced to 2D
    """
    spans = []
    for axis in range(3):
        coords = [vertex[axis] for vertex in vertices]
        spans.append(max(coords) - min(coords))
    # Index of the flattest axis.
    return spans.index(min(spans))
def authorize_payment(payment):
    """Activate client's payment authorization page from a PayPal Payment.

    Returns the approval URL as a str, or None (implicitly) when the
    payment carries no "approval_url" link.
    """
    for link in payment.links:
        if link.rel == "approval_url":
            # Convert to str to avoid Google App Engine Unicode issue
            approval_url = str(link.href)
            return approval_url
def process_operator_filter(field, value):
    """ Process a mongo operator attached to a field like name__in, pay__gte

    Args:
        field (str): The field name, optionally suffixed with '__<operator>'
        value (str): The value

    Returns:
        dict: Result of the matching '<operator>_operator' handler found in
        globals(), or {} when the field has no single operator suffix or no
        callable handler exists.
    """
    params = field.split('__')
    # '!=' replaces the unreliable identity check `is not 2`, which depends
    # on CPython small-int caching and raises a SyntaxWarning on 3.8+.
    if len(params) != 2:
        return {}
    field, operator = params
    operator_func = globals().get(operator + '_operator')
    if not callable(operator_func):
        return {}
    return operator_func(field, value)
import hashlib
def CalcMD5(filepath):
    """generate a md5 code by a file path"""
    digest = hashlib.md5()
    with open(filepath, 'rb') as fh:
        digest.update(fh.read())
    return digest.hexdigest()
def accepted_mimetypes(request, default='text/html'):
    """Return the accepted mimetypes of an HTTP/1.1 request.

    Args:
        request: Request object exposing Twisted-style
            ``requestHeaders.getRawHeaders(name, default)``.
        default: Accept value assumed when the header is absent.

    Returns:
        dict mapping each lowercased mimetype to its q-value (float when
        given explicitly, else 1). On duplicate types the last wins.
    """
    accepted_strings = request.requestHeaders.getRawHeaders(
        'Accept', [default])
    accepted_strings = ','.join(accepted_strings)
    result = {}
    for part in accepted_strings.split(','):
        pieces = part.split(';q=')
        # An explicit quality is honoured even when it equals 0; the
        # original `len(a) == 2 and float(a[1]) or 1` idiom silently
        # turned q=0 into 1 because 0.0 is falsy.
        quality = float(pieces[1]) if len(pieces) == 2 else 1
        result[pieces[0].strip().lower()] = quality
    return result
def num2hex(num, width=1):
    """Convert a number into a zero-padded hexadecimal string.

    Args:
        num (int): Input number.
        width (int, optional): Minimum string length. Defaults to 1.

    Returns:
        str: Hexadecimal representation of `num`, left-padded with zeros.
    """
    # The 'L' strip mirrors the original's Python 2 long-literal handling.
    digits = hex(num)[2:].replace('L', '')
    return '{:0>{width}}'.format(digits, width=width)
import ipaddress
def is_ip_address(ipaddr):
    """ Simple helper to determine if given string is an ip address or subnet """
    try:
        ipaddress.ip_interface(ipaddr)
    except ValueError:
        return False
    return True
import json
def isjson(value):
    """
    Return whether or not given value is valid JSON.

    If the value is valid JSON, this function returns ``True``, otherwise ``False``.

    Examples::

        >>> isjson('{"Key": {"Key": {"Key": 123}}}')
        True

        >>> isjson('{ key: "value" }')
        False

    :param value: string to validate JSON
    """
    try:
        # Only validity matters; the decoded result (previously bound to an
        # unused local) is discarded.
        json.loads(value)
    except ValueError:
        return False
    return True
def is_pythagorean_triplet(a, b, c):
    """Determine whether the provided numbers are a Pythagorean triplet.

    Arguments:
        a, b, c (int): Three integers.

    Returns:
        Boolean: True when the values are strictly increasing and satisfy
        a^2 + b^2 = c^2, False otherwise.
    """
    if not a < b < c:
        return False
    return a * a + b * b == c * c
import logging
def get_child_logger(*names: str):
    """Returns a child logger of the project-level logger with the name toolshed.<name>."""
    suffix = '.'.join(names)
    return logging.getLogger("toolshed." + suffix)
def close_con(session):
    """
    Close the given session and return a confirmation message.

    :param session: A session object exposing close()
    :return: the string 'Session closed'
    """
    session.close()
    return 'Session closed'
def find_correct_weight(program_weights, program, correction):
    """Return new weight for node."""
    current = program_weights[program]
    return current + correction
from typing import Mapping
import os
def expand_environment_variables(config):
    """Expand environment variables in a nested config dictionary

    This function will recursively search through any nested dictionaries
    and/or lists.

    Parameters
    ----------
    config : dict, iterable, or str
        Input object to search for environment variables

    Returns
    -------
    config : same type as input

    Examples
    --------
    >>> expand_environment_variables({'x': [1, 2, '$USER']})  # doctest: +SKIP
    {'x': [1, 2, 'my-username']}
    """
    # Strings are checked first; a str is never a Mapping so the order is safe.
    if isinstance(config, str):
        return os.path.expandvars(config)
    if isinstance(config, Mapping):
        return {key: expand_environment_variables(val) for key, val in config.items()}
    if isinstance(config, (list, tuple, set)):
        return type(config)(expand_environment_variables(item) for item in config)
    return config
import re
def normalize_twitter_hashtag(text):
    """Replace every hashtag with the common placeholder "#hashtag" (marks only that one is present)."""
    pattern = r"#\w+"
    return re.sub(pattern, "#hashtag", text)
def get_instance_id(finding):
    """
    Given a finding, go find and return the corresponding AWS Instance ID

    :param finding: dict with an 'attributes' list of {'key', 'value'} pairs
    :return: the INSTANCE_ID value, or None when absent
    """
    for attribute in finding['attributes']:
        if attribute['key'] == 'INSTANCE_ID':
            return attribute['value']
    return None
from typing import Dict
def encode_images(format_dict: Dict) -> Dict[str, str]:
    """b64-encodes images in a displaypub format dict

    Perhaps this should be handled in json_clean itself?

    Parameters
    ----------
    format_dict : dict
        A dictionary of display data keyed by mime-type

    Returns
    -------
    format_dict : dict
        A copy of the same dictionary,
        but binary image data ('image/png', 'image/jpeg' or 'application/pdf')
        is base64-encoded.
    """
    # NOTE(review): as written this is a no-op pass-through — no base64
    # encoding is performed and the same dict (not a copy) is returned,
    # contradicting the docstring. Presumably the encoding now happens
    # elsewhere (e.g. in json_clean) — confirm before relying on this.
    return format_dict
def dVdc_calc(Vdc,Ppv,S,C):
    """Calculate derivative of Vdc from PV power, apparent power S, and capacitance C."""
    real_power_out = S.real
    return (Ppv - real_power_out) / (Vdc * C)
def check_order(df, topcol, basecol, raise_error=True):
    """
    Check that all rows are either depth ordered or elevation_ordered.

    Returns 'elevation' or 'depth' (or None when inconsistent and
    raise_error is False).
    """
    assert basecol in df.columns, f'`basecol` {basecol} not present in {df.columns}'
    tops_above = (df[topcol] > df[basecol]).all()
    tops_below = (df[topcol] < df[basecol]).all()
    if tops_above:
        return 'elevation'
    if tops_below:
        return 'depth'
    if raise_error:
        raise ValueError('Dataframe has inconsistent top/base conventions')
    return None
import argparse
def parse_args():
    """set and check parameters."""
    # Parses sys.argv; exits with a usage message on unrecognized arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument("--result_path", type=str, default="", help="root path of predicted images")
    args_opt = parser.parse_args()
    return args_opt
def ipv4_subnet_details(addr, mask):
    """
    Function that prints the subnet related details- Network, Broadcast, Host IP range and number of host addresses

    :param addr: IP address (dotted-quad string)
    :param mask: subnet mask (dotted-quad string with standard octet values)
    :return: result dictionary with keys 'nw_addr', 'bc_addr',
        'host_addr_range' and 'usable_ips'

    Note: assumes a mask shorter than /31; 'usable_ips' subtracts the
    network and broadcast addresses unconditionally.
    """
    network_address = []
    broadcast_address = []
    num_ips = 1  # to keep track of total no of ips in this subnet by multiplying (wcmask per octet+1) in the loop
    # Per-octet wildcard (inverse) mask lookup for the standard mask values.
    wildcard_mask = {
        "255": "0",
        "254": "1",
        "252": "3",
        "248": "7",
        "240": "15",
        "224": "31",
        "192": "63",
        "128": "127",
        "0": "255",
    }
    for _octet, _mask in zip(
        addr.split("."), mask.split(".")
    ):  # iterate over octet values and mask values simultaneously
        network_address.append(
            str(int(_octet) & int(_mask))
        )  # bitwise AND of the octet value and the mask--> gives the network ip
        broadcast_address.append(
            str(int(_octet) | int(wildcard_mask[_mask]))
        )  # bitwise OR of octet value and wc_mask--> gives the broadcast address
        num_ips *= int(wildcard_mask[_mask]) + 1  # multiplies num hosts per octet
    host_address_low = network_address[:3] + [
        str(int(network_address[3]) + 1)
    ]  # add 1 to last octet of network address to get the first usable host ip
    host_address_high = broadcast_address[:3] + [
        str(int(broadcast_address[3]) - 1)
    ]  # subtract 1 from the last octet of the broadcast address to get the last usable host ip
    host_ips = num_ips - 2  # subtract 2 that is network and bc address
    result = dict()
    result["nw_addr"] = ".".join(network_address)
    result["bc_addr"] = ".".join(broadcast_address)
    result[
        "host_addr_range"
    ] = f"{'.'.join(host_address_low)} to {'.'.join(host_address_high)}"
    result["usable_ips"] = host_ips
    return result
def _get_sdk_name(platform):
"""Returns the SDK name for the provided platform.
Args:
platform: The `apple_platform` value describing the target platform.
Returns:
A `string` value representing the SDK name.
"""
return platform.name_in_plist.lower() | 0bc7f446472f44e52ea0b11cda7397e48848f0ef | 9,009 |
import pathlib
def file_exists(file_path):
    """Return True when *file_path* refers to an existing regular file,
    False otherwise (missing path, or a directory)."""
    return pathlib.Path(file_path).is_file()
import hashlib
import json
def hasher(obj):
    """Returns non-cryptographic hash of a JSON-serializable object.

    The object is serialized to JSON and the MD5 hex digest of that
    serialization is returned, so equal objects hash equally.
    """
    serialized = json.dumps(obj).encode()
    return hashlib.md5(serialized).hexdigest()
def _root_sort_key(root):
"""
Allow root comparison when sorting.
Args:
root (str or re.Pattern): Root.
Returns:
str: Comparable root string.
"""
try:
return root.pattern
except AttributeError:
return root | 51a7e51b58cbdf8c3277844903950282a5368815 | 9,013 |
def get_layer(keras_tensor):
    """
    Return the layer that produced the given Keras tensor, as recorded
    in its ``_keras_history`` metadata (first element).
    """
    return keras_tensor._keras_history[0]
def MFInt(indices):
    """
    Format a sequence of integers as a space-separated string.

    indices: a (N,) array of integers
    """
    return " ".join('%i' % value for value in indices)
import random
def subsample_files_in_tree(root, filename_pattern, size):
    """
    Sub-sample the files under *root*, keeping the sample balanced across
    folders: each folder (including *root* itself) is sampled independently.

    Arguments:
        root: Root folder (pathlib.Path) to search files from.
        filename_pattern: Wildcard pattern like: '*.png'.
        size:
            (0, 1): fraction to sub-sample; 0.5 for 50%.
            1 or 1.: 100%.
            integer > 1: Number of samples per folder.

    Returns:
        List of sub-sampled file paths (str). A folder contributes at most
        as many files as it actually holds; there is no oversampling.
    """
    sampled = []
    for folder in (d for d in root.glob('**') if d.is_dir()):
        candidates = [str(p) for p in folder.glob(filename_pattern)]
        if size < 1.:
            n_sample = int(len(candidates) * size)
        elif int(size) == 1:
            n_sample = len(candidates)
        else:
            n_sample = min(size, len(candidates))
        if n_sample > 0:
            sampled.extend(random.sample(candidates, n_sample))
    return sampled
def pane_list(pane, ids=None, list_all=False):
    """Get a list of panes, in pre-order over the pane tree.

    This makes it easier to target panes from the command line.
    Panes whose identifier is -1 are skipped unless *list_all* is set.
    """
    if ids is None:
        ids = []
    # Iterative pre-order traversal; children are pushed in reverse so
    # they are visited left-to-right, matching recursive order.
    stack = [pane]
    while stack:
        current = stack.pop()
        if list_all or current.identifier != -1:
            ids.append(current)
        stack.extend(reversed(current.panes))
    return ids
def check_indent(codestr):
    """If the code is indented, wrap it in a top-level ``if 1:`` block so the
    indentation becomes syntactically valid at module level.

    Returns *codestr* unchanged when it is not indented. Empty or
    whitespace-only input is returned unchanged (the original indexed past
    the end of the string and raised IndexError).
    """
    i = 0
    # Scan past leading newlines/tabs/spaces, bounds-checked.
    while i < len(codestr) and codestr[i] in ("\n", "\t", " "):
        i = i + 1
    if i == 0:
        return codestr
    if i >= len(codestr):
        # Nothing but whitespace: there is no code to re-indent.
        return codestr
    if codestr[i - 1] == "\t" or codestr[i - 1] == " ":
        if codestr[0] == "\n":
            # Since we're adding a line, we need to remove one line of any
            # empty padding to ensure line numbers are correct.
            codestr = codestr[1:]
        return "if 1:\n" + codestr
    return codestr
import random
def pick_card(deck_to_pick_from):
    """Returns a random card from the deck"""
    index = random.randrange(len(deck_to_pick_from))
    return deck_to_pick_from[index]
def road_curvature_F5(curvature, ego_vehicle_speed):
    """
    Estimate the magnitude of the road curvature from the curvature of a
    number of road regions ahead of the vehicle. Regions closer to the
    vehicle matter more, so a weighted sum with decreasing weights is
    used; the result is then scaled up when the vehicle exceeds the
    maximum safe speed, and clamped to [0, 1].

    :param curvature: curvature values of the road regions ahead
    :param ego_vehicle_speed: speed of the ego vehicle (m/s)
    :return: normalized curvature magnitude in [0, 1]
    """
    max_curvature = 2.8
    max_safe_speed = 40.0
    speed_kmh = ego_vehicle_speed * 3.6  # m/s -> km/h
    n = len(curvature)
    # Treat near-zero curvatures as exactly zero (noise floor).
    cleaned = [0.0 if -0.01 < c < 0.01 else c for c in curvature]
    weighted_sum = 0.0
    weight_total = 0.00001  # small epsilon avoids division by zero
    for index, value in enumerate(cleaned):
        weight = n - index  # closest region gets the largest weight
        weighted_sum += weight * value
        weight_total += weight
    k1 = abs(weighted_sum / (weight_total * max_curvature))
    # Speed factor only kicks in above the maximum safe speed.
    k2 = speed_kmh / max_safe_speed if speed_kmh >= max_safe_speed else 0
    k = k1 * (1 + 5 * k2)
    return k if k < 1.0 else 1.0
def split_apt(field):
    """
    Parses the ADDRESS field (<site address>, <apt number> <municipality>)
    from the CLEMIS CFS Report and returns the apartment number, or None
    when no comma-separated apartment part is present.
    """
    if ',' not in field:
        return None
    # Text after the first ", " starts with the apartment number.
    after_comma = field.split(', ')[1]
    return after_comma.split(' ')[0]
import re
def camel_to_snake_case(name):
    """
    Convert a CamelCase identifier to snake_case, e.g.
    AssimilatedVatBox --> assimilated_vat_box.

    Anything after the first space is dropped (e.g. "duration (in second)"
    -> "duration"), and a few acronym forms are mapped to canonical names.
    """
    special_cases = {
        "avg_c_p_a": "avg_cpa",
        "avg_c_p_m": "avg_cpm",
        "avg_c_p_t": "avg_cpt"
    }
    # Insert "_" before every uppercase letter that is not at the start.
    snake = re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()
    snake = snake.split(" ")[0]
    return special_cases.get(snake, snake)
def hex(value):
    """
    Return ``value`` in hex format, e.g. 255 -> '0xff'.

    This function shadows the builtin of the same name; the original
    implementation called itself and recursed infinitely, so delegate to
    the real builtin explicitly via the ``builtins`` module.

    :param value: integer to convert
    :return: hexadecimal string representation
    """
    import builtins
    return builtins.hex(value)
def get_inverse_transform(transform):
    """Generate the permutation that undoes the provided transform.

    transform[i] == j implies result[j] == i.
    """
    inverse = [0] * len(transform)
    for source, target in enumerate(transform):
        inverse[target] = source
    return inverse
def fill_zeros(symbol: int):
    """
    Left-pad *symbol* with zeros to a 6-character string, e.g. 42 -> "000042".
    Values of 100000 or more are returned unchanged as a string.

    :param symbol: the number to format
    :return: the zero-padded string
    """
    if symbol >= 100000:
        return str(symbol)
    digits = str(symbol)
    return "0" * (6 - len(digits)) + digits
def extract_fields(gh_json, fields):
    """
    extract_fields Extract field from GH API data

    Extract fields from GH API data and standardize name of keys

    Parameters
    ----------
    gh_json : json
        JSON content from Github
    fields : dict
        A list of fields to extract and the name we want to use as standard.
    """
    # Map each source key to its standardized name, entry by entry.
    return [
        {standard: entry[source] for source, standard in fields.items()}
        for entry in gh_json
    ]
import math
def distance(waypoints, wp1, wp2):
    """Compute the cumulative path distance between two waypoint indices
    (inclusive), summing Euclidean distances between consecutive waypoints.
    """
    total = 0
    previous = wp1
    for current in range(wp1, wp2 + 1):
        a = waypoints[previous].pose.pose.position
        b = waypoints[current].pose.pose.position
        total += math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)
        previous = current
    return total
def phred(q):
    """Convert 0...1 to 0...30

    No ":".
    No "@".
    No "+".
    """
    code = int(q * 30 + 33)
    # Skip the "+" (43) and ":" (58) code points, in ascending order.
    for forbidden in (43, 58):
        if code == forbidden:
            code += 1
    return chr(code)
from functools import reduce
def sum(l):
    """
    Returns the sum of the items in the container class.

    This is more general than the built-in 'sum' function, because it is
    not specific for numbers: the '+' operator is applied repeatedly, so
    e.g. a container of lists yields their concatenation.
    """
    def _add(accumulator, item):
        return accumulator + item
    return reduce(_add, l)
def checkValues_coordinate0(coordinate0, reference_point):
    """Check that the AP/ML/DV input coordinates are within (-90, 90) mm.

    :param coordinate0: sequence of three coordinates [AP, ML, DV] in mm
    :param reference_point: unused; kept for interface compatibility
    :return: True when all three coordinates are in range
    :raises Exception: when any coordinate falls outside (-90, 90)
    """
    # One message per axis, in coordinate order (AP, ML, DV).
    messages = (
        "Coordinate AP ({}) out of range for lambda, should be between -90mm and 90mm: ",
        "Coordinate ML ({}) out of range, should be between -90mm and 90mm: ",
        "Coordinate DV ({}) out of range, should be between -90mm and 90mm: ",
    )
    for message, value in zip(messages, coordinate0):
        if not -90 < value < 90:
            # Bug fix: report the offending coordinate value; the original
            # formatted len(coordinate0) into the message instead.
            raise Exception(message.format(value))
    return True
import re
def get_keyword(title):
    """
    Extract the core keyword from a Chinese policy title.

    Strips framing text ("...关于<X>的..." i.e. "regarding X"), quoted or
    book-title-marked segments, parenthesised asides, year markers, and
    common administrative prefix/suffix words, leaving the policy subject.

    :param title: policy title (str)
    :return: keyword (str)
    """
    # Titles shaped "...关于<subject>的..." -> keep only the subject part.
    matchObj = re.match('(.*)关于(.*)的(.*)', title)
    if matchObj:
        title = matchObj.group(2)
    # Prefer the text inside Chinese double quotes “...” when present.
    matchObj2 = re.match(r'(.*)“(.*)”(.*)', title)
    if matchObj2:
        title = matchObj2.group(2)
    # Prefer the text inside book-title marks 《...》 when present.
    matchObj3 = re.match('(.*)《(.*)》(.*)', title)
    if matchObj3:
        title = matchObj3.group(2)
    # Drop any parenthesised aside (non-greedy, ASCII parentheses only).
    title = re.sub(u"\\(.*?\\)", "", title)
    # Administrative boilerplate bracketing the keyword: prefixes such as
    # 印发 (issue) / 开展 (launch), suffixes such as 试点 (pilot) / 工作 (work).
    prefix = ['印发', '开展', '支持', '加强', '做好', '公布', '设立', '完善', '推进']
    suffix = ['试点', '工作', '暂行']
    # Year markers 1970-2019 as "XXXX年度", "XXXX年" and bare "XXXX"; the
    # longer forms are listed first so they are removed before their
    # substrings.
    year = [str(x)+'年度' for x in range(1970, 2020)] + [str(x)+'年' for x in range(1970, 2020)] + [str(x) for x in range(1970, 2020)]
    for y in year:
        if title.find(y) != -1:
            title = title.replace(y, '')
    # Trim to the span after the last prefix hit (+2 skips the two-character
    # prefix itself) and before the first suffix hit.
    start, end = 0, len(title)
    for p in prefix:
        if title.find(p) != -1:
            start = max(start, title.find(p)+2)
    for s in suffix:
        if title.rfind(s) != -1:
            end = min(end, title.rfind(s))
    title = title[start: end]
    return title
import re
def get_version(versionfile):
    """Extract the __version__ string from a given Python module.

    :param versionfile: path of the module file to scan
    :return: the version string
    :raises RuntimeError: if no ``__version__`` assignment is found
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original leaked the handle returned by open().
    with open(versionfile) as f:
        contents = f.read()
    match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', contents, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string in {file}.".format(file=versionfile))
import socket
def hostname_resolves(hostname):
    """Return 1 when *hostname* is DNS resolvable, otherwise 0."""
    try:
        socket.gethostbyname(hostname)
    except socket.error:
        return 0
    return 1
import math
def _gain2db(gain):
"""
Convert linear gain in range [0.0, 1.0] to 100ths of dB.
Power gain = P1/P2
dB = 10 log(P1/P2)
dB * 100 = 1000 * log(power gain)
"""
if gain <= 0:
return -10000
return max(-10000, min(int(1000 * math.log10(min(gain, 1))), 0)) | 1bd602e0db397b3730c4f2b3439aeb351e6bd854 | 9,041 |
import random
def generate_int(data_format):
    """
    Generate a random integer matching the given width/sign description.

    :param data_format: dict with keys 'is_signed' (bool) and 'width' (int)
    :return: random integer in the corresponding (exclusive-top) range
    """
    width = data_format['width']
    if data_format['is_signed']:
        low, high = -2 ** (width - 1) + 1, 2 ** (width - 1) - 1
    else:
        low, high = 0, 2 ** width - 1
    return random.randrange(low, high)
def mock_run_applescript(script):
    """Don't actually run any applescript in the unit tests, ya dingbat.

    This function should return whatever type of object
    dialogs._run_applescript returns.

    Returns:
        tuple
    """
    exit_code, stdout, stderr = 1, "", ""
    return (exit_code, stdout, stderr)
import re
def _defaults_to_code(val):
"""
Make sure that any defaults that are surrounded by << >> are in code quotes so that they render properly.
e.g.: <<display_name>> converts to '<<display_name>>'
"""
return re.sub(r"(<{2}.*>{2})", r"`\1`", val) | f98aa716fab13143a29659ff746336913d9d4ee7 | 9,045 |
def total_schedule(schedule):
    """Return the total number of 15 minute windows in which the schedule
    is set to replicate in a week. If the schedule is None it is
    assumed that the replication will happen in every 15 minute
    window.

    This is essentially a bit population count.
    """
    if schedule is None:
        return 84 * 8  # 84 bytes = 84 * 8 bits
    # Population count: sum of set bits across all schedule bytes.
    return sum(bin(byte).count("1") for byte in schedule)
def diff_mark(m, t):
    """
    Subtract from a marking the postset of t and adds the preset

    Parameters
    ------------
    m
        Marking
    t
        Transition

    Returns
    ------------
    diff_mark
        Difference marking
    """
    # Remove tokens produced by t (postset); drop places reaching zero.
    for arc in t.out_arcs:
        place, weight = arc.target, arc.weight
        if place in m and weight <= m[place]:
            m[place] -= weight
            if not m[place]:
                del m[place]
    # Add tokens consumed by t (preset).
    for arc in t.in_arcs:
        place, weight = arc.source, arc.weight
        m[place] = m.get(place, 0) + weight
    return m
def get_input():
    """
    Ask user for inputs about event: Title, Main Topic, Distance to Location.

    Checks for correct types and value ranges; re-prompts on invalid input
    and gives up after 5 failed attempts.

    :return: Tuple(Str, Int, Int) => "Title", [0|1], [0,infinity[ OR None
             (to quit App)
    """
    # The original had an unreachable `break` after `return None` and a
    # no-op "Here be dragons" string; the flattened loop below is
    # behaviorally identical.
    attempt = 0
    while attempt < 5:
        # Title of Event?
        title = input("Title of event: ")
        if not title:
            print("No valid title entered. Try again!")
            attempt += 1
            continue
        # Main Topic Data? Must be exactly 0 or 1.
        try:
            topic = int(input("Is the main topic of event 'Data'? (0=no | 1=yes): "))
        except ValueError:
            print("Input must be an Integer (0 or 1). Try again!")
            attempt += 1
            continue
        if topic not in {0, 1}:
            print("Input must be 0 or 1. Try again!")
            attempt += 1
            continue
        # Distance to Location? Non-negative integer number of km.
        try:
            distance = int(float(input("How far away is the location? (km): ")))
        except ValueError:
            print("Input must be an Integer (distance in km). Try again!")
            attempt += 1
            continue
        if distance < 0:
            print("Input must be a positive number. Try again!")
            attempt += 1
            continue
        return title, topic, distance
    # Too many false inputs: signal the caller to quit the app.
    print("Too many false inputs. Stopping application.")
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.