content (string, length 35–416k) | sha1 (string, length 40) | id (int64, 0–710k)
---|---|---
import calendar
def match_date(date, date_pattern):
"""
Match a specific date, a four-tuple with no special values, with a date
pattern, four-tuple possibly having special values.
"""
# unpack the date and pattern
year, month, day, day_of_week = date
year_p, month_p, day_p, day_of_week_p = date_pattern
# check the year
if year_p == 255:
# any year
pass
elif year != year_p:
# specific year
return False
# check the month
if month_p == 255:
# any month
pass
elif month_p == 13:
# odd months
if (month % 2) == 0:
return False
elif month_p == 14:
# even months
if (month % 2) == 1:
return False
elif month != month_p:
# specific month
return False
# check the day
if day_p == 255:
# any day
pass
elif day_p == 32:
# last day of the month
last_day = calendar.monthrange(year + 1900, month)[1]
if day != last_day:
return False
elif day_p == 33:
# odd days of the month
if (day % 2) == 0:
return False
elif day_p == 34:
# even days of the month
if (day % 2) == 1:
return False
elif day != day_p:
# specific day
return False
# check the day of week
if day_of_week_p == 255:
# any day of the week
pass
elif day_of_week != day_of_week_p:
# specific day of the week
return False
# all tests pass
return True | d794cf211589840697007ecec7cd9e3ba0655b0f | 1,352 |
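A small usage sketch for match_date; the tuple layout (year as an offset from 1900, month, day, day-of-week) and the special pattern values (255 = any, 13/14 = odd/even month, 32 = last day, 33/34 = odd/even day) are inferred from the code above, so treat the encoding as an assumption:
date = (121, 2, 28, 0)                            # 2021-02-28
assert match_date(date, (255, 255, 32, 255))      # last day of any month, any year
assert match_date(date, (121, 14, 255, 255))      # an even month of 2021
assert not match_date(date, (121, 13, 255, 255))  # not an odd month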
def char_to_num(x: str) -> int:
"""Converts a character to a number
:param x: Character
:type x: str
:return: Corresponding number
:rtype: int
"""
total = 0
for i in range(len(x)):
total += (ord(x[::-1][i]) - 64) * (26 ** i)
return total | f66ee13d696ec1872fbc2a9960362456a5c4cbe9 | 1,353 |
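A quick sanity check, assuming uppercase spreadsheet-style column labels as input:
assert char_to_num("A") == 1
assert char_to_num("Z") == 26
assert char_to_num("AB") == 28   # 1 * 26 + 2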
import os
def search_paths_for_executables(*path_hints):
"""Given a list of path hints returns a list of paths where
to search for an executable.
Args:
*path_hints (list of paths): list of paths taken into
consideration for a search
Returns:
A list containing the real path of every existing directory
in `path_hints` and its `bin` subdirectory if it exists.
"""
executable_paths = []
for path in path_hints:
if not os.path.isdir(path):
continue
path = os.path.abspath(path)
executable_paths.append(path)
bin_dir = os.path.join(path, 'bin')
if os.path.isdir(bin_dir):
executable_paths.append(bin_dir)
return executable_paths | f6546fba4c3ac89b975d2c0757064edae3dca340 | 1,355 |
import json
def get_rate_limit(client):
"""
Get the Github API rate limit current state for the used token
"""
query = '''query {
rateLimit {
limit
remaining
resetAt
}
}'''
response = client.execute(query)
json_response = json.loads(response)
return json_response['data']['rateLimit'] | ec5f853014f25c841e71047da62ca41907b02e13 | 1,356 |
def can_hold_bags(rule: str, bag_rules: dict) -> dict:
"""
Returns a dict of all bags that can be held by the given bag color
:param rule: Color of a given bag
:param bag_rules: Dictionary of rules
:type rule: str
:type bag_rules: dict
:return: dict of bags that the given bag color can hold
"""
return bag_rules[rule] | b7554c32bd91f9a05cd84c9249d92cc6354458a9 | 1,357 |
from typing import OrderedDict
def join_label_groups(grouped_issues, grouped_prs, issue_label_groups,
pr_label_groups):
"""Combine issue and PR groups in to one dictionary.
PR-only groups are added after all issue groups. Any groups that are
shared between issues and PRs are added according to the order in the
issues list of groups. This results in "label-groups" remaining in the
same order originally specified even if a group does not have issues
in it. Otherwise, a shared group may end up at the end of the combined
dictionary and not in the order originally specified by the user.
"""
issue_group_names = [x['name'] for x in issue_label_groups]
pr_group_names = [x['name'] for x in pr_label_groups]
shared_groups = []
for idx, group_name in enumerate(issue_group_names):
if len(pr_group_names) > idx and group_name == pr_group_names[idx]:
shared_groups.append(group_name)
else:
break
label_groups = OrderedDict()
# add shared groups first
for group_name in shared_groups:
# make sure to copy the issue group in case it is added to
label_groups[group_name] = grouped_issues.get(group_name, [])[:]
# add any remaining issue groups
for group_name, group in grouped_issues.items():
if group_name in shared_groups:
continue
label_groups[group_name] = group[:]
# add any remaining PR groups (extending any existing groups)
for group_name, group in grouped_prs.items():
label_groups.setdefault(group_name, []).extend(group)
return label_groups | b51a70a60bde3580326816eaf0d3b76cb51062ac | 1,358 |
def answer(input):
"""
>>> answer("1234")
1234
"""
lines = input.split('\n')
for line in lines:
return int(line) | b9ce42d88a09976444563493a01741475dce67c5 | 1,359 |
from typing import List
def unique_chars(texts: List[str]) -> List[str]:
"""
Get a list of unique characters from list of text.
Args:
texts: List of sentences
Returns:
A sorted list of unique characters
"""
return sorted(set("".join(texts))) | 02bc9ce28498bd129fdb68c2f797d138ca584490 | 1,361 |
def check(s):
"""
:param s:str. the input of letters
:return: bool.
"""
if len(s) == 7 and len(s.split(' ')) == 4:
for unit in s.split(' '):
if unit.isalpha():
return True
return False | 86e1270af299ba83b68d0dab9f8afc3fc5b7d7c5 | 1,363 |
def log_new_fit(new_fit, log_gplus, mode='residual'):
"""Log the successful refits of a spectrum.
Parameters
----------
new_fit : bool
If 'True', the spectrum was successfully refit.
log_gplus : list
Log of all previous successful refits of the spectrum.
mode : str ('positive_residual_peak', 'negative_residual_peak', 'broad', 'blended')
Specifies the feature that was refit or used for a new successful refit.
Returns
-------
log_gplus : list
Updated log of successful refits of the spectrum.
"""
if not new_fit:
return log_gplus
modes = {'positive_residual_peak': 1, 'negative_residual_peak': 2, 'broad': 3, 'blended': 4}
log_gplus.append(modes[mode])
return log_gplus | 16761ca135efbdb9ee40a42cb8e9e1d62a5dc05e | 1,365 |
def count_str(text, sub, start=None, end=None):
"""
Computes the number of non-overlapping occurrences of substring ``sub`` in ``text[start:end]``.
Optional arguments start and end are interpreted as in slice notation.
:param text: The string to search
:type text: ``str``
:param sub: The substring to count
:type sub: ``str``
:param start: The start of the search range
:type start: ``int``
:param end: The end of the search range
:type end: ``int``
:return: The number of non-overlapping occurrences of substring ``sub`` in ``text[start:end]``.
:rtype: ``int``
"""
assert isinstance(text,str), '%s is not a string' % text
return text.count(sub,start,end) | 1578f868a4f1a193ec9907494e4af613ca2a6d4d | 1,366 |
def _gnurl( clientID ):
"""
Helper function to form URL to Gracenote_ API service.
:param str clientID: the Gracenote_ client ID.
:returns: the lower level URL to the Gracenote_ API.
:rtype: str
"""
clientIDprefix = clientID.split('-')[0]
return 'https://c%s.web.cddbp.net/webapi/xml/1.0/' % clientIDprefix | 6d1935c8b634459892e4ec03d129c791b1d8a06a | 1,367 |
def utf8_bytes(string):
""" Convert 'string' to bytes using UTF-8. """
return bytes(string, 'UTF-8') | 8e5423d2b53e8d5fbeb07017ccd328236ef8bea5 | 1,368 |
def _get_value(session_browser, field):
"""Get an input field's value."""
return session_browser.evaluate_script('$("#id_%s").val()' % field) | 7ed2d130b83af7e6fdb6cce99efb44846820585a | 1,369 |
import functools
def standarize_ms(datas, val_index, max=(2**32 - 1)):
"""
Standardize milliseconds elapsed from Arduino readings.
Note: only accounts for a single wrap-around of the Arduino millis() counter.
datas:
List of data readings
val_index:
Index of ms value in reading data entry
max:
Max time of ms - since the Arduino will output
a circular value from the time it starts.
For correct value, see https://www.arduino.cc/en/Reference/Millis.
"""
def _standarize_value(initial_value, reading):
reading[val_index] = int(reading[val_index]) - initial_value
if reading[val_index] <= 0:
reading[val_index] += max
return reading
initial_value = int(datas[0][val_index])
_standarize_reading = functools.partial(_standarize_value, initial_value)
res = list(map(_standarize_reading, datas))
res[0][val_index] = 0
return res | 84bf498ff3c88b3415433fa9d5be7b6865b3216b | 1,371 |
from pathlib import Path
from typing import Iterator
import itertools
import os
def lsR(root: Path) -> Iterator[Path]:
"""Recursive list a directory and return absolute path"""
return filter(lambda p: ".git" not in p.parts, itertools.chain.from_iterable(
map(
lambda lsdir: list(map(lambda f: Path(lsdir[0]) / f, lsdir[2])),
os.walk(root),
)
)) | 67771d5e305d30ac72aeaab16b72ad5a85fe1493 | 1,372 |
def __valid_ddb_response_q(response):
"""private function to validate a given DynamoDB query response."""
if 'ResponseMetadata' in response:
if 'HTTPStatusCode' in response['ResponseMetadata']:
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return True
return False | f4e71c4f5d058ba20013b3a405ffeff637e03ae8 | 1,373 |
def gram_matrix(x):
"""Create the gram matrix of x."""
b, c, h, w = x.shape
phi = x.view(b, c, h * w)
return phi.bmm(phi.transpose(1, 2)) / (c * h * w) | 11de97b67f3f8ecb7d7d009de16c1a5d153ab8ff | 1,375 |
import re
def build_or_pattern(patterns, escape=False):
"""Build a or pattern string from a list of possible patterns
"""
or_pattern = []
for pattern in patterns:
if not or_pattern:
or_pattern.append('(?:')
else:
or_pattern.append('|')
or_pattern.append('(?:%s)' % re.escape(pattern) if escape else pattern)
or_pattern.append(')')
return ''.join(or_pattern) | 225cc20504a85342694e14ea76b9bf3ed8b6d11b | 1,377 |
import os
def clear_pkt_loss():
"""
:return: path to pkt_loss.yaml in the current working directory (deleted if it existed)
"""
pkt_loss_file_path = os.path.join(os.getcwd(), 'pkt_loss.yaml')
if os.path.isfile(pkt_loss_file_path):
os.remove(pkt_loss_file_path)
return pkt_loss_file_path | 0437b0aa910e8135409b54b890e1807208ef153e | 1,379 |
import argparse
def get_args():
"""引数解析
Returns:
argparse.Namespace: 引数情報
"""
parser = argparse.ArgumentParser(
prog="app.py",
usage="realtime or audio file",
description="detect music change point.",
add_help=True
)
parser.add_argument(
"--cfg", type=str,
default="./settings.yaml",
help="setting file path"
)
parser.add_argument(
"--file", type=str,
default=None,
help="audio file path"
)
return parser.parse_args() | ee63f4043524bbe393d17f0a25a687540d209faa | 1,380 |
def matchesType(value, expected):
"""
Returns boolean for whether the given value matches the given type.
Supports all basic JSON supported value types:
primitive, integer/int, float, number/num, string/str, boolean/bool, dict/map, array/list, ...
"""
result = type(value)
expected = expected.lower()
if result is int:
return expected in ("integer", "number", "int", "num", "primitive")
elif result is float:
return expected in ("float", "number", "num", "primitive")
elif result is str:
return expected in ("string", "str", "primitive")
elif result is bool:
return expected in ("boolean", "bool", "primitive")
elif result is dict:
return expected in ("dict", "map")
elif result is list:
return expected in ("array", "list")
return False | 24949f01a1bc3ae63a120d91549ae06ba52298a8 | 1,382 |
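A few illustrative checks; the lookup is by Python type, and several aliases are accepted for each type:
assert matchesType(3, "number")         # int matches "number" (and "primitive")
assert matchesType("x", "primitive")    # str matches "primitive"
assert not matchesType([1, 2], "dict")  # list only matches "array"/"list"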
import itertools
from typing import Counter
def get_top_words(keywords):
"""
Orders the topics from most common to least common for displaying.
"""
keywords = itertools.chain.from_iterable(map(str.split, keywords))
top_words = list(Counter(keywords))
return top_words | 307a5a0e0e900e411097a84d19daf0ca7187c9bc | 1,383 |
import os
def login(i):
"""
Input: {
(sudo) - if 'yes', add sudo
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
s='docker login'
if i.get('sudo','')=='yes':
s='sudo '+s
os.system(s)
return {'return':0} | 2272134b81ee330ef56a36158886369ae7496ade | 1,385 |
def get_tablenames(cur):
""" Conveinience: """
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
tablename_list_ = cur.fetchall()
tablename_list = [str(tablename[0]) for tablename in tablename_list_ ]
return tablename_list | 311335c38d9ea19396da3292513e3e1d7bd5caf0 | 1,386 |
def get_symminfo(newsymms: dict) -> str:
"""
Adds text about the symmetry generators used in order to add symmetry generated atoms.
"""
line = 'Symmetry transformations used to generate equivalent atoms:\n'
nitems = len(newsymms)
n = 0
for key, value in newsymms.items():
sep = ';'
if n == nitems - 1:
sep = ''
n += 1
line += "#{}: {}{} ".format(key, value, sep)
if newsymms:
return line
else:
return '' | 2b3fdeebac85ea3329839406e611ba051f45ddce | 1,388 |
def get_attr(item, name, default=None):
"""
similar to getattr and get but will test for class or dict
:param item:
:param name:
:param default:
:return:
"""
try:
val = item[name]
except (KeyError, TypeError):
try:
val = getattr(item, name)
except AttributeError:
val = default
return val | 0c68c7e54ef901e18a49d327188f29f72f54da01 | 1,390 |
def float2(val, min_repeat=6):
"""Increase number of decimal places of a repeating decimal.
e.g. 34.111111 -> 34.1111111111111111"""
repeat = 0
lc = ""
for i in range(len(val)):
c = val[i]
if c == lc:
repeat += 1
if repeat == min_repeat:
return float(val[:i+1] + c * 10)
else:
lc = c
repeat = 1
return float(val) | 07fc521e877387242a1e6cf951a6d5cbdc925aaf | 1,391 |
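Two illustrative calls, assuming the string input shown in the docstring:
assert float2("34.111111") == float("34.1111111111111111")  # run of six 1s extended
assert float2("34.125") == 34.125                           # no long repetition, parsed as-is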
def format_sec_to_hms(sec):
"""Format seconds to hours, minutes, seconds.
Args:
sec: float or int
Number of seconds in a period of time
Returns: str
Period of time represented as a string on the form ``0h 00m 00s``.
"""
rem_int, s_int = divmod(int(sec), 60)
h_int, m_int = divmod(rem_int, 60)
return "{}h {:02d}m {:02d}s".format(h_int, m_int, s_int) | aa2cc5d6584cdebf4d37292435ecd46bb6adc4a4 | 1,395 |
def seq(fr,to,by):
"""An analogous function to 'seq' in R
Parameters:
1. fr: from
2. to: to
3. by: by (interval)
"""
if fr<to:
aseq = range(fr,to+abs(by),abs(by))
elif fr>to:
if by>0:
aseq = range(fr,to-by,-1*by)
else:
aseq = range(fr,to+by,by)
else:
aseq = [fr]
# trim a final value that overshoots 'to' in either direction
if fr<=to and aseq[-1]>to: return aseq[:-1]
elif fr>to and aseq[-1]<to: return aseq[:-1]
else: return aseq | 39b7878f81e93c137eed1e435e438b1645b09f9f | 1,397 |
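A couple of checks against R's seq for evenly dividing steps (endpoints included, descending handled via a negative or positive step):
assert list(seq(1, 10, 3)) == [1, 4, 7, 10]
assert list(seq(10, 2, 2)) == [10, 8, 6, 4, 2]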
def _get_config_from_context(ctx):
"""
:param ctx:
:return:
:rtype: semi.config.configuration.Configuration
"""
return ctx.obj["config"] | c085f69fd87ad5f72c8453e6f01771d943b2c481 | 1,398 |
from typing import AnyStr
import os
def directory_is_empty(path: AnyStr) -> bool:
"""
:param path: a directory path
:return: True if directory is empty, False otherwise
"""
return not any(os.scandir(path)) | 906011bcffe994f34382d54171190c864e72ee6b | 1,399 |
def which_db_version(cursor):
"""
Return version of DB schema as string.
Return '5', if iOS 5.
Return '6', if iOS 6 or iOS 7.
"""
query = "select count(*) from sqlite_master where name = 'handle'"
cursor.execute(query)
count = cursor.fetchone()[0]
if count == 1:
db_version = '6'
else:
db_version = '5'
return db_version | 07b1dbcea3fb4bf65bba5c578257440d39b6784c | 1,400 |
import re
def repeating_chars(text: str, *, chars: str, maxn: int = 1) -> str:
"""Normalize repeating characters in `text`.
Truncating their number of consecutive repetitions to `maxn`.
Duplicates Textacy's `utils.normalize_repeating_chars`.
Args:
text (str): The text to normalize.
chars: One or more characters whose consecutive repetitions are to be
normalized, e.g. "." or "?!".
maxn: Maximum number of consecutive repetitions of `chars` to which
longer repetitions will be truncated.
Returns:
str
"""
return re.sub(r"({}){{{},}}".format(re.escape(chars), maxn + 1), chars * maxn, text) | 9dc326947a900d3531dcd59bf51d5c3396a42fea | 1,401 |
import re
def findurls(s):
"""Use a regex to pull URLs from a message"""
regex = r"(?i)\b(((https?|ftp|smtp):\/\/)?(www.)?[a-zA-Z0-9_.-]+\.[a-zA-Z0-9_.-]+(\/[a-zA-Z0-9#]+\/?)*\/*)"
url = re.findall(regex,s)
return [x[0] for x in url] | 801947e893a23a4e440c8e5fc838d6aa89671e0c | 1,402 |
def collide_rect(left, right):
"""collision detection between two sprites, using rects.
pygame.sprite.collide_rect(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect colliderect
function to calculate the collision. It is intended to be passed as a
collided callback function to the *collide functions. Sprites must have
"rect" attributes.
New in pygame 1.8.0
"""
return left.rect.colliderect(right.rect) | 2111b4d6298cc435d61e12f301d5373cc07c54ff | 1,403 |
import json
def create_response(key, value):
"""Return generic AWS Lamba proxy response object format."""
return {
"statusCode": 200,
"headers": {"Content-Type": "application/json"},
"body": json.dumps({key: value})
} | 9236a9e4504e6fbebe841b8cc6b6ad4602dae463 | 1,404 |
import glob
def obtenerListaArchivos(path: str):
""" genera una lista de los archivos alojados en str """
lista = glob.glob(path, recursive=True)
return lista | 3b9582dbf086a2af673cc75277041f32d001e215 | 1,406 |
def luminance(qcolor):
""" Gives the pseudo-equivalent greyscale value of this color """
r,g,b = qcolor.red(), qcolor.green(), qcolor.blue()
return int(0.2*r + 0.6*g + 0.2*b) | 9e1821da2c0c6e8d76aefe56d6ed659a728737bb | 1,407 |
import argparse
def createparser():
"""Create an :class:`argparse.ArgumentParser` instance
:return: parser instance
:rtype: :class:`argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(prog=__package__,
description=__doc__)
s = parser.add_subparsers()
# create compare subcommand
parser_compare = s.add_parser("compare",
help="Compare two versions"
)
parser_compare.set_defaults(which="compare")
parser_compare.add_argument("version1",
help="First version"
)
parser_compare.add_argument("version2",
help="Second version"
)
# create bump subcommand
parser_bump = s.add_parser("bump",
help="Bumps a version"
)
parser_bump.set_defaults(which="bump")
sb = parser_bump.add_subparsers(title="Bump commands",
dest="bump")
# Create subparsers for the bump subparser:
for p in (sb.add_parser("major",
help="Bump the major part of the version"),
sb.add_parser("minor",
help="Bump the minor part of the version"),
sb.add_parser("patch",
help="Bump the patch part of the version"),
sb.add_parser("prerelease",
help="Bump the prerelease part of the version"),
sb.add_parser("build",
help="Bump the build part of the version")):
p.add_argument("version",
help="Version to raise"
)
return parser | d5aa807be432d9e1aaa5d155b12fa1366c5fe050 | 1,408 |
def get_activation(preact_dict, param_name, hook_type):
"""
Hooks used in sensitivity schedulers (LOBSTER, Neuron-LOBSTER, SERENE).
:param preact_dict: Dictionary in which save the parameters information.
:param param_name: Name of the layer, used a dictionary key.
:param hook_type: Hook type.
:return: Returns a forward_hook if $hook_type$ is forward, else a backward_hook.
"""
def forward_hook(model, inp, output):
preact_dict[param_name] = output
def backward_hook(module, grad_input, grad_output):
preact_dict[param_name] = None
preact_dict[param_name] = grad_output[0].detach().cpu()
return forward_hook if hook_type == "forward" else backward_hook | 8d5766178ef972e010b5be3a3826774f051dd3bd | 1,409 |
import re
def format_query(str_sql):
"""Strips all newlines, excess whitespace, and spaces around commas"""
stage1 = str_sql.replace("\n", " ")
stage2 = re.sub(r"\s+", " ", stage1).strip()
stage3 = re.sub(r"(\s*,\s*)", ",", stage2)
return stage3 | 5adb0f9c3314ba04bbf92c88e3ef17802b2afeb0 | 1,410 |
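A short usage sketch on a hypothetical query string:
sql = "SELECT id,  name ,\n       email\nFROM users"
assert format_query(sql) == "SELECT id,name,email FROM users"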
def make_ytick_labels(current_ticks, n, numstring = ""):
"""
"""
new_ticks = []
for item in current_ticks:
if int(item) == item:
new_ticks.append(f"{int(item)}{numstring}")
else:
new_ticks.append(f"{item:.1f}{numstring}")
return new_ticks | 2685126dc72305ccb7b4bf652fe645e9a39affd3 | 1,411 |
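A usage sketch (note that the n argument is accepted but not used by the function as written):
assert make_ytick_labels([0, 0.5, 1.0, 2], 0, numstring="%") == ["0%", "0.5%", "1%", "2%"]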
import re
def check_token(token):
"""
Returns `True` if *token* is a valid XML token, as defined by XML
Schema Part 2.
"""
return (token == '' or
re.match(
"[^\r\n\t ]?([^\r\n\t ]| [^\r\n\t ])*[^\r\n\t ]?$", token)
is not None) | b4e1d313fb64aad4c1c244cb18d3629e13b1c3af | 1,412 |
def get_phoible_feature_list(var_to_index):
"""
Function that takes a var_to_index object and returns a list of Phoible segment features
:param var_to_index: a dictionary mapping variable name to index (column) number in Phoible data
:return: a list of Phoible segment feature names
"""
return list(var_to_index.keys())[11:] | a53995cd927d1cdc66fadb2a8e6af3f5e2effff0 | 1,413 |
def n_states_of_vec(l, nval):
""" Returns the amount of different states a vector of length 'l' can be
in, given that each index can be in 'nval' different configurations.
"""
if type(l) != int or type(nval) != int or l < 1 or nval < 1:
raise ValueError("Both arguments must be positive integers.")
return nval ** l | 98770fa5a5e62501bf365a4a5a40a932b2ba2450 | 1,415 |
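For example, a length-3 vector whose entries each take 2 values has 2**3 states:
assert n_states_of_vec(3, 2) == 8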
def remove_items_from_dict(a_dict, bad_keys):
"""
Remove every item from a_dict whose key is in bad_keys.
:param a_dict: The dict to have keys removed from.
:param bad_keys: The keys to remove from a_dict.
:return: A copy of a_dict with the bad_keys items removed.
"""
new_dict = {}
for k in a_dict.keys():
if k not in bad_keys:
new_dict[k] = a_dict[k]
return new_dict | 7c665e372c2099441f8a661f1194a76a21edf01c | 1,416 |
def writeObject(img_array, obj_array, bbox):
"""Writes depression objects to the original image.
Args:
img_array (np.array): The output image array.
obj_array (np.array): The numpy array containing depression objects.
bbox (list): The bounding box of the depression object.
Returns:
np.array: The numpy array containing the depression objects.
"""
min_row, min_col, max_row, max_col = bbox
roi = img_array[min_row:max_row, min_col:max_col]
roi[obj_array > 0] = obj_array[obj_array > 0]
return img_array | 141cf9c3f47766a4020d737e743215db04761f54 | 1,417 |
def process_model(current_val):
"""
:param current_val: model generated by sat solver; an atom is satisfied if it is in the model.
:return tuple of sets comprising true and false atoms.
"""
true_atoms, false_atoms = set(), set()
for atom in current_val:
if current_val[atom]:
true_atoms.add(str(atom))
else:
false_atoms.add(str(atom))
return true_atoms, false_atoms | 9cf90aec097091841c0f0ac820317f373a92e4c1 | 1,418 |
import re
def filter_strace_output(lines):
"""
a function to filter QEMU logs returning only the strace entries
Parameters
----------
lines : list
a list of strings representing the lines from a QEMU log/trace.
Returns
-------
list
a list of strings representing only the strace log entries
the entries will also be cleaned up if a page dump occurs in the middle of them
"""
#we only want the strace lines, so remove/ignore lines that start with the following:
line_starts= ['^[\d,a-f]{16}-', # pylint: disable=anomalous-backslash-in-string
'^page',
'^start',
'^host',
'^Locating',
'^guest_base',
'^end_',
'^brk',
'^entry',
'^argv_',
'^env_',
'^auxv_',
'^Trace',
'^--- SIGSEGV',
'^qemu'
]
filter_string = '|'.join(line_starts)
filtered = []
prev_line = ""
for line in lines:
if re.match(filter_string,line):
continue
# workaround for https://gitlab.com/qemu-project/qemu/-/issues/654
if re.search("page layout changed following target_mmap",line):
prev_line = line.replace("page layout changed following target_mmap","")
continue
if re.match('^ = |^= ', line):
line = prev_line+line
filtered.append(line)
return filtered | 01b6c048ebdf890e9124c387fc744e56cc6b7f4d | 1,419 |
def magerr2Ivar(flux, magErr):
"""
Estimate the inverse variance given flux and magnitude error.
The reason for this is that we need to correct the magnitude or
flux for Galactic extinction.
Parameters
----------
flux : scalar or array of float
Flux of the obejct.
magErr : scalar or array of float
Error of magnitude measurements.
"""
fluxErr = flux * ((10.0 ** (magErr/2.5)) - 1.0)
return 1.0 / (fluxErr ** 2.0) | 37c48c26f1b876ca4d77dc141b1728daaea24944 | 1,422 |
import logging
def conditions_summary(conditions):
"""
Return a dict of consumer-level observations, say, for display on a
smart mirror or tablet.
"""
keys = ['timestamp', 'dewpoint', 'barometricPressure', 'windDirection',
'windSpeed', 'windGust', 'precipitationLastHour', 'temperature',
'relativeHumidity', 'heatIndex']
summary = dict()
for key in keys:
try:
summary[key] = conditions['properties'][key]
except Exception as exc:
summary[key] = 'none'
logging.error('Error trying to read summary for key %s: %s', key, exc)
return summary | aa4c95fd892c63bd05abd24188b8931375973bc0 | 1,423 |
def InsertOrganisation(cur, con, entity_name: str = "Organisation") -> int:
""" Inserts a new Organisation into the database """
# Get information about the video game
print(f"Enter new {entity_name}'s details:")
row = {}
row["Name"] = input(f"Enter the name of the {entity_name}: ") or None
row["Headquarters"] = input(
f"Enter the headquarters of {entity_name} (Optional): ") or None
row["Founded"] = input(
f"Enter the date when the {entity_name} was founded in YYYY-MM-DD format: ") or None
row["Earnings"] = input(
f"Enter earnings of {entity_name} in USD (Optional): ") or 0
# Query to be executed
query = """INSERT INTO Organisations (Name, Headquarters,
Founded, Earnings)
VALUES (%(Name)s, %(Headquarters)s,
%(Founded)s, %(Earnings)s)
"""
print("\nExecuting")
print(query)
# Execute query
cur.execute(query, row)
# Get ID of last inserted organisation
cur.execute("SELECT LAST_INSERT_ID() AS OrganisationID")
return cur.fetchone()["OrganisationID"] | de22b6eeb446efab58a2124f1b26da1e9edb12ed | 1,424 |
def state(obj):
"""Gets the UnitOfWork state of a mapped object"""
return obj.__ming__.state | 1072265fe175ffcd581d14af5d4ee85f2941a5e4 | 1,425 |
def save_file_in_path(file_path, content):
"""Write the content in a file
"""
try:
with open(file_path, 'w', encoding="utf-8") as f:
f.write(content)
except Exception as err:
print(err)
return None
return file_path | 7b1e453a9b2a8c1211e111a6e8db432811d84a7a | 1,426 |
def merge_dicts(*list_of_dicts):
"""Merge a list of dictionaries and combine common keys into a list of values.
args:
list_of_dicts: a list of dictionaries. values within the dicts must be lists
dict = {key: [values]}
"""
output = {}
for dikt in list_of_dicts:
for k, v in dikt.items():
if not output.get(k):
output[k] = v
else:
output[k].extend(v)
output[k] = list(set(output[k]))
return output | 3d629bb9bc6af2a637a622fea158447b24c00bd0 | 1,427 |
import subprocess
def sh(arg):
"""
Execute command in a background shell.
Args:
arg (str or list): shell command, or a list of shell commands.
"""
if isinstance(arg, list):
return [sh(a) for a in arg]
else:
return subprocess.check_output(arg, shell=True).decode("utf-8").strip() | bfde2eaca0b25a0c8012f5541b72a6f142d1180f | 1,429 |
def uncapitalize(string: str):
"""De-capitalize first character of string
E.g. 'How is Michael doing?' -> 'how is Michael doing?'
"""
if len(string):
return string[0].lower() + string[1:]
return "" | 1a294f171d16d7a4c41fb0546feca3c03b7ae37a | 1,430 |
from pathlib import Path
def ORDER_CTIME(path: Path) -> int:
"""パスのソート用関数です。作成日時でソートを行います。
"""
return path.stat().st_ctime_ns | 435571222b26e0c83904305784d6c8868b5bf497 | 1,431 |
def ensure_dict(value):
"""Convert None to empty dict."""
if value is None:
return {}
return value | 191b1a469e66750171648e715501690b2814b8b2 | 1,432 |
def merge_dicts(dicts, handle_duplicate=None):
"""Merge a list of dictionaries.
Invoke handle_duplicate(key, val1, val2) when two dicts maps the
same key to different values val1 and val2, maybe logging the
duplication.
"""
if not dicts:
return {}
if len(dicts) == 1:
return dicts[0]
if handle_duplicate is None:
return {key: val for dict_ in dicts for key, val in dict_.items()}
result = {}
for dict_ in dicts:
for key, val in dict_.items():
if key in result and val != result[key]:
handle_duplicate(key, result[key], val)
continue
result[key] = val
return result | 44c06ab30bb76920ff08b5978a6aa271abd3e449 | 1,434 |
import os
def is_source_ext(filename):
"""
Tells if filename (filepath) is a source file. For our purposes "sources"
are any files that can #include and can be included.
"""
_, ext = os.path.splitext(filename)
return ext in [".h", ".hh", ".hpp", ".inc", ".c", ".cc", ".cxx", ".cpp", ".f", ".F"] | b388768191e3efbfe2ddf3925a3ebf9f3a6693ac | 1,435 |
def sparse_add(sv1, sv2):
"""dict, dict -> dict
Returns a new dictionary that is the sum of the other two.
>>> sparse_add(sv1, sv2)
{0: 5, 1: 6, 2: 9}
"""
newdict = {}
keys = set(sv1.keys()) | set(sv2.keys())
for key in keys:
x = sv1.get(key, 0) + sv2.get(key, 0)
newdict[key] = x
return (newdict) | ced3420a585084a246ad25f7686fb388f2c05542 | 1,436 |
def escape(s):
"""
Returns the given string with ampersands, quotes and carets encoded.
>>> escape('<b>oh hai</b>')
'&lt;b&gt;oh hai&lt;/b&gt;'
>>> escape("Quote's Test")
'Quote&#39;s Test'
"""
mapping = (
('&', '&amp;'),
('<', '&lt;'),
('>', '&gt;'),
('"', '&quot;'),
("'", '&#39;'),
)
for tup in mapping:
s = s.replace(tup[0], tup[1])
return s | 2b4971c4e87e613cad457dde6d62806d299cdbcd | 1,438 |
def _get_db_columns_for_model(model):
"""
Return list of columns names for passed model.
"""
return [field.column for field in model._meta._fields()] | 181999f28ca659bf296bcb4dda7ac29ddfe61071 | 1,439 |
import json
def load_chunks(chunk_file_location, chunk_ids):
"""Load patch paths from specified chunks in chunk file
Parameters
----------
chunk_ids : list of int
The IDs of chunks to retrieve patch paths from
Returns
-------
list of str
Patch paths from the chunks
"""
patch_paths = []
with open(chunk_file_location) as f:
data = json.load(f)
chunks = data['chunks']
for chunk in data['chunks']:
if chunk['id'] in chunk_ids:
patch_paths.extend([[x,chunk['id']] for x in chunk['imgs']])
if len(patch_paths) == 0:
raise ValueError(
f"chunks {tuple(chunk_ids)} not found in {chunk_file_location}")
return patch_paths | c01ec6076141356ae6f3a1dc40add28638739359 | 1,440 |
def get_model_input(batch, input_id=None):
"""
Get model input from batch
batch: batch of model input samples
"""
if isinstance(batch, dict) or isinstance(batch, list):
assert input_id is not None
return batch[input_id]
else:
return batch | 1b12ee86257bfbd5ab23404251bed39c0021f461 | 1,441 |
def issym(b3):
"""test if a list has equal number of positive
and negative values; zeros belong to both. """
npos = 0; nneg = 0
for item in b3:
if (item >= 0):
npos +=1
if (item <= 0):
nneg +=1
if (npos==nneg):
return True
else:
return False | e8cc57eec5bc9ef7f552ad32bd6518daa2882a3e | 1,442 |
def get_parents(tech_id, model_config):
"""
Returns the full inheritance tree from which ``tech`` descends,
ending with its base technology group.
To get the base technology group,
use ``get_parents(...)[-1]``.
Parameters
----------
tech_id : str
model_config : AttrDict
"""
tech = model_config.techs[tech_id].essentials.parent
parents = [tech]
while True:
tech = model_config.tech_groups[tech].essentials.parent
if tech is None:
break # We have reached the top of the chain
parents.append(tech)
return parents | 7220a57b770232e335001a0dab74ca2d8197ddfa | 1,443 |
def get_month_n_days_from_cumulative(monthly_cumulative_days):
"""
Transform consecutive number of days in monthly data to actual number of days.
EnergyPlus monthly results report a total consecutive number of days for each day.
Raw data reports table as 31, 59..., this function calculates and returns
actual number of days for each month 31, 28...
"""
old_num = monthly_cumulative_days.pop(0)
m_actual_days = [old_num]
for num in monthly_cumulative_days:
new_num = num - old_num
m_actual_days.append(new_num)
old_num += new_num
return m_actual_days | 5ede033023d357a60ba5eb7e9926325d24b986e8 | 1,444 |
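A minimal example with the first three months of a non-leap year (note the function consumes its input list via pop):
assert get_month_n_days_from_cumulative([31, 59, 90]) == [31, 28, 31]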
import re
def apply_template(assets):
"""
Processes the template.
Used to overwrite the ``docutils.writers._html_base.Writer.apply_template``
method.
``apply_template(<assets>)``
``assets`` (dictionary)
Assets to add at the template, see ``ntdocutils.writer.Writer.assets``.
returns
function - Template processor.
Example
=======
.. code:: python
apply_template({
"before_styles": '<link rel="stylesheet" href="styles.css" />',
"scripts": '<script src="script.js"></script>'
'<script src="other_script.js"></script>'
})
"""
def apply_template(self):
template_file = open(self.document.settings.template, "rb")
template = str(template_file.read(), "utf-8")
template_file.close()
# Escape ``%`` that don't are special fields
pattern = r"%(?!\((" + "|".join(self.visitor_attributes) + r")\)s)"
template = re.subn(pattern, "%%", template)[0]
subs = self.interpolation_dict()
return template.format(**assets) % subs
return apply_template | 51042e25f701935d668d91a923155813ce60b381 | 1,446 |
def harvest(post):
"""
Filter the post data for just the funding allocation formset data.
"""
data = {k: post[k] for k in post if k.startswith("fundingallocation")}
return data | 67f400caf87f2accab30cb3c519e7014792c84d7 | 1,447 |
def get_menu_option():
"""
Function to display menu options and asking the user to choose one.
"""
print("1. View their next 5 fixtures...")
print("2. View their last 5 fixtures...")
print("3. View their entire current season...")
print("4. View their position in the table...")
print("5. View the club roster...")
print("6. View season statistics...")
print("7. View team information...")
print("8. Sign up to your club's weekly newsletter...")
print("9. Calculate odds on next game...")
print()
return input("CHOOSE AN OPTION BELOW BY ENTERING THE MENU NUMBER OR ENTER 'DONE' ONCE YOU ARE FINISHED: ") | 69e71555d9896d0c462b2e7b542ec87aea9213eb | 1,448 |
def clut8_rgb888(i):
"""Reference CLUT for wasp-os.
Technically speaking this is not a CLUT because we look up the colours
algorithmically to avoid the cost of a genuine CLUT. The palette is
designed to be fairly easy to generate algorithmically.
The palette includes all 216 web-safe colours together with 4 grays and
36 additional colours that target "gaps" at the brighter end of the web
safe set. There are 11 greys (plus black and white) although two are
fairly close together.
:param int i: Index (from 0..255 inclusive) into the CLUT
:return: 24-bit colour in RGB888 format
"""
if i < 216:
rgb888 = ( i % 6) * 0x33
rg = i // 6
rgb888 += (rg % 6) * 0x3300
rgb888 += (rg // 6) * 0x330000
elif i < 252:
i -= 216
rgb888 = 0x7f + (( i % 3) * 0x33)
rg = i // 3
rgb888 += 0x4c00 + ((rg % 4) * 0x3300)
rgb888 += 0x7f0000 + ((rg // 4) * 0x330000)
else:
i -= 252
rgb888 = 0x2c2c2c + (0x101010 * i)
return rgb888 | ca95c95306f7f4762add01f2ffc113f348e29d3b | 1,450 |
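A few spot checks of the web-safe portion of the palette, where the index decomposes as b + 6*g + 36*r with each channel scaled by 0x33:
assert clut8_rgb888(0) == 0x000000
assert clut8_rgb888(7) == 0x003333
assert clut8_rgb888(215) == 0xffffff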
import json
from typing import OrderedDict
def to_json_dict(json_data):
"""Given a dictionary or JSON string; return a dictionary.
:param json_data: json_data(dict, str): Input JSON object.
:return: A Python dictionary/OrderedDict with the contents of the JSON object.
:raises TypeError: If the input object is not a dictionary or string.
"""
if isinstance(json_data, dict):
return json_data
elif isinstance(json_data, str):
return json.loads(json_data, object_hook=OrderedDict)
else:
raise TypeError(f"'json_data' must be a dict or valid JSON string; received: {json_data!r}") | e1264d88a4424630f7348cbe7794ca072c057bdf | 1,451 |
def log_at_level(logger, message_level, verbose_level, msg):
"""
writes to the log if message_level <= verbose_level
Returns anything written in case we might want to drop down and output at a
lower log level
"""
if message_level <= verbose_level:
logger.info(msg)
return True
return False | 4b88ee137f7c2cb638b8a058b2dceb534329c0d9 | 1,454 |
def _collect_scalars(values):
"""Given a list containing scalars (float or int) collect scalars
into a single prefactor. Input list is modified."""
prefactor = 1.0
for i in range(len(values)-1, -1, -1):
if isinstance(values[i], (int, float)):
prefactor *= values.pop(i)
return prefactor | bea7e54eec16a9b29552439cd12ce29b9e82d40b | 1,455 |
import itertools
def all_inputs(n):
"""
returns an iterator for all {-1,1}-vectors of length `n`.
"""
return itertools.product((-1, +1), repeat=n) | 526dff9332cf606f56dcb0c31b5c16a0124478ed | 1,456 |
def init_time(p, **kwargs):
"""Initialize time data."""
time_data = {
'times': [p['parse']],
'slots': p['slots'],
}
time_data.update(**kwargs)
return time_data | 2aff3819d561f0dc9e0c9b49702b8f3fbb6e9252 | 1,458 |
import socket
def get_socket_with_reuseaddr() -> socket.socket:
"""Returns a new socket with `SO_REUSEADDR` option on, so an address
can be reused immediately, without waiting for TIME_WAIT socket
state to finish.
On Windows, `SO_EXCLUSIVEADDRUSE` is used instead.
This is because `SO_REUSEADDR` on this platform allows the socket
to be bound to an address that is already bound by another socket,
without requiring the other socket to have this option on as well.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if 'SO_EXCLUSIVEADDRUSE' in dir(socket):
sock.setsockopt(socket.SOL_SOCKET,
getattr(socket, 'SO_EXCLUSIVEADDRUSE'), 1)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return sock | 6edbc0f0aaaeaebd9c6d0f31257de0b4dfe7df1c | 1,460 |
import functools
import traceback
def log_errors(func):
"""
A wrapper to print exceptions raised from functions that are called by callers
that silently swallow exceptions, like render callbacks.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
# Exceptions from calls like this aren't well-defined, so just log the
# error and don't reraise it.
traceback.print_exc()
return wrapper | a15c26de36a8c784da0333382f27fc06b0ed78a0 | 1,461 |
def add_pred_to_test(test_df, pred_np, demo_col_list, days):
"""
derived from Tensorflow
INPUT:
- test_df (pandas DataFrame)
- pred_np (array of predicted scores)
- demo_col_list (list of demographic column names to cast to str)
- days (int threshold on time_in_hospital for the binary label)
OUTPUT:
- copy of test_df with added 'score' and 'label_value' columns
"""
test_df = test_df.copy()
for c in demo_col_list:
test_df[c] = test_df[c].astype(str)
test_df['score'] = pred_np
test_df['label_value'] = test_df['time_in_hospital'].apply(lambda x: 1 if x >=days else 0)
return test_df | aec48bd6201e1a9a1ebd6f96c4c8b7cfd9304607 | 1,462 |
import functools
def skippable(*prompts, argument=None):
"""
Decorator to allow a method on the :obj:`CustomCommand` to be
skipped.
Parameters:
----------
prompts: :obj:iter
A series of prompts to display to the user when the method is being
skipped.
argument: :obj:`str`
By default, the management command argument to indicate that the method
should be skipped will be `skip_<func_name>`. If the argument should
be different, it can be explicitly provided here.
"""
def decorator(func):
@functools.wraps(func)
def inner(instance, *args, **kwargs):
parameter = argument or "skip_%s" % func.__name__
if parameter in kwargs and kwargs[parameter] is True:
instance.prompt(*prompts,
style_func=instance.style.HTTP_NOT_MODIFIED)
return False
else:
return func(instance, *args, **kwargs)
return inner
return decorator | 879106f4cc0524660fb6639e56d688d40b115ac4 | 1,464 |
import hashlib
def _cache_name(address):
"""Generates the key name of an object's cache entry"""
addr_hash = hashlib.md5(address).hexdigest()
return "unsub-{hash}".format(hash=addr_hash) | 6933b1170933df5e3e57af03c81322d68a46d91f | 1,465 |
import os
from pathlib import Path
from typing import Union
from packaging import version
def guess_ghostscript() -> str:
"""Guess the path to ghostscript. Only guesses well on Windows.
Should prevent people from needing to add ghostscript to PATH.
"""
if os.name != 'nt':
return 'gs' # I'm not sure where to look on non-Windows OSes so just guess 'gs'.
def sort_by_version(v: Path) -> Union[version.Version, version.LegacyVersion]:
return version.parse(v.name[2:]) # When this is an inline lambda mypy and pylint fuss.
locations = 'C:\\Program Files\\gs', 'C:\\Program Files (x86)\\gs'
files = 'gswin64c.exe', 'gswin32c.exe', 'gs.exe'
for location in locations:
path = Path(location)
if path.exists():
versions = [v for v in path.iterdir() if v.is_dir() and v.name.startswith('gs')]
versions.sort(key=sort_by_version, reverse=True)
for v in versions:
for file in files:
exe = v / 'bin' / file
if exe.exists():
return str(exe)
return 'gswin64c' | 09e8761185f6029025d8d6cc6861672870e781b2 | 1,466 |
def supplemental_div(content):
"""
Standardize supplemental content listings
Might not be possible if genus and tree content diverge
"""
return {'c': content} | b42e868ef32f387347cd4a97328794e6628fe634 | 1,467 |
import pathlib
import os
def _to_absolute_uri(uri):
"""
Converts the input URI into an absolute URI, relative to the current working
directory.
:param uri: A URI, absolute or relative.
:return: An absolute URI.
"""
if ":" in uri: #Already absolute. Is either a drive letter ("C:/") or already fully specified URI ("http://").
return pathlib.Path(uri).as_uri() #Pathlib can take care of both these cases.
return pathlib.Path(os.path.abspath(uri)).as_uri() | b80c56d298e16ed1abd958950cc45b5e45e26111 | 1,468 |
def fizzbuzz(end=100):
"""Generate a FizzBuzz game sequence.
FizzBuzz is a children's game where players take turns counting.
The rules are as follows::
1. Whenever the count is divisible by 3, the number is replaced with
"Fizz"
2. Whenever the count is divisible by 5, the number is replaced with "Buzz"
3. Whenever the count is divisible by both 3 and 5, the number is replaced
with "FizzBuzz"
Parameters
----------
end : int
The FizzBuzz sequence is generated up and including this number.
Returns
-------
sequence : list of str
The FizzBuzz sequence.
Examples
--------
>>> fizzbuzz(3)
['1', '2', 'Fizz']
>>> fizzbuzz(5)
['1', '2', 'Fizz', '4', 'Buzz']
References
----------
https://blog.codinghorror.com/why-cant-programmers-program/
"""
sequence = []
for i in range(1, end + 1):
if i % (3 * 5) == 0:
sequence.append('FizzBuzz')
elif i % 3 == 0:
sequence.append('Fizz')
elif i % 5 == 0:
sequence.append('Buzz')
else:
sequence.append(str(i))
return sequence | b68b1c39674fb47d0bd12d387f347af0ef0d26ca | 1,469 |
def notification_list(next_id=None): # noqa: E501
"""notification_list
Get all your certificate update notifications # noqa: E501
:param next_id:
:type next_id: int
:rtype: NotificationList
"""
return 'do some magic!' | 4fe4467f89ad4bf1ba31bd37eace411a78929a26 | 1,470 |
import os
def _delete_dest_path_if_stale(master_path, dest_path):
"""Delete dest_path if it does not point to cached image.
:param master_path: path to an image in master cache
:param dest_path: hard link to an image
:returns: True if dest_path points to master_path, False if dest_path was
stale and was deleted or it didn't exist
"""
dest_path_exists = os.path.exists(dest_path)
if not dest_path_exists:
# Image not cached, re-download
return False
master_path_exists = os.path.exists(master_path)
if (not master_path_exists
or os.stat(master_path).st_ino != os.stat(dest_path).st_ino):
# Image exists in cache, but dest_path out of date
os.unlink(dest_path)
return False
return True | cbe2387e5a0b9a27afcfc9a0bd34cfeb6f164ae4 | 1,471 |
import six
import base64
import zlib
def deflate_and_base64_encode(string_val):
"""
Deflates and the base64 encodes a string
:param string_val: The string to deflate and encode
:return: The deflated and encoded string
"""
if not isinstance(string_val, six.binary_type):
string_val = string_val.encode('utf-8')
return base64.b64encode(zlib.compress(string_val)[2:-4]) | 31fc19cf134bc22b3fc45b4158c65aef666716cc | 1,472 |
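A round-trip sketch: the [2:-4] slice strips the zlib header and checksum, so the payload is raw DEFLATE and must be inflated with a negative window size:
import base64, zlib
encoded = deflate_and_base64_encode("hello hello hello")
assert zlib.decompress(base64.b64decode(encoded), -15) == b"hello hello hello"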
import pickle
def load_pickle(file_path):
"""
load the pickle object from the given path
:param file_path: path of the pickle file
:return: obj => loaded obj
"""
with open(file_path, "rb") as obj_des:
obj = pickle.load(obj_des)
# return the loaded object
return obj | 4770a152dad9c7d123f95a53642aff990f3590f7 | 1,473 |
import json
import re
def create_summary_text(summary):
"""
format a dictionary so it can be printed to screen or written to a plain
text file
Args:
summary(dict): the data to format
Returns:
textsummary(str): the summary dict formatted as a string
"""
summaryjson = json.dumps(summary, indent=3)
textsummary = re.sub('[{},"]', '', summaryjson)
return textsummary | 3a8dd508b760a0b9bfe925fa2dc07d53dee432af | 1,476 |
def maximo_basico(a: float, b: float) -> float:
"""Toma dos números y devuelve el mayor.
Restricción: No utilizar la función max"""
if a > b:
return a
return b | f98db565243587015c3b174cf4130cbc32a00e22 | 1,477 |
def is_pattern_error(exception: TypeError) -> bool:
"""Detect whether the input exception was caused by invalid type passed to `re.search`."""
# This is intentionally simplistic and do not involve any traceback analysis
return str(exception) == "expected string or bytes-like object" | 623246404bbd54bc82ff5759bc73be815d613731 | 1,479 |
def parse_fastq_pf_flag(records):
"""Take a fastq filename split on _ and look for the pass-filter flag
"""
if len(records) < 8:
pf = None
else:
fastq_type = records[-1].lower()
if fastq_type.startswith('pass'):
pf = True
elif fastq_type.startswith('nopass'):
pf = False
elif fastq_type.startswith('all'):
pf = None
else:
raise ValueError("Unrecognized fastq name: %s" % (
"_".join(records),))
return pf | 9a46022aa6e07ed3ca7a7d80933ee23e26d1ca9a | 1,480 |
import os
def _DropEmptyPathSegments(path):
"""Removes empty segments from the end of path.
Args:
path: A filesystem path.
Returns:
path with trailing empty segments removed. Eg /duck/// => /duck.
"""
while True:
(head, tail) = os.path.split(path)
if tail:
break
path = head
return path | 23060efc37343bf7ccec483847264c5f6a7c811b | 1,482 |
def _format_author(url, full_name):
""" Helper function to make author link """
return u"<a class='more-info' href='%s'>%s</a>" % (url, full_name) | 50f001c2358b44bb95da628cc630a2ed3ea8ddfd | 1,483 |
def validinput(x0, xf, n):
"""Checks that the user input is valid.
Args:
x0 (float): Start value
xf (float): End values
n (int): Number of sample points
Returns:
False if x0 > xf or if n is not an integer
True otherwise
"""
valid = True
if x0 > xf:
valid = False
if int(n) != n:
valid = False
if not valid:
print("Please recheck your input")
return valid | 096e0702eb8fe47486d4f03e5b3c55c0835807cd | 1,484 |
import glob
def find_paths(initial_path, extension):
"""
From a path, return all the files of a given extension inside.
:param initial_path: the initial directory of search
:param extension: the extension of the files to be searched
:return: list of paths inside the initial path
"""
paths = glob.glob(initial_path+r'/**/*.' + extension, recursive=True)
return paths | 0220127050b765feaf423c195d020d65ece8d22e | 1,487 |