content (stringlengths 35–416k) | sha1 (stringlengths 40) | id (int64 0–710k)
---|---|---|
import os
def _is_unc_path(some_path) -> bool:
"""True if path starts with 2 backward (or forward, due to python path hacking) slashes."""
return (
len(some_path) > 1
and some_path[0] == some_path[1]
and some_path[0] in (os.sep, os.altsep)
) | a2cd156bf5367cc73673ad86505aa094da3fae86 | 9,449 |
def mapped_state(state):
"""
    Map a given state to the neural-network input state, inserting trivial vehicles until vehs_num = 18
:param state: given state
:return: new state
"""
num_diff = 18 - len(state)
for i in range(num_diff):
state.append([0, 0, 0, 0, 0, 2])
return state | 1732565f6ef307fb67c92f2adac9c81591acf4c5 | 9,450 |
import os
def fetch_result_csv_fp(dir):
"""
Find result CSV in dir. Currently just finds the first non-system file CSV in dir, assuming only one exists; more sophisticated checks need to be added.
:param dir: directory to search in.
:return: path to csv.
"""
csv = [os.path.join(dir, x) for x in os.listdir(dir) if x.endswith(".csv") and not x.startswith(".")]
    if len(csv) > 1:  # check that only one csv file is in dir; if not, use the first one by default
        print("[WARNING] multiple feature files detected in results, defaulting to use {}".format(csv[0]))
# "unlist" result; use first file by default if len > 1
csv = csv[0]
return csv | 70fe1a13f82412dd529daef604bb7edadab15dfe | 9,451 |
def iteritems(dictionary):
"""Replacement to account for iteritems/items switch in Py3."""
if hasattr(dictionary, "iteritems"):
return dictionary.iteritems()
return dictionary.items() | 591bba21a1026d2e9d380555623c8ca78d3ed466 | 9,453 |
def validate_testapps(apis, api_configs):
"""Ensures the chosen apis are valid, based on the config."""
if "all" in apis:
        return list(api_configs)
for api in apis:
if api not in api_configs:
raise RuntimeError("Testapp given as flag not found in config: %s" % api)
return apis | 4726ae4e28bb57e2fa812cd0fa3721d38ba1103a | 9,454 |
import copy
def prune_basis(basis, use_copy=True):
"""
Removes primitives that have a zero coefficient, and
removes duplicate shells
This only finds EXACT duplicates, and is meant to be used
after uncontracting
If use_copy is True, the input basis set is not modified.
"""
if use_copy:
basis = copy.deepcopy(basis)
for k, el in basis['elements'].items():
        if 'electron_shells' not in el:
continue
for sh in el['electron_shells']:
new_exponents = []
new_coefficients = []
exponents = sh['exponents']
# transpose of the coefficient matrix
coeff_t = list(map(list, zip(*sh['coefficients'])))
# only add if there is a nonzero contraction coefficient
for i in range(len(sh['exponents'])):
if not all([float(x) == 0.0 for x in coeff_t[i]]):
new_exponents.append(exponents[i])
new_coefficients.append(coeff_t[i])
# take the transpose again, putting the general contraction
# as the slowest index
new_coefficients = list(map(list, zip(*new_coefficients)))
sh['exponents'] = new_exponents
sh['coefficients'] = new_coefficients
# Remove any duplicates
shells = el.pop('electron_shells')
el['electron_shells'] = []
for sh in shells:
if sh not in el['electron_shells']:
el['electron_shells'].append(sh)
return basis | 8847539ce717868e9383db52e39008bbae2882f0 | 9,455 |
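# A minimal sketch of prune_basis in use on a hypothetical basis fragment; the
# nesting ('elements' -> 'electron_shells' -> 'exponents'/'coefficients') mirrors
# what the function reads, but the numbers are made up for illustration.
example_basis = {
    'elements': {
        '1': {
            'electron_shells': [
                {'exponents': ['13.0', '1.96', '0.44'],
                 'coefficients': [['0.033', '0.234', '0.0']]},
            ]
        }
    }
}
pruned = prune_basis(example_basis)
# The third primitive has an all-zero coefficient column, so it is removed:
print(pruned['elements']['1']['electron_shells'][0]['exponents'])  # ['13.0', '1.96']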
def _dol_to_lod(dol):
"""Convert a dict-of-lists into a list-of-dicts.
Reverse transformation of :func:`_lod_to_dol()`.
"""
keys = list(dol.keys())
lod = []
for i in range(len(dol[keys[0]])):
lod.append({k: v[i] for k, v in dol.items()})
return lod | 9e8a98d2502797ae27cae88ab2a0ec7fda4aff34 | 9,457 |
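# A quick illustrative check of the dict-of-lists -> list-of-dicts transformation;
# note it assumes all lists share the same length.
dol = {'x': [1, 2, 3], 'y': ['a', 'b', 'c']}
print(_dol_to_lod(dol))
# [{'x': 1, 'y': 'a'}, {'x': 2, 'y': 'b'}, {'x': 3, 'y': 'c'}]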
def map_to_parent(t_class, parents_info_level):
"""
parents_info_level: {<classid>: [parent]}, only contains classid that has a super-relationship
"""
if t_class in parents_info_level:
assert len(parents_info_level[t_class]) < 2, f"{t_class} has two or more parents {parents_info_level[t_class]}"
parent = parents_info_level[t_class][0]
else:
parent = t_class
return parent | a92cf3671aff03724ac89ef024089df06ae28d4a | 9,461 |
import uuid
def get_temp_entity_table_name() -> str:
"""Returns a random table name for uploading the entity dataframe"""
return "feast_entity_df_" + uuid.uuid4().hex | 62d8b0ca2d58fb813db88caa753f93e412c62ad0 | 9,462 |
def remove_raw_object(cls):
"""
Decorator.
If a Record class is decorated, raw_object will be removed once all mark properties are cached.
"""
cls.remove_raw_object = True
return cls | 9aab8feb4201237864de5eae891a98ed3894d862 | 9,464 |
def columnOfMatrix(matrix, i):
"""
Метод для получения i-го столбца матрицы
"""
return [row[i] for row in matrix] | 605c95e7cf90dca575bdcf8d4bc434b850131c21 | 9,465 |
def p_list_formatter(primer_list):
"""
Reformat the primer list (remove unnecessary characters from biopython2 output).
Args:
primer_list (list): list from list_from_gen output
Returns:
primer_dimers (list): list with unnecessary chars removed.
"""
    primer_dimers = []
    reformat_p_list = [each_item.replace('\\n', ' ').split() for each_item in primer_list]
for each_item in reformat_p_list:
primer_dimers.append((each_item[0].replace('(', '').replace('\'', ''),
each_item[2].replace('\'', ''), each_item[4],
each_item[5].replace('\')', '')))
return primer_dimers | 0b1585221a13c3560d511127af782875fcd71732 | 9,466 |
def status():
"""Status check for server"""
return "Running...\n" | 89b001639d8983cfb2d8705bfcdfdcc6fc8750a4 | 9,468 |
def _repr_rule(iptc_rule, ipv6=False):
""" Return a string representation of an iptc_rule """
s = ''
    if not ipv6 and iptc_rule.src != '0.0.0.0/0.0.0.0':
        s += 'src {} '.format(iptc_rule.src)
    elif ipv6 and iptc_rule.src != '::/0':
        s += 'src {} '.format(iptc_rule.src)
    if not ipv6 and iptc_rule.dst != '0.0.0.0/0.0.0.0':
        s += 'dst {} '.format(iptc_rule.dst)
    elif ipv6 and iptc_rule.dst != '::/0':
        s += 'dst {} '.format(iptc_rule.dst)
    if iptc_rule.protocol != 'ip':
        s += 'protocol {} '.format(iptc_rule.protocol)
    if iptc_rule.in_interface is not None:
        s += 'in {} '.format(iptc_rule.in_interface)
    if iptc_rule.out_interface is not None:
        s += 'out {} '.format(iptc_rule.out_interface)
    if not ipv6 and iptc_rule.fragment:
        s += 'fragment '
    for m in iptc_rule.matches:
        s += '{} {} '.format(m.name, m.get_all_parameters())
    if iptc_rule.target and iptc_rule.target.name and len(iptc_rule.target.get_all_parameters()):
        # include the target name as well as its parameters
        s += '-j {} {} '.format(iptc_rule.target.name, iptc_rule.target.get_all_parameters())
    elif iptc_rule.target and iptc_rule.target.name:
        s += '-j {} '.format(iptc_rule.target.name)
return s | e01c3b27ec6ee831a7d88fc87e69e707639ef0b6 | 9,470 |
def add_padding_0_bits(bits_string: str, required_length: int) -> tuple:
"""
Adds 0 to bits string.
Returns tuple - (bits string with padding, number of added 0s)
"""
extra_0_bits_count = 0
while len(bits_string) < required_length:
bits_string += '0'
extra_0_bits_count += 1
return bits_string, extra_0_bits_count | fd3eee071821d087b710c33a0beef72431836505 | 9,471 |
def IsInstance(type_):
"""Returns a function which can be used to check whether or not a value is of the given type."""
def Do(item):
return isinstance(item, type_)
return Do | 06f916e8658761a03619834692d78a44d145b514 | 9,472 |
def show_nums_to_user():
"""
093
Ask the user to enter five numbers. Sort them into order and present them to the user.
Ask them to select one of the numbers. Remove it from the original array and save it in a
new array.
:return: new array with one (popped) item.
"""
arr, trigger = [], 5
while trigger:
arr.append(int(input(f"{trigger} to go; Enter a number: ")))
trigger -= 1
arr.sort()
print(f"Choose a number from your array: {arr}")
tmp = int(input("Enter the number to be removed: "))
    new_array = []
    for idx, num in enumerate(arr):
        if tmp == num:
            new_array.append(arr.pop(idx))
            break  # stop once removed; continuing to iterate over a shrinking list is unsafe
    return new_array | 3b5a365dd729623ede7079d5d131ecc9f40897e7 | 9,474 |
def seq_type(seq):
"""
Determines whether a sequence consists of 'N's only
(i.e., represents a gap)
"""
return 'gap' if set(seq.upper()) == {'N'} else 'bases' | 5555e5cd0ccdbf8f5e7b475c5c983ab54a17fb07 | 9,475 |
from typing import Optional
import contextlib
import socket
def resolve(hostname: str) -> Optional[str]:
""" Get the IP address of a subdomain """
with contextlib.suppress(socket.gaierror):
return socket.gethostbyname(hostname) | cb4924f3120f97e558fabccbab1c30612e52dc2c | 9,477 |
def fast_non_dominated_sort(values1, values2):
""" Function to carry out NSGA-II's fast non dominated sort """
S = [[] for i in range(0, len(values1))]
front = [[]]
n = [0 for i in range(0, len(values1))]
rank = [0 for i in range(0, len(values1))]
for p in range(0, len(values1)):
S[p] = []
n[p] = 0
for q in range(0, len(values1)):
if (values1[p] > values1[q] and values2[p] > values2[q]) or (values1[p] >= values1[q] and values2[p] > values2[q]) or (values1[p] > values1[q] and values2[p] >= values2[q]):
if q not in S[p]:
S[p].append(q)
elif (values1[q] > values1[p] and values2[q] > values2[p]) or (values1[q] >= values1[p] and values2[q] > values2[p]) or (values1[q] > values1[p] and values2[q] >= values2[p]):
n[p] = n[p] + 1
if n[p] == 0:
rank[p] = 0
if p not in front[0]:
front[0].append(p)
i = 0
while front[i] != []:
Q = []
for p in front[i]:
for q in S[p]:
n[q] = n[q] - 1
if n[q] == 0:
rank[q] = i+1
if q not in Q:
Q.append(q)
i = i+1
front.append(Q)
del front[len(front)-1]
return front | ee7275aea0c2b0acadd6fb523fc24e5618165bfe | 9,478 |
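# Illustrative usage (maximization of both objectives, as the comparisons above imply).
# With anti-correlated objectives no point dominates another, so one front results;
# with correlated objectives the dominated point drops to a second front.
print(fast_non_dominated_sort([1, 2, 3], [3, 2, 1]))  # [[0, 1, 2]]
print(fast_non_dominated_sort([2, 1], [2, 1]))        # [[0], [1]]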
import numpy
def rechannel(channels):
"""A data node to rechannel data.
Parameters
----------
channels : int or list
The channel mapping.
Receives
------
data : ndarray
The original signal.
Yields
------
data : ndarray
The rechanneled signal.
"""
if channels == 0:
return lambda data: (data if data.ndim == 1 else numpy.mean(data, axis=1))
elif isinstance(channels, int):
return lambda data: (data if data.ndim == 1 else numpy.mean(data, axis=1))[:, None][:, [0]*channels]
else:
return lambda data: (data[:, None] if data.ndim == 1 else data)[:, channels] | 993863df43b0fe1a074617a59dc6dca97e2f3076 | 9,481 |
def get_ar(bbox):
"""
:param bbox: top left, right down
:return: aspect ratio
"""
[x1, y1, x2, y2] = bbox
return (y2 - y1) / (x2 - x1) | d79eb51eafec917b1754558a9a87307734fd8ac4 | 9,482 |
def validate_image_pull_credentials(image_pull_credentials):
"""
Validate ImagePullCredentialsType for Project
Property: Environment.ImagePullCredentialsType
"""
VALID_IMAGE_PULL_CREDENTIALS = ("CODEBUILD", "SERVICE_ROLE")
if image_pull_credentials not in VALID_IMAGE_PULL_CREDENTIALS:
raise ValueError(
"Project ImagePullCredentialsType must be one of: %s"
% ", ".join(VALID_IMAGE_PULL_CREDENTIALS)
)
return image_pull_credentials | f4953fefbca3ca5906ca58497152b25a07247c9a | 9,485 |
def num_bits(i):
"""Returns the number of bits in an unsigned integer."""
n = 0
while i:
n += 1
        i &= i - 1  # clear the lowest set bit (Kernighan's trick)
return n | 3eb664bd642717556af0b2c09314000d70209b44 | 9,486 |
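# Sanity checks: the loop runs once per set bit, so these hold for any unsigned input.
assert num_bits(0) == 0
assert num_bits(0b1011) == 3
assert num_bits(255) == 8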
import argparse
def get_args():
"""parse arguments
:returns: parsed arguments
"""
#Command line arguments parser. Described as in their 'help' sections.
parser = argparse.ArgumentParser(description="Replication of Zaremba et al. (2014).\
\n https://arxiv.org/abs/1409.2329")
parser.add_argument("--layer_num", type=int, default=2,\
help="The number of LSTM layers the model has.")
parser.add_argument("--hidden_size", type=int, default=650,\
help="The number of hidden units per layer.")
parser.add_argument("--lstm_type", type=str, choices=["pytorch","custom","vmlmf","vm_group"], \
default="pytorch", help="Which implementation of LSTM to use."
+ "Note that 'pytorch' is about 2 times faster.")
parser.add_argument("--dropout", type=float, default=0.5, \
help="The dropout parameter.")
parser.add_argument("--winit", type=float, default=0.05, \
help="The weight initialization parameter.")
parser.add_argument("--batch_size", type=int, default=20, \
help="The batch size.")
parser.add_argument("--seq_length", type=int, default=35, \
help="The sequence length for bptt.")
parser.add_argument("--learning_rate", type=float, default=1, \
help="The learning rate.")
parser.add_argument("--total_epochs", type=int, default=39, \
help="Total number of epochs for training.")
parser.add_argument("--factor_epoch", type=int, default=6, \
help="The epoch to start factoring the learning rate.")
parser.add_argument("--factor", type=float, default=1.2, \
help="The factor to decrease the learning rate.")
parser.add_argument("--max_grad_norm", type=float, default=5, \
help="The maximum norm of gradients we impose on training.")
parser.add_argument("--device", type=str, choices = ["cpu", "gpu"],\
default = "gpu", help = "Whether to use cpu or gpu.")
parser.add_argument("--gpu_id", type=int, default=0, help="gpu_id")
parser.add_argument("--wRank", type=int, default=300, help="wRank of vmlmf.")
parser.add_argument("--uRanks", type=int,nargs="+", default=300, \
help="uRank of vmlmf.")
args = parser.parse_args()
return args | 28ddf1b900eb1295b0da3d9ab4edbf76ec889cf0 | 9,487 |
def remove_quoted_text(line):
"""get rid of content inside quotes
and also removes the quotes from the input string"""
while line.count("\"") % 2 == 0 and line.count("\"") > 0:
first = line.find("\"")
second = line.find("\"", first+1)
line = line[0:first] + line[second+1:]
while line.count("\'") % 2 == 0 and line.count("'") > 0:
first = line.find("\'")
second = line.find("\'", first+1)
line = line[0:first] + line[second+1:]
return line | ef0776ddfd9d60474077fd106474728de10b9e8f | 9,489 |
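# Illustration: quoted spans are excised together with their quotes, pair by pair.
print(remove_quoted_text('keep "drop this" and keep'))  # 'keep  and keep'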
import os
import logging
import json
def _GetJsonFileCreator(name, json_object):
"""Creates a creator function for an extended source context file.
Args:
name: (String) The name of the file to generate.
json_object: Any object compatible with json.dump.
Returns:
(callable()) A creator function that will create the file and return a
cleanup function that will delete the file.
"""
if os.path.exists(name):
        logging.warning('%s already exists. It will not be updated.', name)
return lambda: (lambda: None)
def Cleanup():
os.remove(name)
def Generate():
try:
with open(name, 'w') as f:
json.dump(json_object, f)
except IOError as e:
            logging.warning('Could not generate [%s]: %s', name, e)
return Cleanup
return Generate | db74748d51303087fae6aec5b6b6f6deb7d665c0 | 9,490 |
def get_index_of_feature(feature_list, item):
"""
Gets the index of the feature in the provided feature list
:rtype : int
:param feature_list: List of features to search from
:param item: The feature to search
:return: The index where the feature was founded, -1 otherwise
"""
# getting the indexes where 'item' occurs
idxs = [k for k in range(len(feature_list)) if feature_list[k][0] == item]
# counting the indexes
idxs_count = len(idxs)
# if the feature appears more than one time
if idxs_count > 1:
raise Exception("""
There was a problem in the feature extraction process.\r\n
The feature is counted more than one time.""")
# the index if any, -1 if the feature doesn't appear
return idxs[0] if idxs_count == 1 else -1 | 2f2d79d4caf953b60ecf841a23d86e8b4a00b937 | 9,491 |
def fix_ghdx_birth_weights(df):
"""Ensure the child birth weight is in grams and is a legal value.
The original survey allowed answers to weights to be coded in grams or
kilograms. The GHDx data has recoded the values into grams. However, a
few cases are clearly still coded in kilograms. The survey also used the
value 9999 as a sentinel for missing weight in grams. The updated survey
instrument allows child birth weights between 0.5 kg and 8 kg. We will
use this as the valid range.
Args:
df (dataframe): GHDx data.
Returns:
(dataframe): the same dataframe is returned with inplace modifications.
"""
if 'c1_08b' in df:
        df.loc[df.c1_08b <= 8, 'c1_08b'] = df.c1_08b * 1000  # kg => g
df.loc[(df.c1_08b > 8) & (df.c1_08b < 500), 'c1_08b'] = float('nan')
return df | a11e50a1a1db780389ad99051e7cf93a025155ae | 9,492 |
import math
def sol_rad_from_t(et_radiation, cs_radiation, temperature_min, temperature_max, coastal):
"""
Estimate incoming solar (or shortwave) radiation, *Rs*, (radiation hitting
a horizontal plane after scattering by the atmosphere) from min and max
temperature together with an empirical adjustment coefficient for
'interior' and 'coastal' regions.
The formula is based on equation 50 in Allen et al (1998) which is the
Hargreaves radiation formula (Hargreaves and Samani, 1982, 1985). This
method should be used only when solar radiation or sunshine hours data are
not available. It is only recommended for locations where it is not
possible to use radiation data from a regional station (either because
climate conditions are heterogeneous or data are lacking).
**NOTE**: this method is not suitable for island locations due to the
moderating effects of the surrounding water.
:param et_radiation: Extraterrestrial radiation [MJ m-2 day-1]. Can be
estimated using ``et_rad()``.
:param cs_radiation: Clear sky radiation [MJ m-2 day-1]. Can be estimated
using ``cs_rad()``.
:param temperature_min: Daily minimum temperature [deg C].
:param temperature_max: Daily maximum temperature [deg C].
:param coastal: ``True`` if site is a coastal location, situated on or
adjacent to coast of a large land mass and where air masses are
influenced by a nearby water body, ``False`` if interior location
where land mass dominates and air masses are not strongly influenced
by a large water body.
:return: Incoming solar (or shortwave) radiation (Rs) [MJ m-2 day-1].
:rtype: float
"""
# Determine value of adjustment coefficient [deg C-0.5] for
# coastal/interior locations
if coastal:
adj = 0.19
else:
adj = 0.16
sol_rad = adj * math.sqrt(temperature_max - temperature_min) * et_radiation
# The solar radiation value is constrained by the clear sky radiation
return min(sol_rad, cs_radiation) | 6952aa6509897494551839e412d5a15e51b5e30c | 9,495 |
def moving_avg(v, N):
"""
simple moving average.
Parameters
----------
v : list
data ta to average
N : integer
number of samples per average.
Returns
-------
m_avg : list
averaged data.
"""
s, m_avg = [0], []
for i, x in enumerate(v, 1):
s.append(s[i - 1] + x)
if i >= N:
avg = (s[i] - s[i - N]) / N
m_avg.append(avg)
return m_avg | 2e71eefb91ac694eaf06c2167e38ef497671145e | 9,496 |
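# Example: a 3-sample moving average over five points yields three averages.
print(moving_avg([1, 2, 3, 4, 5], 3))  # [2.0, 3.0, 4.0]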
def _get_commits(output):
"""Returns the commits message in the output. All commits must have
been made by `Alice Author` or `PY C` to be found.
"""
commits = []
save = False
cnt = 0
for row in output.split("\n"):
if row.strip() in ["Alice Author", "Alice Äuthòr", "PY C"]:
save = True
if save:
cnt += 1
if cnt == 7:
commits.append(row.strip())
save = False
cnt = 0
return commits | 0514d0c3279c7e14810403412284e6d07eb03d16 | 9,497 |
def _make_specific_identifier(param_name, identifier):
# type: (str, str) -> str
"""
Only adds an underscore between the parameters.
"""
return "{}_{}".format(param_name, identifier) | 5268d366f04c616d8180e7cc4167030efdec9070 | 9,498 |
def ReadTag(buffer, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
"""
start = pos
    while buffer[pos] & 0x80:  # assumes a bytes-like buffer; Python 3 indexing yields ints
pos += 1
pos += 1
return (buffer[start:pos], pos) | 91c0a1e86816066768b15a3a2e7b8a8468900661 | 9,499 |
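# Illustration with a two-byte varint tag: 0x90 has the continuation bit (0x80)
# set, 0x01 does not, so the tag spans two bytes.
tag_bytes, new_pos = ReadTag(b'\x90\x01\x08', 0)
print(tag_bytes, new_pos)  # b'\x90\x01' 2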
from typing import Union
import re
def get_brackets(title: str) -> Union[str, None]:
"""
Return the substring of the first instance of bracketed text.
"""
regex_brackets = re.search(r"\[(.*?)\]", title)
if regex_brackets is None:
return None
else:
return regex_brackets.group() | f1d985cf79ae881e8aca168c065d40e640a9c1ff | 9,500 |
def _dof(mean_tau, sd_tau2):
"""
Returns the degrees of freedom for the chi-2 distribution from the mean and
variance of the uncertainty model, as reported in equation 5.5 of Al Atik
(2015)
"""
return (2.0 * mean_tau ** 4.) / (sd_tau2 ** 2.) | 9a4a395c9aea7b965a477550c7f254bf744cadc5 | 9,501 |
import sys
def hastty():
""" Whether (it looks like) a tty is available.
"""
try:
return sys.stdin and sys.stdin.isatty()
except Exception: # pragma: no cover
return False | 2baebe972f0c58f58d90cab133e4e5d774b2cb25 | 9,503 |
def cria_coordenada(linha,coluna):
"""int x int -> tuple
Esta funcao recebe duas coordenadas do tipo inteiro, a primeira correspondente a linha e a segunda a coluna e devolve um elemento do tipo coordenada, ou seja, o tuplo (linha,coluna)"""
if 1<=linha<=4 and 1<=coluna<=4 and isinstance(linha,int) and isinstance(coluna,int):
return (linha,coluna)
raise ValueError('cria_coordenada: argumentos invalidos') | b2d216a11706e4234cf9943525cfca72118a98f8 | 9,504 |
from typing import List
import json
def compare_apache_profiles(baseline_file, test_file, threshold=0.5) -> List:
"""
Compare baseline Apache access log profile against test profile.
:param baseline_file: file containing baseline profile
:param test_file: file containing test profile
:param threshold: percent (in decimal format) difference test profile must be from baseline to be considered malicious
:return: list of results
"""
results = []
with open(baseline_file, 'r') as f:
baseline_profile = json.load(f)
with open(test_file, 'r') as f:
test_profile = json.load(f)
baseline_all_avg_per_min = baseline_profile['requests']['all']['avg_per_min']
test_all_avg_per_min = test_profile['requests']['all']['avg_per_min']
baseline_limit_avg_per_min = baseline_all_avg_per_min * (1 + threshold)
if test_all_avg_per_min > baseline_limit_avg_per_min:
result = {'category': 'Potential DoS Attack',
'details': {'baseline_profile_avg_per_min': baseline_all_avg_per_min,
'test_profile_avg_per_min': test_all_avg_per_min,
'baseline_profile_upper_limit': baseline_limit_avg_per_min,
'baseline_profile_threshold_percent': threshold * 100}}
results.append(result)
return results | 0b94ad318fcb61be559767cbdba51def1b6db61f | 9,505 |
def cyclic_sort(nums):
"""
0 1 2 3 4 5
[1, 2, 3, 4, 5]
^
[1, 2, 3, 4, 5, 6]
^
"""
for i in range(0, len(nums)):
while nums[i] != i + 1:
j = nums[i] - 1
nums[i], nums[j] = nums[j], nums[i]
return nums | fc9f7061121d4509b260e03df4edeee63c0eb6b9 | 9,506 |
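# Example usage; the algorithm assumes nums is a permutation of 1..n, and sorts
# it in O(n) by swapping each value into its index.
print(cyclic_sort([3, 1, 5, 4, 2]))  # [1, 2, 3, 4, 5]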
def uuid_mole_index(moles, mole_uuid):
"""Return the index of the first mole with the specified uuid."""
for i, mole in enumerate(moles):
if mole["uuid"] == mole_uuid:
return i
return None | 851877da59f6a6dd8c06b9bb2d462f6239d512e7 | 9,507 |
def f_read_raw_mat_length(filename, data_format='f4'):
"""f_read_raw_mat_length(filename,data_format='float',end='l')
Read length of data
"""
f = open(filename,'rb')
tmp = f.seek(0, 2)
bytes_num = f.tell()
f.close()
if data_format == 'f4':
return int(bytes_num / 4)
else:
return bytes_num | 07ad3d0c425fd01f3985c4f3daf5a762bf59f76c | 9,508 |
def _analyze_gens(gens):
"""Support for passing generators as `*gens` and `[gens]`. """
if len(gens) == 1 and hasattr(gens[0], '__iter__'):
return tuple(gens[0])
else:
return tuple(gens) | f32eb1faf7f1aae2f8d4eff72609732db8655899 | 9,509 |
def preparation_time_in_minutes(number_of_layers: int) -> int:
"""
Args:
number_of_layers (int): the number of layers you want to add to the lasagna.
Returns:
        int: how many minutes you would spend making them.
"""
return 2 * number_of_layers | 90b6e6f518acbba514af5d3ab77dff2e5735535a | 9,510 |
import os
def normpath(path):
"""Returns the fully resolved absolute path to a file.
This function will return the absolute path to a file as seen from the
directory the script was called from.
"""
if path and path[0] == '/':
return os.path.normpath(path)
return os.path.normpath(os.path.join(os.environ['ORIG_CWD'], path)) | b07e26e7a7b03abf72024c0f1165658b255a4114 | 9,511 |
def _encode_entity_id(entity_type, entity_id):
"""See xidCgiDelimiter = "?xid=" """
return "xid=%s.%s" % (entity_type, entity_id) | fc6e36a4027f7a63e05e8dfab7f40681b3e26585 | 9,513 |
def find_average_record(sen_set, voting_dict):
"""
Input: a set of last names, a voting dictionary
Output: a vector containing the average components of the voting records
of the senators in the input set
Example:
>>> voting_dict = {'Klein': [-1,0,1], 'Fox-Epstein': [-1,-1,-1], 'Ravella': [0,0,1]}
>>> find_average_record({'Fox-Epstein','Ravella'}, voting_dict)
[-0.5, -0.5, 0.0]
"""
    avg_record = list()
    for key, val in voting_dict.items():
        if key in sen_set:
            if avg_record:
                for i in range(len(val)):
                    avg_record[i] += val[i]
            else:
                avg_record = list(val)  # copy, so the entry in voting_dict is not mutated
    return [a / len(sen_set) for a in avg_record] | ef0c0aeb5a75c0335de57ae6cca1c013fe59b8b0 | 9,514 |
def cache(self, key, fn):
"""
:param self: the object that will hold the cached value
:param key: the key/attribute for the cached value
:param fn: the function returning the value to be cached
:return: the value returned by fn on first call
"""
if not hasattr(self, key):
value = fn(self)
setattr(self, key, value)
return getattr(self, key) | 4d7c67d2e3892e4b2db57625075c4515ff89da99 | 9,517 |
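# A small sketch of the cache helper: the lambda runs only on the first call,
# after which the value is served from the instance attribute.
class Report:
    def total(self):
        return cache(self, '_total', lambda self: sum(range(1000)))

r = Report()
print(r.total())  # computed and stored on first call
print(r.total())  # read back from r._total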
def enum_class_getitem(context, builder, sig, args):
"""
Return an enum member by index name.
"""
enum_cls_typ, idx = sig.args
member = enum_cls_typ.instance_class[idx.literal_value]
return context.get_constant_generic(builder, enum_cls_typ.dtype,
member.value) | 554aee101408e072050134e2f6990f79780d516e | 9,520 |
def get_commit_link(repo_name: str, commit_sha: str) -> str:
"""
Build a commit URL for manual browser access using full repository name and commit SHA1
    :param repo_name: full repository name (i.e. `{username}/{reponame}`)
:param commit_sha: 40 byte SHA1 for a commit
:return: A commit URL
"""
return "https://github.com/{}/commit/{}".format(repo_name, commit_sha) | a1cf8d30f3e5c5ce3c5fefc719cca7c1c4d92331 | 9,524 |
import os
def get_file_list(csv_dir, entity):
"""Get list of CSV files for a given entity"""
csv_file_list = os.listdir(csv_dir)
file_list = []
for file in csv_file_list:
if entity in file:
file_list.append(file)
return file_list | 98528b02e58c8d3eb9afddd27b3cd883a7453d72 | 9,525 |
def msi_file_finder(pth):
"""
    Return True if pth represents an msi file.
"""
return bool(pth.fname.endswith('.msi.txt')) | 35c0d0dac72d44cbdd6f87b280a70917d264db0c | 9,526 |
def get_accuracy(y_true, y_predicted):
"""Compute the accuracy for given predicted class labels.
Parameters
----------
y_true: numpy array
The true class labels of shape=(number_points,).
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
Returns
---------
accuracy: float
The accuracy of the predictions.
"""
correct_answers = (y_predicted == y_true).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy | c88a6b51021c9e01133852fa84c6f434043391a5 | 9,532 |
def get_connections(network):
"""
Function creates a dictionary with agent id's as a key and adjacent nodes as a value.
:param network: object of graph
:type network: networkx.classes.graph.Graph
"""
return {agent_id: list(adj_agents.keys()) for (agent_id, adj_agents) in network.adjacency()} | fa614b599c577de5c2554818366159e918c1335b | 9,533 |
def meta_name(file_name):
"""Generate the name of the meta file"""
return "{}.json".format(file_name) | fc168d19c145c4f93fb8d92e3c0daa109aad31b6 | 9,534 |
import os
def load_darshan_header():
"""
    Returns a CFFI compatible header for darshan-util as a string.
:return: String with a CFFI compatible header for darshan-util.
"""
curdir, curfile = os.path.split(__file__)
filepath = os.path.join(curdir, 'data', 'darshan-api.h')
# filepath = os.path.join(curdir, 'data', 'generated.h')
with open(filepath, 'r') as f:
try:
return f.read()
except IOError:
raise RuntimeError('Failed to read API definition header for darshan.') | c136debfd59bea3925bf63bd1a1221d6316bb093 | 9,535 |
def concat_body_paragraphs(body_candidates):
"""
Concatenate paragraphs constituting the question body.
:param body_candidates:
:return:
"""
return ' '.join(' '.join(body_candidates).split()) | a0faaa0ae0be0cda007c2af1f6e47f3b745862b3 | 9,536 |
import os
import json
def read_annotation_file(annotation_file_path):
"""
Read all the annotation information from annotated json file
:param annotation_file_path: dir which contains all annotated json file
:return: final_annotation: a dictionary contains all information
"""
final_annotation_dict = {}
files = os.listdir(annotation_file_path)
for file_name in files:
if file_name.endswith("annotation.json"):
with open(os.path.join(annotation_file_path,file_name))as f:
data = json.load(f)
print(len(data))
final_annotation_dict = dict(final_annotation_dict.items() + data.items())
return final_annotation_dict | 201082f107291289d4e2133534b67cf8b08cab34 | 9,537 |
import re
def matchNumbersOnly(value):
"""Match strings with numbers and '.' only."""
    return bool(re.match('^[0-9.]+$', value)) | 04d782431b79e78f93269c662c747d1f7348c9ec | 9,538 |
import numpy
def metric_weekday(weekday_1, weekday_2):
"""Calculate the distance between two weekdays as a positive integer.
:param weekday_1: A positive integer between 0 and 6
:param weekday_2: A positive integer between 0 and 6
:returns: The absolute valued distance between two weekdays
:rtype: int
"""
return numpy.absolute(weekday_1 - weekday_2) | 71a0a5d5a5166458597e0007063a4c19e8f195a9 | 9,540 |
def vax_pacient(ime, priimek, cepivo):
"""Funkcija v bazi popravi podatek o cepljenu dolocenega pacienta. Ce osebe ni v bolnici, je nemoremo cepiti. Pravice ima samo zdravnik."""
return None | e9628a857fee3918b9ad3c6ed4293933c970e30c | 9,541 |
def get_sample_mean(values: list) -> float:
"""
Calculates the sample mean (overline x) of the elements in a list
:param values: list of values
:return: sample mean
"""
sample_mean = sum(values) / len(values)
return sample_mean | 182befe514f406340f0b1f37e892ad1add1f0ed2 | 9,543 |
def polyfill_integers(generator, low, high=None, size=None, dtype="int32",
endpoint=False):
"""Sample integers from a generator in different numpy versions.
Parameters
----------
generator : numpy.random.Generator or numpy.random.RandomState
The generator to sample from. If it is a ``RandomState``,
:func:`numpy.random.RandomState.randint` will be called,
otherwise :func:`numpy.random.Generator.integers`.
low : int or array-like of ints
See :func:`numpy.random.Generator.integers`.
high : int or array-like of ints, optional
See :func:`numpy.random.Generator.integers`.
size : int or tuple of ints, optional
See :func:`numpy.random.Generator.integers`.
dtype : {str, dtype}, optional
See :func:`numpy.random.Generator.integers`.
endpoint : bool, optional
See :func:`numpy.random.Generator.integers`.
Returns
-------
int or ndarray of ints
See :func:`numpy.random.Generator.integers`.
"""
if hasattr(generator, "randint"):
if endpoint:
if high is None:
high = low + 1
low = 0
else:
high = high + 1
return generator.randint(low=low, high=high, size=size, dtype=dtype)
return generator.integers(low=low, high=high, size=size, dtype=dtype,
endpoint=endpoint) | b4061e8ec7cb9927bbe4fcce1c847aecdc10052b | 9,544 |
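# Illustrative usage with both generator flavours; each draws three integers
# from the inclusive range [0, 6].
import numpy

legacy = numpy.random.RandomState(0)  # has .randint -> shifted randint path
modern = numpy.random.default_rng(0)  # Generator -> .integers path
print(polyfill_integers(legacy, 6, endpoint=True, size=3))
print(polyfill_integers(modern, 6, endpoint=True, size=3))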
def _get_prediction(outputs):
"""Checks if multiple outputs were provided, and selects"""
if isinstance(outputs, (list, tuple)):
return outputs[0]
return outputs | f6565614c3d43ca15c1b52025469561fe61ae5ab | 9,546 |
import difflib
import pprint
def pprint_diff(first, second, first_name='first', second_name='second'):
"""Compare the pprint representation of two objects and yield diff lines."""
return difflib.unified_diff(
pprint.pformat(first).splitlines(),
pprint.pformat(second).splitlines(),
fromfile=first_name, tofile=second_name, lineterm='') | 5c88916b47cfa970d6ab15caa650540d2dab3c3b | 9,548 |
def inc(i):
"""Increments number.
Simple types like int, str are passed to function by value (value is copied to new memory slot).
Class instances and e.g. lists are passed by reference."""
i += 1
return i | 2959f0a2d57891821a159a4a51c1b146c0cb0395 | 9,549 |
import re
def extract_video_id(string):
"""Extract what looks like a YouTube video id from a string"""
if len(string) == 11:
return string
matches = re.search("v?=(.{11})", string)
if matches is not None:
return matches.group(1)
matches = re.search(r"youtu\.be/(.{11})", string)
if matches is not None:
return matches.group(1)
return None | 30b680f48b6e9101492e29ff76f5bed1703a37d2 | 9,550 |
def ascii(value):
"""Return the string of value
:param mixed value: The value to return
:rtype: str
"""
return '{0}'.format(value) | 11cf1af6567c53a5583d8bdcb6da2431f6b79ba9 | 9,551 |
def find_closest_date(date, list_of_dates):
"""
This is a helper function that works on Python datetimes. It returns the closest date value,
and the timedelta from the provided date.
"""
match = min(list_of_dates, key = lambda x: abs(x - date))
delta = match - date
return match, delta | 57f9ecbf764539fcea495057ba4b908df700b8db | 9,552 |
def replace_str_index(text, index=0, replacement=''):
"""
Utility function
:param text:
:param index:
:param replacement:
:return:
"""
return '%s%s%s' % (text[:index], replacement, text[index + 1:]) | a09aa93b6bb567731e77d5b7b91504cf88142fb5 | 9,553 |
from collections import Counter
def solve_part_one(id_list: list) -> int:
"""
Calculates the checksum for a list of IDs
:param id_list: Python list containing a list of ID strings
:return: Checksum as defined by the problem
"""
twos, threes = 0, 0
for id in id_list:
id_counter = Counter(id)
if 2 in id_counter.values():
twos += 1
if 3 in id_counter.values():
threes += 1
checksum = twos * threes
return checksum | a4fe4d7b8205492e132175199121f3ed5a58b7b9 | 9,554 |
def is_prerelease(version_str):
"""
Checks if the given version_str represents a prerelease version.
"""
    return any(c.isalpha() for c in version_str) | c6454bb350b2c4e55dbc271f23253aa2e3472802 | 9,556 |
import re
def class_name_to_resource_name(class_name: str) -> str:
"""Converts a camel case class name to a resource name with spaces.
>>> class_name_to_resource_name('FooBarObject')
'Foo Bar Object'
:param class_name: The name to convert.
:returns: The resource name.
"""
s = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', class_name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s) | b0ac6692c441b0f4cfca4a9b680dc612552795f4 | 9,557 |
def datetime_to_date(dt, org):
"""
Convert a datetime to a date using the given org's timezone
"""
return dt.astimezone(org.timezone).date() | 92565cf65b0c485e6f8649a9a47619f516d0fd35 | 9,558 |
import requests
def current_server_id() -> str:
"""Helper to get the current server id"""
rsp = requests.get("http://localhost:10000/api/servers")
if rsp.status_code != 200:
raise ValueError("Failed to fetch current server id")
return rsp.json()['current_server'] | 8ec4efcc0eeea0b5b62ce5446aece5abdf6fbd66 | 9,560 |
def linkcode_resolve(domain, info):
"""Oddly this function is required for the linkcode extension."""
if domain != "py":
return None
if not info["module"]:
return None
filename = info["module"].replace(".", "/")
return "https://github.com/farisachugthai/dynamic_ipython/%s.py" % filename | 6f4517a977b468847d1888eafeb5c3eb8698a160 | 9,561 |
import os
def get_name(main_title, folder, sep="_"):
""" Obtains the next valid name basd on given filename and count
Args:
main_title (str): Main portion to define naming scheme.
folder (str): Folder to be stored in. No '/' or '\' required.
        sep (str): A separator between main_title and count.
Returns:
(str) A valid filename within the given folder.
"""
count = 0
if not os.path.isdir(folder):
os.mkdir(folder)
while os.path.isfile(folder + "/" + main_title + sep + str(count) + ".bmp"):
count += 1
return main_title + sep + str(count) | e9519ef1626282e1a36e234b3cc57bed06b4ee7d | 9,562 |
def get_class_name(obj):
"""
Returns the name of the class of the given object
:param obj: the object whose class is to be determined
:return: the name of the class as a string
"""
return obj.__class__.__name__ | 93be3acf545376dc1df43684da75a59e967d2b2f | 9,565 |
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
import os
import torch
def data_parallel_decorator(ModuleClass):
"""
A decorator for forward function to use multiGPU training
while maintaining network layout (and thus saved parameter
names).
Note: the way pytorch replicate a module only guarantees
that pytorch internal parameters, buffers and modules are
properly copied and set over devices. Any other class attributes
are copied in a shallow copy manner - any list, dictionary, etc.
that uses class attributes may not be set properly, and
will point to the original copy instead of the copied one
on the new GPU.
"""
assert not hasattr(ModuleClass, '_forward_worker'), 'data_parallel_decorator cannot be used on a class twice'
ModuleClass._forward_worker = ModuleClass.forward
def wrapped(self, *inputs, **module_kwargs):
if (not hasattr(self, '_is_replica')) and inputs[0][0].is_cuda:
inputs=inputs[0]
device_count = torch.cuda.device_count()
if inputs[0].shape[0] % device_count != 0:
cuda_visible_devices = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ else ''
raise ValueError('batch size (%d) must be divisible by the number of GPUs (%d) used\n CUDA_VISIBLE_DEVICES: %s' % (inputs[0].shape[0], device_count, cuda_visible_devices))
if device_count > 1:
# modified from pytorch (torch.nn.parallel.DataParallel)
device_ids = list(range(device_count))
output_device = device_ids[0]
inputs, kwargs = scatter_kwargs(inputs, module_kwargs, device_ids)
replicas = replicate(self, device_ids[:len(inputs)])
# add a _is_replica flag to avoid infinite loop
# from recursively calling parallel_apply
for replica in replicas:
replica._is_replica = True
outputs = parallel_apply(replicas, inputs, kwargs)
return gather(outputs, output_device)
return self._forward_worker(inputs)
ModuleClass.forward = wrapped
return ModuleClass | 283802699af0d8850008bcfa9a3ec010862ce739 | 9,566 |
def dataset(client):
"""Create a dataset."""
with client.with_dataset(name='dataset') as dataset:
dataset.authors = {
'name': 'me',
'email': '[email protected]',
}
return dataset | 4f77cd30c58ad74e48280be193f4dd30b0fb5584 | 9,567 |
def strategy_best(cookies, cps, history, time_left, build_info):
"""
The best strategy that you are able to implement.
"""
build_items_list = build_info.build_items()
max_cps_div_cost_item = None
for idx in range(len(build_items_list)):
if build_info.get_cost(build_items_list[idx]) <= cookies + cps * time_left:
            if max_cps_div_cost_item is None:
max_cps_div_cost_item = build_items_list[idx]
else:
cps_div_cost_temp1 = build_info.get_cps(build_items_list[idx])/build_info.get_cost(build_items_list[idx])
cps_div_cost_temp2 = build_info.get_cps(max_cps_div_cost_item)/build_info.get_cost(max_cps_div_cost_item)
if cps_div_cost_temp1 > cps_div_cost_temp2:
max_cps_div_cost_item = build_items_list[idx]
return max_cps_div_cost_item | 77999615f6cde7b7c18c2234e62553db759633be | 9,568 |
import grp
def is_existing_group(group_name):
"""Asserts the group exists on the host.
Returns:
bool, True if group exists on the box, False otherwise
"""
try:
grp.getgrnam(group_name)
return True
except KeyError:
return False | 8831281684107d9f4c4511cb4cf3493494269650 | 9,570 |
import re
def re_search(text: str, expression: str) -> bool:
"""
Test regex match. This method is comparatively
very slow and should be avoided where possible.
"""
return re.search(expression, text) is not None | e86daa552a3f769f46bca794b9d2a4587cbed11c | 9,571 |
import os
import uuid
def make_unique_filename(initial_filename):
"""Add a random part to a filename so it's unique. File extension is preserved."""
    before_ext, ext = os.path.splitext(initial_filename)
    ext = ext.replace('.', '')  # Remove the dot, if already there.
    random_part = uuid.uuid4()
    if not ext:
        return f"{before_ext}-{random_part}"  # avoid a trailing dot when there is no extension
    return f"{before_ext}-{random_part}.{ext}" | 6456af3ede2404aba5889d5d876437055026c7c1 | 9,572 |
def median(lst):
"""
Get the median value of a list
Arguments:
lst (list) -- list of ints or floats
Returns:
(int or float) -- median value in the list
"""
n = len(lst)
if n < 1:
return None
if n % 2 == 1:
return sorted(lst)[n//2]
else:
return sum(sorted(lst)[n//2-1:n//2+1])/2.0 | b6b7eefdf63490e35e74063995cabd38f4c12089 | 9,573 |
def orop(funeval, *aa):
""" Lazy version of `or' """
for a in aa:
if funeval(a): return True
return False | f8ed7f88bbbd894cddf5d56a015ba7fe554342e4 | 9,574 |
import weakref
def weakref_props(*properties):
"""A class decorator to assign properties that hold weakrefs to objects.
This decorator will not overwrite existing attributes and methods.
Parameters
----------
properties : list of str
A list of property attributes to assign to weakrefs.
Examples
--------
>>> @weakref_props('a', 'b')
... class Test(object):
... pass
>>> test = Test()
>>> test2 = Test()
>>> test.a = test2
>>> test.b = Test()
>>> test.c = 1
>>> sorted(test.__dict__.keys())
['_a', '_b', 'c']
>>> test.a == test2
True
>>> test.b is None # Dead link
True
>>> test.c == 1
True
>>> del test.a
>>> test.a is None
True
"""
def func(cls):
def property_func(attr):
def _set_attr(self, value):
name = '_' + attr if not attr.startswith('_') else attr
setattr(self, name, weakref.ref(value))
def _get_attr(self):
name = '_' + attr if not attr.startswith('_') else attr
value = getattr(self, name, None)
return value() if value is not None else None
def _del_attr(self):
name = '_' + attr if not attr.startswith('_') else attr
delattr(self, name)
docstr = "A weakref to the object stored in '{}'".format(attr)
return _get_attr, _set_attr, _del_attr, docstr
for prop in properties:
if hasattr(cls, prop):
continue
fget, fset, fdel, docstr = property_func(prop)
setattr(cls, prop, property(fget=fget, fset=fset, fdel=fdel,
doc=docstr))
return cls
return func | 4ae42fc4e2dccbb7193a377e122f96c4f7d5112d | 9,575 |
from pathlib import Path
def lglob(self: Path, pattern="*"):
"""Like Path.glob, but returns a list rather than a generator"""
return list(self.glob(pattern)) | eba1b9d6300a1e1aca5c47bedd6ac456430e4d89 | 9,576 |
def unflatten(flattened: dict) -> dict:
"""
Unflattens a dictionary
:param flattened: Flattened dictionary
:return: Unflattened dictionary
"""
unflattened = {}
for key, value in flattened.items():
parts = key.split(".")
d = unflattened
for part in parts[:-1]:
if part not in d:
d[part] = dict()
d = d[part]
d[parts[-1]] = value
return unflattened | 1a54f4289cb77e5d6355f91e6a26477bce420c6c | 9,577 |
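# Example: dotted keys are expanded into nested dictionaries.
print(unflatten({"a.b": 1, "a.c": 2, "d": 3}))
# {'a': {'b': 1, 'c': 2}, 'd': 3}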
from pathlib import Path
import requests
def upload_notebook(notebook: Path, enable_annotations: bool, enable_discovery: bool, nbss_url: str):
"""
Upload a notebook file to an nbss instance with
"""
upload_url = f"{nbss_url.rstrip('/')}/api/v1/notebook"
with open(notebook, 'rb') as f:
return requests.post(
upload_url,
data={
'enable-annotations': enable_annotations,
'enable-discovery': enable_discovery
},
files={
'notebook': f,
}
).text.strip() | 5247fe18a1a95076e7c6e35879fa438486a1ce65 | 9,578 |
def compat_assert_outcomes():
"""
Use RunResult.assert_outcomes() in a way that's consistent across pytest
versions.
For more info, on how/why this is inconsistent between pytest versions:
https://github.com/pytest-dev/pytest/issues/6505
"""
def _compat_assert_outcomes(run_result, **kwargs):
unplural = {
'errors': 'error',
'warnings': 'warning',
}
try:
run_result.assert_outcomes(**kwargs)
except TypeError:
# Unpluralize the nouns and try again.
run_result.assert_outcomes(**{
unplural.get(key, key): val
for key, val in kwargs.items()
})
return _compat_assert_outcomes | 63fe48f7fc3c8b56f296df19ce9c297169e4163a | 9,579 |
from typing import Sequence
def parse_attrs(attrs):
"""Parse an attrs sequence/dict to have tuples as keys/items."""
if isinstance(attrs, Sequence):
ret = [item.split(".") for item in attrs]
else:
ret = dict()
for key, value in attrs.items():
ret[tuple(key.split("."))] = value
return ret | 15779e14fbdb9783d91732aa3420ccb18ee6c620 | 9,580 |
def change_style(style, representer):
"""
Change the style of a particular YAML representer.
"""
def new_representer(dumper, data):
scalar = representer(dumper, data)
scalar.style = style
return scalar
return new_representer | 1365e8ed6ad8a237404aa3bc2274ab7e8137e50b | 9,581 |
def scanD(d, ck, minSupport):
"""
    Compute the support of the candidate itemsets Ck over dataset D,
    and return the itemsets whose support is at least the minimum support minSupport
Args:
        d           the dataset
        ck          list of candidate itemsets
        minSupport  minimum support threshold
Returns:
        retList      itemsets with support >= minSupport
        supportData  support of every candidate itemset
"""
    # ssCnt temporarily stores the occurrence counts of candidates in ck, e.g. a->10, b->5, c->8
ssCnt = {}
for tid in d:
for can in ck:
if can.issubset(tid):
ssCnt[can] = ssCnt.get(can, 0) + 1
numItems = (float(len(d)))
retList = []
supportData = {}
for key in ssCnt:
        # support = occurrences of the candidate (key) / total number of transactions
support = ssCnt[key]/numItems
        # insert at the front of retList; only itemsets meeting the minimum support are kept
if support >= minSupport:
retList.insert(0, key)
        # record every candidate (key) and its support
supportData[key] = support
return retList, supportData | a37fccca461774777bf082ca1b1e2bf3528cc220 | 9,582 |
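# Toy Apriori-style usage; candidates are frozensets so they can serve as dict keys.
transactions = [{1, 3, 4}, {2, 3, 5}, {1, 2, 3, 5}, {2, 5}]
candidates = [frozenset([1]), frozenset([2]), frozenset([3]), frozenset([5])]
ret_list, support_data = scanD(transactions, candidates, 0.5)
print(ret_list)      # all four singletons qualify at minSupport = 0.5
print(support_data)  # e.g. frozenset({1}) -> 0.5, frozenset({2}) -> 0.75, ...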
import codecs
def _readfile(fname, strip="\n"):
"""Shortcut for reading a text file."""
with codecs.open(fname, 'r', 'UTF8') as fp:
content = fp.read()
return content.strip(strip) if strip else content | 5708a91ed7ceb8743bf0e6a40962c80e74996368 | 9,583 |
def ignore(x):
"""Method to indicate bypassing property validation"""
return x | cc9ae3c1e15fab3e7f55190278356c11d87d9744 | 9,585 |
def eval_request_bool(val, default=False):
"""
Evaluates the boolean value of a request parameter.
:param val: the value to check
:param default: bool to return by default
:return: Boolean
"""
assert isinstance(default, bool)
if val is not None:
val = val.lower()
        if val in ['false', '0', 'n', 'no', 'off']:
            return False
        if val in ['true', '1', 'y', 'yes', 'on']:
            return True
return default | 99909909846f3194abc8c83ad84411c3ccd1245c | 9,586 |
def get_category_index_from_categories(categories, category):
"""
Gets the index of a category from the categories dictionary. If the category
doesn't exist, it creates a new entry for that category name and returns
the new index number.
"""
if category not in categories:
categories[category] = len(categories)
return categories[category] | 94ce8e2926c1de55383d5fd11e531d9c81792f9c | 9,589 |
def create_repas_noel_column(X):
"""Crée la variable pour le repas de Noël"""
X["repas_noel"] = X["repas_noel"] * X["effectif"]
return X | adfd6d07007d8e0f30903e16ed43f37dd55d92a2 | 9,590 |
def intervalLength(aa, wrapAt=360.):
"""Returns the length of an interval."""
if wrapAt is None:
return (aa[1] - aa[0])
else:
return (aa[1] - aa[0]) % wrapAt | dbceac2d1606d1bedf7c12b4114c17b70db78a86 | 9,591 |