content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def SplitRecursively(x, num_splits, axis=-1):
"""Splits Tensors in 'x' recursively.
Args:
x: a Tensor, or a list or NestedMap containing Tensors to split.
num_splits: number of splits per Tensor.
axis: the split axis.
Returns:
A list of split values of length 'num_splits'.
- If 'x' is a Tensor, a list of split Tensors.
- If 'x' is a list, a list of lists, where each sublist has the same length
as 'x' and the k'th element in each sublist corresponds to a split of the
k'th element from 'x'.
- If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field
corresponds to a split from the same field of 'x'.
"""
if isinstance(x, tf.Tensor):
return tf.split(x, num_splits, axis=axis)
elif isinstance(x, list):
splits = [SplitRecursively(element, num_splits, axis) for element in x]
splits = list(zip(*splits))
return [list(t) for t in splits]
elif isinstance(x, NestedMap):
results = [NestedMap() for _ in range(num_splits)]
for key, val in x.items():
val_splits = SplitRecursively(val, num_splits, axis)
for i in range(num_splits):
results[i][key] = val_splits[i]
return results
else:
raise TypeError('Unexpected type for SplitRecursively: %s' % type(x)) | 75b7f1bae3af09dc27147b234a1a17ee8c6c988a | 21,693 |
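A minimal usage sketch for the snippet above, assuming TensorFlow 2.x and that SplitRecursively is in scope; the lingvo-specific NestedMap branch is not exercised here.
# Hedged usage sketch (assumes TensorFlow 2.x; NestedMap case omitted).
import tensorflow as tf

x = tf.reshape(tf.range(12), [2, 6])        # one Tensor of shape (2, 6)
parts = SplitRecursively(x, num_splits=3)   # three Tensors of shape (2, 2)
assert len(parts) == 3 and parts[0].shape == (2, 2)

pair = [x, x * 10]                          # a list of Tensors
split_pair = SplitRecursively(pair, num_splits=2)
# split_pair[k] is the list of the k'th splits of each element of 'pair'.
assert len(split_pair) == 2 and len(split_pair[0]) == 2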
import time
def generate_entry(request, properties, data, mtime=None):
"""
Takes a properties dict and a data string and generates a generic
entry using the data you provided.
:param request: the Request object
:param properties: the dict of properties for the entry
:param data: the data content for the entry
:param mtime: the mtime tuple (as given by ``time.localtime()``).
if you pass in None, then we'll use localtime.
"""
entry = EntryBase(request)
entry.update(properties)
entry.set_data(data)
if mtime:
entry.set_time(mtime)
else:
entry.set_time(time.localtime())
return entry | 3fcbaa919a2abb3d35e5d096b65b8d3ddf020b51 | 21,694 |
def info():
"""Refresh teh client session using the refresh token"""
global client
client = client.refresh_session(app_id, app_secret)
return "Refreshed" | 0bd7070d020a40af17a105663e8cfca6b7bbc800 | 21,695 |
def weight_point_in_circle(
point: tuple,
center: tuple,
radius: int,
corner_threshold: float = 1.5
):
"""
Function to decide whether a certain grid coordinate should be a full, half or empty tile.
Arguments:
point (tuple): x, y of the point to be tested
center (tuple): x, y of the origin (center) point
radius (int): radius of certainly empty tiles, does not include half tiles
corner_threshold (float): threshold that decides if the tile should be a half tile instead of empty
Returns:
int: the type of the tested tile
0 if empty tile
1 if full tile
2 if half tile
"""
diff_x, diff_y = map(lambda x, y: abs(x - y), center, point) # subtract point from center then abs for both x and y
if (diff_y > radius) or (diff_x > radius):
return 0 # eliminate any obviously out of bounds tiles
# precalculate pythagoras distance squared
dist_squared = (diff_x * diff_x) + (diff_y * diff_y)
# precalculate radius squared
radius_squared = radius * radius
# precalculate rounded distance
rounded_distance = round(dist_squared)
if rounded_distance < radius_squared: # distance within radius
return 1 # full tile
elif rounded_distance < radius_squared * corner_threshold and diff_x < radius: # distance on edge
return 2 # half tile
# outside of any thresholds
return 0 | db0da5e101184975385fb07e7b22c5e8a6d4fd47 | 21,696 |
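A quick sanity check for the tile-weighting logic above (assumes the function is in scope): with center (0, 0) and radius 3, interior points map to full tiles, rim points to half tiles, and distant points stay empty.
assert weight_point_in_circle((0, 2), (0, 0), 3) == 1  # inside the radius -> full tile
assert weight_point_in_circle((0, 3), (0, 0), 3) == 2  # on the rim -> half tile
assert weight_point_in_circle((0, 4), (0, 0), 3) == 0  # beyond the radius -> empty tile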
def encipher_shift(plaintext, plain_vocab, shift):
"""Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of shift, shift to the right if shift is positive.
Returns:
ciphertext (list of Strings): encrypted plain text.
"""
ciphertext = []
cipher = ShiftEncryptionLayer(plain_vocab, shift)
for _, sentence in enumerate(plaintext):
cipher_sentence = []
for _, character in enumerate(sentence):
encrypted_char = cipher.encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext | 4bdd9d00523aa546ca697e45f31db6739ad71723 | 21,697 |
def esmf_interp_points(ds_in, locs_lon, locs_lat, lon_field_name='lon',
lat_field_name='lat'):
"""Use ESMF toolbox to interpolate grid at points."""
# generate grid object
grid = esmf_create_grid(ds_in[lon_field_name].values.astype(np.float),
ds_in[lat_field_name].values.astype(np.float))
# generate location stream object
locstream = esmf_create_locstream_spherical(locs_lon.values.astype(np.float),
locs_lat.values.astype(np.float))
# generate regridding object
srcfield = ESMF.Field(grid, name='srcfield')
dstfield = ESMF.Field(locstream, name='dstfield')
regrid = ESMF.Regrid(srcfield, dstfield,
regrid_method=ESMF.RegridMethod.BILINEAR,
unmapped_action=ESMF.UnmappedAction.ERROR)
# construct output dataset
coords = {c: locs_lon[c] for c in locs_lon.coords}
dims_loc = locs_lon.dims
nlocs = len(locs_lon)
ds_out = xr.Dataset(coords=coords, attrs=ds_in.attrs)
for name, da_in in ds_in.data_vars.items():
# get the dimensions of the input dataset; check if it's spatial
dims_in = da_in.dims
if lon_field_name not in dims_in or lat_field_name not in dims_in:
continue
# get the dimension/shape of output
non_lateral_dims = dims_in[:-2]
dims_out = non_lateral_dims + dims_loc
shape_out = da_in.shape[:-2] + (nlocs,)
# create output dataset
da_out = xr.DataArray((np.ones(shape_out)*np.nan).astype(da_in.dtype),
name=name,
dims=dims_out,
attrs=da_in.attrs,
coords={c: da_in.coords[c] for c in da_in.coords
if c in non_lateral_dims})
dstfield.data[...] = np.nan
if len(non_lateral_dims) > 0:
da_in_stack = da_in.stack(non_lateral_dims=non_lateral_dims)
da_out_stack = xr.full_like(da_out, fill_value=np.nan).stack(non_lateral_dims=non_lateral_dims)
for i in range(da_in_stack.shape[-1]):
srcfield.data[...] = da_in_stack.data[:, :, i].T
dstfield = regrid(srcfield, dstfield, zero_region=ESMF.Region.SELECT)
da_out_stack.data[:, i] = dstfield.data
da_out.data = da_out_stack.unstack('non_lateral_dims').transpose(*dims_out).data
else:
srcfield.data[...] = da_in.data[:, :].T
dstfield = regrid(srcfield, dstfield, zero_region=ESMF.Region.SELECT)
da_out.data = dstfield.data
ds_out[name] = da_out
return ds_out | b90322b852c2523a3265c9e0877675dc59717ec9 | 21,698 |
def update_loan_record(id_: int, loan_record: LoanRecord) -> bool:
"""Update a loan record from the database
Args:
id_: loan record id which wants to be modified.
loan_record: new information for updating."""
updated_data = {key: value for key, value in loan_record.items() if value is not None} # type: ignore
with session_scope() as session:
result = session.query(LoanRequest).filter(LoanRequest.id == id_).update(updated_data)
session.commit()
return bool(result) | 933d6551cf7c719bc98a4c3b37392ceba3f9e3f4 | 21,699 |
def check_results(results):
"""Examines a list of individual check results and returns an overall
result for all checks combined.
"""
if CheckResults.SCALE_UP in results:
return CheckResults.SCALE_UP
if all(r == CheckResults.SCALE_DOWN for r in results):
return CheckResults.SCALE_DOWN
return CheckResults.DONT_SCALE | a7a18caca42c9a6f555110418b96b5cc6b9d203c | 21,700 |
def edit_route(link_id):
"""edit link"""
link = dynamo.tables[TABLE_NAME].get_item(Key={'id': link_id})['Item']
form = LinkForm(link=link['link'], tags=','.join(link['tags']))
if form.validate_on_submit():
link, tags = form.parsed_data()
dynamo.tables[TABLE_NAME].update_item(
Key={'id': link_id},
UpdateExpression='set link = :link, tags = :tags',
ExpressionAttributeValues={':link': link, ':tags': tags},
ReturnValues='UPDATED_NEW')
return redirect(url_for('app.index_route'))
return render_template('addedit.html', form=form) | b20f22345d3de2b474e04821bdfbee5391f1d493 | 21,702 |
def build_empty_pq():
"""Build empty pq."""
return PriorityQ() | a18dc1ac16ceb2475f47e1f55e0617957c1e0cad | 21,703 |
def add_flags(flags):
"""Add KZC flags"""
def f(test, way):
test.args += flags
return f | 58c6db2bb46c321ce3e3592ac8be2ee6d1feecb6 | 21,704 |
def public_jsonp_service(view):
"""
More explicitly named to call attention to the extra little p
"""
return _json_service_wrapper(JSONPResponse, view) | 76588ade3d537a102dc6ca3bf540bc32da928e30 | 21,706 |
def manage_data(xls_file: str) -> list:
"""
Convert manually annotated data from an xls file into the format to be processed
:param xls_file: path of the target xls file
:return: list of converted dictionaries
"""
f = pd.read_excel(xls_file, index=False)
cnt = 0
result = []
while cnt < len(f) - 1:
if f.text[cnt] == f.text[cnt + 1]:
temp_dic = {'text': f.text[cnt], 'spo_list': []}
while cnt < len(f) - 1 and f.text[cnt] == f.text[cnt + 1]:
temp_dic['spo_list'].append(f.iloc[cnt, 1:].to_dict())
cnt += 1
temp_dic['spo_list'].append(f.iloc[cnt, 1:].to_dict())
cnt += 1
result.append(temp_dic)
else:
temp_dic = {'text': f.text[cnt],
'spo_list': [f.iloc[cnt, 1:].to_dict()]}
result.append(temp_dic)
cnt += 1
return result | 3198013b713f50b650bc5b3542905d1e860a6871 | 21,707 |
def get_arrival_times(inter_times):
"""Convert interevent times to arrival times."""
return inter_times.cumsum() | 7197fc6315d3eaca118ca419f23aed7c0d7cd064 | 21,708 |
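Illustrative sketch for the one-liner above (assumes a numpy array or pandas Series as input): the cumulative sum turns inter-event gaps into absolute arrival times.
import numpy as np
inter_times = np.array([1.0, 2.0, 0.5])
print(get_arrival_times(inter_times))  # [1.  3.  3.5]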
def generate_custom_background(size, background_color, nb_blobs=3000,
kernel_boundaries=(50, 100)):
""" Generate a customized background to fill the shapes
Parameters:
background_color: average color of the background image
nb_blobs: number of circles to draw
kernel_boundaries: interval of the possible sizes of the kernel
"""
img = np.zeros(size, dtype=np.uint8)
img = img + get_random_color(background_color)
blobs = np.concatenate([random_state.randint(0, size[1], size=(nb_blobs, 1)),
random_state.randint(0, size[0], size=(nb_blobs, 1))],
axis=1)
for i in range(nb_blobs):
col = get_random_color(background_color)
cv.circle(img, (blobs[i][0], blobs[i][1]),
random_state.randint(20), col, -1)
kernel_size = random_state.randint(kernel_boundaries[0], kernel_boundaries[1])
cv.blur(img, (kernel_size, kernel_size), img)
return img | 782883d29cf67dbb33662fbb22b457783320101d | 21,709 |
def rotate_z(domain, nrot=4):
"""take BoxCollection and return equivalent CylinderCollection by
rotating about the second axis. thus, transform coordinates of
points like (x, z) --> (x, 0, z)."""
return rotate(domain, d=1, nrot=nrot) | ca1b197d758a18b86675a14be952065055dea05f | 21,710 |
def filter_roi(roi_data, nb_nonzero_thr):
"""Filter slices from dataset using ROI data.
This function filters slices (roi_data) where the number of non-zero voxels within the ROI slice (e.g. centerline,
SC segmentation) is inferior or equal to a given threshold (nb_nonzero_thr).
Args:
roi_data (nd.array): ROI slice.
nb_nonzero_thr (int): Threshold.
Returns:
bool: True if the slice needs to be filtered, False otherwise.
"""
# Discard slices with less nonzero voxels than nb_nonzero_thr
return not np.any(roi_data) or np.count_nonzero(roi_data) <= nb_nonzero_thr | 9e325f77436e152377bee84d7e82d3f80424f288 | 21,711 |
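A small usage sketch (assumes numpy and the function above are in scope): an all-zero ROI slice is filtered out, a dense one is kept.
import numpy as np
assert filter_roi(np.zeros((4, 4)), nb_nonzero_thr=3)      # no nonzero voxels -> filter
assert not filter_roi(np.ones((4, 4)), nb_nonzero_thr=3)   # 16 nonzero voxels -> keep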
def from_numpy(shape, dt):
"""
Upcast a (shape, dtype) tuple if possible.
>>> from_numpy((5,5), dtype('int32'))
dshape('5, 5, int32')
"""
dtype = np.dtype(dt)
if dtype.kind == 'S':
measure = String(dtype.itemsize, 'A')
elif dtype.kind == 'U':
measure = String(dtype.itemsize / 4, 'U8')
elif dtype.fields:
rec = [(a,CType.from_dtype(b[0])) for a,b in dtype.fields.items()]
measure = Record(rec)
else:
measure = CType.from_dtype(dtype)
if shape == ():
return measure
else:
return DataShape(parameters=(list(map(Fixed, shape)) + [measure]))
import optparse
def ParseArgs():
"""Parse the command line options."""
option_parser = optparse.OptionParser()
option_parser.add_option(
'--from', dest='sender', metavar='EMAIL',
help='The sender\'s email address')
option_parser.add_option(
'--to', action='append', metavar='EMAIL', dest='recipients', default=[],
help='The recipient\'s address (repeatable)')
option_parser.add_option(
'--subject', metavar='TEXT|@FILE', help='The subject of the email')
option_parser.add_option(
'--message', metavar='TEXT|@FILE', help='The body of the message')
option_parser.add_option(
'--attach', metavar='FILE', action='append', dest='attachments',
default=[], help='The path of a file to attach')
option_parser.add_option(
'--ignore-missing', action='store_true', default=False,
help='No errors on attempts to attach non-existing files')
option_parser.add_option('--server', help='The SMTP server to use')
option_parser.add_option('--password', help='The password to use')
options, _args = option_parser.parse_args()
if not options.sender:
option_parser.error('--from is required')
if not options.recipients:
option_parser.error('At least one --to is required')
if not options.subject:
option_parser.error('--subject is required')
if not options.message:
option_parser.error('--message is required')
if not options.server:
option_parser.error('--server is required')
options.subject = ResolveParameter(options.subject)
options.message = ResolveParameter(options.message)
return options | eb1ee1c5fb66f76882aef0787e2c4716146526f4 | 21,713 |
def pyramidnet110_a84_cifar100(classes=100, **kwargs):
"""
PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_cifar100",
**kwargs) | f005c26e80e87536b5032685f27944560e5d8fc7 | 21,714 |
def contains_vendored_imports(python_path):
"""
Returns True if ``python_path`` seems to contain vendored imports from botocore.
"""
# We're using a very rough heuristic here: if the source code contains
# strings that look like a vendored import, we'll flag.
#
# Because Python is dynamic, there are lots of ways you could be
# importing the vendored modules that wouldn't be caught this way, but:
#
# 1. Doing it in a complete foolproof way is incredibly complicated, and
# I don't care that much.
# 2. If you're writing your Lambda code in a deliberately obfuscated way,
# you have bigger problems than vendor deprecations.
#
# In practice, Python imports are usually near the top of the file, so we
# read it line-by-line. This means if we find an import, we can skip
# reading the rest of the file.
#
with open(python_path, "rb") as python_src:
for line in python_src:
if (
b"import botocore.vendored" in line
or b"from botocore.vendored import " in line
):
return True
return False | 90ed6939d7f43cac29eb66c3e27e911b9cc62532 | 21,715 |
def filter_uniq(item):
"""Web app, feed template, creates unique item id"""
detail = item['item']
args = (item['code'], item['path'], str(detail['from']), str(detail['to']))
return ':'.join(args) | 914fa4e3fcdf6bc7e6a30b46c8f33eecd08adcf1 | 21,716 |
import joblib
import time
import logging
import warnings
import pickle
import numpy as np
def load_pickle(filename, verbose=2, use_joblib=False):
"""
Note: joblib can be potentially VERY slow.
"""
with open(filename, 'rb') as file:
if verbose >= 2:
start = time.time()
logging.info(f'Loading PICKLE from {filename}...')
if use_joblib:
warnings.warn('Joblib is slower in newer versions of Python.')
obj = joblib.load(file)
else:
try:
obj = pickle.load(file)
except EOFError as e:
logging.error(f'Load FAILED for {filename}.')
raise e
if verbose >= 2:
logging.info(f'Load done in {np.round(time.time()-start, 4)} seconds.')
return obj | 680c4b72e47efeb58ec1bd93e4899a3ae6b99709 | 21,717 |
from typing import Match
async def make_match(*args, register=False, **kwargs) -> Match:
"""Create a Match object. There should be no need to call this directly; use matchutil.make_match instead,
since this needs to interact with the database.
Parameters
----------
racer_1_id: int
The DB user ID of the first racer.
racer_2_id: int
The DB user ID of the second racer.
max_races: int
The maximum number of races this match can be. (If is_best_of is True, then the match is a best of
max_races; otherwise, the match is just repeating max_races.)
match_id: int
The DB unique ID of this match.
suggested_time: datetime.datetime
The time the match is suggested for. If no tzinfo, UTC is assumed.
r1_confirmed: bool
Whether the first racer has confirmed the match time.
r2_confirmed: bool
Whether the second racer has confirmed the match time.
r1_unconfirmed: bool
Whether the first racer wishes to unconfirm the match time.
r2_unconfirmed: bool
Whether the second racer wishes to unconfirm the match time.
match_info: MatchInfo
The types of races to be run in this match.
cawmentator_id: int
The DB unique ID of the cawmentator for this match.
sheet_id: int
The sheetID of the worksheet the match was created from, if any.
register: bool
Whether to register the match in the database.
Returns
---------
Match
The created match.
"""
if 'match_id' in kwargs and kwargs['match_id'] in match_library:
return match_library[kwargs['match_id']]
match = Match(*args, commit_fn=matchdb.write_match, **kwargs)
await match.initialize()
if register:
await match.commit()
match_library[match.match_id] = match
return match | 67346038696558f19b08a65cf45e88646b1186e4 | 21,718 |
def is_anonymous(context: TreeContext) -> bool:
"""Returns ``True`` if the current node is anonymous."""
# return context[-1].anonymous
tn = context[-1].tag_name
return not tn or tn[0] == ':' | 0169a93cada0d371b3b4628ff3fabbbef6ef60f2 | 21,719 |
def expand_to_beam_size(tensor, beam_size):
"""Tiles a given tensor by beam_size."""
tensor = tf.expand_dims(tensor, axis=1)
tile_dims = [1] * tensor.shape.ndims
tile_dims[1] = beam_size
return tf.tile(tensor, tile_dims) | e38adceceeecdab737f89f246125674ac87e4702 | 21,720 |
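Hedged example for the snippet above (assumes TensorFlow 2.x): each batch element is repeated beam_size times along a new second axis.
import tensorflow as tf
t = tf.constant([[1, 2], [3, 4]])            # shape (2, 2)
tiled = expand_to_beam_size(t, beam_size=3)  # shape (2, 3, 2)
assert tiled.shape == (2, 3, 2)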
def delete_cluster(access_token, project_id, cluster_id):
"""删除集群"""
url = f"{BCS_CC_API_PRE_URL}/projects/{project_id}/clusters/{cluster_id}/"
params = {"access_token": access_token}
return http_delete(url, params=params) | f48d7f8a6278e528792601938817d883751d7a41 | 21,722 |
def atoi(s, base=None): # real signature unknown; restored from __doc__
"""
atoi(s [,base]) -> int
Return the integer represented by the string s in the given
base, which defaults to 10. The string s must consist of one
or more digits, possibly preceded by a sign. If base is 0, it
is chosen from the leading characters of s, 0 for octal, 0x or
0X for hexadecimal. If base is 16, a preceding 0x or 0X is
accepted.
"""
return 0 | 420c9a68c1fe829a665eaba830df757114a81b47 | 21,724 |
def gzip_requested(accept_encoding_header):
"""
Check to see if the client can accept gzipped output, and whether or
not it is even the preferred method. If `identity` is higher, then no
gzipping should occur.
"""
encodings = parse_encoding_header(accept_encoding_header)
# Do the actual comparisons
if('gzip' in encodings):
return encodings['gzip'] >= encodings['identity']
elif('*' in encodings):
return encodings['*'] >= encodings['identity']
else:
return False | a07ca3d77095467791fc97d1a525ee878715e929 | 21,725 |
def precip_units(units):
"""
Return a standardized name for precip units.
"""
kgm2s = ['kg/m2/s', '(kg/m^2)/s', 'kg/m^2/s', 'kg m^-2 s^-1',
'kg/(m^2 s)', 'kg m-2 s-1']
mmday = ['mm/day', 'mm day^-1']
if units.lower() in kgm2s:
return 'kg m^-2 s^-1'
elif units.lower() in mmday:
return 'mm day^-1'
else:
raise ValueError('Unknown units ' + units) | e5f94c3dd41b68d2e7b6b7aa1905fd5508a12fab | 21,727 |
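Quick sanity check for the unit mapping above (assumes precip_units is in scope):
assert precip_units('mm/day') == 'mm day^-1'
assert precip_units('kg m-2 s-1') == 'kg m^-2 s^-1'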
from typing import Union
from typing import List
import pandas as pd
def enumerate_quantities(
df: pd.DataFrame, cols: Union[List[str], None] = None, qty_col: str = "quantity"
) -> pd.DataFrame:
"""Creates new dataframe to convert x,count to x*count."""
if not cols:
raise ValueError("parameter cols must be an iterable of strings")
new_cols: List = [
sum(df.apply(lambda x: [x[col]] * x[qty_col], axis=1).tolist(), [])
for col in cols
]
new_df = pd.DataFrame(new_cols, index=cols).T
return new_df | 0defc1575ead9b70f658be5ed6795b22c3b39ac7 | 21,728 |
def calcul_acc(labels, preds):
"""
a private function for calculating accuracy
Args:
labels (Object): actual labels
preds (Object): predict labels
Returns:
float: the fraction of predictions that match the labels
"""
return sum(1 for x, y in zip(labels, preds) if x == y) / len(labels) | 3dc22c8707c181dda50e2a37f2cd822b2a31590d | 21,729 |
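Example for the accuracy helper above (assumes calcul_acc is in scope): two of three labels match, so the accuracy is 2/3.
labels = [1, 0, 1]
preds = [1, 1, 1]
print(calcul_acc(labels, preds))  # 0.666...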
def makeMolFromAtomsAndBonds(atoms, bonds, spin=None):
"""
Create a new Molecule object from a sequence of atoms and bonds.
"""
mol = Molecule(pybel.ob.OBMol())
OBMol = mol.OBMol
for atomicnum in atoms:
a = pybel.ob.OBAtom()
a.SetAtomicNum(atomicnum)
OBMol.AddAtom(a)
for bond in bonds:
if len(bond) != 3:
raise Exception('Bond must be specified by two indices and a bond order')
OBMol.AddBond(bond[0] + 1, bond[1] + 1, bond[2])
mol.assignSpinMultiplicity()
if spin is not None:
OBMol.SetTotalSpinMultiplicity(spin)
OBMol.SetHydrogensAdded()
return mol | 570dafe641bbade0d070942ea8e708d7e454e011 | 21,730 |
def get_preprocessor(examples, tokenize_fn, pad_ids):
"""
Input:
examples: [List[str]] input texts
tokenize_fn: [function] encodes text into IDs
Output:
tf input features
"""
def generator():
for example in examples:
tokens = tokenize_fn(example)
yield pad_ids + tokens
return generator | 0b2fb2217e04183fee027faedd163a8f8a048e9a | 21,732 |
def is_propositional_effect(eff: BaseEffect):
""" An effect is propositional if it is either an add or a delete effect. """
return isinstance(eff, (AddEffect, DelEffect)) | be440b2192dd6b89fcaff5756e774e7543f408cf | 21,733 |
def read_user(msg):
"""Read user input.
:param msg: A message to prompt
:type msg: ``str``
:return: ``True`` if user gives 'y' otherwhise False.
:rtype: ``bool``
"""
user_input = input("{msg} y/n?: ".format(msg=msg))
return user_input == 'y' | 662e95002130a6511e6e9a5d6ea85805f6b8f0f5 | 21,734 |
import scipy
def frechet_distance(real, fake):
"""Frechet distance.
Lower score is better.
"""
eps = 1e-6  # small constant added to the covariance diagonal for numerical stability
n = real.shape[0]
mu1, sigma1 = np.mean(real, axis=0), np.cov(real.reshape(n, -1), rowvar=False)
mu2, sigma2 = np.mean(fake, axis=0), np.cov(fake.reshape(n, -1), rowvar=False)
diff = mu1 - mu2
covmean, _ = scipy.linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = (
"fid calculation produces singular product; "
"adding %s to diagonal of cov estimates"
) % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = scipy.linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
assert np.isfinite(covmean).all() and not np.iscomplexobj(covmean)
tr_covmean = np.trace(covmean)
frechet_dist = diff.dot(diff)
frechet_dist += np.trace(sigma1) + np.trace(sigma2)
frechet_dist -= 2 * tr_covmean
return frechet_dist | 55ed2a4f21b8987925083c925e7df6de7b305c06 | 21,735 |
import requests
def get_tenants(zuul_url):
""" Fetch list of tenant names """
is_whitelabel = requests.get(
"%s/info" % zuul_url).json().get('tenant', None) is not None
if is_whitelabel:
raise RuntimeError("Need multitenant api")
return [
tenant["name"]
for tenant in requests.get("%s/tenants" % zuul_url).json()
] | 97944d2de2a8dfc2dd50dbea46a135a184e7aa37 | 21,736 |
def ant():
"""Configuration for MuJoCo's ant task."""
locals().update(default())
# Environment
env = 'Ant-v2'
max_length = 1000
steps = 2e7 # 20M
return locals() | 1b99dba9f38b735056055c564e1143c5eb77401a | 21,737 |
def docker_run(task, image, pull_image=True, entrypoint=None, container_args=None,
volumes=None, remove_container=True, **kwargs):
"""
This task runs a docker container. For details on how to use this task, see the
:ref:`docker-run` guide.
:param task: The bound task reference.
:type task: :py:class:`girder_worker.task.Task`
:param image: The docker image identifier.
:type image: str
:param pull_image: Whether to explicitly pull the image prior to running the container.
:type pull_image: bool
:param entrypoint: Alternative entrypoint to use when running the container.
:type entrypoint: str
:param container_args: Arguments to pass to the container.
:type container_args: list
:param volumes: Volumes to expose to the container.
:type volumes: dict
:param remove_container: Whether to delete the container after the task is done.
:type remove_container: bool
:return: Fulfilled result hooks.
:rtype: list
"""
return _docker_run(
task, image, pull_image, entrypoint, container_args, volumes,
remove_container, **kwargs) | 6ddc61d47c7b78bf532195a8cddd37f3c730b675 | 21,738 |
def get_accuracy(pred, target):
"""gets accuracy either by single prediction
against target or comparing their codes """
if len(pred.size()) > 1:
pred = pred.max(1)[1]
#pred, target = pred.flatten(), target.flatten()
accuracy = round(float((pred == target).sum())/float(pred.numel()) * 100, 3)
return accuracy | f30e57602e4a06b0a0e3cd131bf992cf8f9b514e | 21,739 |
import numpy
def ifourier_transform(F,dt,n):
"""
See Also
-------
fourier_transform
"""
irfft = numpy.fft.irfft
shift = numpy.fft.fftshift
return (1.0/dt)*shift(irfft(F,n=n)) | d068cdbbe95f58d4210d2e799dfaee878fb9bf98 | 21,740 |
def preprocess_labels(labels, encoder=None, categorical=True):
"""Encode labels with values among 0 and `n-classes-1`"""
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder | 3d92ce70f6ae7f713b27f5a31e92f0aab919584b | 21,741 |
def import_minimal_log(path, parameters=None, variant=DEFAULT_VARIANT_LOG):
"""
Import a Parquet file (as a minimal log with only the essential columns)
Parameters
-------------
path
Path of the file to import
parameters
Parameters of the algorithm, possible values:
Parameters.COLUMNS -> columns to import from the Parquet file
variant
Variant of the algorithm, possible values:
- Variants.PYARROW
Returns
-------------
df
Pandas dataframe
"""
if parameters is None:
parameters = {}
parameters[COLUMNS] = [constants.CASE_CONCEPT_NAME, xes.DEFAULT_NAME_KEY, xes.DEFAULT_TIMESTAMP_KEY]
return exec_utils.get_variant(variant).import_log(path, parameters=parameters) | 530f60799318b90c90d08d427965041e4bda6dba | 21,742 |
def get_input_assign(input_signal, input_value):
""" Get input assignation statement """
input_assign = ReferenceAssign(
input_signal,
Constant(input_value, precision=input_signal.get_precision())
)
return input_assign | 9b3e372423d323af3a718ab909c26f2ba42bfea6 | 21,743 |
def prune_repos(region: str=None, registry_prefix: str=None, repo: str=None, current_tag: str=None, all_tags: str=None):
"""
Delete obsolete image tags from the ECR repository, keeping the current tag and the recent tags listed in all_tags.
:param region:
:param registry_prefix:
:param repo:
:param current_tag:
:param all_tags:
:return:
"""
# Get the tags from the all_tags JSON
all_tags_list = get_tags_from_all_tags(all_tags)
# Add the current_tag to the recent (local) tags. Just to be safe
recent_tags = all_tags_list + [current_tag]
# Get the tags for the repo from ECR
ecr_tags = get_tags_from_ecr(region, repo)
# Get all the tags in the registry that are *not* the ones we want
bad_tags = [tag for tag in ecr_tags if tag not in recent_tags]
# Delete the obsolete images
for tag in bad_tags:
output = get_stdout('''{AWS} ecr batch-delete-image --region {region} --repository-name {repo} --image-ids imageTag={tag}'''
.format(AWS=AWS, region=region, repo=repo, tag=tag))
return True | d5d21230f4440e4909a9ff0288a794471b5fb016 | 21,744 |
def convert_size_bytes_to_gb(size_in_bytes):
""":rtype: float"""
return float(size_in_bytes) / GB | 7d3946dc431aa6a531fa11ef8e5391279f8b553a | 21,745 |
def merge_swab(survey_df, swab_df):
"""
Process for matching and merging survey and swab result data.
Should be executed after merge with blood test result data.
"""
survey_antibody_swab_df, none_record_df = execute_merge_specific_swabs(
survey_df=survey_df,
labs_df=swab_df,
barcode_column_name="swab_sample_barcode",
visit_date_column_name="visit_datetime",
received_date_column_name="pcr_result_recorded_datetime",
void_value="Void",
)
survey_antibody_swab_df = survey_antibody_swab_df.drop(
"abs_offset_diff_vs_visit_hr_swab",
"count_barcode_swab",
"count_barcode_voyager",
"diff_vs_visit_hr_swab",
"pcr_flag",
"time_order_flag",
"time_difference_flag",
)
df_all_iqvia, df_lab_residuals, df_failed_records = merge_process_filtering(
df=survey_antibody_swab_df,
none_record_df=none_record_df,
merge_type="swab",
barcode_column_name="swab_sample_barcode",
lab_columns_list=[column for column in swab_df.columns if column != "swab_sample_barcode"],
)
return df_all_iqvia, df_lab_residuals, df_failed_records | 38253b473c45a967dc1aeccdd61e94566014a347 | 21,746 |
def confirm_space(environ, start_response):
"""
Confirm a space exists. If it does, respond with 204. If
not, raise 404.
"""
store = environ['tiddlyweb.store']
space_name = environ['wsgiorg.routing_args'][1]['space_name']
try:
space = Space(space_name)
store.get(Recipe(space.public_recipe()))
store.get(Recipe(space.private_recipe()))
except NoRecipeError:
raise HTTP404('%s does not exist' % space_name)
start_response('204 No Content', [])
return [''] | aff453f96bb85895115dff9796387bc223151c81 | 21,747 |
def find_ppp_device_status(address=None, username=None):
"""Find device status node based on address and/or username.
This is currently only used by the web UI. For the web UI this is the best
guess for identifying the device related to a forced web forward; which
allows the web UI to default username for user login, for instance.
"""
def _f1(d):
return (address is not None) and (d.getS(ns.pppAddress, rdf.IPv4Address) == address)
def _f2(d):
return (username is not None) and (d.getS(ns.username, rdf.String) == username)
# There may be multiple matching devices in corner cases, e.g. two devices
# in RDF with the same IP address. License monitor reconcile process should
# eliminate these discrepancies eventually but here we may still encounter
# them from time to time.
#
# If there are multiple matching entries, we take the newest one and assume
# that is the desired one. If the entries have a different username, this
# may match to the wrong user. This is not critical: the web UI does not
# allow the user to make any user-related changes until the user has logged
# in (providing his password). This function only provides the default value
# for login username.
#
# So: return device with latest startTime (newest connection), or first in
# list if no startTime is found. [filter_ppp_device_statuses_single does this.]
return filter_ppp_device_statuses_single([_f1, _f2]) | 71d44185a5df8f72b1281102faef66ea4f8a1de1 | 21,748 |
def get_L_dash_prm_bath_OS_90(house_insulation_type, floor_bath_insulation):
"""主開口方向から時計回りに90°の方向の外気に面した浴室の土間床等の外周部の長さ (m)
Args:
house_insulation_type(str): 床断熱住戸'または'基礎断熱住戸'
floor_bath_insulation(str): 床断熱住戸'または'基礎断熱住戸'または'浴室の床及び基礎が外気等に面していない'
Returns:
float: 主開口方向から時計回りに90°の方向の外気に面した浴室の土間床等の外周部の長さ (m)
"""
return get_table_3(38, house_insulation_type, floor_bath_insulation) | 13362e5f035865b38ea6562aa7a836ce95298590 | 21,749 |
import json
def generate_books(request, form):
"""
Returns a list of books.
"""
list_of_books = Book.generate_existing_books(form.cleaned_data['part'])
return HttpResponse(json.dumps(list_of_books), content_type='application/json') | d75deab68c4cb4cdc9f14e4a313ffd060ab01004 | 21,751 |
def window_reverse_4d(windows, window_size, H_q, W_q, H_s, W_s):
"""
Args:
windows: (num_windows*B, window_size, window_size, window_size, window_size, C)
window_size (int): size of window
H_q (int): Height of query image
W_q (int): Width of query image
H_s (int): Height of support image
W_s (int): Width of support image
Returns:
x: (B, H_q, W_q, H_s, W_s, C)
"""
kwargs = {
'H_q': H_q // window_size,
'W_q': W_q // window_size,
'H_s': H_s // window_size,
'W_s': W_s // window_size
}
x = rearrange(windows, '(B H_q W_q H_s W_s) W_1 W_2 W_3 W_4 C -> B (H_q W_1) (W_q W_2) (H_s W_3) (W_s W_4) C', **kwargs)
return x | 8ef2743ec15c140807a9c269680f8bd3810703a3 | 21,752 |
def numeric(symbols, negative, value):
"""Implement the algorithm for `type: numeric`."""
if value == 0:
return symbols[0]
is_negative = value < 0
if is_negative:
value = abs(value)
prefix, suffix = negative
reversed_parts = [suffix]
else:
reversed_parts = []
length = len(symbols)
value = abs(value)
while value != 0:
reversed_parts.append(symbols[value % length])
value //= length
if is_negative:
reversed_parts.append(prefix)
return ''.join(reversed(reversed_parts)) | 4eb41904f1ead6e6f8f6d6a5a7855d917a0029b7 | 21,754 |
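Usage sketch for the counter-style routine above (assumes numeric is in scope): with the ten decimal digits as symbols and a plain '-' prefix for negatives, the function reproduces ordinary decimal notation.
digits = '0123456789'
assert numeric(digits, ('-', ''), 0) == '0'
assert numeric(digits, ('-', ''), 305) == '305'
assert numeric(digits, ('-', ''), -42) == '-42'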
from typing import Dict
def scale_value_dict(dct: Dict[str, float], problem: InnerProblem):
"""Scale a value dictionary."""
scaled_dct = {}
for key, val in dct.items():
x = problem.get_for_id(key)
scaled_dct[key] = scale_value(val, x.scale)
return scaled_dct | f7ad0cf51129d7abfb85fdba8d64f1c69bba2bad | 21,755 |
def green(string: str) -> str:
"""Add green colour codes to string
Args:
string (str): Input string
Returns:
str: Green string
"""
return "\033[92m" + string + "\033[0m" | b6bdefe3e467e88c044b9289ea26a59ccf564f1a | 21,757 |
def from_6x6_to_21x1(T):
"""Convert symmetric second order tensor to first order tensor."""
C2 = np.sqrt(2)
V = np.array([[T[0, 0], T[1, 1], T[2, 2],
C2 * T[1, 2], C2 * T[0, 2], C2 * T[0, 1],
C2 * T[0, 3], C2 * T[0, 4], C2 * T[0, 5],
C2 * T[1, 3], C2 * T[1, 4], C2 * T[1, 5],
C2 * T[2, 3], C2 * T[2, 4], C2 * T[2, 5],
T[3, 3], T[4, 4], T[5, 5],
C2 * T[3, 4], C2 * T[4, 5], C2 * T[5, 3]]]).T
return V | 177d766ee251dfb52396f88b4e77d101956afe79 | 21,758 |
def post_add_skit_reply():
""" removes a skit if authored by the current user """
email = is_authed(request)
if email and csrf_check(request):
# same as args, form data is also immutable
request.form = dict(request.form)
request.form['email'] = email
p_resp = proxy(RUBY, request)
return create_response(p_resp)
return BADUSER | 17b64bba949bb2df57cbdf796c0b895387672018 | 21,759 |
import datetime
def register(request):
"""Register new account."""
token_int = int(datetime.datetime.strftime(
datetime.datetime.now(), '%Y%m%d%H%M%S%f'))
token = short_url.encode_url(token_int)
if (not request.playstore_url and not request.appstore_url
and not request.winstore_url and not request.default_url):
return False, 'Insufficient information to register.', None
account = models.Accounts(
playstore_url=request.playstore_url,
appstore_url=request.appstore_url,
winstore_url=request.winstore_url,
default_url=request.default_url,
title=request.title,
banner=request.banner,
description=request.description,
token=token
)
account.put()
return True, None, token | 649c413011ec76bdb2244bbbfe7f4810230d3202 | 21,760 |
def stringify_parsed_email(parsed):
"""
Convert a parsed email tuple into a single email string
"""
if len(parsed) == 2:
return f"{parsed[0]} <{parsed[1]}>"
return parsed[0] | 6552987fe6a06fdbb6bd49e5d17d5aadaae3c832 | 21,761 |
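Examples for the helper above (assumes stringify_parsed_email is in scope):
assert stringify_parsed_email(("Jane Doe", "jane@example.com")) == "Jane Doe <jane@example.com>"
assert stringify_parsed_email(("jane@example.com",)) == "jane@example.com"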
import math
def standard_simplex_vol(sz: int):
"""Returns the volume of the sz-dimensional standard simplex"""
result = cm_matrix_det_ns(np.identity(sz, dtype=DTYPE))
if result == math.inf:
raise ValueError(f'Cannot compute volume of standard {sz}-simplex')
return result | 1b0d806312ee722f3251e1099e604a18d4e762a7 | 21,762 |
def all_saveable_objects(scope=None):
""" Copied private function in TF source. This is what tf.train.Saver saves if var_list=None is passed. """
return (tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) +
tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS, scope)) | 4c0b8ec0dd65160a113d4e6151a1a5b6d8454926 | 21,763 |
def base_to_str( base ):
"""Converts 0,1,2,3 to A,C,G,T"""
if 0 == base: return 'A'
if 1 == base: return 'C'
if 2 == base: return 'G'
if 3 == base: return 'T'
raise RuntimeError( 'Bad base: %d' % base ) | f1c98b7c24fae91c1f809abe47929d724c886168 | 21,764 |
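Quick check (assumes base_to_str above is in scope):
assert [base_to_str(b) for b in range(4)] == ['A', 'C', 'G', 'T']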
def attr(accessing_obj, accessed_obj, *args, **kwargs):
"""
Usage:
attr(attrname)
attr(attrname, value)
attr(attrname, value, compare=type)
where compare's type is one of (eq,gt,lt,ge,le,ne) and signifies
how the value should be compared with one on accessing_obj (so
compare=gt means the accessing_obj must have a value greater than
the one given).
Searches attributes *and* properties stored on the checking
object. The first form works like a flag - if the
attribute/property exists on the object, the value is checked for
True/False. The second form also requires that the value of the
attribute/property matches. Note that all retrieved values will be
converted to strings before doing the comparison.
"""
# deal with arguments
if not args:
return False
attrname = args[0].strip()
value = None
if len(args) > 1:
value = args[1].strip()
compare = 'eq'
if kwargs:
compare = kwargs.get('compare', 'eq')
def valcompare(val1, val2, typ='eq'):
"compare based on type"
try:
return CF_MAPPING.get(typ, 'default')(val1, val2)
except Exception:
# this might happen if we try to compare two things that cannot be compared
return False
# first, look for normal properties on the object trying to gain access
if hasattr(accessing_obj, attrname):
if value:
return valcompare(str(getattr(accessing_obj, attrname)), value, compare)
return bool(getattr(accessing_obj, attrname)) # will return Fail on False value etc
# check attributes, if they exist
if (hasattr(accessing_obj, 'has_attribute') and accessing_obj.has_attribute(attrname)):
if value:
return (hasattr(accessing_obj, 'get_attribute')
and valcompare(accessing_obj.get_attribute(attrname), value, compare))
return bool(accessing_obj.get_attribute(attrname)) # fails on False/None values
return False | 8b3944ee8ef64938314766cc21e893ccbf48d9e1 | 21,766 |
def group_connected(polygon_map, mask=None):
"""Group all connected nodes."""
# Wrap :c:`group_connected()` from ``polygon_map.c``.
polygon_map = mask_polygon_map(polygon_map, mask)
queue = Queue(len(polygon_map) + 1)
group_ids = np.full(len(polygon_map), -1, np.intp, order="C")
groups_count: int
groups_count = slug.dll.group_connected(ptr(polygon_map),
ptr(polygon_map.ctypes.shape),
ptr(group_ids), queue._raw._ptr)
return group_ids, groups_count | 2239feab1ef914156e01ab430f0c561609de0b18 | 21,767 |
def dictmask(data, mask, missing_keep=False):
"""dictmask masks dictionary data based on mask"""
if not isinstance(data, dict):
raise ValueError("First argument with data should be dictionary")
if not isinstance(mask, dict):
raise ValueError("Second argument with mask should be dictionary")
if not isinstance(missing_keep, bool):
raise ValueError("Argument missing_keep should be bool type")
res = {}
for k, v in data.items():
if k not in mask:
if missing_keep is True:
res[k] = v
continue
if mask[k] is None or mask[k] is False:
continue
if mask[k] is True or data[k] is None:
res[k] = v
continue
if isinstance(data[k], dict) and isinstance(mask[k], dict):
res[k] = dictmask(data[k], mask[k])
continue
if isinstance(data[k], list) and isinstance(mask[k], list):
if len(mask[k]) != 1:
raise ValueError("Mask inside list should have only one item")
res2 = []
for i in range(len(data[k])):
res2.append(dictmask(data[k][i], mask[k][0], missing_keep))
res[k] = res2
else:
raise ValueError(
f"Cannot proceed key {k} with values of different types:"
f"{type(data[k])}, {type(mask[k])}"
)
return res | d18f6effb4367628ba85095024189d0f6694dd52 | 21,768 |
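Usage sketch for the masking helper above (assumes dictmask is in scope): only masked keys are kept, recursing into nested dictionaries.
data = {'a': 1, 'b': 2, 'c': {'d': 3, 'e': 4}}
mask = {'a': True, 'c': {'d': True}}
assert dictmask(data, mask) == {'a': 1, 'c': {'d': 3}}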
def _prepare_policy_input(
observations, vocab_size, observation_space, action_space
):
"""Prepares policy input based on a sequence of observations."""
if vocab_size is not None:
(batch_size, n_timesteps) = observations.shape[:2]
serialization_kwargs = init_serialization(
vocab_size, observation_space, action_space, n_timesteps
)
actions = np.zeros(
(batch_size, n_timesteps - 1) + action_space.shape,
dtype=action_space.dtype,
)
reward_mask = np.ones((batch_size, n_timesteps - 1), dtype=np.int32)
(policy_input, _) = serialization_utils.serialize_observations_and_actions(
observations=observations,
actions=actions,
mask=reward_mask,
**serialization_kwargs
)
return policy_input
else:
return observations | 9799357e00453a1259551c3af1b5bf5b58603186 | 21,771 |
def RGB2raw(R, G, B):
"""Convert RGB channels to Raw image."""
h, w = R.shape
raw = np.empty(shape=(2*h, 2*w), dtype=R.dtype)
raw[::2, ::2] = R
raw[1::2, 1::2] = B
raw[1::2, 0::2] = G
raw[0::2, 1::2] = G
return raw | 7adb2ccef65c85c7e5d1ac223f397ef2f90dd9d3 | 21,772 |
def get_algs_from_ciphersuite_name(ciphersuite_name):
"""
Return the 3-tuple made of the Key Exchange Algorithm class, the Cipher
class and the HMAC class, through the parsing of the ciphersuite name.
"""
tls1_3 = False
if ciphersuite_name.startswith("TLS"):
s = ciphersuite_name[4:]
if s.endswith("CCM") or s.endswith("CCM_8"):
kx_name, s = s.split("_WITH_")
kx_alg = _tls_kx_algs.get(kx_name)
hash_alg = _tls_hash_algs.get("SHA256")
cipher_alg = _tls_cipher_algs.get(s)
hmac_alg = None
else:
if "WITH" in s:
kx_name, s = s.split("_WITH_")
kx_alg = _tls_kx_algs.get(kx_name)
else:
tls1_3 = True
kx_alg = _tls_kx_algs.get("TLS13")
hash_name = s.split('_')[-1]
hash_alg = _tls_hash_algs.get(hash_name)
cipher_name = s[:-(len(hash_name) + 1)]
if tls1_3:
cipher_name += "_TLS13"
cipher_alg = _tls_cipher_algs.get(cipher_name)
hmac_alg = None
if cipher_alg is not None and cipher_alg.type != "aead":
hmac_name = "HMAC-%s" % hash_name
hmac_alg = _tls_hmac_algs.get(hmac_name)
elif ciphersuite_name.startswith("SSL"):
s = ciphersuite_name[7:]
kx_alg = _tls_kx_algs.get("SSLv2")
cipher_name, hash_name = s.split("_WITH_")
cipher_alg = _tls_cipher_algs.get(cipher_name.rstrip("_EXPORT40"))
kx_alg.export = cipher_name.endswith("_EXPORT40")
hmac_alg = _tls_hmac_algs.get("HMAC-NULL")
hash_alg = _tls_hash_algs.get(hash_name)
return kx_alg, cipher_alg, hmac_alg, hash_alg, tls1_3 | cc2ab3fcae87feeb7877bad091446fb2d20be6b0 | 21,773 |
def centroid(window):
"""Centroid interpolation for sub pixel shift"""
ip = lambda x : (x[2] - x[0])/(x[0] + x[1] + x[2])
return ip(window[:, 1]), ip(window[1]) | e1cf0398261637f682c74340f99566d19e342b66 | 21,774 |
def _format_warning(message, category, filename, lineno, line=None):
"""
Replacement for warnings.formatwarning that disables the echoing of
the 'line' parameter.
"""
return "{}:{}: {}: {}\n".format(filename, lineno, category.__name__, message) | 8267150c5890759d2f2190ccf4b7436ea8f55204 | 21,775 |
from typing import List
def precision_at_k(predictions: List[int], targets: List[int], k: int = 10) -> float:
"""Computes `Precision@k` from the given predictions and targets sets."""
predictions_set = set(predictions[:k])
targets_set = set(targets)
result = len(targets_set & predictions_set) / float(len(predictions_set))
return result | 4c6e566db7c488416139545f5f845ff80b7af434 | 21,776 |
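Example (assumes precision_at_k above is in scope): one of the top-3 predictions is in the target set, so precision@3 is 1/3.
print(precision_at_k([10, 20, 30, 40], [20, 50, 60], k=3))  # 0.333...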
def wordify_open(p, word_chars):
"""Prepend the word start markers."""
return r"(?<![{0}]){1}".format(word_chars, p) | 8b267aaca897d6435a84f22064f644727ca6e83c | 21,777 |
def Mt_times_M(M):
"""Compute M^t @ M
Args:
M : (batched) matrix M
Returns:
tf.Tensor: solution of M^t @ M
"""
if isinstance(M, tf.Tensor):
linop = tf.linalg.LinearOperatorFullMatrix(M)
return linop.matmul(M, adjoint=True)
elif isinstance(M, (tf.linalg.LinearOperatorFullMatrix,
tf.linalg.LinearOperatorLowerTriangular)):
return M.matmul(M.to_dense(), adjoint=True)
elif is_diagonal_linop(M):
return diagonal_M_times_Mt(M)
else:
raise TypeError("cannot compute M_times_Mt, invalid type") | cfb8023711186821faf0ff8bfa1277d6585d40de | 21,778 |
import typing
def make_values(ints: typing.Iterable[int]):
"""Make datasets.
"""
return [
('int', ints),
('namedtuple', [IntNamedTuple(i) for i in ints]),
('class', [IntObject(i) for i in ints]),
] | 700bbd4a43bff38a0154bbc595c1cd10cc2ec9d9 | 21,779 |
import collections
def attach_trans_dict(model, objs):
"""Put all translations from all non-deferred translated fields from objs
into a translations dict on each instance."""
# Get the ids of all the translations we need to fetch.
try:
deferred_fields = objs[0].get_deferred_fields()
except IndexError:
return
fields = [
field
for field in model._meta.translated_fields
if field.attname not in deferred_fields
]
ids = [
getattr(obj, field.attname)
for field in fields
for obj in objs
if getattr(obj, field.attname, None) is not None
]
if ids:
# Get translations in a dict, ids will be the keys. It's important to
# consume the result of sorted_groupby, which is an iterator.
qs = Translation.objects.filter(id__in=ids, localized_string__isnull=False)
else:
qs = []
all_translations = {
field_id: sorted(list(translations), key=lambda t: t.locale)
for field_id, translations in sorted_groupby(qs, lambda t: t.id)
}
def get_locale_and_string(translation, new_class):
"""Convert the translation to new_class (making PurifiedTranslations
and LinkifiedTranslations work) and return locale / string tuple."""
converted_translation = new_class()
converted_translation.__dict__ = translation.__dict__
return (converted_translation.locale.lower(), str(converted_translation))
# Build and attach translations for each field on each object.
for obj in objs:
if not obj:
continue
obj.translations = collections.defaultdict(list)
for field in fields:
t_id = getattr(obj, field.attname, None)
field_translations = all_translations.get(t_id, None)
if not t_id or field_translations is None:
continue
obj.translations[t_id] = [
get_locale_and_string(t, field.remote_field.model)
for t in field_translations
] | 933e87b050eac0dfbead141c0c3c56a2add9751f | 21,780 |
def list_known_protobufs():
"""
Returns the list of known protobuf model IDs
"""
return [k for k in proto_data_structure] | a4b80f948792a4d2a965eac507a118719fa106f5 | 21,781 |
def hash_value(*args):
"""
hash_value(NodeConstHandle t) -> std::size_t
hash_value(BufferConstHandle t) -> std::size_t
hash_value(FileConstHandle t) -> std::size_t
"""
return _RMF.hash_value(*args) | c85277fec7f329eeba26d053a43205bf1eda0662 | 21,782 |
import re
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
# with tf.gfile.GFile(input_file, "r") as reader:
with open(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples | 82380f9409bd91e16fc4324fc345d335fa9e96dc | 21,783 |
from datasets import load_dataset
def get_finance_sentiment_dataset(split: str='sentences_allagree') -> list:
"""
Load financial dataset from HF: https://huggingface.co/datasets/financial_phrasebank
Note that there's no train/validation/test split: the dataset is available in four possible
configurations depending on the percentage of agreement of annotators. By default, load just
sentences for which all annotators agree.
"""
dataset = load_dataset("financial_phrasebank", split)
return dataset['train'] | 9f83d4c501ed16e5e617c32090787d1377ec70fd | 21,784 |
def list_databases():
"""
List tick databases and associated aggregate databases.
Returns
-------
dict
dict of {tick_db: [agg_dbs]}
"""
response = houston.get("/realtime/databases")
houston.raise_for_status_with_json(response)
return response.json() | d2ba438a0496f5863ad1c16cb8e694b54276d01e | 21,785 |
def nice_range(bounds):
"""
Given a range, return an enclosing range accurate to two digits.
"""
step = bounds[1] - bounds[0]
if step > 0:
d = 10 ** (floor(log10(step)) - 1)
return floor(bounds[0]/d)*d, ceil(bounds[1]/d)*d
else:
return bounds | 66f538649b8f1c55d301b2a0e293a4968b3665d9 | 21,786 |
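Example (assumes nice_range above is in scope, plus floor, ceil and log10 imported from math, which the snippet itself does not import):
print(nice_range((0.123, 0.456)))  # roughly (0.12, 0.46)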
def connect_to_portal(config):
"""
The portal/metadata schema is completely optional.
"""
if config.portal_schema:
return aws.connect_to_db(**config.rds_config, schema=config.portal_schema) | 2aa76ae2ad8d9ea16ea7ba227627f02d3c044d70 | 21,787 |
import json
def get_words_for_source():
""" Gets JSON to populate words for source """
source_label = request.args.get("source")
source = create_self_summary_words(source_label)
return json.dumps(source) | 2a2fc02a6f77cd109f3e0fded026676109ae014c | 21,788 |
def raster(event_times_list):
"""
Creates a raster plot
Parameters
----------
event_times_list : iterable
a list of event time iterables
Returns
-------
ax : an axis containing the raster plot
"""
color='k'
ax = plt.gca()
for ith, trial in enumerate(event_times_list):
plt.vlines(trial, ith + .5, ith + 1.5, color=color)
plt.ylim(.5, len(event_times_list) + .5)
return ax | 3c5b485bdc3992602a7c7bb227329b2e74c611d9 | 21,789 |
def FindOneDocument(queryDocument, database='NLP', collection="Annotations", host='localhost', port='27017'):
"""
This method returns the first document in the backing store that matches the criteria specified in queryDocument.
:param queryDocument: [dict] A pymongo document used to query the MongoDB instance.
:param database: [string] The name of the MongoDB database instance that holds "collection". Defaults to NLP.
:param collection: [string] The name of the collection that stores the document instances. Defaults to "Annotations".
:param host: [string] The host IP address, defaults to localhost.
:param port: [string] The port on which the MongoDB server is listening. Defaults to 27017.
:return: [object | None] A single Document object if the query matches any documents, otherwise None.
"""
client = MongoClient('mongodb://%s:%s/' % (host, port))
collection = client[database][collection]
mongoDoc = collection.find_one(queryDocument)
client.close()
return constructAnnotationDocument(mongoDoc) | fb5f683f4451144ae3cbe374162bef36918130ba | 21,790 |
def user_info(context, **kwargs):
"""
Отображает информацию о текущем авторизованом пользователе, либо ссылки на авторизацию и регистрацию
Пример использования::
{% user_info %}
:param context: контекст
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
request = context['request']
return {'user': request.user, 'data': kwargs} | 20321056fd5fdf8f51e79fb66d335272e85ada0d | 21,791 |
def to_bytes(val):
"""Takes a text message and return a tuple
"""
if val is NoResponse:
return val
val = val.replace('\\r', '\r').replace('\\n', '\n')
return val.encode() | 9f5a45d9c69a18eec22c85c6691f8b3d46742af4 | 21,793 |
def _create_fake_data_fn(train_length=_DATA_LENGTH, valid_length=50000, num_batches=40):
""" Creates fake dataset
Data is returned in NCHW since this tends to be faster on GPUs
"""
logger = _get_logger()
logger.info("Creating fake data")
data_array = _create_data(_BATCHSIZE, num_batches, (_HEIGHT, _WIDTH), _CHANNELS)
labels_array = _create_labels(_BATCHSIZE, num_batches, 1000)
def fake_data_generator():
for i in range(num_batches):
yield data_array[i * _BATCHSIZE : (i + 1) * _BATCHSIZE], labels_array[
i * _BATCHSIZE : (i + 1) * _BATCHSIZE
]
train_data = tf.data.Dataset().from_generator(
fake_data_generator,
output_types=(tf.float32, tf.int32),
output_shapes=(
tf.TensorShape([None, _CHANNELS, _HEIGHT, _WIDTH]),
tf.TensorShape([None]),
),
)
train_data = train_data.shuffle(40 * _BATCHSIZE).repeat().prefetch(_BUFFER)
validation_data = tf.data.Dataset().from_generator(
fake_data_generator,
output_types=(tf.float32, tf.int32),
output_shapes=(
tf.TensorShape([None, _CHANNELS, _HEIGHT, _WIDTH]),
tf.TensorShape([None]),
),
)
validation_data = validation_data.prefetch(_BUFFER)
def _train_input_fn():
return train_data.make_one_shot_iterator().get_next()
def _validation_input_fn():
return validation_data.make_one_shot_iterator().get_next()
_train_input_fn.length = train_length
_validation_input_fn.length = valid_length
_train_input_fn.classes = 1000
_validation_input_fn.classes = 1000
return _train_input_fn, _validation_input_fn | 33533cc4b1d43aaeba48db8470c93cbc058ad3dc | 21,794 |
def watershed(src):
"""
Performs a marker-based image segmentation using the watershed algorithm.
:param src: 8-bit 1-channel image.
:return: 32-bit single-channel image (map) of markers.
"""
# cv2.imwrite('{}.png'.format(np.random.randint(1000)), src)
gray = src.copy()
img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
# h, w = gray.shape[:2]
# block_size = (min(h, w) // 4 + 1) * 2 + 1
# thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, 0)
_ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# noise removal
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
# sure background area
sure_bg = cv2.dilate(opening, kernel, iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
# dist_transform = opening & gray
# cv2.imshow('dist_transform', dist_transform)
# _ret, sure_bg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY_INV)
_ret, sure_fg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY)
# Finding unknown region
# sure_bg = np.uint8(sure_bg)
sure_fg = np.uint8(sure_fg)
# cv2.imshow('sure_fg', sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# Marker label
lingret, marker_map = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
marker_map = marker_map + 1
# Now, mark the region of unknown with zero
marker_map[unknown == 255] = 0
marker_map = cv2.watershed(img, marker_map)
return marker_map | 6915b6a924e64d12340e02b28085290685dddc9b | 21,795 |
def sub(x, y):
"""Returns the difference of compositions.
Parameters
----------
x : NumPy array, shape (n,) or (k,n)
The composition that will be subtracted from.
y : NumPy array, shape (n,) or (k,n)
The composition to be subtracted.
Returns
-------
z : NumPy array, shape (n,) or (k,n)
The result of y subtracted from x.
"""
z = perturbation(x, power(y, -1.0)) # 1.0 and not 1 forces coercion
return z | c9e1fb31abb22a6efb9903c6e9f7cdc06cc110d0 | 21,796 |
def _broadcast_concatenate(arrays, axis):
"""Concatenate arrays along an axis with broadcasting."""
arrays = _broadcast_arrays(arrays, axis)
res = np.concatenate(arrays, axis=axis)
return res | 5032ec0a90dde25d74906dbf661248086c785485 | 21,797 |
def get_final_bmi(data_dic, agex_low, agex_high, mrnsForFilter=[], filter=True):
"""
Function to get the distinct bmi percentile readings for predictions.
Returns outcome percentiles and labels
#### PARAMETERS ####
data_dic: dictionary of patient data
agex_low: low age range for outcome prediction
agex_high: high age range for outcome prediction
mrnsForFilter: list of mrns to get outcomes for
filter: default==True; if True returns mrn filtered data only, otherwise returns all data with either a 0 or ''
"""
outcome = np.zeros(len(data_dic.keys()), dtype=float)
outcome_pct = np.zeros(len(data_dic.keys()), dtype=float)
outcome_labels = [''] * len(data_dic.keys())
indices = np.zeros(len(data_dic.keys()))
for (ix, k) in enumerate(data_dic):
if (len(mrnsForFilter) > 0) & (str(data_dic[k]['mrn']) not in mrnsForFilter):
continue
bmi, pct, label = get_final_bmi_single(data_dic[k], agex_low, agex_high)
if pct == 0 and label == '':
continue
outcome[ix] = bmi
outcome_pct[ix] = pct
outcome_labels[ix] = label
indices[ix] = 1
if filter:
indices = (indices == 1)
return outcome[indices], outcome_pct[indices], np.array(outcome_labels)[indices]
else:
return outcome, outcome_pct, np.array(outcome_labels) | e9793adf7470a695bd730f66817b735451df71a2 | 21,798 |
import sqlite3
def add_group_sub(uid:int, group_id:int) -> bool: # add a subscription record
"""
Insert a group record into an existing table; do nothing if the group already exists
:param uid: numeric uid uniquely identifying the user
:param group_id: id of the group that subscribes to this user
"""
connection = sqlite3.connect(DB_PATH)
cursor = connection.cursor()
success = True
group_exist = cursor.execute(
f'select count(*) from _{uid} where group_id={group_id};').fetchone()[0]
if not group_exist:
cursor.execute(f'insert into _{uid} values({group_id}, 1);') # translation enabled by default
connection.commit()
else:
success = False
logger.warning(f'group {group_id} already exists in table _{uid}')
cursor.close()
connection.close()
return success | de71f03aa56bf4ae963877281e7e70876f5e72ff | 21,799 |
def is_array_of(obj, classinfo):
"""
Check if obj is a list of classinfo or a tuple of classinfo or a set of classinfo
:param obj: an object
:param classinfo: type of class (or subclass). See isinstance() build in function for more info
:return: flag: True or False
"""
flag = False
if isinstance(obj, classinfo):
pass
elif all(isinstance(item, classinfo) for item in obj):
flag = True
return flag | 5fecce974b5424cff7d5e6a4a9f9bd1482e10e85 | 21,801 |
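Examples (assumes is_array_of above is in scope):
assert is_array_of([1, 2, 3], int) is True
assert is_array_of([1, 'a', 3], int) is False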
from atom.models import MODELS
def create_acronym(fullname):
"""Create an acronym for an estimator.
The acronym consists of the capital letters in the name if
there are at least two. If not, the entire name is used.
Parameters
----------
fullname: str
Estimator's __name__.
Returns
-------
str
Created acronym.
"""
acronym = "".join([c for c in fullname if c.isupper()])
if len(acronym) < 2 or acronym.lower() in MODELS:
return fullname
else:
return acronym | 8343fc670080634b1b9b556122cddb509ee36e72 | 21,802 |
import uuid
from collections import defaultdict
def transact_update_path(path):
"""input transact update to DynamoDB"""
    # note: transact_write_items is a low-level client API, not a resource API
def update_path(path):
"""input put learning path to DynamoDB"""
input = defaultdict(
dict,
TableName="primary_table",
Key={"PK": {"S": path.PK}, "SK": {"S": path.PK}},
UpdateExpression="SET updatedAt=:date, updatedUser=:user",
ExpressionAttributeValues={
":date": {"S": timestamp_jst()},
":user": {"S": path.user},
},
)
if path.name is not None:
input["UpdateExpression"] += ", #name=:name"
# name is reserved
input["ExpressionAttributeNames"]["#name"] = "name"
input["ExpressionAttributeValues"][":name"] = {"S": path.name}
if path.description is not None:
input["UpdateExpression"] += ", description=:description"
input["ExpressionAttributeValues"][":description"] = {"S": path.description}
if path.note is not None:
input["UpdateExpression"] += ", note=:note"
input["ExpressionAttributeValues"][":note"] = {"S": path.note}
if path.invalid is not None:
input["UpdateExpression"] += ", invalid=:invalid"
input["ExpressionAttributeValues"][":invalid"] = {"BOOL": path.invalid}
return {"Update": input}
def update_path_to_video(appended, path_id, user):
"""input put video path to DynamoDB"""
def get_videos(appended):
for uri in appended:
video_id = uri.split("/")[2]
video = get_video_from_db(video_id)
yield video
def generate_input(video, path_id, user):
path_ids = video.get("learningPathIds", [])
# escape empty
            # the attribute sometimes comes back as a set, so convert it to a list explicitly
            path_ids = list(path_ids)
            # only strip the "" placeholder when the list is non-empty
            if path_ids and not path_ids[0]:
                path_ids.remove("")
path_ids.append(path_id)
input = dict(
TableName="primary_table",
Key={"PK": {"S": video["PK"]}, "SK": {"S": video["PK"]}},
UpdateExpression="SET updatedAt=:date, updatedUser=:user"
+ ", learningPathIds=:paths",
ExpressionAttributeValues={
":date": {"S": timestamp_jst()},
":user": {"S": user},
":paths": {"SS": path_ids},
},
)
return {"Update": input}
it_videos = get_videos(appended)
it_inputs = (generate_input(video, path_id, user) for video in it_videos)
return it_inputs
def remove_path_from_video(removed, path_id, user):
"""input remove video path from DynamoDB"""
def get_videos(removed):
for uri in removed:
video_id = uri.split("/")[2]
video = get_video_from_db(video_id)
yield video
def generate_input(video, path_id, user):
path_ids = video.get("learningPathIds", [])
path_ids = list(path_ids)
if path_id in path_ids:
path_ids.remove(path_id)
# escape empty
if len(path_ids) <= 0:
path_ids = [""]
input = dict(
TableName="primary_table",
Key={"PK": {"S": video["PK"]}, "SK": {"S": video["PK"]}},
UpdateExpression="SET updatedAt=:date, updatedUser=:user"
+ ", learningPathIds=:paths",
ExpressionAttributeValues={
":date": {"S": timestamp_jst()},
":user": {"S": user},
":paths": {"SS": path_ids},
},
)
return {"Update": input}
it_videos = get_videos(removed)
it_inputs = (generate_input(video, path_id, user) for video in it_videos)
return it_inputs
def update_video_order(orders, path_id):
"""input append or update video orders"""
def get_orders(orders, path_id):
for order in orders:
req_order = {"PK": path_id, "uri": order.uri}
current_order = get_order(req_order=ReqOrder(**req_order))
if current_order:
yield True, order
else:
yield False, order
def generate_input(order_with_update_or_append, path_id):
is_update, order = order_with_update_or_append
if is_update:
input = dict(
TableName="primary_table",
Key={"PK": {"S": path_id}, "SK": {"S": order.uri}},
UpdateExpression="SET #order=:order",
# order is reserved
ExpressionAttributeNames={"#order": "order"},
ExpressionAttributeValues={":order": {"N": str(order.order)}},
)
return {"Update": input}
else:
input = dict(
PK={"S": path_id},
SK={"S": order.uri},
indexKey={"S": "Video"},
createdAt={"S": str(uuid.uuid1())[:8]},
order={"N": str(order.order)},
)
item = {"TableName": "primary_table", "Item": input}
return {"Put": item}
it_orders = get_orders(orders, path_id)
it_inputs = (generate_input(order, path_id) for order in it_orders)
return it_inputs
def remove_video_order(removed, path_id):
"""input remove video orders"""
inputs = [
dict(
TableName="primary_table", Key={"PK": {"S": path_id}, "SK": {"S": uri}}
)
for uri in removed
]
orders = [{"Delete": input} for input in inputs]
return orders
transact_items = []
    # update the playlist's metadata
    transact_items.append(update_path(path))
    # when videos were added to the playlist, add the playlist id to each video's metadata
    transact_items.extend(
        update_path_to_video(appended=path.appended, path_id=path.PK, user=path.user)
    )
    # when videos were removed from the playlist, remove the playlist id from each video's metadata
    transact_items.extend(
        remove_path_from_video(removed=path.removed, path_id=path.PK, user=path.user)
    )
    # delete the play-order records of videos that were removed from the playlist
    transact_items.extend(remove_video_order(removed=path.removed, path_id=path.PK))
    # create or update the play order of videos in the playlist
    transact_items.extend(update_video_order(orders=path.orders, path_id=path.PK))
return transact_items | 5cecf9ffe8ad4acf83b4ec353abaaa3c964fdb0b | 21,803 |
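# Hedged usage sketch for transact_update_path above. `path` stands in for whatever request
# model the service uses; only the attributes the snippet reads (PK, user, name, description,
# note, invalid, appended, removed, orders) are assumed. The generated list is handed to the
# low-level DynamoDB client, which applies all updates atomically.
import boto3

def commit_path_update(path):
    client = boto3.client("dynamodb")
    transact_items = transact_update_path(path)
    # DynamoDB caps the number of items per transaction, so this assumes a small update
    client.transact_write_items(TransactItems=transact_items)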
from collections import Counter
def codon_usage(seq, aminoacid):
"""Provides the frequency of each codon encoding a given aminoacid in a DNA sequence"""
tmpList = []
for i in range(0, len(seq) - 2, 3):
if DNA_Codons[seq[i:i + 3]] == aminoacid:
tmpList.append(seq[i:i + 3])
freqDict = dict(Counter(tmpList))
    totalWeight = sum(freqDict.values())
    for codon in freqDict:
        freqDict[codon] = round(freqDict[codon] / totalWeight, 2)
return freqDict | 9e271e9c68ebd1860f3897d5a63919bf5bd5f0bf | 21,804 |
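# Usage sketch for codon_usage above. DNA_Codons is a module-level codon table in the
# original project; a tiny subset of the standard genetic code is enough here because it
# covers every codon that appears in the example sequence.
DNA_Codons = {"ATG": "M", "CTT": "L", "TTA": "L"}

seq = "ATGCTTCTTTTA"  # codons: ATG CTT CTT TTA
print(codon_usage(seq, "L"))  # {'CTT': 0.67, 'TTA': 0.33}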
from textwrap import dedent
def make_check_stderr_message(stderr, line, reason):
"""
Create an exception message to use inside check_stderr().
"""
return dedent("""\
{reason}:
Caused by line: {line!r}
Complete stderr: {stderr}
""").format(stderr=stderr, line=line, reason=reason) | a6510e8036ab27e6386e6bc8e6c33727849282c0 | 21,805 |
import numpy
def diffusion_step(matrix, row_damping=0, column_damping=0):
"""
Return the diffusion adjacency matrix produced by the input matrix
with the specified row and column normalization exponents.
Note: the row normalization is performed second, so if a value
of row_damping=1 is used, the output will be a row-stochastic
matrix regardless of choice of column normalization. Matrix will
not be modified in place.
Parameters
==========
matrix : numpy.ndarray
adjacency matrix for a given metaedge, where the source nodes are
rows and the target nodes are columns
row_damping : int or float
exponent to use in scaling each node's row by its in-degree
column_damping : int or float
exponent to use in scaling each node's column by its column-sum
Returns
=======
numpy.ndarray
Normalized matrix with dtype.float64.
"""
# returns a newly allocated array
matrix = copy_array(matrix)
# Perform column normalization
if column_damping != 0:
column_sums = numpy.array(matrix.sum(axis=0)).flatten()
matrix = normalize(matrix, column_sums, 'columns', column_damping)
# Perform row normalization
if row_damping != 0:
row_sums = numpy.array(matrix.sum(axis=1)).flatten()
matrix = normalize(matrix, row_sums, 'rows', row_damping)
return matrix | f6636b0e4557ffad0253284d914f4d662695055e | 21,806 |
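# Hedged stand-ins for the helpers diffusion_step above relies on, assuming `normalize`
# divides each row (or column) by the corresponding sum raised to the damping exponent and
# `copy_array` returns a float64 copy. Written from scratch for illustration; the project's
# real helpers may differ (e.g. sparse-matrix support).
import numpy

def copy_array(matrix):
    return numpy.array(matrix, dtype=numpy.float64)

def normalize(matrix, vector, axis, damping):
    # zero sums (isolated nodes) are mapped to a zero scale factor
    with numpy.errstate(divide="ignore"):
        scale = numpy.where(vector != 0, vector ** -float(damping), 0.0)
    if axis == "rows":
        return matrix * scale[:, numpy.newaxis]
    return matrix * scale[numpy.newaxis, :]

adjacency = numpy.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]])
walk = diffusion_step(adjacency, row_damping=1.0)  # row-stochastic random-walk matrix
print(walk.sum(axis=1))  # each row sums to 1 for nodes with outgoing edges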