content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
def text_3d(string, depth=0.5):
"""Create 3D text."""
vec_text = _vtk.vtkVectorText()
vec_text.SetText(string)
extrude = _vtk.vtkLinearExtrusionFilter()
extrude.SetInputConnection(vec_text.GetOutputPort())
extrude.SetExtrusionTypeToNormalExtrusion()
extrude.SetVector(0, 0, 1)
extrude.SetScaleFactor(depth)
tri_filter = _vtk.vtkTriangleFilter()
tri_filter.SetInputConnection(extrude.GetOutputPort())
tri_filter.Update()
return pyvista.wrap(tri_filter.GetOutput())
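# Illustrative usage (assumptions: pyvista is installed and text_3d is importable
# from its module; "Hello" and depth=0.3 are arbitrary example values).
mesh = text_3d("Hello", depth=0.3)
mesh.plot()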
|
7f851303bf9eea1a4777e70f697d7679a37cfcf8
| 25,409 |
from typing import Optional
def get_secret_version(project: Optional[str] = None,
secret: Optional[str] = None,
version: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecretVersionResult:
"""
Get a Secret Manager secret's version. For more information see the [official documentation](https://cloud.google.com/secret-manager/docs/) and [API](https://cloud.google.com/secret-manager/docs/reference/rest/v1/projects.secrets.versions).
:param str project: The project to get the secret version for. If it
is not provided, the provider project is used.
:param str secret: The secret to get the secret version for.
:param str version: The version of the secret to get. If it
is not provided, the latest version is retrieved.
"""
__args__ = dict()
__args__['project'] = project
__args__['secret'] = secret
__args__['version'] = version
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:secretmanager/getSecretVersion:getSecretVersion', __args__, opts=opts, typ=GetSecretVersionResult).value
return AwaitableGetSecretVersionResult(
create_time=__ret__.create_time,
destroy_time=__ret__.destroy_time,
enabled=__ret__.enabled,
id=__ret__.id,
name=__ret__.name,
project=__ret__.project,
secret=__ret__.secret,
secret_data=__ret__.secret_data,
version=__ret__.version)
|
bda99d80fe49f7e799272c48418b605a327f6dfa
| 25,410 |
import logging
def logged_class(cls):
"""Class Decorator to add a class level logger to the class with module and
name."""
cls.logger = logging.getLogger("{0}.{1}".format(cls.__module__, cls.__name__))
return cls
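# Illustrative usage (not part of the original snippet): decorating a class
# attaches a module-qualified logger as a class attribute.
@logged_class
class Worker:
    def run(self):
        self.logger.info("running")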
|
4a6c878c0061b2b1587e8efcdfae21de87a96e71
| 25,411 |
import numpy as np
def crop_point(image, height_rate, width_rate):
    """Crop a random region of the image.
    Crop region area = height_rate * width_rate * image_height * image_width
    Args:
        image: a Image instance.
        height_rate: float, in the interval (0, 1].
        width_rate: float, in the interval (0, 1].
    Returns:
        a Image instance.
    Raises:
        AssertionError: if height_rate or width_rate is not within (0, 1].
    """
    assert 0<height_rate<=1 and 0<width_rate<=1, 'height_rate and width_rate should be in the interval (0, 1].'
    left = image.size[0]*np.random.uniform(0, 1-width_rate)
    upper = image.size[1]*np.random.uniform(0, 1-height_rate)
    right = left+image.size[0]*width_rate
    lower = upper+image.size[1]*height_rate
    return image.crop((left, upper, right, lower))
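# Illustrative usage (assumption: Pillow is installed; "photo.jpg" is a placeholder path).
from PIL import Image
img = Image.open("photo.jpg")
patch = crop_point(img, height_rate=0.5, width_rate=0.5)  # random crop covering 25% of the area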
|
f00992597aa5d03fd2aab724668071a17120efbd
| 25,413 |
import ast
def test_name_rename():
"""
Test a simple transformer to rename
"""
class Renamer(NodeTransformer):
def visit_Name(self, node, meta):
node.id = node.id + '_visited'
return node
renamer = Renamer()
mod = ast.parse("bob = frank")
transform(mod, renamer)
bob_node = mod.body[0].targets[0]
frank_node = mod.body[0].value
assert bob_node.id == "bob_visited"
assert frank_node.id == "frank_visited"
|
a609211bb1f7fc0055abe4cf99cdc0e131f0924b
| 25,414 |
import logging
import random
import string
def fileobj_video(contents=None):
"""
Create an "mp4" video file on storage and return a File model pointing to it.
if contents is given and is a string, then write said contents to the file.
If no contents is given, a random string is generated and set as the contents of the file.
"""
if contents:
logging.warning("input = {}".format(contents))
filecontents = contents
else:
filecontents = "".join(random.sample(string.printable, 20)).encode('utf-8')
logging.warning("contents = {}".format(filecontents))
temp_file_dict = create_studio_file(filecontents, preset=format_presets.VIDEO_HIGH_RES, ext='mp4')
return temp_file_dict['db_file']
|
d3fb1d9c9e97c53853489486e37af1f2497ae0d3
| 25,415 |
def _toIPv4AddrString(intIPv4AddrInteger):
"""Convert the IPv4 address integer to the IPv4 address string.
:param int intIPv4AddrInteger: IPv4 address integer.
:return: IPv4 address string.
:rtype: str
Example::
intIPv4AddrInteger Return
---------------------------------
3221225985 -> '192.0.2.1'
Test:
>>> _toIPv4AddrString(3221225985)
'192.0.2.1'
"""
return (
str((intIPv4AddrInteger >> 24) & 0xFF) + '.' +
str((intIPv4AddrInteger >> 16) & 0xFF) + '.' +
str((intIPv4AddrInteger >> 8) & 0xFF) + '.' +
str( intIPv4AddrInteger & 0xFF))
|
ac5f55146eedaf0b7caca19327ae0a88c9d5282a
| 25,416 |
def expand_case_matching(s):
"""Expands a string to a case insensitive globable string."""
t = []
openers = {"[", "{"}
closers = {"]", "}"}
nesting = 0
drive_part = WINDOWS_DRIVE_MATCHER.match(s) if ON_WINDOWS else None
if drive_part:
drive_part = drive_part.group(0)
t.append(drive_part)
s = s[len(drive_part) :]
for c in s:
if c in openers:
nesting += 1
elif c in closers:
nesting -= 1
elif nesting > 0:
pass
elif c.isalpha():
folded = c.casefold()
if len(folded) == 1:
c = "[{0}{1}]".format(c.upper(), c.lower())
else:
newc = ["[{0}{1}]?".format(f.upper(), f.lower()) for f in folded[:-1]]
newc = "".join(newc)
newc += "[{0}{1}{2}]".format(folded[-1].upper(), folded[-1].lower(), c)
c = newc
t.append(c)
return "".join(t)
|
7d9f32e641671cf570c7c95397d9de559bab84b4
| 25,417 |
import re
def look_behind(s: str, end_idx: int) -> str:
"""
Given a string containing semi-colons, find the span of text after the last
semi-colon.
"""
span = s[: (end_idx - 1)]
semicolon_matches = [
(m.group(), m.start(), m.end()) for m in re.finditer(r"(?<=(;))", span)
]
if len(semicolon_matches) == 0:
start_idx = 0
else:
start_idx = semicolon_matches[-1][2]
return span[start_idx:end_idx].strip()
|
0cc478e73edd713fa72743f36e29001bb214e26c
| 25,418 |
def sum_fspec(files, outname=None):
"""Take a bunch of (C)PDSs and sums them."""
# Read first file
ftype0, contents = get_file_type(files[0])
pdstype = ftype0.replace("reb", "")
outname = _assign_value_if_none(
outname, "tot_" + ftype0 + HEN_FILE_EXTENSION
)
def check_and_distribute_files(files):
for i, f in enumerate(files):
            ftype, contents = get_file_type(f)
if i == 0:
contents0, ftype0 = contents, ftype
else:
assert ftype == ftype0, "Files must all be of the same kind"
contents.fftlen = contents.segment_size
yield contents
tot_contents = average_periodograms(check_and_distribute_files(files))
log.info("Saving %s to %s" % (pdstype, outname))
save_pds(tot_contents, outname)
return tot_contents
|
8d8eb0f9f75e44b2d1e34abcff4479a055c40483
| 25,419 |
import six
import pytz
def make_aware(dt, tz=None):
"""
Convert naive datetime object to tz-aware
"""
if tz:
if isinstance(tz, six.string_types):
tz = pytz.timezone(tz)
else:
tz = pytz.utc
if dt.tzinfo:
return dt.astimezone(dt.tzinfo)
else:
return tz.localize(dt)
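# Illustrative usage (assumption: pytz is installed; the timezone name is an
# arbitrary example).
from datetime import datetime
aware = make_aware(datetime(2021, 1, 1, 12, 0), "Europe/Amsterdam")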
|
b5003de5055c5d283f47e33dfdd6fbe57d6fce96
| 25,420 |
def decrypt(ctxt, kx, spice, blocksize):
""" Main decryption function
Args:
ctxt: ciphertext
kx: key expansion table
spice: spice
blocksize: size of block
Returns:
Decrypted ciphertext
"""
spice = int_to_arr(spice, 512)
ctxt_arr = int_to_arr(ctxt, blocksize)
args = (ctxt_arr, kx, spice, blocksize)
lmask = (1 << blocksize % 64) - 1
if blocksize < 36:
s = tiny_decrypt(*args)
elif blocksize < 65:
s = short_decrypt(*args, lmask)
elif blocksize < 129:
s = medium_decrypt(*args, lmask)
elif blocksize < 513:
s = long_decrypt(*args, lmask)
else:
s = extended_decrypt(*args, lmask)
return s
|
56ca0b7cb61f3eca1c20fcf174b14b6ba79c5dfe
| 25,421 |
from itertools import product
def listCombination(lists) -> list:
"""
输入多个列表组成的列表,返回多列表中元素的所有可能组合
:param lists: 多个列表组成的列表
:return: 所有元素可能的组合
"""
result = []
resultAppend = result.append
for i in product(*lists):
resultAppend(i)
return result
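# Illustrative usage (values are arbitrary): the result is the Cartesian product
# as a list of tuples.
#   listCombination([[1, 2], ['a', 'b']])
#   -> [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]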
|
6023cdc205b2780c5cd2cf56113d48a0675b98bf
| 25,422 |
import warnings
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
"""DEPRECIATED - use to_sql
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
con : DBAPI2 connection
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default False
Write DataFrame index as a column
Notes
-----
This function is deprecated in favor of ``to_sql``. There are however
two differences:
    - With ``to_sql`` the index is written to the sql database by default. To
      keep the behaviour of this function you need to specify ``index=False``.
- The new ``to_sql`` function supports sqlalchemy engines to work with
different sql flavors.
See also
--------
pandas.DataFrame.to_sql
"""
    warnings.warn("write_frame is deprecated, use to_sql", FutureWarning)
# for backwards compatibility, set index=False when not specified
index = kwargs.pop('index', False)
return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists,
index=index, **kwargs)
|
0fe3bf204d48489c65ad2497c54f009000ce89b8
| 25,424 |
def edge_slope(e):
"""Calculate the slope of an edge, 'inf' for vertical edges"""
v = edge_vector(e)
try:
return v.z / round(v.xy.length, 4)
except ZeroDivisionError:
return float("inf")
|
742427d4f97712504fcb9dc09c2168178f500ac8
| 25,425 |
import torch
def token_downup(target_dict, source_dict):
"""Transform token features between different distribution.
Returns:
x_out (Tensor[B, N, C]): token features.
Args:
target_dict (dict): dict for target token information
source_dict (dict): dict for source token information.
"""
x_s = source_dict['x']
idx_token_s = source_dict['idx_token']
idx_token_t = target_dict['idx_token']
T = target_dict['token_num']
B, S, C = x_s.shape
N_init = idx_token_s.shape[1]
weight = target_dict['agg_weight'] if 'agg_weight' in target_dict.keys() else None
if weight is None:
weight = x_s.new_ones(B, N_init, 1)
weight = weight.reshape(-1)
# choose the way with fewer flops.
if N_init < T * S:
# use sparse matrix multiplication
# Flops: B * N_init * (C+2)
idx_token_t = idx_token_t + torch.arange(B, device=x_s.device)[:, None] * T
idx_token_s = idx_token_s + torch.arange(B, device=x_s.device)[:, None] * S
coor = torch.stack([idx_token_t, idx_token_s], dim=0).reshape(2, B * N_init)
# torch.sparse.spmm does not support fp16
with torch.cuda.amp.autocast(enabled=False):
# torch.sparse does not support grad for sparse matrix
weight = weight.float().detach()
# build a matrix with shape [B*T, B*S]
A = torch.sparse.FloatTensor(coor, weight, torch.Size([B * T, B * S]))
# normalize the matrix
all_weight = A.type(torch.float32) @ x_s.new_ones(B * S, 1).type(torch.float32) + 1e-6
weight = weight / all_weight[(idx_token_t).reshape(-1), 0]
A = torch.sparse.FloatTensor(coor, weight, torch.Size([B * T, B * S]))
# sparse matmul
x_out = A.type(torch.float32) @ x_s.reshape(B * S, C).type(torch.float32)
else:
# use dense matrix multiplication
# Flops: B * T * S * (C+2)
idx_batch = torch.arange(B, device=x_s.device)[:, None].expand(B, N_init)
coor = torch.stack([idx_batch, idx_token_t, idx_token_s], dim=0).reshape(3, B * N_init)
weight = weight.detach() # detach to reduce training time
# build a matrix with shape [B, T, S]
A = torch.sparse.FloatTensor(coor, weight, torch.Size([B, T, S])).to_dense()
# normalize the matrix
A = A / (A.sum(dim=-1, keepdim=True) + 1e-6)
# dense matmul
x_out = A @ x_s
x_out = x_out.reshape(B, T, C).type(x_s.dtype)
return x_out
|
2fab5ef8aabc9231b0342d74fb07b2b89000dca3
| 25,427 |
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.metrics import accuracy_score, mean_squared_error
def get_score(train_data,train_labels,test_data,test_labels,problem_type):
    """
    Returns the accuracy score resulting from 3NN classification if problem_type = 'classification',
    or the mse from 3NN regression if problem_type = 'regression'
    """
if (problem_type=="classification"):
predictor = KNeighborsClassifier(n_neighbors=3)
else:
predictor = KNeighborsRegressor(n_neighbors=3)
predictor.fit(train_data,train_labels)
predicted_labels = predictor.predict(test_data)
if (problem_type=="regression"):
score = mean_squared_error(test_labels,predicted_labels)
else:
score = accuracy_score(test_labels,predicted_labels)
return score
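# Illustrative usage (assumption: scikit-learn is installed; the toy data below
# is made up only to show the call signature).
X_train = [[0], [1], [2], [3], [4], [5]]
y_train = [0, 0, 0, 1, 1, 1]
X_test = [[1], [4]]
y_test = [0, 1]
acc = get_score(X_train, y_train, X_test, y_test, "classification")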
|
fc957b09d0d0a60ea21b0fc50fbca94ac8ba4647
| 25,428 |
import numpy as np
def build_client_datasets_fn(train_dataset, train_clients_per_round):
  """Builds the function for generating client datasets at each round.
  Args:
    train_dataset: A `tff.simulation.ClientData` object.
    train_clients_per_round: The number of client participants in each round.
  Returns:
    A function which, for a given round `round_num`, returns a list of
    `tf.data.Dataset` objects sampled from `train_dataset`.
  """
def client_datasets(round_num):
del round_num # Unused.
sampled_clients = np.random.choice(
train_dataset.client_ids, size=train_clients_per_round, replace=False)
return [
train_dataset.create_tf_dataset_for_client(client)
for client in sampled_clients
]
return client_datasets
|
bcdb7d9c450401bff88635b5bd74c0eb6a0e7da5
| 25,429 |
import numpy as np
def get_simple_grid(xbounds, ybounds, shift_origin=None):
    """Build a regular integer pixel grid spanning the given x/y bounds.
    Each bound may be a single value (interpreted as [0, bound)) or a
    (min, max) pair; ``shift_origin`` optionally offsets all pixel coordinates.
    """
xbounds = np.atleast_1d(xbounds)
if len(xbounds)==1:
xmin,xmax = 0,xbounds[0]
else:
xmin,xmax = xbounds
ybounds = np.atleast_1d(ybounds)
if len(ybounds)==1:
ymin,ymax = 0,ybounds[0]
else:
ymin,ymax = ybounds
pixels = np.mgrid[xmin:xmax,ymin:ymax]
pixels2_flat = np.concatenate(pixels.T, axis=0)
if shift_origin is not None:
# not += because conflict between int and float array
pixels2_flat = pixels2_flat+ shift_origin
return Grid(pixels2_flat, UNIT_SQUARE)
|
8580e37ca98dc5d8214b7da563b31e8819b870cd
| 25,430 |
def query_hecate(session, ra, dec, _radius, _verbose: bool = True):
""" Query the HECATE catalog """
m=0
gal_offset = []; mag = []; filt = []; dist = []; dist_err = []; gal_ra = []; gal_dec = []; distflag = []; source = []
# set up query
try:
query = session.query(HecateQ3cRecord)
query = hecate_q3c_orm_filters(query, {'cone': f'{ra},{dec},{_radius}'})
except Exception as _e3:
if _verbose:
print(f"{_e3}")
print(f"Failed to execute query for RA, Dec = ({ra},{dec})")
if len(query.all()) > 0:
m+=1
for _x in HecateQ3cRecord.serialize_list(query.all()):
            if _x['bt'] == _x['bt']:  # NaN != NaN: keep only sources with a measured B magnitude
                mag.append(_x['bt'])
                filt.append('B')
gal = SkyCoord(_x['ra']*u.deg, _x['dec']*u.deg)
cand = SkyCoord(ra*u.deg, dec*u.deg)
gal_offset.append(cand.separation(gal).arcsec)
gal_ra.append(_x['ra'])
gal_dec.append(_x['dec'])
dist.append(_x['d']) # Mpc
dist_err.append(_x['e_d']) # Mpc
source.append('HECATE')
return m, gal_ra, gal_dec, gal_offset, mag, filt, dist, dist_err, source
|
bf321d6151226d801479ef5c9170bed597cac903
| 25,431 |
def all_main_characters(raw_data: AniListRawResponse) -> list[Character]:
"""Returns all of the main characters from the data."""
characters: list[Character] = anime_media(raw_data)["mainCharacters"]["nodes"]
return characters
|
9ec3f0cc2757fdbec24923aa0953a0c8f094bd24
| 25,432 |
from typing import List
def sequence_to_ngram(sequence: str, N: int) -> List[str]:
"""
Chops a sequence into overlapping N-grams (substrings of length N)
    :param sequence: str Sequence to convert to N-grams
    :type sequence: str
    :param N: Length of N-grams (int)
    :type N: int
    :return: List of N-grams
:rtype: List[str]
"""
return [sequence[i : i + N] for i in range(len(sequence) - N + 1)]
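# Illustrative usage (the sequence is an arbitrary example):
#   sequence_to_ngram("MKTAYIA", 3)  ->  ['MKT', 'KTA', 'TAY', 'AYI', 'YIA']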
|
8cbe97ee34c75ca3aad038236bd875ea0c3450cd
| 25,433 |
def _convert_for_receive(profile):
"""Convert profile to be fed into the receive model.
Args:
profile (pandas.DataFrame): Profile to convert.
    Returns:
        tuple: The converted profile and the rows that have no age value
        (``profile``, ``without_profile``), both as ``pandas.DataFrame``.
"""
without_profile = profile[profile.age.isna()].reset_index(drop=True)
profile = profile[~profile.age.isna()].reset_index(drop=True)
profile = _transform_age_group(
_transform_generation(
_transform_gender(
_explode_membership_date(
_extract_age_bins(
profile)))))
return profile, without_profile
|
16196499547c7f6e25e75ee8e814d8c89f8ea30d
| 25,434 |
def _format_path(path):
"""Format path to data for which an error was found.
:param path: Path as a list of keys/indexes used to get to a piece of data
:type path: collections.deque[str|int]
:returns: String representation of a given path
:rtype: str
"""
path_with_brackets = (
''.join('[{!r}]'.format(fragment) for fragment in path)
)
return '{}'.format(path_with_brackets)
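# Illustrative usage (the path elements are arbitrary):
#   from collections import deque
#   _format_path(deque(['spec', 0, 'name']))  ->  "['spec'][0]['name']"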
|
1809080453af154824e867cd8104cedbd616b937
| 25,435 |
def GetLayouts():
"""Returns the layout proxies on the active session.
Layout proxies are used to place views in a grid."""
return servermanager.ProxyManager().GetProxiesInGroup("layouts")
|
8099264d77e4daab61d24eb22edb397aeacfa294
| 25,437 |
def decomposition_super1(centroid, highway, coherence,coordinates,input):
"""
Function to perform Experiment 2: Differential Decomposition with level-specific weight
Args:
centroid: Cluster centroid of super pixels
highway: Super pixels after Stage I Super pixeling
coherence: Coherence value at super pixel level
coordinates: Coordinates of pixels in each highway clusters
input: 4 channel input data
Returns:
decom_super_coh: Coherence estimate passed from super pixel to pixel level
"""
c = 0
decom_super_coh = []
for i in range (0, 300):
new = []
for j in range (0, 300):
new.append(0)
decom_super_coh.append(new)
# Normalizing centroids and input_sl
input_min = input.min(axis=(0, 1), keepdims=True)
input_max = input.max(axis=(0, 1), keepdims=True)
input_norm = (input - input_min)/(input_max - input_min)
c_min = centroid.min(axis=(0, 1), keepdims=True)
c_max = centroid.max(axis=(0, 1), keepdims=True)
c_norm = (centroid - c_min)/(c_max - c_min)
# Traversing through each cluster coordinates to calculate
# distance between pixels and cluster coordinates
# To assign coherence value to pixel level
for cluster in coordinates:
clusterCenter = c_norm[0][c]
for point in cluster:
x,y = point[0],point[1]
superPixel = input_norm[x,y]
distance = norm(clusterCenter-superPixel)
coh = (coherence[c]*(1-distance))
decom_super_coh[x][y] = coh
c+=1
return decom_super_coh
|
9649d1d267bc442cd999f3afd622156f4c5e1895
| 25,440 |
from typing import List
import math
def get_deck_xs(bridge: Bridge, ctx: BuildContext) -> List[float]:
"""X positions of nodes on the bridge deck.
First the required X positions 'RX' are determined, positions of loads and
abutments etc.. After that a number of X positions are calculated between
each pair of adjacent X positions 'RX_i' and 'RX_j', such that the maximum
distance between X positions does not exceed 'bridge.base_mesh_deck_max_x'.
"""
all_xs = set()
# From piers.
for pier in bridge.supports:
for x in pier.x_min_max_top():
all_xs.add(round_m(x))
# Bridge ends.
all_xs.add(round_m(bridge.x_min))
all_xs.add(round_m(bridge.x_max))
# From loads.
for point in ctx.add_loads:
all_xs.add(round_m(point.x))
    # From material properties.
for x in get_deck_section_grid(bridge)[0]:
all_xs.add(round_m(x))
# Additional nodes requested by the Bridge.
for x in bridge.additional_xs:
all_xs.add(round_m(x))
all_xs = sorted(all_xs)
print_i(f"Required node X positions on deck (from all sources) =\n {all_xs}")
deck_xs = set()
for i in range(len(all_xs) - 1):
x0, x1 = all_xs[i], all_xs[i + 1]
num = math.ceil((x1 - x0) / bridge.base_mesh_deck_max_x) + 1
for x in np.linspace(x0, x1, num=num):
deck_xs.add(round_m(x))
return sorted(deck_xs)
|
75447a1929035685aeb14f212beea74bb7b814ad
| 25,441 |
def helicsGetFederateByName(fed_name: str) -> HelicsFederate:
"""
Get an existing `helics.HelicsFederate` from a core by name.
The federate must have been created by one of the other functions and at least one of the objects referencing the created federate must still be active in the process.
**Parameters**
- **`fed_name`** - The name of the federate to retrieve.
**Returns**: `helics.HelicsFederate`.
"""
f = loadSym("helicsGetFederateByName")
err = helicsErrorInitialize()
result = f(cstring(fed_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFederate(result)
|
a262ee67a4b87212be401442d99482a569862f92
| 25,442 |
import functools
import logging
def persistant_property(*key_args):
"""Utility decorator for Persistable-based objects. Adds any arguments as properties
that automatically loads and stores the value in the persistence table in the database.
These arguments are created as permanent persistent properties."""
def _decorator(cls):
@functools.wraps(cls)
def wrapper(*args, **kwargs):
for key in key_args:
# this _closure function is required since we're using a for loop and a closure
# see http://www.discoversdk.com/blog/closures-in-python-3
def _closure(key=key):
internal_key = f'_{key}' # internal value
internal_key_loaded = f'_{key}_loaded' # boolean set to True after it's loaded
def _getter(self):
try:
self.load_persistent_property(key)
except Exception as e:
logging.error(f"unable to load persistence key {key}: {e}")
return getattr(self, internal_key)
def _setter(self, value):
try:
retry(self.save_persistent_property(key, value))
except Exception as e:
logging.error(f"unable to save persistence key {key}: {e}")
setattr(self, internal_key, value)
setattr(cls, internal_key, None)
setattr(cls, internal_key_loaded, False)
setattr(cls, key, property(_getter, _setter))
_closure(key)
return cls(*args, **kwargs)
return wrapper
return _decorator
|
bba4de85830496d414c80960b59422d51af30572
| 25,443 |
import pkg_resources
import csv
def states():
"""
Get a dictionary of Backpage city names mapped to their respective states.
Returns:
dictionary of Backpage city names mapped to their states
"""
states = {}
fname = pkg_resources.resource_filename(__name__, 'resources/City_State_Pairs.csv')
    with open(fname, 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
states[row[0]] = row[1]
return states
|
4781170b9f8c8ab654ebb39dd733577351571b3e
| 25,444 |
def matrix_set_diag(input_x, diagonal, k=0, alignment="RIGHT_LEFT"):
"""
Calculate a batched matrix tensor with new batched diagonal values.
Args:
        input_x (Tensor): a :math:`(..., M, N)` matrix whose diagonals are to be set.
        diagonal (Tensor): a :math:`(..., max_diag_len)` or :math:`(..., num_diags, max_diag_len)`
            vector to be placed into the output's diagonals.
        k (Tensor): a scalar or a 1D list of length at most 2, giving the diagonal's
            lower and upper indices (k[0], k[1]).
alignment (str): Some diagonals are shorter than `max_diag_len` and need to be padded.
`align` is a string specifying how superdiagonals and subdiagonals should be aligned,
respectively. There are four possible alignments: "RIGHT_LEFT" (default),
"LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to
the right (left-pads the row) and subdiagonals to the left (right-pads the row).
Returns:
- Tensor, :math:`(...,M, N)`. a batched matrix with the same shape and values as `input`,
except for the specified diagonals of the innermost matrices.
Supported Platforms:
``CPU`` ``GPU``
Examples:
>>> import numpy as onp
>>> from mindspore.common import Tensor
>>> from mindspore.scipy.ops_wrapper import matrix_set_diag
>>> input_x = Tensor(
>>> onp.array([[[7, 7, 7, 7],[7, 7, 7, 7], [7, 7, 7, 7]],
>>> [[7, 7, 7, 7],[7, 7, 7, 7],[7, 7, 7, 7]]])).astype(onp.int)
>>> diagonal = Tensor(onp.array([[1, 2, 3],[4, 5, 6]])).astype(onp.int)
>>> output = matrix_set_diag(input_x, diagonal)
>>> print(output)
>>> [[[1 7 7 7]
[7 2 7 7]
[7 7 3 7]]
[[4 7 7 7]
[7 5 7 7]
        [7 7 6 7]]]
"""
matrix_set_diag_net = MatrixSetDiag(alignment)
k_vec = mnp.zeros((2,), dtype=mstype.int32)
if isinstance(k, int):
k_vec += k
elif isinstance(k, (list, tuple)):
k_vec = k
else:
_raise_value_error("input k to indicate diagonal region is invalid.")
k_vec = _to_tensor(k_vec, dtype=mstype.int32)
output = matrix_set_diag_net(input_x, diagonal, k_vec)
return output
|
e8dddc42438ae2bc8bf70ef9a0db1b1cdce9dad3
| 25,445 |
def execute_payment(pp_req):
"""Executes a payment authorized by the client."""
payment = paypalrestsdk.Payment.find(pp_req['paymentId'])
if payment.execute({"payer_id": pp_req['PayerID']}):
return True
return False
|
4d7f94610b6f8360371099d3774fd4902e47b6c7
| 25,446 |
def create_structural_eqs(X, Y, G, n_nodes_se=40, n_nodes_M=100, activation_se='relu'):
"""
Method to create structural equations (F:U->X) and the original prediction model (M:X->Y). This also calculates and stores residuals.
Parameters
----------
X : pandas DataFrame
input features of the dataset
Y : pandas Series
target to be predicted
G : networkx.classes.digraph.DiGraph
causal graph of the data
n_nodes_se : int
        number of nodes for the neural network of the structural equations (SE)
n_nodes_M: int
number of nodes in the neural network of the original model (M)
activation_se: str
type of activation for the structural equations
Returns
----------
struct_eq: keras.engine.functional.Functional - keras Model
structural equations (F:U->X)
final : keras.engine.functional.Functional - keras Model
model in the latent space. Final model that uses structural equations and original prediction model: M^:U->Y. M^(u)=M(F(u))
Additionally:
In the folder data, residuals are stored
In the folder data, the original prediction model (M - stored as "nn_model"), the model for child nodes and structural equations are stored.
Performance metrics are printed in the terminal
"""
# split dataset
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, random_state=42)
# take all nodes except target >>> classification
nodes = [n for n in list(G.nodes) if n != Y.name]
# Standardise data
scaler = StandardScaler()
scaler.fit(X_train)
X_train.loc[:, :] = scaler.transform(X_train)
X_test.loc[:, :] = scaler.transform(X_test)
# get root nodes
root_nodes = [n for n, d in G.in_degree() if d == 0]
# define variables where residuals and residual inputs will be stored
U_train = X_train[root_nodes].copy()
U_test = X_test[root_nodes].copy()
res_inputs = []
#define tf inputs, one for each node
node_inputs = {n: keras.Input(shape=(1,), name=n) for n in nodes}
# define dic to store the final X = F(U) with U = (roots, residuals) for each node
# fill the root nodes directly with input layers
X_n = {r: node_inputs[r] for r in root_nodes}
# auxiliary while-loop variables
added_nodes = []
root_nodes_tmp = root_nodes
while set(root_nodes_tmp) != set(nodes):
# loop until all nodes are either root or dealt with (root_nodes_tmp
# contains root nodes and is updated with dealt with nodes)
for n in nodes:
parents = list(G.predecessors(n))
# go on only when:
# n has parents
# parents are root_nodes or nodes already dealt with
# n is not a root node and has not been dealt with yet
if G.in_degree[n] != 0 and set(parents).issubset(set(root_nodes_tmp)) and not n in root_nodes_tmp:
print("dealing with ", n, " with parents: ", parents)
# build the model from parents to node n
if len(parents) == 1:
parent = parents[0]
inputs = node_inputs[parent]
conc = tf.identity(inputs)
X_train_p = X_train[parent].values
X_test_p = X_test[parent].values
else:
inputs = [node_inputs[p] for p in parents]
conc = layers.Concatenate()(inputs)
X_train_p = [X_train[p].values for p in parents]
X_test_p = [X_test[p].values for p in parents]
x = layers.Dense(n_nodes_se, activation=activation_se)(conc)
x = layers.Dense(n_nodes_se, activation=activation_se)(x)
out = layers.Dense(1)(x)
ff = keras.Model(inputs=inputs, outputs=out, name=n)
ff.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam(learning_rate=0.0001))
hist = ff.fit(X_train_p, X_train[n].values, batch_size=512,
epochs=200, verbose=0, validation_split=0.25, callbacks=[early_stopping])
#plot history
# plt.plot(hist.history['val_loss'])
# plt.plot(hist.history['loss'])
# plt.show()
#
# plt.figure()
# pred_tmp=ff.predict(X_test_p)
# plt.plot(X_test[n].values, pred_tmp.reshape(1,-1)[0], '.', alpha=0.2)
score = ff.evaluate(X_train_p, X_train[n].values, verbose=0)
print('The TRAIN score for model node ', n, ' is ', score)
score = ff.evaluate(X_test_p, X_test[n].values, verbose=0)
print('The TEST score for model node ', n, ' is ', score)
# save picture of the model
#dot_img_file = 'model_nn' + node_tmp +'.png'
#keras.utils.plot_model(nn, to_file=dot_img_file, show_shapes=True)
# plot model graph
# keras.utils.plot_model(ff, show_shapes=True)
# Calculate residuals as the value of the node - the prediction of the model for that node
pred = ff.predict(X_train_p).reshape(X_train.shape[0],)
U_train['r_' + n] = X_train[n].values - pred
pred = ff.predict(X_test_p).reshape(X_test.shape[0],)
U_test['r_' + n] = X_test[n].values - pred
# build input for residual of node n
res = keras.Input(shape=(1,), name="r_" + n)
res_inputs.append(res)
# create the reconstructed node as the built model ff + the residual
X_n[n] = layers.Add(name=n + "_reconstructed")([ff([X_n[p] for p in parents]), res])
# Save nn of the structural equation
ff.save('models/'+str(n)+'.h5')
added_nodes.append(n)
# Add the node in the roots node, so the graph can be explored in the next dependence level
root_nodes_tmp = root_nodes_tmp + added_nodes
added_nodes = []
# Define the structural equation model
inputs = [X_n[r] for r in root_nodes] + res_inputs
    # Reorder the inputs so that they follow the same order as the list "nodes"
col_name_inputs = [i.name[:-2].split('r_')[-1] for i in inputs]
inputs = list(np.array(inputs)[[col_name_inputs.index(col) for col in nodes]])
# concatenate outputs to build a stacked tensor (actually a vector),
# respecting the order of the original nodes (i.e. same order of X_in)
X_out = tf.concat([X_n[x] for x in nodes], axis=1, name='X_out')
struct_eq_tmp = keras.Model(inputs=inputs, outputs=X_out, name="struct_eq_tmp")
dim_input_se = U_train.shape[1]
inputs = keras.Input(shape=(dim_input_se,), name="U")
# define the model struct_eq U->X
x = keras.layers.Lambda(lambda x: tf.split(x, num_or_size_splits=dim_input_se, axis=1))(inputs)
out_x = struct_eq_tmp(x)
struct_eq = keras.Model(inputs=inputs, outputs=out_x, name="struct_eq")
struct_eq.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam())
struct_eq.save('models/nn_struct_eq.h5')
# Save residual dataset
columns_dataset_u = [i.split('r_')[-1] for i in U_train.columns]
columns_dataset_u = list(np.array(U_train.columns)[[columns_dataset_u.index(col) for col in nodes]])
U_train[columns_dataset_u].to_csv('data/res_train.csv', index=False)
U_test[columns_dataset_u].to_csv('data/res_test.csv', index=False)
### Build M, standard ML model
# model going from features X to target Y
# the inputs are precisely the node inputs
# X matrix -> Y
X_in = keras.Input(shape=(len(nodes)), name='X_in')
x = layers.Dense(n_nodes_M, activation='relu')(X_in)
x = layers.Dense(int(n_nodes_M/2), activation='relu')(x)
out = layers.Dense(2, activation='softmax')(x)
M = keras.Model(inputs=X_in, outputs=out, name="M")
M.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.Adam(learning_rate=0.001))
hist=M.fit(X_train, y_train, batch_size=512, epochs=200, verbose=0,
validation_split=0.25, callbacks=[early_stopping])
# plt.plot(hist.history['val_loss'])
# plt.plot(hist.history['loss'])
# plt.show()
M.save('models/nn_model.h5')
### Build a model from root_nodes + residuals to Y, i.e. Y^ = M(F(U))
# matrix U -> Y
inputs = keras.Input(shape=(U_train.shape[1],), name="U")
out = M(struct_eq(inputs))
final = keras.Model(inputs=inputs, outputs=out, name="final")
final.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.Adam())
# final.summary()
# dot_img_file = 'final.png'
# keras.utils.plot_model(final, to_file=dot_img_file, show_shapes=True)
# final.save('final.h5')
### make predictions
# Load final model (the weights are already computed in model M and
# structural equation F, no need to fit)
pred = final.predict(U_test)[:, 1]
# Print report
print(classification_report(y_test, pred > 0.5))
return struct_eq, final
|
f8ebc336360fa7d04ac1aa90dbb8165e54181f6b
| 25,447 |
import random
import math
import numpy as np
def random_walk(humans, dt, energy, temperature):
    """
    Calculates locations and speeds by adding random values to the velocities.
    Args:
        humans (list): list of all humans
        dt (float): time step in which the movement is calculated
        energy (float): total amount of movement, used to rescale the velocities
        temperature (float): scales the magnitude of the random velocity changes
    Returns:
        humans (list): list of all humans
    """
new_energy = 0
old_humans = humans
for i, h in enumerate(humans):
infection(humans, h, i)
new_location = h.location + dt * h.velocity
velocity_gen_x = random.gauss(0, 1)
velocity_gen_y = random.gauss(0, 1)
velocity_random = [
velocity_gen_x * float(temperature)/15, velocity_gen_y * float(temperature)/15]
new_velocity = h.velocity + velocity_random
# handle maximum velocity based on total energy
new_energy += np.linalg.norm(new_velocity)**2
factor = math.sqrt(energy / new_energy)
new_velocity = new_velocity*factor
abs_speed = np.linalg.norm(new_velocity)**2
factor_v = math.sqrt(abs_speed / energy)
if factor_v > 3*(1/len(humans)):
scaling = 0.03/factor_v
new_velocity = new_velocity*scaling
h.update(new_location, new_velocity)
return humans
|
bfc04b4d0ae1a5c6a7c510a72a9ae2607a225a37
| 25,449 |
def format_name(name_format: str, state: State):
"""Format a checkpoint filename according to the ``name_format`` and the training :class:`~.State`.
The following format variables are available:
+------------------------+-------------------------------------------------------+
| Variable | Description |
+========================+=======================================================+
| ``{rank}`` | The global rank, as returned by |
| | :func:`~.dist.get_global_rank`. |
+------------------------+-------------------------------------------------------+
| ``{local_rank}`` | The local rank of the process, as returned by |
| | :func:`~.dist.get_local_rank`. |
+------------------------+-------------------------------------------------------+
| ``{world_size}`` | The world size, as returned by |
| | :func:`~.dist.get_world_size`. |
+------------------------+-------------------------------------------------------+
| ``{local_world_size}`` | The local world size, as returned by |
| | :func:`~.dist.get_local_world_size`. |
+------------------------+-------------------------------------------------------+
| ``{node_rank}`` | The node rank, as returned by |
| | :func:`~.dist.get_node_rank`. |
+------------------------+-------------------------------------------------------+
| ``{epoch}`` | The total epoch count, as returned by |
| | :meth:`~composer.core.time.Timer.epoch`. |
+------------------------+-------------------------------------------------------+
| ``{batch}`` | The total batch count, as returned by |
| | :meth:`~composer.core.time.Timer.batch`. |
+------------------------+-------------------------------------------------------+
| ``{batch_in_epoch}`` | The batch count in the current epoch, as returned by |
| | :meth:`~composer.core.time.Timer.batch_in_epoch`. |
+------------------------+-------------------------------------------------------+
| ``{sample}`` | The total sample count, as returned by |
| | :meth:`~composer.core.time.Timer.sample`. |
+------------------------+-------------------------------------------------------+
| ``{sample_in_epoch}`` | The sample count in the current epoch, as returned by |
| | :meth:`~composer.core.time.Timer.sample_in_epoch`. |
+------------------------+-------------------------------------------------------+
| ``{token}`` | The total token count, as returned by |
| | :meth:`~composer.core.time.Timer.token`. |
+------------------------+-------------------------------------------------------+
| ``{token_in_epoch}`` | The token count in the current epoch, as returned by |
| | :meth:`~composer.core.time.Timer.token_in_epoch`. |
+------------------------+-------------------------------------------------------+
.. note::
        If using DeepSpeed, and ``name_format`` does not end with a tarfile archive extension (``'.tar'``, ``'.tgz'``,
``'.tar.gz'``, ``'.tar.bz2'``, or ``'.tar.lzma'``), then ``'.tar'`` will be appended. DeepSpeed uses a tarball
format as it saves model and optimizer states in separate files within the tarball.
Consider the following scenario, where the current epoch count is ``1`` and the current batch count is ``42``:
* When not using DeepSpeed, then the rank zero process will call this function:
.. testsetup:: composer.utils.checkpoint.format_name.no_deepspeed
from composer.utils.checkpoint import format_name
state.timer._batch._value = 42
state.timer._epoch._value = 1
.. doctest:: composer.utils.checkpoint.format_name.no_deepspeed
>>> format_name("ep{epoch}-ba{batch}", state)
'ep1-ba42'
* When using DeepSpeed, each rank (process) will call this function. ``'{rank}'`` should appear within
``name_format``, so each rank (process) will write to its own file. For example, on the rank zero process:
.. testsetup:: composer.utils.checkpoint.format_name.deepspeed
from composer.utils.checkpoint import format_name
original_is_model_deepspeed = State.is_model_deepspeed
setattr(State, 'is_model_deepspeed', property(lambda x: True))
state.timer._batch._value = 42
state.timer._epoch._value = 1
.. doctest:: composer.utils.checkpoint.format_name.deepspeed
>>> format_name("ep{epoch}-ba{batch}-rank{rank}", state)
'ep1-ba42-rank0.tar'
.. testcleanup:: composer.utils.checkpoint.format_name.deepspeed
setattr(State, 'is_model_deepspeed', original_is_model_deepspeed)
"""
checkpoint_name = name_format.format(
rank=dist.get_global_rank(),
local_rank=dist.get_local_rank(),
world_size=dist.get_world_size(),
local_world_size=dist.get_local_world_size(),
node_rank=dist.get_node_rank(),
epoch=int(state.timer.epoch),
batch=int(state.timer.batch),
batch_in_epoch=int(state.timer.batch_in_epoch),
sample=int(state.timer.sample),
sample_in_epoch=int(state.timer.sample_in_epoch),
token=int(state.timer.token),
token_in_epoch=int(state.timer.token_in_epoch),
)
if state.is_model_deepspeed and not _is_archive(checkpoint_name):
# Deepspeed requires tarballs; appending `.tar`
checkpoint_name += ".tar"
return checkpoint_name
|
72c9d5a50f1c05e726702f33befe3373a0ba4486
| 25,450 |
from mpi4py import MPI
def allsync(local_values, comm=None, op=None):
"""Perform allreduce if MPI comm is provided."""
if comm is None:
return local_values
if op is None:
op = MPI.MAX
return comm.allreduce(local_values, op=op)
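# Illustrative usage (assumption: mpi4py is installed and the script runs under
# mpirun); without a communicator the local value is returned unchanged.
#   global_max = allsync(local_max, comm=MPI.COMM_WORLD)                 # MPI.MAX by default
#   global_sum = allsync(local_count, comm=MPI.COMM_WORLD, op=MPI.SUM)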
|
d10174d7774e5691193ae4c08d7fe6838e8c1ee4
| 25,451 |
import numpy as np
def vec_bin_array(arr, m):
"""
Arguments:
arr: Numpy array of positive integers
m: Number of bits of each integer to retain
Returns a copy of arr with every element replaced with a bit vector.
Bits encoded as int8's.
"""
to_str_func = np.vectorize(lambda x: np.binary_repr(x).zfill(m))
strs = to_str_func(arr)
ret = np.zeros(list(arr.shape) + [m], dtype=np.int8)
for bit_ix in range(0, m):
fetch_bit_func = np.vectorize(lambda x: x[bit_ix] == '1')
ret[...,bit_ix] = fetch_bit_func(strs).astype("int8")
return ret
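# Illustrative usage (values are arbitrary): each integer is replaced by its
# m-bit binary representation, most-significant bit first (values are assumed
# to fit in m bits).
#   vec_bin_array(np.array([3, 5]), 4)
#   -> array([[0, 0, 1, 1],
#             [0, 1, 0, 1]], dtype=int8)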
|
bb56f94413ef611b9a685b835203aad9064b3092
| 25,452 |
from typing import Iterator
from typing import List
def parse_bafs(stream: Iterator[str]) -> List[BAF]:
"""Parses allelic counts output from GATK ModelSegments, which is a SAM-style
header comprising lines starting with @ followed by single line with column
names (CONTIG, POSITION, REF_COUNT, ALT_COUNT, REF_NUCLEOTIDE, ALT_NUCLEOTIDE)."""
skip_header(stream)
bafs: List[BAF] = []
for line in stream:
        chromosome, position, ref_count, alt_count, ref_nucleotide, alt_nucleotide = line.rstrip('\n').split('\t')
        baf = BAF(chromosome=chromosome, position=int(position), ref_count=int(ref_count),
                  alt_count=int(alt_count), ref_nucleotide=ref_nucleotide, alt_nucleotide=alt_nucleotide)
bafs.append(baf)
return bafs
|
b490b007841afd707576780f436175aec6526f14
| 25,453 |
import mpmath
def logpdf(x, chi, c):
"""
Logarithm of the PDF of the ARGUS probability distribution.
"""
if c <= 0:
raise ValueError('c must be positive')
if chi <= 0:
raise ValueError('chi must be positive')
if x < 0 or x > c:
return mpmath.mp.ninf
with mpmath.extradps(5):
x = mpmath.mpf(x)
chi = mpmath.mpf(chi)
c = mpmath.mpf(c)
z = x/c
t1 = (3*mpmath.log(chi)
- mpmath.log(2*mpmath.pi)/2
- mpmath.log(_psi(chi)))
t2 = -mpmath.log(c) + mpmath.log(z)
t3 = mpmath.log1p(-z**2)/2
t4 = -chi**2/2*(1 - z**2)
return t1 + t2 + t3 + t4
|
8df44305dfaeaa9b725de7a9224259929c4c8900
| 25,454 |
def sample(colors: list, max_colors: int = 8, sensitivity: int = 75) -> list:
"""
Sample most common colors from a PIL Image object.
:param colors: list of RGB color tuples eg. [(0, 0, 0), (255, 255, 255)]
:param max_colors: maximum number of colors to return
    :param sensitivity: how perceptibly different (Euclidean distance) a color
        must be from others to be included in the sampled palette.
:returns: list of most common colors in RGB tuples (255, 255, 255)
"""
# reduce all found colors using supplied sensitivity
sampled_colors = []
for color in colors:
# if max_color limit reached stop looking
if len(sampled_colors) == max_colors:
break
# clean-up any slight color differences in PIL sampling
color = normalize_rgb_values(color)
# if most common color (first color) append it
if sampled_colors == []:
sampled_colors.append(color)
continue
        # calculate Euclidean distance for a color against colors
        # already appended to determine if it should be ignored
if not any(
color_distance(color, found) <= sensitivity for found in sampled_colors
):
sampled_colors.append(color)
return sampled_colors
|
5d90dfa3d097ea923f25deafda5b907a41c5909d
| 25,455 |
def tf_example_to_feature_description(example,
num_timesteps=DEFAULT_NUM_TIMESTEPS):
"""Takes a string tensor encoding an tf example and returns its features."""
if not tf.executing_eagerly():
raise AssertionError(
        'tf_example_to_feature_description() only works under eager mode.')
example = tf.train.Example.FromString(example.numpy())
ret = {}
for k, v in example.features.feature.items():
l = len(v.float_list.value)
if l % num_timesteps:
raise ValueError('Unexpected feature length %d. It should be divisible '
'by num_timesteps: %d' % (l, num_timesteps))
size = l // num_timesteps
ret[k] = tf.io.FixedLenFeature([num_timesteps, size], tf.float32)
return ret
|
edf4f829b1c0746a34093ea36672a094412794f1
| 25,456 |
import numpy
def generateStructuredGridPoints(nx, ny, v0, v1, v2, v3):
"""
Generate structured grid points
:param nx: number of x cells
:param ny: number of y cells
:param v0: south west corner
:param v1: south east corner
:param v2: north east corner
:param v3: north west corner
    :returns array of size (nx + 1, ny + 1, 3)
"""
# parametric
nx1 = nx + 1
ny1 = ny + 1
x = numpy.linspace(0., 1., nx1)
y = numpy.linspace(0., 1., ny1)
xx1, yy1 = numpy.meshgrid(x, y, indexing='ij')
xx0 = 1.0 - xx1
yy0 = 1.0 - yy1
# structured points
spts = numpy.zeros(list(xx0.shape) + [3], numpy.float64)
for j in range(3):
spts[..., j] = xx0*yy0*v0[j] + \
xx1*yy0*v1[j] + \
xx1*yy1*v2[j] + \
xx0*yy1*v3[j]
return spts
|
0de9a3a3a47b26c3c3d56088c7ec55d241edeff3
| 25,458 |
def Keywords(lang_id=0):
"""Returns Specified Keywords List
@param lang_id: used to select specific subset of keywords
"""
return [PY_KW, PY_BIN]
|
1a0f0ac7d22e4da00438d823c50258cb5ade8574
| 25,459 |
def clear(keyword):
"""``clear`` property validation."""
return keyword in ('left', 'right', 'both', 'none')
|
c16cc980b9af82b4210e3c8c430cd65934596aa1
| 25,460 |
from typing import List
def PermissionsListOfUser(perm_list: List[str]) -> List[str]:
"""
Takes a list of items and asserts that all of them are in the permissions list of
a user.
:param perm_list: A list of permissions encoded as ``str``
:return: The input perm_list
:raises Invalid: If the user does not have a permission in the list
"""
if isinstance(perm_list, list):
for perm in perm_list:
if not flask.g.user.has_permission(perm):
break
else:
return perm_list
raise Invalid('permissions must be in the user\'s permissions list')
|
811adedcdc9b90a066d6253269de33e0813c8d7b
| 25,461 |
import collections
def PrepareForBuild(input_proto, output_proto, _config):
"""Prepare to build toolchain artifacts.
The handlers (from _TOOLCHAIN_ARTIFACT_HANDLERS above) are called with:
artifact_name (str): name of the artifact type.
chroot (chroot_lib.Chroot): chroot. Will be None if the chroot has not
yet been created.
sysroot_path (str): sysroot path inside the chroot (e.g., /build/atlas).
Will be an empty string if the sysroot has not yet been created.
build_target_name (str): name of the build target (e.g., atlas). Will be
an empty string if the sysroot has not yet been created.
input_artifacts ({(str) name:[str gs_locations]}): locations for possible
input artifacts. The handler is expected to know which keys it should
be using, and ignore any keys that it does not understand.
profile_info ({(str) name: (str) value}) Dictionary containing profile
information.
They locate and modify any ebuilds and/or source required for the artifact
being created, then return a value from toolchain_util.PrepareForBuildReturn.
This function sets output_proto.build_relevance to the result.
Args:
input_proto (PrepareForToolchainBuildRequest): The input proto
output_proto (PrepareForToolchainBuildResponse): The output proto
_config (api_config.ApiConfig): The API call config.
"""
if input_proto.chroot.path:
chroot = controller_util.ParseChroot(input_proto.chroot)
else:
chroot = None
input_artifacts = collections.defaultdict(list)
for art in input_proto.input_artifacts:
item = _TOOLCHAIN_ARTIFACT_HANDLERS.get(art.input_artifact_type)
if item:
input_artifacts[item.name].extend(
['gs://%s' % str(x) for x in art.input_artifact_gs_locations])
profile_info = _GetProfileInfoDict(input_proto.profile_info)
results = set()
sysroot_path = input_proto.sysroot.path
build_target = input_proto.sysroot.build_target.name
for artifact_type in input_proto.artifact_types:
# Unknown artifact_types are an error.
handler = _TOOLCHAIN_ARTIFACT_HANDLERS[artifact_type]
if handler.prepare:
results.add(handler.prepare(
handler.name, chroot, sysroot_path, build_target, input_artifacts,
profile_info))
# Translate the returns from the handlers we called.
# If any NEEDED => NEEDED
# elif any UNKNOWN => UNKNOWN
# elif any POINTLESS => POINTLESS
# else UNKNOWN.
if toolchain_util.PrepareForBuildReturn.NEEDED in results:
output_proto.build_relevance = PrepareForBuildResponse.NEEDED
elif toolchain_util.PrepareForBuildReturn.UNKNOWN in results:
output_proto.build_relevance = PrepareForBuildResponse.UNKNOWN
elif toolchain_util.PrepareForBuildReturn.POINTLESS in results:
output_proto.build_relevance = PrepareForBuildResponse.POINTLESS
else:
output_proto.build_relevance = PrepareForBuildResponse.UNKNOWN
return controller.RETURN_CODE_SUCCESS
|
5c77d9ad318e0b5dd604e5127e9864aac50e7d77
| 25,462 |
from pathlib import Path
from types import SimpleNamespace
import json
def for_properties(path: Path = Path('config.json')):
"""
Simple externalized configuration loader. Properties are loaded from a file containing a JSON object.
:param path: Path to the file.
:return: Simple namespace with the key/value pairs matching the loaded json object.
"""
if not path or not path.exists():
raise ValueError(f"Configuration file [{path}] doesn't exist")
return json.loads(path.read_text(), object_hook=lambda d: SimpleNamespace(**d))
|
44e377ff28cef3b77adbbcc653f6b1ec196f2a2d
| 25,463 |
import boto3
def get_instance_tags(ec2_client: boto3.Session.client, instance_id: str):
"""Get instance tags to parse through for selective hardening"""
tag_values = []
tags = ec2_client.describe_tags(
Filters=[
{
"Name": "resource-id",
"Values": [
instance_id,
],
},
],
)["Tags"]
for tag in tags:
tag_values.append(tag["Value"])
return tag_values
|
94506f230e44d730a89b15263ef74367c777f654
| 25,465 |
from typing import Dict
def getmasterxpub(client: HardwareWalletClient, addrtype: AddressType = AddressType.WIT, account: int = 0) -> Dict[str, str]:
"""
Get the master extended public key from a client
:param client: The client to interact with
:return: A dictionary containing the public key at the ``m/44'/0'/0'`` derivation path.
Returned as ``{"xpub": <xpub string>}``.
"""
return {"xpub": client.get_master_xpub(addrtype, account).to_string()}
|
58e20780672b0c7cd1dc0912ef3565e83e220a53
| 25,467 |
from typing import Any
def serialize(
obj: Any,
annotation: Any,
config: SerializerConfig
) -> str:
"""Convert the object to JSON
Args:
obj (Any): The object to convert
annotation (Annotation): The type annotation
config (SerializerConfig): The serializer configuration
Returns:
str: The serialized object
"""
if _is_typed(annotation):
return typed_serialize(obj, annotation, config)
else:
return untyped_serialize(obj, config)
|
6fc0fab725798c4d5b643c2dfb6a76929173f601
| 25,468 |
def validate_dvprel(prop_type, pname_fid, validate):
"""
    Validates the DVPREL1/2
    .. note:: words that start with integers (e.g., 12I/T**3) don't
              support strings
"""
if validate:
msg = 'DVPREL1: prop_type=%r pname_fid=%r is invalid' % (prop_type, pname_fid)
#if prop_type == 'CELAS2':
#assert pname_fid in ['K', 'GE', 'S'], msg
#elif prop_type == 'CELAS4':
#assert pname_fid in ['K'], msg
if prop_type == 'PELAS':
if pname_fid in ['K1', 3]:
pname_fid = 'K1'
elif pname_fid in ['GE1', 4]:
pname_fid = 'GE1'
else:
raise NotImplementedError('PELAST pname_fid=%r is invalid' % pname_fid)
#assert pname_fid in [3, 4, 'K1', 'GE1'], msg
elif prop_type == 'PELAST':
if pname_fid in ['TKID', 3]:
pname_fid = 'TKID'
else:
raise NotImplementedError('PELAST pname_fid=%r is invalid' % pname_fid)
assert pname_fid in [3, 4, 'TKID'], msg
elif prop_type == 'PROD':
if pname_fid in ['A', 4]:
pname_fid = 'A'
elif pname_fid in ['J', 5]:
pname_fid = 'J'
#elif pname_fid in ['C', 6]:
#pname_fid = 'C'
else:
raise NotImplementedError('PROD pname_fid=%r is invalid' % pname_fid)
assert pname_fid in [4, 'A', 5, 'J'], msg
elif prop_type == 'PTUBE':
assert pname_fid in [4, 5], msg
#elif prop_type == 'CBAR':
#assert pname_fid in ['X1', 'X2'], msg
elif prop_type == 'PBAR':
assert pname_fid in [4, 5, 6, 7, 12, 13, 14, 15, 16, 17, 18, 19, 'A', 'I1', 'J'], msg
elif prop_type == 'PBARL':
assert pname_fid in [12, 13, 14, 15, 16, 17, 'DIM1', 'DIM2', 'DIM3', 'DIM4'], msg
#elif prop_type == 'CBEAM':
#assert pname_fid in ['X1', 'X2', 'X3', 'W1A', 'W2A', 'W3A', 'W1B', 'W2B', 'W3B'], msg
elif prop_type == 'PBEAM':
assert pname_fid in ['I1', 'I2', 'A', 'J',
'I1(B)', 'I2(B)',
'-8', '-9', '-10', '-14'], msg # -8
elif prop_type == 'PBEAML':
assert pname_fid in ['DIM1', 'DIM2', 'DIM3', 'DIM4', 'DIM5', 'DIM6',
'DIM1(A)',
'DIM1(B)', 'DIM2(B)', 'I1(B)', 'I2(B)',
'NSM'], msg # 'DIM(B)'
#elif prop_type == 'CQUAD4':
#assert pname_fid in ['T1', 'T2', 'T3', 'T4'], msg
elif prop_type == 'PSHELL':
if pname_fid in ['T', 4]:
pname_fid = 'T'
elif pname_fid in [6]: # 12I/T**3 doesn't support strings
pass
else:
raise NotImplementedError('PSHELL pname_fid=%r is invalid' % pname_fid)
#if cp_name in '12I/T**3':
#cp_name =
#assert pname_fid in ['T', 4, 6], msg
elif prop_type == 'PCOMP':
if isinstance(pname_fid, str):
word, num = break_word_by_trailing_integer(pname_fid)
if word not in ['T', 'THETA']:
raise RuntimeError(msg)
else:
assert pname_fid in [3, #3-z0
# 13-t1, 14-theta1, 17-t2, 18-theta2
13, 14, 17, 18,
23, 24, 27, 28,
33, 34, 37, 38,
43, 44, 47, 48], msg
elif prop_type == 'PCOMPG':
#if pname_fid in ['T', 4]:
#pname_fid = 'T'
#elif pname_fid in [6]: # 12I/T**3 doesn't support strings
#pass
#else:
#raise NotImplementedError('PSHELL pname_fid=%r is invalid' % pname_fid)
#if cp_name in '12I/T**3':
assert pname_fid in ['Z0', 'SB',
15, 25, 75, 85], msg
#elif prop_type == 'CBUSH':
#assert pname_fid in ['X1', 'X2', 'X3', 'S', 'S1'], msg
elif prop_type == 'PBUSH':
assert pname_fid in [18,
'K1', 'K2', 'K3', 'K4', 'K5', 'K6',
'B2',
'GE1', 'GE3', 'GE4', 'GE5', 'GE6',
'-13'], msg
elif prop_type == 'PBUSH1D':
assert pname_fid in ['K', 'C'], msg
elif prop_type == 'PBUSHT':
assert pname_fid in ['TBID1', 'TGEID1', 'TGEID2'], msg
# CGAP
elif prop_type == 'PGAP':
assert pname_fid in [5], msg
elif prop_type == 'PVISC':
assert pname_fid in ['CE1'], msg
#elif prop_type == 'CDAMP2':
#assert pname_fid in ['B'], msg
elif prop_type == 'PDAMP':
assert pname_fid in [3, 'B1'], msg
#elif prop_type == 'CMASS2':
#assert pname_fid in ['M'], msg
#elif prop_type == 'CMASS4':
#assert pname_fid in ['M'], msg
elif prop_type == 'PMASS':
assert pname_fid in [3], msg
#elif prop_type == 'CONM2':
#assert pname_fid in ['M', 'X1', 'X2', 'I11', 'I22'], msg
elif prop_type == 'PSHEAR':
if pname_fid in ['T', 4]:
pname_fid = 'T'
else:
raise NotImplementedError('PSHEAR pname_fid=%r is invalid' % pname_fid)
elif prop_type == 'PWELD':
assert pname_fid in ['D'], msg
elif prop_type == 'PBEND':
raise RuntimeError('Nastran does not support the PBEND')
else:
raise NotImplementedError(msg)
return pname_fid
|
fa3129485081d4b7312fda74e87b1203c97e9adc
| 25,469 |
def is_ligature(archar):
"""Checks for Arabic Ligatures like LamAlef.
(LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_HAMZA_BELOW, LAM_ALEF_MADDA_ABOVE)
@param archar: arabic unicode char
@type archar: unicode
@return:
@rtype:Boolean
"""
return archar in LIGUATURES
|
721c6135064e21ba681c43fc776c0f64f290e2d3
| 25,470 |
def _get_embl_key(line):
"""Return first part of a string as a embl key (ie 'AC M14399;' -> 'AC')"""
# embl keys have a fixed size of 2 chars
return line[:2]
|
b54f1a94f120f7ac63a0dd2a22bd47d5a5d5eeb9
| 25,471 |
import scipy
def get_region_data(region, lastday=-1, printrows=0, correct_anomalies=True,
correct_dow='r7'):
"""Get case counts and population for one municipality.
It uses the global DFS['mun'], DFS['cases'] dataframe.
Parameters:
- region: region name (see below)
- lastday: last day to include.
- printrows: print this many of the most recent rows
- correct_anomalies: correct known anomalies (hiccups in reporting)
by reassigning cases to earlier dates.
- correct_dow: None, 'r7' (only for extrapolated rolling-7 average)
Special municipalities:
- 'Nederland': all
- 'HR:Zuid', 'HR:Noord', 'HR:Midden', 'HR:Midden+Zuid', 'HR:Midden+Noord':
holiday regions.
- 'MS:xx-yy': municipalities with population xx <= pop/1000 < yy'
- 'P:xx': province
Use data up to lastday.
Return:
- df: dataframe with added columns:
- Delta: daily increase in case count (per capita).
- Delta_dowc: daily increase, day-of-week correction applied
based on national pattern in most recent 7 weeks.
- Delta7r: daily increase as 7-day rolling average
(last 3 days are estimated).
    - DeltaSG: daily increase, smoothed with a (15, 2) Savitzky-Golay filter.
- pop: population.
"""
df1, npop = nl_regions.select_cases_region(DFS['cases'], region)
# df1 will have index 'Date_of_report', columns:
# 'Total_reported', 'Hospital_admission', 'Deceased'
assert correct_dow in [None, 'r7']
if lastday < -1 or lastday > 0:
df1 = df1.iloc[:lastday+1]
if len(df1) == 0:
raise ValueError(f'No data for region={region!r}.')
# nc: number of cases
nc = df1['Total_reported'].diff()
if printrows > 0:
print(nc[-printrows:])
nc.iat[0] = 0
df1['Delta'] = nc/npop
if correct_anomalies:
_correct_delta_anomalies(df1)
nc = df1['Delta'] * npop
nc7 = nc.rolling(7, center=True).mean()
nc7[np.abs(nc7) < 1e-10] = 0.0 # otherwise +/-1e-15 issues.
nc7a = nc7.to_numpy()
# last 3 elements are NaN, use mean of last 4 raw (dow-corrected) to
# get an estimated trend and use exponential growth or decay
# for filling the data.
if correct_dow == 'r7':
# mean number at t=-1.5 days
dow_correction = get_dow_correction((lastday-49, lastday)) # (7,) array
df1['Delta_dowc'] = df1['Delta'] * dow_correction[df1.index.dayofweek]
nc1 = np.mean(nc.iloc[-4:] * dow_correction[nc.index[-4:].dayofweek])
else:
nc1 = nc.iloc[-4:].mean() # mean number at t=-1.5 days
log_slope = (np.log(nc1) - np.log(nc7a[-4]))/1.5
nc7.iloc[-3:] = nc7a[-4] * np.exp(np.arange(1, 4)*log_slope)
# 1st 3 elements are NaN
nc7.iloc[:3] = np.linspace(0, nc7.iloc[3], 3, endpoint=False)
df1['Delta7r'] = nc7/npop
df1['DeltaSG'] = scipy.signal.savgol_filter(
nc/npop, 15, 2, mode='interp')
return df1, npop
|
e35b41eca663d25d065d2b656745e7c41e038dc1
| 25,474 |
def MONTH(*args) -> Function:
"""
Returns the month of the year a specific date falls in, in numeric format.
Learn more: https//support.google.com/docs/answer/3093052
"""
return Function("MONTH", args)
|
86a44c35e989ccc149935515550d3176549ee82e
| 25,475 |
import logging
import time
def Install(browser):
"""Installs |browser|, if necessary. It is not possible to install
an older version of the already installed browser currently.
Args:
    browser: specific browser to install.
Returns:
whether browser is installed.
"""
# Only dynamic installation of browsers for Windows now.
if not util.IsWindows():
return True
logging.info('Wants to install ' + browser['name'])
version = GetVersionNumber(browser['family'])
if version is None:
logging.info('No version of %s is installed' % browser['family'])
else:
logging.info('Version %s of %s is installed already'
% (version, browser['family']))
if not IsBrowserInstalled(browser):
install_cmd = None
# Download browser.
logging.info('Downloading ' + browser['name'])
if browser['family'] == 'ie':
if browser['name'] == 'ie7':
install_cmd = util.Download(_IE_7_URLS[util.GetOSPrefix()],
SOFTWARE_PATH)
elif browser['name'] == 'ie8':
install_cmd = util.Download(_IE_8_URLS[util.GetOSPrefix()],
SOFTWARE_PATH)
install_cmd += ' /passive /no-default'
elif browser['family'] == 'firefox':
if util.IsWindows():
install = util.Download(_FIREFOX_VERSIONS[browser['name']],
SOFTWARE_PATH)
install_cmd = install + ' -ms'
elif browser['family'] == 'chrome':
if util.IsWindows():
install_cmd = util.Download(_CHROME_VERSIONS[browser['name']],
SOFTWARE_PATH)
else:
logging.error('Browser %s is not currently supported' % browser['name'])
# Run installation.
if install_cmd is not None:
logging.info('Installing browser: ' + install_cmd)
if install_cmd is None or util.RunStr(install_cmd) != 0:
logging.error('Could not install %s' % browser['name'])
return False
# Do post installation things.
if browser['family'] == 'chrome':
first_run = file(HOME_PATH + '\\Local Settings\\'
'Application Data\\Google\\Chrome\\Application\\'
'First Run', 'w')
first_run.close()
# Wait for Chrome to install. Reboot to get rid of that first run UI.
time.sleep(90)
util.Reboot()
logging.error('Could not reboot. Needed for Chrome installation.')
return False
else:
logging.info(browser['name'] + ' already installed')
return True
|
93bc5b7ad0b1bc8b4d4496a9ae608d2b9fa1848f
| 25,477 |
def get_query_string_from_process_type_string(process_type_string: str) -> str: # pylint: disable=invalid-name
"""
Take the process type string of a Node and create the queryable type string.
:param process_type_string: the process type string
:type process_type_string: str
:return: string that can be used to query for subclasses of the process type using 'LIKE <string>'
:rtype: str
"""
if ':' in process_type_string:
return f'{process_type_string}.'
path = process_type_string.rsplit('.', 2)[0]
return f'{path}.'
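# A minimal usage sketch (the process type strings below are illustrative, not
# taken from the source): an entry-point style string keeps the full identifier,
# while a plain class path is reduced to its package prefix.
print(get_query_string_from_process_type_string('aiida.calculations:core.arithmetic.add'))
# -> 'aiida.calculations:core.arithmetic.add.'
print(get_query_string_from_process_type_string('aiida.orm.nodes.process.calculation.calcjob.CalcJobNode'))
# -> 'aiida.orm.nodes.process.calculation.'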
|
1380ad90a98da26237176890c52a75684e92964e
| 25,478 |
def get_column(fn):
"""Get column from Cellomics filename.
Parameters
----------
fn : string
A filename from the Cellomics high-content screening system.
Returns
-------
column : string
        The well column encoded in the filename.
Examples
--------
>>> fn = 'MFGTMP_140206180002_A01f00d0.TIF'
>>> get_column(fn)
'01'
"""
sem = cellomics_semantic_filename(fn)
column = sem['well'][1:]
return column
|
5582b6952af2cfcc6c2bcf0aeb7d472420766c9c
| 25,479 |
def add_tables():
"""
Generates tables in postgres database according to SQLAlchemy
model when this script is invoked directly via terminal.
"""
return database.Base.metadata.create_all(bind=database.engine)
|
e7da7d2ccef81197faa3393a4e0a04cf1a656b7d
| 25,480 |
def label_vertices(ast, vi, vertices, var_v):
"""Label each node in the AST with a unique vertex id
vi : vertex id counter
vertices : list of all vertices (modified in place)
"""
def inner(ast):
nonlocal vi
if type(ast) != dict:
if type(ast) == list:
# print(vi)
pass
return ast
ast["vertex_id"] = vi
vertices.append(ast["tag"])
# if not (ast['tag'] in ['EVar', 'LvVar'] and ast['contents'] in var_v):
vi += 1
for k, v in ast.items():
if k != "tag":
inner(v)
return ast
return inner(ast)
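# A minimal usage sketch, assuming ASTs are nested dicts carrying a "tag" key
# (the real AST format is not shown here):
vertices = []
tree = {"tag": "EVar", "contents": "x"}
label_vertices(tree, 0, vertices, var_v=[])
print(tree["vertex_id"])   # 0
print(vertices)            # ['EVar']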
|
1216c3ff1f5995e24f0f3a245fad5db820335f4d
| 25,483 |
def bias_variable(shape):
"""Create a bias variable with appropriate initialization."""
#initial = tf.constant(0.1, shape=shape)
initial = tf.constant(0.0, shape=shape)
return tf.Variable(initial)
|
046c9fc01bba5af90b166e16d3dce9a294decc58
| 25,485 |
def object_gatekeeper(obj, is_auth, ignore_standalone=False):
"""
It's OK to use available_to_public here because the underlying logic is identical.
"""
if not obj:
return False
if is_auth:
return True
else:
try:
return obj.available_to_public
except:
pass
return False
|
66f0749788f462ba9a0dfee6edf890245aca15ba
| 25,487 |
def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
"""Returns a function that can be used to apply L1 L2 regularizations.
Args:
scale_l1: A scalar multiplier `Tensor` for L1 regularization.
scale_l2: A scalar multiplier `Tensor` for L2 regularization.
scope: An optional scope name.
Returns:
A function with signature `l1_l2(weights)` that applies a weighted sum of
L1 L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
scope = scope or 'l1_l2_regularizer'
return sum_regularizer([l1_regularizer(scale_l1),
l2_regularizer(scale_l2)],
scope=scope)
|
6fe25d5f90d23d192c2b0d9897d5e025d534813c
| 25,488 |
def test013_ip_range():
"""
to run:
kosmos 'j.data.types.test(name="iprange")'
"""
ipv4 = j.data.types.get("iprange", default="192.168.0.0/28")
assert ipv4.default_get() == "192.168.0.0/28"
assert ipv4.check("192.168.23.255/28") is True
assert ipv4.check("192.168.23.300/28") is False
assert ipv4.check("192.168.23.255/32") is True
ipv6 = j.data.types.get("iprange")
assert ipv6.default_get() == "::/128"
assert ipv6.check("2001:db00::0/24") is True
assert ipv6.check("2001:db00::1/24") is True
assert ipv6.check("2001:db00::0/ffff:ff00::") is False
j.data.types._log_info("TEST DONE LIST")
return "OK"
|
4ac18b32aef77b5d4c1080150dd218a8f96efcf3
| 25,489 |
def _create_hive_cursor():
"""
Initializes a hive connection and returns a cursor to it
:return: hive cursor
"""
_print_info('Initializing hive cursor.')
return _initialize_hive_connection()
|
52e0250b1a163a6ae8f43bbb3ce723cd79518e98
| 25,490 |
def to_vector_single(text, embeddings, maxlen=300):
"""
Given a string, tokenize it, then convert it to a sequence of word embedding
vectors with the provided embeddings, introducing <PAD> and <UNK> padding token
vector when appropriate
"""
tokens = tokenizeAndFilterSimple(clean_text(text))
window = tokens[-maxlen:]
# TBD: use better initializers (uniform, etc.)
x = np.zeros((maxlen, embeddings.embed_size), )
# TBD: padding should be left and which vector do we use for padding?
# and what about masking padding later for RNN?
for i, word in enumerate(window):
x[i,:] = embeddings.get_word_vector(word).astype('float32')
return x
|
3000691c9bbb75c9c86b6d740ff2559e10228db4
| 25,492 |
import numpy
def eval_tensor_density(
tens: tf_compat.Tensor, sess: tf_compat.Session = None
) -> float:
"""
Get the density (fraction of non zero values) in a tensor
:param tens: the tensor to get the density for
:param sess: the session to use for evaluating the tensor,
if not supplied will use the default session
:return: the density of the tensor
"""
if not sess:
sess = tf_compat.get_default_session()
val_array = sess.run(tens)
num_nonzeros = numpy.count_nonzero(val_array)
density = float(num_nonzeros) / float(val_array.size)
return density
|
38ed298cdef732a1465a4221a9fbac82535b6d2c
| 25,493 |
import collections.abc
def get(key, default):
"""Get a config bloc from the YAML config file.
Args:
default (dict): The default bloc if the key is not available
Returns:
dict: The config bloc (or the default one)
"""
    if key.lower() not in _YAML_DICT or isinstance(_YAML_DICT[key.lower()], collections.abc.Mapping):
return default
else:
return _YAML_DICT[key.lower()]
|
40a7ac19bf64667bccd183c28c2fb0c772c8f748
| 25,494 |
def adaptsim(f, a, b, eps=1e-8, max_iter=10000):
"""自适应 Simpson 求积
P.S. 这个函数名来自 Gander, W. and W. Gautschi, “Adaptive
Quadrature – Revisited,” BIT, Vol. 40, 2000, pp. 84-101.
该文档可以在 https://people.inf.ethz.ch/gander/ 找到。
但该函数的实现并没有使用此文中的递归方法。
Args:
f: 要求积的函数
a, b: 求积区间
eps: 目标精度,达到则停止,返回积分值
max_iter: 最大迭代次数,超出这个次数迭代不到目标精度,则 raise 一个 Exception
Returns: (I, m, p)
I: 积分的近似值
m: 分层数
p: 分点
Raises:
Exception: 无法在 max_iter 步内迭代到目标精度
"""
p = [a, b] # 分点
p0 = p
ep = [eps]
m = 0
q = 0
I = 0
for _iter_times in range(int(max_iter)):
n1 = len(ep)
n = len(p0)
if n <= 1:
break
h = p0[1] - p0[0]
s0 = h / 6 * ( f(p0[0]) + 4 * f(p0[0] + h/2) + f(p0[0] + h ) )
s1 = h / 12 * ( f(p0[0]) + 4 * f(p0[0] + h/4) + f(p0[0] + h/2) )
s2 = h / 12 * ( f(p0[0] + h/2) + 4 * f(p0[0] + 3*h/4) + f(p0[0] + h) )
if abs(s0 - s1 - s2) <= 15 * ep[0]:
I += s1 + s2
p0 = p0[1:]
if n1 >= 2:
ep = ep[1:]
q += 1
else:
m += 1
p0 = [p0[0], p0[0] + h/2] + p0[1:]
if n1 == 1:
ep = [ep[0]/2, ep[0]/2]
else:
ep = [ep[0]/2, ep[1]/2] + ep[1:]
if q == 0:
p = p0
else:
p = p[:q] + p0
else:
        raise Exception('Failed to reach the target accuracy within max_iter iterations')
return I, m, p
|
b24ed3c2493b8ece19a69cf781a75e7a9e0f9cd0
| 25,495 |
def get_next_position(grid):
"""Returns best next position to send."""
width = len(grid[0])
unprepared = [inspect_around_position(grid, x)
for x in range(1, width - 1)]
return unprepared.index(max(unprepared)) + 2
|
8d1a75766e830ee49c895a5fe90adc3208011c3d
| 25,496 |
from typing import Optional
def which_subdir(sha: str) -> Optional[str]:
""" Determine which subset (if any) sha is represented in """
fname = sha + '.json'
for k, v in subdir_contents.items():
if fname in v:
subdir_contents[k].remove(fname)
return k
subdir_contents[MISSING_FILE].add(fname)
return MISSING_FILE
|
f5a32354724604f15710bbf9a69c1e5d38e84a83
| 25,497 |
import numpy as np
def smoothedEnsembles(data,lat_bounds,lon_bounds):
"""
Smoothes all ensembles by taking subsamples
"""
### Import modules
print('\n------- Beginning of smoothing the ensembles per model -------')
### Save MM
newmodels = data.copy()
mmean = newmodels[-1,:,:,:,:] # 7 for MMmean
otherens = newmodels[:7,:,:,:,:]
newmodeltest = np.empty(otherens.shape)
for modi in range(otherens.shape[0]):
for sh in range(otherens.shape[1]):
ensnum = np.arange(otherens.shape[1])
slices = np.random.choice(ensnum,size=otherens.shape[0],replace=False)
modelsmooth = otherens[modi]
slicenewmodel = np.nanmean(modelsmooth[slices,:,:,:],axis=0)
newmodeltest[modi,sh,:,:,:] = slicenewmodel
### Add new class
smoothClass = np.append(newmodeltest,mmean[np.newaxis,:,:,:],axis=0)
print('--Size of smooth twin --->',newmodeltest.shape)
print('--NEW Size of smoothedclass--->',smoothClass.shape)
print('------- Ending of smoothing the ensembles per model -------')
return smoothClass
|
ed8fe2bc3d4e77384179d6a1a1406ca9446dc973
| 25,498 |
def conv7x7_block(in_channels,
out_channels,
strides=1,
padding=3,
use_bias=False,
use_bn=True,
bn_eps=1e-5,
activation="relu",
data_format="channels_last",
**kwargs):
"""
7x7 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 3
Padding value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default 'relu'
Activation function or name of activation function.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
strides=strides,
padding=padding,
use_bias=use_bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation,
data_format=data_format,
**kwargs)
|
9de1518d95417877a0bf5e094ebb907c3534434f
| 25,499 |
def alpha_to_weights(alpha):
"""归一化. 最终截面绝对值和为2. """
alpha = alpha - np.nanmean(alpha, axis=1, keepdims=True)
mask_pos = (alpha > 0)
mask_neg = (alpha < 0)
alpha_pos = imposter(alpha)
alpha_pos[mask_pos] = alpha[mask_pos]
alpha_pos = alpha_pos / np.nansum(alpha_pos, 1, keepdims=True)
alpha_neg = imposter(alpha)
alpha_neg[mask_neg] = alpha[mask_neg]
alpha_neg = -alpha_neg / np.nansum(alpha_neg, 1, keepdims=True)
alpha[mask_pos] = alpha_pos[mask_pos]
alpha[mask_neg] = alpha_neg[mask_neg]
return alpha
|
a2a4436b3457fe644a130d463cf501c6cd623f2c
| 25,500 |
def complete_with_fake_data_for_warmup(minimum_n_rows_to_fit, X=None, fv_size=None):
"""Makes fake data to warmup a partial fit process.
If no X is given, will return a random minimum_n_rows_to_fit x fv_size matrix (with values between 0 and 1)
If X is given, will repeat the rows in a cycle until the minimum_n_rows_to_fit is reached
>>> X = complete_with_fake_data_for_warmup(3, fv_size=2);
>>> X.shape
(3, 2)
>>> import numpy as np
>>> complete_with_fake_data_for_warmup(5, X=np.array([[1,2,3], [4,5,6]]))
array([[1, 2, 3],
[4, 5, 6],
[1, 2, 3],
[4, 5, 6],
[1, 2, 3]])
"""
if X is None:
assert fv_size is not None, 'You need to have some data, or specify an fv_size'
return np.random.rand(minimum_n_rows_to_fit, fv_size)
else:
nrows, fv_size = X.shape
missing_n_rows = max(0, minimum_n_rows_to_fit - nrows)
if missing_n_rows > 0:
return np.array(X.tolist() * int(1 + np.ceil(missing_n_rows / nrows)))[
:minimum_n_rows_to_fit
]
else:
return X
|
e201fc50f06945b57a166e4c006252cc892865ed
| 25,501 |
from typing import Dict
from pathlib import Path
from time import sleep
from urllib import parse
def check_integrity(signify: Dict[str, str], snapshot: Path, url: str) -> bool:
"""Check the integrity of the snapshot and retry once if failed files.
signify -- the signify key and a signify signed file with SHA256 checksums
snapshot -- the directory where the snapshot is stored
url -- the snapshots' mirror URL
"""
whole, failed = verify(signify, snapshot)
# if there are some failed files, retry once, five minutes
# after. Downloads can fail or just get the mirror in the middle
# of a sync.
if failed:
sleep(300)
for f in failed:
get_binary(parse.urljoin(url, f), f)
whole, failed = verify(signify, snapshot)
return whole
|
4e33ba5a2652eaba229815eec93dede4aaf6ef5f
| 25,502 |
def exp_slow(b, c):
"""
Returns the value b^c.
Property: b^c = b * b^(c-1)
Parameter b: the number to raise to a power
Precondition: b is a number
Parameter c: the exponent
Precondition: c is an int >= 0
"""
# get in the habit of checking what you can
assert type(b) in [float, int], repr(b)+' is not a number'
assert type(c) == int, repr(c)+' is not an int'
assert c >= 0, repr(c)+' is negative'
# Allows us to write to global variable. EVIL! Do not use!
global count_frames
# Work on small data (BASE CASE)
if c == 0:
return 1
# Break up into halves (RECURSIVE CASE)
left = b
right = exp_slow(b, c-1)
# Used to count the number of frames
count_frames = count_frames+1
# Combine the answer
return left * right
|
0d58a98f2b7785c9ac69c8a3f4539cdf71d3f27b
| 25,503 |
def pick_theme(manual):
"""
Return theme name based on manual input, prefs file, or default to "plain".
"""
if manual:
return manual
pref_init()
parser = cp.ConfigParser()
parser.read(PREFS_FILE)
try:
theme = parser.get("theme", "default")
except (cp.NoSectionError, cp.NoOptionError):
theme = "plain"
return theme
|
6e815a0f46b5de1f1a0ef16ffa0ba21b79ee048f
| 25,504 |
import socket
def ip2host(ls_input):
"""
Parameters : list of a ip addreses
----------
Returns : list of tuples, n=2, consisting of the ip and hostname
"""
ls_output = []
for ip in ls_input:
try:
x = socket.gethostbyaddr(ip)
ls_output.append((ip, x[0]))
except Exception as e:
print('Error: ', e)
ls_output.append((ip, None))
return ls_output
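# A minimal usage sketch (assumed input). The resolved names depend on the
# local DNS/hosts configuration, so the exact output will vary; unresolvable
# addresses come back paired with None.
print(ip2host(['127.0.0.1', '192.0.2.1']))
# e.g. [('127.0.0.1', 'localhost'), ('192.0.2.1', None)]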
|
234b42bf0406ae5fb67d2c1caba9f7f3a1e92a0c
| 25,505 |
from typing import Tuple
from typing import List
from pathlib import Path
def process_all_content(file_list: list, text_path: str) -> Tuple[list, list]:
"""
Analyze the whole content of the project, build and return lists
    of toc_items and landmarks.
INPUTS:
file_list: a list of all content files
text_path: the path to the contents folder (src/epub/text)
OUTPUTS:
a tuple containing the list of Toc items and the list of landmark items
"""
toc_list: List[TocItem] = []
landmarks: List[TocItem] = []
# We make two passes through the work, because we need to know
# how many bodymatter items there are. So we do landmarks first.
for textf in file_list:
file_path = Path(text_path) / textf
try:
with open(file_path, encoding="utf8") as file:
dom = se.easy_xml.EasyXhtmlTree(file.read())
except Exception as ex:
raise se.InvalidFileException(f"Couldn’t open file: [path][link=file://{file_path}]{file_path}[/][/]. Exception: {ex}") from ex
add_landmark(dom, textf, landmarks)
# Now we test to see if there is only one body item
body_items = [item for item in landmarks if item.place == Position.BODY]
single_file = (len(body_items) == 1)
nest_under_halftitle = False
place = Position.NONE
for textf in file_list:
with open(Path(text_path) / textf, "r", encoding="utf-8") as file:
dom = se.easy_xml.EasyXhtmlTree(file.read())
body = dom.xpath("//body")
if body:
place = get_place(body[0])
else:
raise se.InvalidInputException("Couldn't locate body node")
if place == Position.BACK:
nest_under_halftitle = False
process_headings(dom, textf, toc_list, nest_under_halftitle, single_file)
if textf == "halftitlepage.xhtml":
nest_under_halftitle = True
# We add this dummy item because outputtoc always needs to look ahead to the next item.
last_toc = TocItem()
last_toc.level = 1
last_toc.title = "dummy"
toc_list.append(last_toc)
return landmarks, toc_list
|
51514892d173adf8a4fe9c3196781c558bc24c6a
| 25,506 |
import aiohttp
import json
def fuel(bot, mask, target, args):
"""Show the current fuel for Erfurt
%%fuel [<city> <value> <type>]...
"""
"""Load configuration"""
config = {
'lat': 50.9827792,
'lng': 11.0394426,
'rad': 10
}
config.update(bot.config.get(__name__, {}))
sort_type = 'all'
sort_value = 'dist'
lat = config['lat']
lng = config['lng']
fuel_types = ['e5', 'e10', 'diesel', 'all']
if config['api_key'] == "your_apikey":
return "I don't have your api key!"
if '<city>' not in args or len(args['<city>']) < 1:
bot.log.info('Fetching fuel info for Erfurt')
lat = config['lat']
lng = config['lng']
else:
if " ".join(args['<city>']) == 'sort':
bot.log.info('Fetching fuel info for Erfurt')
lat = config['lat']
lng = config['lng']
if '<value>' not in args or len(args['<value>']) < 1:
sort_type = 'all'
sort_value = 'dist'
else:
sort_type = " ".join(args['<value>'])
sort_value = 'price'
else:
if " ".join(args['<city>']) == 'help':
bot.log.info('Printing some Help')
cmd = '!'
bot.privmsg(target, '( ͡° ͜ʖ ͡°)')
bot.privmsg(target, 'Example commands:')
bot.privmsg(target, cmd + 'fuel')
bot.privmsg(target, cmd + 'fuel help')
bot.privmsg(target, cmd + 'fuel sort <fuel>')
bot.privmsg(target, cmd + 'fuel sort e5')
bot.privmsg(target, cmd + 'fuel sort e10')
bot.privmsg(target, cmd + 'fuel sort diesel')
bot.privmsg(target, cmd + 'fuel <place>')
bot.privmsg(target, cmd + 'fuel erfurt')
bot.privmsg(target, cmd + 'fuel <place> sort <fuel>')
bot.privmsg(target, cmd + 'fuel erfurt sort e5')
bot.privmsg(target, cmd + 'fuel bytespeicher sort e10')
bot.privmsg(target, cmd + 'fuel krautspace sort diesel')
return ""
else:
bot.log.info('Fetching fuel info for ' +
str(" ".join(args['<city>'])))
geolocator = Nominatim()
location = geolocator.geocode(" ".join(args['<city>']))
lat = location.latitude
lng = location.longitude
if " ".join(args['<value>']) == 'sort':
if '<type>' not in args or len(args['<type>']) < 1:
sort_type = 'all'
sort_value = 'dist'
else:
sort_type = " ".join(args['<type>'])
sort_value = 'price'
if sort_type not in fuel_types:
return "Not supported fuel."
try:
url = "https://creativecommons.tankerkoenig.de/json/list.php?" + \
"lat=" + str(lat) + \
"&lng=" + str(lng) + \
"&rad=" + str(config['rad']) + \
"&sort=" + str(sort_value) + \
"&type=" + str(sort_type) + \
"&apikey=" + str(config['api_key'])
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(url)
if resp.status != 200:
bot.privmsg(target, "Error while retrieving station list")
raise Exception()
r = yield from resp.read()
data = json.loads(r.decode('utf-8'))
messages = []
for x in range(len(data['stations'])):
brand = data[u'stations'][x][u"brand"]
station_id = data['stations'][x][u"id"]
postCode = data['stations'][x][u"postCode"]
bot.log.info('Fetching fuel info for Erfurt station ' +
str(station_id))
url = \
"https://creativecommons.tankerkoenig.de/json/detail.php?" + \
"id=" + station_id + \
"&apikey=" + str(config['api_key'])
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(url)
if resp.status != 200:
bot.privmsg(target, "Error while retrieving fuel data")
raise Exception()
r = yield from resp.read()
details = json.loads(r.decode('utf-8'))
e5 = str(details['station']['e5'])
e10 = str(details['station']['e10'])
diesel = str(details['station']['diesel'])
dist = u"{:0.2} km".format(vincenty((details['station']['lat'],
details['station']['lng']),
(lat, lng)).meters / 1000)
if brand == '':
brand = 'GLOBUS'
print_str = \
u" {:20}".format(brand + ', ' + str(postCode) + ': ') + \
u"{:5} ".format(e5) + \
u"{:5} ".format(e10) + \
u"{:5} ".format(diesel) + \
u"{:5} ".format(dist)
messages.append(print_str)
headline = u"{:23}".format('fuel prices:') + \
u"{:6} ".format('e5') + \
u"{:6} ".format('e10') + \
u"{:6} ".format('diesel') + \
u"{:6} ".format('dist')
if len(messages) > 0:
bot.privmsg(target, headline)
for m in messages:
bot.privmsg(target, m)
else:
return "No fuel data found!"
except KeyError:
bot.privmsg(target, "Error while retrieving fuel data")
raise Exception()
|
371ecd5e8a7c99032f2544d8256e89475a8d0cd5
| 25,507 |
def findElemArray2D(x, arr2d):
"""
:param x: a scalar
:param arr2d: a 2-dimensional numpy ndarray or matrix
Returns a tuple of arrays (rVec, cVec), where the corresponding elements in
each are the rows and cols where arr2d[r,c] == x.
Returns [] if x not in arr2d. \n
Example: \n
arr2d = np.array([[1,2],[3,1]]), x = 1
findElemArray2D(x, arr2d) --> ([0, 1], [0, 1]).
i.e., arr2d[0][0] and arr2d[1][1] both == x.
.. note::
The type of each tuple member is the same as type(arr2d)
"""
res = np.where(arr2d == x)
if len(res[0]):
return res[0].flatten(), res[1].flatten()
else:
return [], []
|
37428b16b6f634483d584ef878eea90646d77028
| 25,509 |
import itertools
def merge(cluster_sentences):
"""
Merge multiple lists.
"""
cluster_sentences = list(itertools.chain(*cluster_sentences))
return cluster_sentences
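# A minimal usage sketch (assumed input): flatten per-cluster sentence lists
# into a single flat list.
print(merge([['a', 'b'], ['c'], ['d', 'e']]))
# -> ['a', 'b', 'c', 'd', 'e']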
|
ec5c9bf7a89bf0d047050d3684876ed481617706
| 25,510 |
def reverse_str(s: str) -> str:
"""Reverse a given string"""
# Python strings are immutable
s = list(s)
s_len = len(s)
# Using the extra idx as a temp space in list
s.append(None)
for idx in range(s_len // 2):
s[s_len] = s[idx]
s[idx] = s[s_len - idx - 1]
s[s_len - idx - 1] = s[s_len]
return "".join(s)[:s_len]
|
8568ed59d004afde11bd97e0dba58189a447f954
| 25,511 |
def readme():
"""Get text from the README.rst"""
with open('README.rst') as f:
return f.read()
|
3cf992e2f983d71445e743599dc8b78411bab288
| 25,512 |
def exact_account(source_account_id):
"""
    Get the management account id and OU id for the given account id from the DynamoDB table.
"""
try:
response = dynamodb_table.get_item(Key={'AccountId': source_account_id})
except Exception as e:
failure_notify("Unable to query account id {0}, detailed exception {1}".format(source_account_id, e))
print(response)
mgt_account_id = response['Item']['MgtId']
ou_id = response['Item']['OuId']
return mgt_account_id, ou_id, source_account_id
|
07ff5ef933d00208a5b1aba573c24c5f5987a558
| 25,513 |
import re
def filter_output(output, regex):
"""Filter output by defined regex. Output can be either string, list or tuple.
Every string is split into list line by line. After that regex is applied
to filter only matching lines, which are returned back.
:returns: list of matching records
"""
result = []
if isinstance(output, str):
for line in output.splitlines():
result += re.findall(regex, line)
elif isinstance(output, (list, tuple)):
for item in output:
result.append(filter_output(item, regex))
else:
raise RuntimeError('Only strings and lists are supported by filter_output(), '
'but output has type {}'.format(type(output)))
return result
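# A minimal usage sketch (assumed inputs): extract digit runs from a string
# and from a list of strings.
print(filter_output("cpu temp 45\ngpu temp 60", r'\d+'))
# -> ['45', '60']
print(filter_output(["a=1", "b=22"], r'\d+'))
# -> [['1'], ['22']]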
|
d9760a644bb83aee513391966522946a6514ab72
| 25,514 |
def carteiralistar(request):
"""
    Method that returns the wallet listing template.
"""
usuario = request.user
try:
        # Get the wallet object if it already exists
carteira = CarteiraCriptomoeda.objects.get(usuario=usuario)
        # Get the API key and the balance
chave_api = carteira.chave_api
saldo = carteira.saldo
valor_operacao = carteira.valor_operacao
num_operacoes = carteira.num_operacoes
simulacao = carteira.simulacao
existe_carteira = True
    # If no wallet is registered, leave the fields blank
except ObjectDoesNotExist:
chave_api = ""
saldo = ""
valor_operacao = ""
num_operacoes = ""
simulacao = ""
existe_carteira = False
return render(request, 'site-pytradebot/carteiralistar.html',
{'usuario':usuario, 'chave_api':chave_api, 'saldo':saldo,
'valor_operacao':valor_operacao, 'num_operacoes':num_operacoes,
'simulacao':simulacao, 'existe_carteira':existe_carteira})
|
32fa51e5c8e6d5a3765b72755cefe24b0ce906a2
| 25,515 |
def scrub(text, stop_chars=DEFAULT_STOP_CHARS, reorder_chars=DEFAULT_REORDER_CHARS):
"""
Scrub text. Runs the relevant functions in an appropriate order.
"""
text = reorder_stop_chars(text, stop_chars=stop_chars, reorder_chars=reorder_chars)
text = remove_columns(text)
text = split_as_one_sentence_per_line(text, stop_chars=stop_chars)
text = remove_excessive_whitespace(text)
return text
|
c24a072e83b6936c04a2e591d2072b0e49849758
| 25,516 |
def simulate_evoked_osc(info, fwd, n_trials, freq, label, loc_in_label=None,
picks=None, loc_seed=None, snr=None, mu=None,
noise_type="white", return_matrix=True,
filtering=None, phase_lock=False):
"""Simulate evoked oscillatory data based on a given fwd model and dipole.
Parameters:
-----------
info : MNE info object
data info, e.g., from raw
fwd : MNE forward object
forward model object
freq : float
freq of simulated oscillation
n_trials : int
number of trials
label : MNE label
source space label to simulate data in
loc_in_label : None | int
Specify the random generator state for dipole simulation within the
label. Defaults to np.random.RandomState if None.
picks : None | string
Channel types to pick from evoked, can be 'mag' or 'grad'. None
defaults to all.
    loc_seed : None | int
Seed for the time series simulation, only relevant for location in
label.
snr : None | float
If not None, signal-to-noise ratio in dB for resulting signal (adding
noise).
mu : None | float
To directly manipulate noise level (e.g. to keep constant across
conditions).
noise_type : str
Type of noise. Supported is at the moment: "white" and "brownian".
return_matrix : bool
If True, a matrix of epochs will be returned and the evoked object will
be averaged across trials.
filtering : None | dict
If None (default), no filtering is done. If filtering should be done,
the dictionary needs to contain the following keys:
"hp" : high pass cutoff, float.
"lp" : low pass cutoff, float.
"fir_design" : FIR design, string, see evoked.filter()
"lp_tw" : transition width for low pass, float, optional.
"hp_tw" : transition width for high pass, float, optional.
phase_lock : bool
If True, the oscillation will be phase-locked across trials.
Returns:
--------
evoked : MNE evoked object
Simulated sensor data.
stc : MNE source time course object
Simulated source space data.
epochs : np.array
Matrix with epochs, if return_matrix is True.
"""
if loc_seed is not None:
np.random.seed(loc_seed)
if loc_in_label is None:
loc_in_label = np.random.RandomState()
np.random.seed() # reset to random seed to not get funky results for noise
times = np.arange(0., n_trials, 1./info['sfreq'])
stc = simulate_sparse_stc(fwd['src'], n_dipoles=1, times=times,
random_state=loc_in_label, labels=label,
data_fun=lambda
times: generate_signal(times, freq, n_trials,
phase_lock=phase_lock))
# go to sensor space
evoked = apply_forward(fwd, stc, info, verbose=False, use_cps=False)
# pick channel types if applicable
if picks is not None:
evoked.pick_types(meg=picks)
if filtering is not None:
if "lp_tw" not in filtering:
filtering["lp_tw"] = "auto"
if "hp_tw" not in filtering:
filtering["hp_tw"] = "auto"
if snr is not None:
snr = 10 ** (snr/20) # convert dB to ratio
if noise_type == "white":
noise_data = np.random.randn(*evoked.data.shape)
elif noise_type == "brownian":
# make white noise first
noise_data = np.random.randn(*evoked.data.shape)
elif noise_type == "pink":
noise_data = make_pink_noise(evoked.data.shape[1], 10,
evoked.data.shape[0])
else:
raise ValueError('So far, only white, brownian, and pink noise is '
'implemented, got %s' % noise_type)
if filtering is not None:
# filter the noise
noise_evoked = evoked.copy()
noise_evoked.data[:] = noise_data
noise_evoked.filter(filtering["hp"], filtering["lp"],
fir_design=filtering["fir_design"],
l_trans_bandwidth=filtering["hp_tw"],
h_trans_bandwidth=filtering["lp_tw"],
verbose=False)
noise_data = noise_evoked.data
# scale the noise
# shape: trials x sensor x time
noise_matrix = noise_data.reshape([len(evoked.ch_names),
n_trials, -1]).transpose(
1, 0, 2)
signal_matrix = evoked._data.reshape([len(evoked.ch_names),
n_trials, -1]).transpose(1, 0, 2)
if mu is None:
mu = np.linalg.norm(signal_matrix, 'fro', axis=(1, 2))
mu /= (snr * np.sqrt(len(evoked.ch_names) *
(len(times) / n_trials)))
if noise_type == 'brownian':
noise_matrix = np.cumsum(mu[:, np.newaxis,
np.newaxis] * noise_matrix,
axis=1)
signal_matrix += noise_matrix
else:
signal_matrix += (mu[:, np.newaxis, np.newaxis] * noise_matrix)
evoked.data = signal_matrix.transpose(1, 0, 2).reshape(
[len(evoked.ch_names), int(n_trials * (len(times) / n_trials))])
# evoked.data *= 1e-11
if filtering is not None:
# filter all the data again
evoked.filter(filtering["hp"], filtering["lp"],
fir_design=filtering["fir_design"],
l_trans_bandwidth=filtering["hp_tw"],
h_trans_bandwidth=filtering["lp_tw"],
verbose=False)
# take care of trials:
if return_matrix is True:
epochs = evoked._data
epochs = epochs.reshape([len(evoked.ch_names),
n_trials, -1]).transpose(1, 0, 2)
evoked.crop(0., evoked.times[int((times.shape[0] / n_trials) - 1)])
evoked._data[:, :] = epochs.mean(axis=0)
return evoked, stc, epochs, mu
else:
return evoked, stc, mu
|
45a7fe74c4f84c96cdbf0aa09059778180064460
| 25,517 |
import requests
def token_request():
"""
    Request an access token from Vipps.
    :return: An access token
"""
headers = config['token_request']
url = base_url + '/accesstoken/get'
response = requests.post(url, headers=headers)
return response.json()
|
3363179cf526422c53a0eafc8c353ba3f7f29e9f
| 25,518 |
from apex import amp
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=t_total)
if args.fp16:
try:
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
cuda_indices = [0, 1, 2, 3, 6, 7]
batch = tuple(t.to(args.device) if i in cuda_indices else t for i, t in enumerate(batch))
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
"span_labels": batch[3],
"span_size": batch[4],
"span_list": batch[5],
"slot_labels": batch[6],
"slot_mask": batch[7],
"rel_size": batch[8],
"rel_list": batch[9],
"question_length": batch[10],
"span_null_label_id": labels[0].index('O'),
"global_step": global_step,
"args": args}
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
# span_logits = outputs[1][0]
# span_pred = [torch.max(sl, 2)[1] for sl in span_logits].detach().cpu().numpy()
# print(span_pred.shape)
# exit()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scheduler.step() # Update learning rate schedule
optimizer.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test",filename= os.path.join(args.data_dir, "{}.jsonl".format("test")))
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,
"module") else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
|
9d475baa8865f932dd09265d7269eb58f3f31dc2
| 25,519 |
def extract_tunneled_layer(tunnel_packet: scapy.layers.l2.Ether, offset: int, protocol: str):
"""
Extract tunneled layer from packet capture.
Args:
tunnel_packet (scapy.layers.l2.Ether): the PDU to extract from
offset (int): the byte offset of the tunneled protocol in data field of 'packet')
protocol (str): the tunneled protocol to search for
Returns:
        extracted_packet (scapy.layers.l2.Ether): the de-encapsulated Ethernet/IP packet rebuilt from the tunnel payload
"""
data = tunnel_packet[Raw].load[offset:]
extracted_packet = Ether(dst=tunnel_packet[Ether].dst, src=tunnel_packet[Ether].src, type=tunnel_packet[Ether].type) / IP(data)
return extracted_packet
|
69596ba7cc5c9db41a2622aa68be1cad89855eb0
| 25,520 |
def draw_bbox(img, detections, cmap, random_color=True, figsize=(10, 10), show_text=True):
"""
Draw bounding boxes on the img.
    :param img: BGR image.
    :param detections: pandas DataFrame containing detections
    :param random_color: assign a random color to each object
    :param cmap: object colormap
    :param figsize: matplotlib figure size
    :param show_text: whether to draw the class label and score next to each box
    :return: the image with bounding boxes drawn
"""
img = np.array(img)
scale = max(img.shape[0:2]) / 416
line_width = int(2 * scale)
for _, row in detections.iterrows():
x1, y1, x2, y2, cls, score, w, h = row.values
color = list(np.random.random(size=3) * 255) if random_color else cmap[cls]
cv2.rectangle(img, (x1, y1), (x2, y2), color, line_width)
if show_text:
text = f'{cls} {score:.2f}'
font = cv2.FONT_HERSHEY_DUPLEX
font_scale = max(0.3 * scale, 0.3)
thickness = max(int(1 * scale), 1)
(text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=thickness)[0]
cv2.rectangle(img, (x1 - line_width//2, y1 - text_height), (x1 + text_width, y1), color, cv2.FILLED)
cv2.putText(img, text, (x1, y1), font, font_scale, (255, 255, 255), thickness, cv2.LINE_AA)
plt.figure(figsize=figsize)
plt.imshow(img)
plt.show()
return img
|
f88bb4267d9d389dce589ee26058f4ad1e0fb096
| 25,521 |
def print_total_eval_info(data_span_type2model_str2epoch_res_list,
metric_type='micro',
span_type='pred_span',
model_strs=('DCFEE-O', 'DCFEE-M', 'GreedyDec', 'Doc2EDAG'),
target_set='test'):
"""Print the final performance by selecting the best epoch on dev set and emitting performance on test set"""
dev_type = 'dev'
test_type = 'test'
avg_type2prf1_keys = {
'macro': ('MacroPrecision', 'MacroRecall', 'MacroF1'),
'micro': ('MicroPrecision', 'MicroRecall', 'MicroF1'),
}
name_key = 'EventType'
p_key, r_key, f_key = avg_type2prf1_keys[metric_type]
def get_avg_event_score(epoch_res):
eval_res = epoch_res[1]
avg_event_score = eval_res[-1][f_key]
return avg_event_score
dev_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(dev_type, span_type)]
test_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(test_type, span_type)]
has_header = False
mstr_bepoch_list = []
print('=' * 15, 'Final Performance (%) (avg_type={})'.format(metric_type), '=' * 15)
for model_str in model_strs:
if model_str not in dev_model_str2epoch_res_list or model_str not in test_model_str2epoch_res_list:
continue
# get the best epoch on dev set
dev_epoch_res_list = dev_model_str2epoch_res_list[model_str]
best_dev_epoch, best_dev_res = max(dev_epoch_res_list, key=get_avg_event_score)
test_epoch_res_list = test_model_str2epoch_res_list[model_str]
best_test_epoch = None
best_test_res = None
for test_epoch, test_res in test_epoch_res_list:
if test_epoch == best_dev_epoch:
best_test_epoch = test_epoch
best_test_res = test_res
assert best_test_epoch is not None
mstr_bepoch_list.append((model_str, best_test_epoch))
if target_set == 'test':
target_eval_res = best_test_res
else:
target_eval_res = best_dev_res
align_temp = '{:20}'
head_str = align_temp.format('ModelType')
eval_str = align_temp.format(model_str)
head_temp = ' \t {}'
eval_temp = ' \t & {:.1f} & {:.1f} & {:.1f}'
ps = []
rs = []
fs = []
for tgt_event_res in target_eval_res[:-1]:
head_str += align_temp.format(head_temp.format(tgt_event_res[0][name_key]))
p, r, f1 = (100 * tgt_event_res[0][key] for key in [p_key, r_key, f_key])
eval_str += align_temp.format(eval_temp.format(p, r, f1))
ps.append(p)
rs.append(r)
fs.append(f1)
head_str += align_temp.format(head_temp.format('Average'))
ap, ar, af1 = (x for x in [np.mean(ps), np.mean(rs), np.mean(fs)])
eval_str += align_temp.format(eval_temp.format(ap, ar, af1))
head_str += align_temp.format(head_temp.format('Total ({})'.format(metric_type)))
g_avg_res = target_eval_res[-1]
ap, ar, af1 = (100 * g_avg_res[key] for key in [p_key, r_key, f_key])
eval_str += align_temp.format(eval_temp.format(ap, ar, af1))
if not has_header:
print(head_str)
has_header = True
print(eval_str)
return mstr_bepoch_list
|
e5b754facbf0d203cb143514e143844170400280
| 25,522 |
def build_sentence_representation(s):
""" Build representation of a sentence by analyzing predpatt output.
Returns a weighted list of lists of terms.
"""
s = merge_citation_token_lists(s)
s = remove_qutation_marks(s)
lemmatizer = WordNetLemmatizer()
raw_lists = []
rep_lists = []
rep_lists_alt = [] # to be consistent with double annotating for 3 and 3.1
try:
pp = PredPatt.from_sentence(s, cacheable=False) # for speed tests
except Exception as e:
print('= = = PredPatt exception = = =')
print('input:\n{}'.format(s))
print('exception:\n{}'.format(e))
return rep_lists, rep_lists_alt
if len(pp.events) == 0:
return rep_lists, rep_lists_alt
if CIT_BASED:
for e in pp.events:
depth, rep = build_tree_representation(e)
if INCLUDE_PREDICATE:
pred = get_predicate(e.root)
rep = ['{}:{}'.format(pred, r) for r in rep]
if len(rep) > 0:
raw_lists.append([depth, rep])
weight = 1
for rl in sorted(raw_lists, key=itemgetter(0)):
rep_lists.append([weight, rl[1]])
weight *= .5
if len(rep_lists) == 0:
fallback = build_noun_representation(
pp.events[0], global_root=True
)
if INCLUDE_PREDICATE:
pred = get_predicate(pp.events[0].root)
fallback = ['{}:{}'.format(pred, f) for f in fallback]
if len(fallback) > 0:
rep_lists = [[.25, fallback]]
else:
# make a PPv3 and a PPv3.1 representation
# - - - 3.1 - - -
reps = []
for e in pp.events:
rep = build_noun_representation(e) # 3.1
if INCLUDE_PREDICATE:
pred = get_predicate(e.root)
rep = ['{}:{}'.format(pred, f) for f in rep]
reps.extend(rep)
if len(reps) > 0:
rep_lists = [[1, reps]]
# - - - 3 - - -
reps_alt = []
for e in pp.events:
rep = build_noun_representation(e, global_root=True) # 3
if INCLUDE_PREDICATE:
pred = get_predicate(e.root)
rep = ['{}:{}'.format(pred, f) for f in rep]
reps_alt.extend(rep)
if len(reps) > 0:
rep_lists_alt = [[1, reps_alt]]
rep_lists = normalize_rep_lists(rep_lists, lemmatizer)
rep_lists_alt = normalize_rep_lists(rep_lists_alt, lemmatizer)
return rep_lists, rep_lists_alt
|
dd070aef016cc034a79412528aabc951605aa83c
| 25,523 |
def create_glucose_previous_day_groups(day_groups: dict) -> dict:
"""
Create a dictionary of glucose subseries, unique to each day in the parent glucose series.
Subseries data of each dictionary item will lag item key (date) by 1 day.
Keys will be (unique dates in the parent series) + 1 day.
Values will be the subseries with timestamp dates matching 1 day prior to the key.
Args:
day_groups: A dictionary of daily glucose series.
            Keys are individual dates with glucose data.
Values will be the glucose subseries with timestamp dates matching the key.
Returns: The dictionary of glucose subsamples.
Keys will be (unique dates in the parent series) + 1 day.
Values will be the subseries with timestamp dates matching 1 day prior to the key.
"""
previous_day_groups = {}
for previous_day, previous_glucose in day_groups.items():
today = previous_day + pd.Timedelta('1D')
previous_day_groups[today] = previous_glucose
return previous_day_groups
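# A minimal usage sketch (assumed data): a single-day group whose key is
# shifted forward by one day while the series itself is left untouched.
import pandas as pd
day_groups = {pd.Timestamp('2021-01-01'): pd.Series([5.4, 6.1, 5.9])}
shifted = create_glucose_previous_day_groups(day_groups)
print(list(shifted.keys()))   # [Timestamp('2021-01-02 00:00:00')]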
|
6b5373b25ab286291cc351bc115c016c83ea660b
| 25,525 |
def mean_abs_scaling(series: pd.Series, minimum_scale=1e-6):
"""Scales a Series by the mean of its absolute value. Returns the scaled Series
and the scale itself.
"""
scale = max(minimum_scale, series.abs().mean())
return series / scale, scale
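# A minimal usage sketch (assumed input):
import pandas as pd
scaled, scale = mean_abs_scaling(pd.Series([1.0, -2.0, 3.0]))
print(scale)            # 2.0
print(scaled.tolist())  # [0.5, -1.0, 1.5]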
|
00f397993a3c51761ef634371d6e26885602e340
| 25,526 |
def count_total_parameters():
"""
Returns total number of trainable parameters in the current tf graph.
https://stackoverflow.com/a/38161314/1645784
"""
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
return total_parameters
|
8ee1b116ac3338158c7a43acc570776940bb7e0f
| 25,528 |
def create_gradient_rms_plot(sticher_dict: dict[str, GDEFSticher], cutoff_percent=8, moving_average_n=1,
x_offset=0, plotter_style: PlotterStyle = None) -> Figure:
"""
    Creates a matplotlib figure, showing a graph of the root mean square of the gradient of the GDEFSticher objects in
    sticher_dict. The key value in sticher_dict is used as label in the legend.
:param sticher_dict:
:param cutoff_percent:
:param moving_average_n:
:param x_offset:
:param plotter_style:
:return:
"""
if plotter_style is None:
plotter_style = PlotterStyle(300, (8, 4))
y_label = f"roughness(gradient) (moving average n = {moving_average_n})"
plotter_style.set(y_label=y_label)
data_list = []
pixel_width_list = []
label_list = []
for key, sticher in sticher_dict.items():
gradient_data = create_absolute_gradient_array(sticher.values, cutoff_percent / 100.0)
data_list.append(gradient_data)
pixel_width_list.append(sticher.pixel_width)
label_list.append(key)
result = create_rms_plot(data_list, pixel_width=pixel_width_list, label_list=label_list,
moving_average_n=moving_average_n, x_offset=x_offset,
plotter_style=plotter_style)
return result
|
e628250d2c1d4548e6b52d48a8313ffa1b5131fe
| 25,529 |