content | sha1 | id
---|---|---|
def add_port_fwd(
zone, src, dest, proto="tcp", dstaddr="", permanent=True, force_masquerade=False
):
"""
Add port forwarding.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.add_port_fwd public 80 443 tcp
force_masquerade
when a zone is created ensure masquerade is also enabled
on that zone.
"""
if force_masquerade and not get_masquerade(zone):
add_masquerade(zone)
cmd = "--zone={0} --add-forward-port=port={1}:proto={2}:toport={3}:toaddr={4}".format(
zone, src, proto, dest, dstaddr
)
if permanent:
cmd += " --permanent"
return __firewall_cmd(cmd)
|
15c3cc5cbfb3e2921232df0508ea18d727d7861c
| 25,288 |
def reduce_puzzle(values):
"""Reduce a Sudoku puzzle by repeatedly applying all constraint strategies
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict or False
The values dictionary after continued application of the constraint strategies
no longer produces any changes, or False if the puzzle is unsolvable
"""
stalled = False
while not stalled:
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
values = eliminate(values)
values = only_choice(values)
values = naked_twins(values)
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
stalled = solved_values_before == solved_values_after
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
|
0a952caf700216e67c7dd81958dd62d7734bb0fe
| 25,289 |
import struct
def read_int(handle):
"""
Helper function to parse int from file handle
Args:
handle (file): File handle
Returns:
numpy.int32
"""
return struct.unpack("<i", handle.read(4))[0]
|
cd175251fed79c8d79ea4a73d713457c06cbda6b
| 25,290 |
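# Usage sketch (not part of the original dataset entry), assuming read_int from
# the entry above is in scope: an in-memory buffer stands in for a real file handle.
import io
import struct
buf = io.BytesIO(struct.pack("<ii", 42, -7))
print(read_int(buf))  # 42
print(read_int(buf))  # -7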
def prem_to_av(t):
"""Premium portion put in account value
    The amount of premiums net of loadings, which is put in the account value.
.. seealso::
* :func:`load_prem_rate`
* :func:`premium_pp`
* :func:`pols_if_at`
"""
return prem_to_av_pp(t) * pols_if_at(t, "BEF_DECR")
|
7d72e7e2e0b10ffb3958383817ddb7cc5f72a06a
| 25,291 |
import numpy as np
from scipy.spatial import cKDTree
def remove_close(points, radius):
"""
    Given an nxd set of points where d = 2 or 3, return a list of points where no point is closer than radius
:param points: a nxd list of points
:param radius:
:return:
author: revised by weiwei
date: 20201202
"""
tree = cKDTree(points)
# get the index of every pair of points closer than our radius
pairs = tree.query_pairs(radius, output_type='ndarray')
# how often each vertex index appears in a pair
# this is essentially a cheaply computed "vertex degree"
# in the graph that we could construct for connected points
count = np.bincount(pairs.ravel(), minlength=len(points))
# for every pair we know we have to remove one of them
# which of the two options we pick can have a large impact
# on how much over-culling we end up doing
column = count[pairs].argmax(axis=1)
# take the value in each row with the highest degree
# there is probably better numpy slicing you could do here
highest = pairs.ravel()[column + 2 * np.arange(len(column))]
# mask the vertices by index
    mask = np.ones(len(points), dtype=bool)
mask[highest] = False
if tol.strict:
# verify we actually did what we said we'd do
test = cKDTree(points[mask])
assert len(test.query_pairs(radius)) == 0
return points[mask], mask
|
42f8727488018e7f27802e81dbecfd300b38f45a
| 25,292 |
import torch
def pose_mof2mat_v1(mof, rotation_mode='euler'):
"""
### Out-of-Memory Issue ###
Convert 6DoF parameters to transformation matrix.
Args:
mof: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6, H, W]
Returns:
A transformation matrix -- [B, 3, 4, H, W]
"""
bs, _, hh, ww = mof.size()
mof = mof.permute(0,2,3,1).reshape(-1,6) # [B*N, 6]
translation = mof[:,:3].unsqueeze(-1) # [B*N, 3, 1]
rot = mof[:,3:] # [B*N, 3]
if rotation_mode == 'euler':
rot_mat = euler2mat(rot) # [B*N, 3, 3]
elif rotation_mode == 'quat':
rot_mat = quat2mat(rot) # [B*N, 3, 3]
transform_mat = torch.cat([rot_mat, translation], dim=2) # [B*N, 3, 4]
transform_mat = transform_mat.reshape(bs, hh, ww, 3, 4).permute(0,3,4,1,2) # [B, 3, 4, H, W]
# pdb.set_trace()
return transform_mat
|
78d42b36e64c0b6ba0eab46b6c19a96d44ed29fb
| 25,293 |
def update(isamAppliance, local, remote_address, remote_port, remote_facility, check_mode=False, force=False):
"""
Updates logging configuration
"""
json_data = {
"local": local,
"remote_address": remote_address,
"remote_port": remote_port,
"remote_facility": remote_facility
}
change_required, warnings = _check(isamAppliance, json_data)
if force is True or change_required is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put("Updating logging configuration attributes", module_uri, json_data,
requires_modules=requires_modules,
requires_version=requires_versions,
requires_model=requires_model)
else:
return isamAppliance.create_return_object(warnings=warnings)
|
ed8af5d9d1b2f59726622ca6ebdfb4a20b88982f
| 25,295 |
from typing import Tuple
def get_model_and_tokenizer(
model_name_or_path: str,
tokenizer_name_or_path: str,
auto_model_type: _BaseAutoModelClass,
max_length: int = constants.DEFAULT_MAX_LENGTH,
auto_model_config: AutoConfig = None,
) -> Tuple[AutoModelForSequenceClassification, AutoTokenizer]:
"""Get transformer model and tokenizer
Args:
model_name_or_path (str): model name
tokenizer_name_or_path (str): tokenizer name
auto_model_type (_BaseAutoModelClass): auto model object such as AutoModelForSequenceClassification
max_length (int): max length of text
auto_model_config (AutoConfig): AutoConfig object
Returns:
Tuple[AutoModelForSequenceClassification, AutoTokenizer]: model and tokenizer
"""
logger.info(f"Loading model: {model_name_or_path}")
if auto_model_config:
model = auto_model_type.from_pretrained(
model_name_or_path, config=auto_model_config
)
else:
model = auto_model_type.from_pretrained(model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name_or_path, max_length=max_length
)
return model, tokenizer
|
b65cc9cb6e32c65b4d91becb51358146651fddb8
| 25,296 |
def get_forecast_by_coordinates(
x: float,
y: float,
language: str = "en"
) -> str:
"""
Get the weather forecast for the site closest to the coordinates (x, y).
Uses the scipy kd-tree nearest-neighbor algorithm to find the closest
site.
Parameters
----------
x : float
Longitude of the query point.
y : float
Latitude of the query point.
language: str
The language to retrieve the forecast in. Allowed values: "en", "fr".
Returns
-------
str
The XML weather forecast.
"""
nearest_site = get_weather_site_by_coordinates(x, y)
site_code = nearest_site['properties']['Codes']
province_code = nearest_site['properties']['Province Codes']
forecast = get_forecast_by_site_code(
site_code=site_code,
province_code=province_code,
language=language
)
return forecast
|
061007f152328929f2ed1c70f5a8b1401f3268f7
| 25,297 |
def get_calculated_energies(stem, data=None):
"""Return the energies from the calculation"""
if data is None:
data = {}
stem = stem.find('calculation')
for key, path in VASP_CALCULATED_ENERGIES.items():
text = get_text_from_section(stem, path, key)
data[key] = float(text.split()[0])
return data
|
d2c0fcf9023874e6890b34e6950dc81e8d2073ce
| 25,298 |
import numpy as np
def reshape(spectra):
"""Rearrange a compressed 1d array of spherical harmonics to 2d
Args:
spectra (np.ndarray): 1 dimensional storage of 2d spherical modes
Returns:
np.ndarray:
2-dimensional array of the reshaped input with zonal and meridional
wavenumber coordinates
"""
# Account for complex inputs as two dimensions
if spectra.ndim == 2:
spectra = spectra[:, 0] + spectra[:, 1]*1j
if spectra.ndim != 1:
raise ValueError('Spectra must be a 1-dimensional array')
# Deduce truncation from shape
trunc = find_trunc(len(spectra))
# Zeros for output
spectra_2d = np.zeros((trunc, trunc))
idx0 = 0
idx1 = trunc
for i in range(trunc):
spectra_2d[i, i:trunc] = spectra[idx0:idx1]
idx0 += trunc - i
idx1 += trunc - i - 1
return spectra_2d
|
af6bf177a860dd8f7d37a9d3586ab54f76e05a1f
| 25,300 |
def dict_factory(cursor, row):
"""
This method is used to convert tuple type to dict after execute SQL queries in python.
:param cursor:
:param row:
:return:
"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
|
4f44cb368cff38db313e476a9b3a3bcabcca3bb3
| 25,301 |
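# Usage sketch (not part of the original dataset entry), assuming dict_factory
# from the entry above is in scope: sqlite3 rows come back as dicts.
import sqlite3
conn = sqlite3.connect(":memory:")
conn.row_factory = dict_factory
cur = conn.cursor()
cur.execute("CREATE TABLE users (id INTEGER, name TEXT)")
cur.execute("INSERT INTO users VALUES (1, 'alice')")
print(cur.execute("SELECT id, name FROM users").fetchall())  # [{'id': 1, 'name': 'alice'}]
conn.close()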
import torch
def predict_batch(model, x_batch, dynamics, fast_init):
"""
Compute the softmax prediction probabilities for a given data batch.
Args:
model: EnergyBasedModel
x_batch: Batch of input tensors
dynamics: Dictionary containing the keyword arguments
for the relaxation dynamics on u
        fast_init: Boolean to specify if fast feedforward initialization
is used for the prediction
Returns:
Softmax classification probabilities for the given data batch
"""
# Initialize the neural state variables
model.reset_state()
    # Clamp the input to the test sample, and remove nudging from output
model.clamp_layer(0, x_batch.view(-1, model.dimensions[0]))
model.set_C_target(None)
# Generate the prediction
if fast_init:
model.fast_init()
else:
model.u_relax(**dynamics)
return torch.nn.functional.softmax(model.u[-1].detach(), dim=1)
|
61102cfa3bcb3e7d52e9f3eca8c97db4d726c1a7
| 25,302 |
def render_openapi(api, request):
"""Prepare openapi specs."""
# Setup Specs
options = dict(api.openapi_options)
options.setdefault('servers', [{
'url': str(request.url.with_query('').with_path(api.prefix))
}])
spec = APISpec(
options['info'].pop('title', f"{ api.app.cfg.name.title() } API"),
options['info'].pop('version', '1.0.0'),
options.pop('openapi_version', '3.0.0'),
**options, plugins=[MarshmallowPlugin()])
spec.tags = {}
# Setup Authorization
if api.authorize:
_, _, schema = parse_docs(api.authorize)
spec.options['security'] = []
for key, value in schema.items():
spec.components.security_scheme(key, value)
spec.options['security'].append({key: []})
# Setup Paths
routes = api.router.routes()
for route in routes:
if route.path in SKIP_PATH:
continue
spec.path(route.path, **route_to_spec(route, spec))
return spec.to_dict()
|
96e49080a6c66f05210676c71a6e169f4caeca95
| 25,303 |
def apply_transformations(initial_representation: list, events: list) -> list:
"""Apply the transformations in the events list to the initial representation"""
scale = 1
rot_angle = 0
trans_vector = [0, 0]
for item in events:
for event in item["events"]:
if event["type"] == "TRANSLATION":
trans_vector[X_COORDINATE] += event["trigger"]["transformation"][X_COORDINATE]
trans_vector[Y_COORDINATE] += event["trigger"]["transformation"][Y_COORDINATE]
elif event["type"] == "ROTATION":
rot_angle += event["trigger"]["transformation"]
elif event["type"] == "UNIFORM_SCALE":
scale *= event["trigger"]["transformation"]
# Apply multiplication
polygon = geometry.Polygon(initial_representation)
s_polygon = affinity.scale(polygon,
xfact=scale,
yfact=scale,
origin=(0, 0))
r_s_polygon = affinity.rotate(s_polygon,
rot_angle,
origin=(0, 0))
t_r_s_polygon = affinity.translate(r_s_polygon,
xoff=trans_vector[0],
yoff=trans_vector[1])
return polygon_to_vertices_list(t_r_s_polygon)
|
a96be8bafe3b3cde3411f9c8efbf95629319b3cb
| 25,304 |
import numpy as np
def affine_transform(transform, points):
"""
Transforms a set of N x 2 points using the given Affine object.
"""
reshaped_points = np.vstack([points.T, np.ones((1, points.shape[0]))])
transformed = np.dot(affine_to_matrix(transform), reshaped_points)
return transformed.T[:,:2]
|
b976466d688003c7822f363949168d4acb6addb0
| 25,305 |
from datetime import datetime
def get_current_time_in_millisecs():
"""Returns time in milliseconds since the Epoch."""
    return get_time_in_millisecs(datetime.utcnow())
|
17348e6c4994b3da0669be0ab39a7e8b01fd0e1c
| 25,306 |
import math
def get_test_paths(paths, snaps):
"""
Return $snaps paths to be tested on GLUE
"""
if snaps == -1:
return paths
interval = len(paths) * 1. / snaps
test_paths = []
for i in range(1, snaps+1):
idx = int(math.ceil(interval * i)) - 1
test_paths.append(paths[idx])
return test_paths
|
a2ac1f89740e85b6322e553559850c0e686a28c8
| 25,308 |
def is_success(code):
"""Return that the client's request was successfully received, understood, and accepted."""
return 200 <= code <= 299
|
8a6e64c0f218ca5a866a444c730e1ebf7628727e
| 25,309 |
def is_palindrome_v3(s):
""" (str) -> bool
Return True if and only if s is a palindrome.
>>> is_palindrome_v3('noon')
True
>>> is_palindrome_v3('racecar')
True
>>> is_palindrome_v3('dented')
False
>>> is_palindrome_v3('')
True
>>> is_palindrome_v3(' ')
True
"""
j = len(s) - 1
for i in range(len(s) // 2):
if s[i] != s[j - i]:
return False
return True
|
70f3393e39b30198879dbc5856c0c73b4be9601d
| 25,310 |
from typing import BinaryIO
def read(fd: BinaryIO) -> Entity:
"""Read mug scene from `fd` file object.
Args:
fd: File object to read from.
Returns:
Root entity.
"""
if fd.read(4) != b'MUGS':
raise ValueError("not a valid mug file format")
return read_recursive(fd)
|
2cb0b695ec1d75987940e862226166be22bad91d
| 25,311 |
def transform(x,y):
"""
This function takes an input vector of x values and y values, transforms them
to return the y in a linearized format (assuming nlogn function was used
to create y from x)
"""
final = []
for i in range(0, len(y)):
new = y[i]#/x[i]
final.append(2 ** new)
return final
|
119db625f5ebf469794bf3bdacd20a1c70ccd133
| 25,312 |
def _GRIsAreEnabled():
"""Returns True if GRIs are enabled."""
return (properties.VALUES.core.enable_gri.GetBool() or
properties.VALUES.core.resource_completion_style.Get() == 'gri')
|
55713933090509944dc42333bb478c11db9a4176
| 25,313 |
def fatorial(n, show=False):
    """
    -> Calculates the factorial of a number.
    :param n: The number whose factorial is computed.
    :param show: (optional) Whether or not to display the calculation.
    :return: The factorial of the number n.
    """
    resultado = 1
    print('-' * 35)
    for contando in range(n, 0, -1):
        resultado *= contando
        if show:
            print(contando, end='')
            if contando > 1:
                print(' x ', end='')
            else:
                print(' = ', end='')
    return resultado
|
ddb4d91b3813e270e746bb0d463a5da595366b86
| 25,314 |
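# Usage sketch (not part of the original dataset entry), assuming the corrected
# fatorial from the entry above is in scope.
print(fatorial(5))      # prints the separator line, then 120
fatorial(6, show=True)  # prints 6 x 5 x 4 x 3 x 2 x 1 =  and returns 720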
def start_job(job, hal_id, refGenome, opts):
"""Set up the structure of the pipeline."""
hal = hal_id
# Newick representation of the HAL species tree.
newick_string = get_hal_tree(hal)
job.fileStore.logToMaster("Newick string: %s" % (newick_string))
tree = newick.loads(newick_string)[0]
rerooted = reroot_tree(tree, refGenome)
job.fileStore.logToMaster("Rerooted newick string: %s" % (newick.dumps([rerooted])))
if opts.targetGenomes is not None:
# We don't need the alignment to all genomes, just a subset.
prune_tree(rerooted, opts.targetGenomes)
job.fileStore.logToMaster("Pruned newick string: %s" % newick.dumps(rerooted))
def setup_jobs(node):
"""Recursively set up jobs for this node and its children."""
prev_data = [setup_jobs(child) for child in node.descendants]
# At this point all of the jobs for the lower parts of the tree have been set up.
lifted_data = [prev_lifted for _, prev_lifted in prev_data]
merge_job = job.wrapJobFn(merge_blocks_job, node.name, [n.name for n in node.descendants], lifted_data, hal_id, opts)
for prev_job, _ in prev_data:
prev_job.addFollowOn(merge_job)
if node.is_leaf:
job.addChild(merge_job)
if node.ancestor is None:
return merge_job.rv()
else:
# Find out whether we have to lift up or down
original_node = find_node_by_name(tree, node.name)
if original_node.ancestor is None or node.ancestor.name != original_node.ancestor.name:
lift_down_job = merge_job.addFollowOnJobFn(lift_job, 'down', node.name, node.ancestor.name, merge_job.rv(), hal_id, opts)
return lift_down_job, lift_down_job.rv()
else:
lift_up_job = merge_job.addFollowOnJobFn(lift_job, 'up', node.name, node.ancestor.name, merge_job.rv(), hal_id, opts)
return lift_up_job, lift_up_job.rv()
blocks_on_ref = setup_jobs(rerooted)
all_genomes = [node.name for node in tree.walk()]
return job.addFollowOnJobFn(maf_export_job, hal, all_genomes, blocks_on_ref, opts).rv()
|
5ddade182d045d7a6384d7e6c8d133650dafebbd
| 25,315 |
def cleanup_databases():
"""
Returns:
bool: admin_client fixture should ignore any existing databases at
start of test and clean them up.
"""
return False
|
63fa94389609b8e28779d1e9e55e9b1ecde502b6
| 25,318 |
def spot_centroid(regions):
"""Returns centroids for a list of regionprops.
Args:
regions (regionprops): List of region proposals (skimage.measure).
Returns:
list: Centroids of regionprops.
"""
return [r.centroid for r in regions]
|
f53f403dddf0af123afd207e33cc06254a0f2538
| 25,321 |
def one_election_set_reg(request, election):
"""
Set whether this is open registration or not
"""
# only allow this for public elections
if not election.private_p:
open_p = bool(int(request.GET['open_p']))
election.openreg = open_p
election.save()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(voters_list_pretty, args=[election.uuid]))
|
5486c8b277528204260189f1f248aadc20cd9354
| 25,323 |
def overrides(conf, var):
"""This api overrides the dictionary which contains same keys"""
if isinstance(var, list):
for item in var:
if item in conf:
for key, value in conf[item].items():
conf[key] = value
elif var in conf:
for key, value in conf[var].items():
conf[key] = value
return conf
|
18375dc43a0d684feaf9089756ecb45eb5a366f3
| 25,324 |
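# Usage sketch (not part of the original dataset entry), assuming overrides from
# the entry above is in scope: keys under conf["dev"] overwrite the top level.
conf = {"timeout": 10, "dev": {"timeout": 30, "debug": True}}
conf = overrides(conf, "dev")
print(conf["timeout"], conf["debug"])  # 30 True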
def annual_to_daily_rate(rate, trading_days_in_year=TRADING_DAYS_IN_YEAR):
"""
Infer daily rate from annual rate
:param rate: the annual rate of return
:param trading_days_in_year: optional, trading days in year (default = 252)
:return: the daily rate
"""
return subdivide_rate(rate, trading_days_in_year)
|
a93ce4e3b0ba247f37b5a867025670a7f064022e
| 25,325 |
def make_hashable(data):
"""Make the given object hashable.
It makes it ready to use in a `hash()` call, making sure that
it's always the same for lists and dictionaries if they have the same items.
:param object data: the object to hash
:return: a hashable object
:rtype: object
"""
if isinstance(data, (list, tuple)):
return tuple((make_hashable(item) for item in data))
elif isinstance(data, dict):
return tuple(
(key, make_hashable(value))
for key, value in sorted(data.items())
)
else:
return data
|
e4b88978ddee6d4dfc354845184a0e80b1f434bf
| 25,326 |
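# Usage sketch (not part of the original dataset entry), assuming make_hashable
# from the entry above is in scope: equal dicts hash equally regardless of key order.
a = {"x": [1, 2], "y": {"nested": True}}
b = {"y": {"nested": True}, "x": [1, 2]}
print(hash(make_hashable(a)) == hash(make_hashable(b)))  # True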
from typing import Dict
from typing import Tuple
def prepare_model_parameters(
parameters: Dict[str, FloatOrDistVar], data: DataFrame,
beta_fun, splines, spline_power
) -> Tuple[Dict[str, FloatLike], Dict[str, NormalDistVar]]:
"""Prepares model input parameters and returns independent and dependent parameters
Also shifts back simulation to start with only exposed people.
"""
# Set up fit parameters
## Dependent parameters which will be fitted
pp = {key: val for key, val in parameters.items() if isinstance(val, GVar)}
## Independent model meta parameters
xx = {key: val for key, val in parameters.items() if key not in pp}
# This part ensures that the simulation starts with only exposed persons
## E.g., we shift the simulation backwards such that exposed people start to
## become infected
xx["offset"] = int(
expon.ppf(0.99, 1 / pp["incubation_days"].mean)
    ) # Enough time for 99% of exposed to become infected
# pp["logistic_x0"] += xx["offset"]
xx['beta_fun'] = beta_fun
xx['knots'] = splines
xx['spline_power'] = spline_power
## Store the actual first day and the actual last day
xx["day0"] = data.index.min()
xx["day-1"] = data.index.max()
## And start earlier in time
xx["dates"] = date_range(
xx["day0"] - timedelta(xx["offset"]), freq="D", periods=xx["offset"]
).union(data.index)
# initialize the spline parameters on the flexible beta
if xx['beta_fun'] == "flexible_beta":
pp['beta_splines'] = gvar([pp['pen_beta'].mean for i in range(len(xx['knots']))],
[pp['pen_beta'].sdev for i in range(len(xx['knots']))])
pp.pop("pen_beta")
pp.pop('logistic_k')
pp.pop('logistic_x0')
pp.pop('logistic_L')
## Thus, all compartment but exposed and susceptible are 0
for key in ["infected", "recovered", "icu", "vent", "hospital"]:
xx[f"initial_{key}"] = 0
pp["initial_exposed"] = (
xx["n_hosp"] / xx["market_share"] / pp["hospital_probability"]
)
xx["initial_susceptible"] -= pp["initial_exposed"].mean
return xx, pp
|
c68692148115aa8aa5b9a0b600d6b335cd7ed99f
| 25,327 |
import logging
import numpy as np
def paper_features_to_author_features(
author_paper_index, paper_features):
"""Averages paper features to authors."""
assert paper_features.shape[0] == NUM_PAPERS
assert author_paper_index.shape[0] == NUM_AUTHORS
author_features = np.zeros(
[NUM_AUTHORS, paper_features.shape[1]], dtype=paper_features.dtype)
for author_i in range(NUM_AUTHORS):
paper_indices = author_paper_index[author_i].indices
author_features[author_i] = paper_features[paper_indices].mean(
axis=0, dtype=np.float32)
if author_i % 10000 == 0:
logging.info("%d/%d", author_i, NUM_AUTHORS)
return author_features
|
6394c57d2b48461287cc77cca021d25c377efaae
| 25,328 |
def set_chart_time_horizon(request) -> JsonResponse:
"""
Set the x-axis (time horizon) of a chart.
API Call:
/set_chart_time_horizon?
monitor_name=<monitor name>&
value=<time horizon to set>
:param request: HTTP request that expects a 'monitor_name' and 'value' argument. 'value' represents the new time horizon to be set. Valid time horizons are: 'day', 'week', 'month', 'year', or an integer representing a number of most recent hours to display.
:return: The new value after being set or the old value if it was not set.
"""
kwargs = _parse_args(request, 'monitor_name', 'value')
rv = MonitorServiceManager().set_value(kwargs.get('monitor_name'), 'charting_time_horizon', kwargs.get('value'))
return JsonResponse(rv, safe=False)
|
94eb982090da2e761032aa5fef4f210c2ff312ee
| 25,329 |
def get_single_io_arg(info):
"""
Get single input/output arg from io info
:param info:
:return:input/output arg
"""
if 'valid' not in info:
raise ValueError("Json string Errors, key:valid not found.")
if info['valid']:
check_arg_info(info)
del info['valid']
del info['name']
if 'range' in info:
for i in range(len(info['range'])):
if info['range'][i][1] == -1:
info['range'][i][1] = None
res = info
else:
res = None
return res
|
6062f5e1ea61d6a999d106e40e72610b2373b26a
| 25,330 |
from typing import Dict
from typing import Any
def new_credentials(
client_id: str, consumer_secret: str, data: Dict[str, Any]
) -> Credentials:
"""Create Credentials from config and json."""
return Credentials(
access_token=str_or_raise(data.get("access_token")),
token_expiry=arrow.utcnow().timestamp + data.get("expires_in"),
token_type=str_or_raise(data.get("token_type")),
refresh_token=str_or_raise(data.get("refresh_token")),
userid=int_or_raise(data.get("userid")),
client_id=str_or_raise(client_id),
consumer_secret=str_or_raise(consumer_secret),
)
|
2f582de9863ed15122678d8268c5f2a9c8a0484e
| 25,331 |
def get_chains(table, ipv6=False):
""" Return the existing chains of a table """
iptc_table = _iptc_gettable(table, ipv6)
return [iptc_chain.name for iptc_chain in iptc_table.chains]
|
659119d90befb241a205f2a70eeafc273098a91a
| 25,332 |
import numpy as np
def unscale_fundamental_matrix(fundamental_matrix, M):
"""
Unscale fundamental matrix by coordinate scaling factor
:param fundamental_matrix:
:param M: Scaling factor
:return: Unscaled fundamental matrix
"""
T = np.diag([1 / M, 1 / M, 1])
unscaled_F = T.T.dot(fundamental_matrix).dot(T)
return unscaled_F
|
968c924870a93363cf78bfcc593b5c4367375692
| 25,334 |
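# Usage sketch (not part of the original dataset entry), assuming
# unscale_fundamental_matrix from the entry above is in scope; F_scaled is
# arbitrary example data and M is the normalization factor used beforehand.
import numpy as np
F_scaled = np.array([[0.0, -1.0, 2.0],
                     [1.0, 0.0, -3.0],
                     [-2.0, 3.0, 0.0]])
F = unscale_fundamental_matrix(F_scaled, M=256.0)
print(F.shape)  # (3, 3)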
def initialize_server_request(request):
"""Shortcut for initialization."""
# Django converts Authorization header in HTTP_AUTHORIZATION
# Warning: it doesn't happen in tests but it's useful, do not remove!
auth_header = {}
if 'Authorization' in request.META:
auth_header = {'Authorization': request.META['Authorization']}
elif 'HTTP_AUTHORIZATION' in request.META:
auth_header = {'Authorization': request.META['HTTP_AUTHORIZATION']}
oauth_request = OAuthRequest.from_request(request.method,
request.build_absolute_uri(),
headers=auth_header,
parameters=dict(request.REQUEST.items()),
query_string=request.environ.get('QUERY_STRING', ''))
if oauth_request:
oauth_server = OAuthServer(DataStore(oauth_request))
oauth_server.add_signature_method(OAuthSignatureMethod_PLAINTEXT())
oauth_server.add_signature_method(OAuthSignatureMethod_HMAC_SHA1())
else:
oauth_server = None
return oauth_server, oauth_request
|
f844a677e48a933401dbd6028ce7faf8e58e77cd
| 25,335 |
from .sky_coordinate import SkyCoord
from typing import Sequence
def _parse_coordinate_arg(coords, frame, units, init_kwargs):
"""
Single unnamed arg supplied. This must be:
- Coordinate frame with data
- Representation
- SkyCoord
- List or tuple of:
- String which splits into two values
- Iterable with two values
- SkyCoord, frame, or representation objects.
Returns a dict mapping coordinate attribute names to values (or lists of
values)
"""
is_scalar = False # Differentiate between scalar and list input
# valid_kwargs = {} # Returned dict of lon, lat, and distance (optional)
components = {}
skycoord_kwargs = {}
frame_attr_names = list(frame.representation_component_names.keys())
repr_attr_names = list(frame.representation_component_names.values())
repr_attr_classes = list(frame.representation.attr_classes.values())
n_attr_names = len(repr_attr_names)
# Turn a single string into a list of strings for convenience
if isinstance(coords, str):
is_scalar = True
coords = [coords]
if isinstance(coords, (SkyCoord, BaseCoordinateFrame)):
# Note that during parsing of `frame` it is checked that any coordinate
# args have the same frame as explicitly supplied, so don't worry here.
if not coords.has_data:
raise ValueError('Cannot initialize from a frame without coordinate data')
data = coords.data.represent_as(frame.representation_type)
values = [] # List of values corresponding to representation attrs
repr_attr_name_to_drop = []
for repr_attr_name in repr_attr_names:
# If coords did not have an explicit distance then don't include in initializers.
if (isinstance(coords.data, UnitSphericalRepresentation) and
repr_attr_name == 'distance'):
repr_attr_name_to_drop.append(repr_attr_name)
continue
# Get the value from `data` in the eventual representation
values.append(getattr(data, repr_attr_name))
# drop the ones that were skipped because they were distances
for nametodrop in repr_attr_name_to_drop:
nameidx = repr_attr_names.index(nametodrop)
del repr_attr_names[nameidx]
del units[nameidx]
del frame_attr_names[nameidx]
del repr_attr_classes[nameidx]
if coords.data.differentials and 's' in coords.data.differentials:
orig_vel = coords.data.differentials['s']
vel = coords.data.represent_as(frame.representation, frame.get_representation_cls('s')).differentials['s']
for frname, reprname in frame.get_representation_component_names('s').items():
if (reprname == 'd_distance' and not hasattr(orig_vel, reprname) and
'unit' in orig_vel.get_name()):
continue
values.append(getattr(vel, reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(vel.attr_classes[reprname])
for attr in frame_transform_graph.frame_attributes:
value = getattr(coords, attr, None)
use_value = (isinstance(coords, SkyCoord)
or attr not in coords._attr_names_with_defaults)
if use_value and value is not None:
skycoord_kwargs[attr] = value
elif isinstance(coords, BaseRepresentation):
if coords.differentials and 's' in coords.differentials:
diffs = frame.get_representation_cls('s')
data = coords.represent_as(frame.representation_type, diffs)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
for frname, reprname in frame.get_representation_component_names('s').items():
values.append(getattr(data.differentials['s'], reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(data.differentials['s'].attr_classes[reprname])
else:
data = coords.represent_as(frame.representation)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
elif (isinstance(coords, np.ndarray) and coords.dtype.kind in 'if'
and coords.ndim == 2 and coords.shape[1] <= 3):
# 2-d array of coordinate values. Handle specially for efficiency.
values = coords.transpose() # Iterates over repr attrs
elif isinstance(coords, (Sequence, np.ndarray)):
# Handles list-like input.
vals = []
is_ra_dec_representation = ('ra' in frame.representation_component_names and
'dec' in frame.representation_component_names)
coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation)
if any(isinstance(coord, coord_types) for coord in coords):
# this parsing path is used when there are coordinate-like objects
# in the list - instead of creating lists of values, we create
# SkyCoords from the list elements and then combine them.
scs = [SkyCoord(coord, **init_kwargs) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError("List of inputs don't have equivalent "
"frames: {0} != {1}".format(sc, scs[0]))
# Now use the first to determine if they are all UnitSpherical
allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation)
# get the frame attributes from the first coord in the list, because
# from the above we know it matches all the others. First copy over
# the attributes that are in the frame itself, then copy over any
# extras in the SkyCoord
for fattrnm in scs[0].frame.frame_attributes:
skycoord_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm)
for fattrnm in scs[0]._extra_frameattr_names:
skycoord_kwargs[fattrnm] = getattr(scs[0], fattrnm)
# Now combine the values, to be used below
values = []
for data_attr_name, repr_attr_name in zip(frame_attr_names, repr_attr_names):
if allunitsphrepr and repr_attr_name == 'distance':
# if they are *all* UnitSpherical, don't give a distance
continue
data_vals = []
for sc in scs:
data_val = getattr(sc, data_attr_name)
data_vals.append(data_val.reshape(1,) if sc.isscalar else data_val)
concat_vals = np.concatenate(data_vals)
# Hack because np.concatenate doesn't fully work with Quantity
if isinstance(concat_vals, u.Quantity):
concat_vals._unit = data_val.unit
values.append(concat_vals)
else:
# none of the elements are "frame-like"
# turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]]
for coord in coords:
if isinstance(coord, str):
coord1 = coord.split()
if len(coord1) == 6:
coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
elif is_ra_dec_representation:
coord = _parse_ra_dec(coord)
else:
coord = coord1
vals.append(coord) # Assumes coord is a sequence at this point
# Do some basic validation of the list elements: all have a length and all
# lengths the same
try:
n_coords = sorted(set(len(x) for x in vals))
except Exception:
raise ValueError('One or more elements of input sequence does not have a length')
if len(n_coords) > 1:
raise ValueError('Input coordinate values must have same number of elements, found {0}'
.format(n_coords))
n_coords = n_coords[0]
# Must have no more coord inputs than representation attributes
if n_coords > n_attr_names:
raise ValueError('Input coordinates have {0} values but '
'representation {1} only accepts {2}'
.format(n_coords,
frame.representation_type.get_name(),
n_attr_names))
# Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)]
# (ok since we know it is exactly rectangular). (Note: can't just use zip(*values)
# because Longitude et al distinguishes list from tuple so [a1, a2, ..] is needed
# while (a1, a2, ..) doesn't work.
values = [list(x) for x in zip(*vals)]
if is_scalar:
values = [x[0] for x in values]
else:
raise ValueError('Cannot parse coordinates from first argument')
# Finally we have a list of values from which to create the keyword args
# for the frame initialization. Validate by running through the appropriate
# class initializer and supply units (which might be None).
try:
for frame_attr_name, repr_attr_class, value, unit in zip(
frame_attr_names, repr_attr_classes, values, units):
components[frame_attr_name] = repr_attr_class(value, unit=unit,
copy=False)
except Exception as err:
raise ValueError('Cannot parse first argument data "{0}" for attribute '
'{1}'.format(value, frame_attr_name), err)
return skycoord_kwargs, components
|
6ed456c07b407476a9c02d7d87c9351d27fb7513
| 25,336 |
from typing import Tuple
def build_names(dependency: Dependency, version_in_url: bool = True) -> Tuple[RemoteResolver, str, str]:
"""
A function to build directory and file names based on the given dependency..
:param dependency: the dependency to create the file container for.
:param version_in_url: a flag noting whether the dependency version should be included
in the URL we build.
:return: a tuple containing an appropriate remote resolver, a classified base file name
and a base file name.
"""
resolver = create_remote_resolver(
dependency.group, dependency.name, dependency.version if version_in_url else None
)
name = dependency.name
version = dependency.version
classifier = dependency.classifier
base_name = f'{name}-{version}'
classified_name = f'{base_name}-{classifier}' if classifier else base_name
return resolver, classified_name, base_name
|
83a3c063e69a6a8e16e0af6f2e6f43d9f423cd3e
| 25,337 |
def convert_apc_examples_to_features(examples, label_list, max_seq_len, tokenizer, opt=None):
"""Loads a data file into a list of `InputBatch`s."""
configure_spacy_model(opt)
bos_token = tokenizer.bos_token
eos_token = tokenizer.eos_token
label_map = {label: i for i, label in enumerate(label_list, 1)}
opt.IOB_label_to_index = label_map
features = []
for (ex_index, example) in enumerate(examples):
text_tokens = example.text_a[:]
aspect_tokens = example.text_b[:]
IOB_label = example.IOB_label
# aspect_label = example.aspect_label
aspect_label = ['B-ASP'] * len(aspect_tokens)
polarity = [-999] + example.polarity + [-999]
positions = np.where(np.array(polarity) > 0)[0].tolist()
tokens = []
labels = []
valid = []
label_mask = []
enum_tokens = [bos_token] + text_tokens + [eos_token] + aspect_tokens + [eos_token]
IOB_label = [bos_token] + IOB_label + [eos_token] + aspect_label + [eos_token]
enum_tokens = enum_tokens[:max_seq_len]
IOB_label = IOB_label[:max_seq_len]
aspect = ' '.join(example.text_b)
try:
text_left, _, text_right = [s.strip() for s in ' '.join(example.text_a).partition(aspect)]
except:
text_left = ' '.join(example.text_a)
text_right = ''
aspect = ''
text_raw = text_left + ' ' + aspect + ' ' + text_right
validate_example(text_raw, aspect, '')
prepared_inputs = prepare_input_for_atepc(opt, tokenizer, text_left, text_right, aspect)
lcf_cdm_vec = prepared_inputs['lcf_cdm_vec']
lcf_cdw_vec = prepared_inputs['lcf_cdw_vec']
for i, word in enumerate(enum_tokens):
token = tokenizer.tokenize(word)
tokens.extend(token)
cur_iob = IOB_label[i]
for m in range(len(token)):
if m == 0:
label_mask.append(1)
labels.append(cur_iob)
valid.append(1)
else:
valid.append(0)
tokens = tokens[0:min(len(tokens), max_seq_len - 2)]
labels = labels[0:min(len(labels), max_seq_len - 2)]
valid = valid[0:min(len(valid), max_seq_len - 2)]
segment_ids = [0] * len(example.text_a[:]) + [1] * (max_seq_len - len([0] * len(example.text_a[:])))
segment_ids = segment_ids[:max_seq_len]
label_ids = []
for i, token in enumerate(tokens):
if len(labels) > i:
label_ids.append(label_map[labels[i]])
input_ids_spc = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids_spc)
label_mask = [1] * len(label_ids)
while len(input_ids_spc) < max_seq_len:
input_ids_spc.append(0)
input_mask.append(0)
label_ids.append(0)
label_mask.append(0)
while len(valid) < max_seq_len:
valid.append(1)
while len(label_ids) < max_seq_len:
label_ids.append(0)
label_mask.append(0)
assert len(input_ids_spc) == max_seq_len
assert len(input_mask) == max_seq_len
assert len(segment_ids) == max_seq_len
assert len(label_ids) == max_seq_len
assert len(valid) == max_seq_len
assert len(label_mask) == max_seq_len
features.append(
InputFeatures(input_ids_spc=input_ids_spc,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids,
polarity=polarity,
valid_ids=valid,
label_mask=label_mask,
tokens=example.text_a,
lcf_cdm_vec=lcf_cdm_vec,
lcf_cdw_vec=lcf_cdw_vec,
aspect=aspect,
positions=positions
)
)
return features
|
8eb8570e4d6f2f5fa22583ff216cbf216ac10145
| 25,338 |
from datetime import datetime
from pathlib import Path
def write_output(opts: AppOptions, out_lines):
"""
Writes the modified document lines to a new file with "MODIFIED" and
a date_time tag added to the file name. Returns the file name.
"""
ds = datetime.now().strftime("%Y%m%d_%H%M%S")
out_name = f"{opts.doc_path.stem}_MODIFIED_{ds}{opts.doc_path.suffix}"
out_path = Path(opts.doc_path).parent.joinpath(out_name)
assert not out_path.exists()
print(f"\nSaving '{out_path}'")
with open(out_path, "w") as out_file:
for s in out_lines:
out_file.write(f"{s}\n")
return str(out_path)
|
613bb6a3290e8a86eb8c945248b6088db3e9dc91
| 25,339 |
import paramiko
def ssh_pub_key(key_file):
"""Creates a string of a public key from the private key file.
"""
key = paramiko.RSAKey(filename=key_file)
pub = "{0} {1} autogenerated by polyphemus"
pub = pub.format(key.get_name(), key.get_base64())
return pub
|
bf46c62031a7d761278612e5835b829a32f7fbe2
| 25,340 |
import tensorflow as tf
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True, use_zeros=False, init=None):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
use_xavier: bool, whether to use xavier initializer
use_zeros: bool, whether to use zero initializer
Returns:
Variable Tensor
"""
if use_xavier:
#initializer = tf.contrib.layers.xavier_initializer()
initializer = tf.initializers.glorot_normal()
elif use_zeros:
initializer = tf.constant_initializer(0.0)
else:
initializer = tf.truncated_normal_initializer(stddev=stddev)
var = _variable_on_cpu(name, shape, initializer)
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
|
a3d43147508bd10040521dc59cfc49de81bbb70b
| 25,341 |
def build_mobile_vit(config):
"""Build MobileViT by reading options in config object
Args:
config: config instance contains setting options
Returns:
model: MobileViT model
"""
model = MobileViT(in_channels=config.MODEL.IN_CHANNELS,
dims=config.MODEL.DIMS, # XS: [16, 32, 48, 48, 48, 64, 80, 96, 384]
hidden_dims=config.MODEL.HIDDEN_DIMS, # XS: [96, 120, 144], # d: hidden dims in mobilevit block
num_classes=config.MODEL.NUM_CLASSES)
return model
|
7c8eb92214240b922f4df242f937ba05985c3d2c
| 25,342 |
from functools import reduce
from bs4 import BeautifulSoup
def search_youtube(query, retries = 4, max_num_results = -1):
""" Unlimited youtube search by web scrapping """
transformed_query = reduce(lambda s_ant, s_sig : s_ant + '+' + s_sig, query) if len(query) != 0 else ''
scrapped_data = []
num_of_requests = 0
for i in range(retries):
page = get_html(transformed_query)
num_of_requests += 1
if "</ol>" in page.text:
break
logger.info(f" Number of requests : {num_of_requests}")
soup = BeautifulSoup(page.content, 'html.parser')
item_list = soup.find('ol', class_='item-section')
if item_list is None:
raise Exception(" Html without list of results ")
items = item_list.find_all('li')
scrapped_data = [x for x in map(extract_data, items) if x is not None]
return scrapped_data if max_num_results <= 0 else scrapped_data[:max_num_results]
|
119305a5e6ad562fa9f537d42fd36617338b2472
| 25,344 |
from typing import List
from typing import Tuple
from typing import OrderedDict
def define_empty_source_parallel_buckets(max_seq_len_target: int,
bucket_width: int = 10) -> List[Tuple[int, int]]:
"""
Returns (source, target) buckets up to (None, max_seq_len_target). The source
is empty since it is supposed to not contain data that can be bucketized.
The target is used as reference to create the buckets.
:param max_seq_len_target: Maximum target bucket size.
:param bucket_width: Width of buckets on longer side.
"""
target_step_size = max(1, bucket_width)
target_buckets = define_buckets(max_seq_len_target, step=target_step_size)
# source buckets are always 0 since there is no text
source_buckets = [0 for b in target_buckets]
target_buckets = [max(2, b) for b in target_buckets]
parallel_buckets = list(zip(source_buckets, target_buckets))
# deduplicate for return
buckets = list(OrderedDict.fromkeys(parallel_buckets))
buckets.sort()
return buckets
|
3e5261191c5a55f4d82ee84b9000e0996e06c9f9
| 25,346 |
import numpy as _np
def _hue_scaling(args):
    """return scaled hue values as described in
    http://dlmf.nist.gov/help/vrml/aboutcolor
    args : ndarray of phase angles of complex numbers in the half-open
    interval [0, 2*pi)
    q : scaled values returned in the interval [0, 1)
    """
q = 4.0*_np.mod((args/(2*_np.pi) + 1), 1)
mask1 = (q >= 0) * (q < 1)
mask2 = (q >= 1) * (q < 2)
mask3 = (q >= 2) * (q < 3)
mask4 = (q >= 3) * (q < 4)
q[mask1] = (60.0/360)*q[mask1]
q[mask2] = (60.0/360)*(2.0*q[mask2] - 1)
q[mask3] = (60.0/360)*(q[mask3] + 1)
q[mask4] = (60.0/360)*2.0*(q[mask4] - 1)
return q
|
42fa445d5e790eed13d692613771d02cc55fad94
| 25,347 |
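# Usage sketch (not part of the original dataset entry), assuming _hue_scaling
# from the entry above is in scope (with numpy imported as _np in its module).
import numpy as np
angles = np.array([0.0, np.pi / 2, np.pi, 3 * np.pi / 2])
print(_hue_scaling(angles))  # approximately [0.     0.1667 0.5    0.6667]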
import numpy as np
from qharv.inspect import axes_pos
def get_orbs(fp, orbs, truncate=False, tol=1e-8):
""" return the list of requested Kohn-Sham orbitals
Args:
fp (h5py.File): wf h5 file
    orbs (list): a list of 3-tuples, each tuple specifies the KS state
by (kpoint/twist, spin, band) i.e. (ik, ispin, ib)
truncate (bool, optional): remove PWs with ``small'' coefficient
tol (float, optional): define ``small'' as |ck|^2 < tol
"""
gvecs = get(fp, 'gvectors')
qvecs = get_twists(fp)
axes = get(fp, 'axes')
raxes = axes_pos.raxes(axes)
kvecsl = []
psigl = []
for orb in orbs:
ik, ispin, ib = orb
# PW basis
kvecs = np.dot(gvecs+qvecs[ik], raxes)
npw = len(kvecs)
# PW coefficients
psig = get_orb_in_pw(fp, ik, ispin, ib)
sel = np.ones(npw, dtype=bool)
if truncate: # cut down on the # of PWs
pg2 = (psig.conj()*psig).real
sel = pg2 > tol
kvecsl.append(kvecs[sel])
psigl.append(psig[sel])
return kvecsl, psigl
|
d4b7bf639b81f1d776f29bb736a51b7b8684c42d
| 25,348 |
def geturlcgivars(baseurl, port):
"""
Extract CGI variables from baseurl
>>> geturlcgivars("http://host.org/base", "80")
('host.org', '80', '/base')
>>> geturlcgivars("http://host.org:8000/base", "80")
('host.org', '8000', '/base')
>>> geturlcgivars('/base', 8000)
('', '8000', '/base')
>>> geturlcgivars("base", '8000')
('', '8000', '/base')
>>> geturlcgivars("http://host", '8000')
('host', '8000', '/')
>>> geturlcgivars("http://host/", '8000')
('host', '8000', '/')
"""
u = util.url(baseurl)
name = u.host or ''
if u.port:
port = u.port
path = u.path or ""
if not path.startswith('/'):
path = '/' + path
return name, str(port), path
|
116ace2bb8275de5faddb4c40e9de05d8a7aee95
| 25,349 |
def prepare_log_for_upload(symbolized_output, return_code):
"""Prepare log for upload."""
# Add revision information to the logs.
app_revision = environment.get_value('APP_REVISION')
job_name = environment.get_value('JOB_NAME')
components = revisions.get_component_list(app_revision, job_name)
component_revisions = (
revisions.format_revision_list(components, use_html=False) or
'Not available.\n')
revisions_header = (
'Component revisions (build r{app_revision}):\n{component_revisions}\n'.
format(
app_revision=app_revision, component_revisions=component_revisions))
return_code_header = 'Return code: %s\n\n' % return_code
return revisions_header + return_code_header + symbolized_output
|
78e41f67be726f2fb0fb4fb59aa23d124a371055
| 25,351 |
import math
def bucketize(point, bucket_size):
"""floor the point to the next lower multiple of bucket_size"""
return bucket_size * math.floor(point / bucket_size)
|
ff152cb5b646df1fe883bd943033c05e83623f31
| 25,352 |
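# Usage sketch (not part of the original dataset entry), assuming bucketize from
# the entry above is in scope: points are floored to multiples of bucket_size.
print(bucketize(7.3, 2))   # 6
print(bucketize(-0.1, 2))  # -2
print(bucketize(100, 25))  # 100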
def find_unit(df):
"""find unit in the df, add column to df indicating which token contains unit
and return the unit as string."""
doc_unit = ""
# thousand = "(\$)(0){3}|thousand|€(\s*)thous|TEUR|T(\s*)€|Tsd|Tausend"
# million = "millions|million|£(\s*)m|$(\s*)m|€(\s*)m|mn|mio(\s*)€|in(\s+)mio|MM|\d+(M){1}"
# billion = "billion|Bn|Mrd|Md"
units = {"thousand": THOUSAND, "million": MILLION, "billion": BILLION}
for key, value in units.items():
if df.apply(lambda x: x.str.contains(value, case=True).any(), axis=1).any(
axis=None
):
# If doc_unit is set two times => set undefined
if doc_unit:
doc_unit = "1"
break
# Set doc currency
else:
doc_unit = key
# Create column for unit in df marking the token which contains unit
df.loc[:, "unit"] = False
for key, value in units.items():
df.loc[df["text"].str.contains(value, case=True), "unit"] = True
# Set default unit to 1
if not doc_unit:
doc_unit = "1"
return doc_unit
|
a46eede3e5dc928f90969ff89df15c5e0039991d
| 25,353 |
import torch
def regular_channels(audio, new_channels):
    """
    torchaudio-style pair ([tensor, sample_rate]) + target channel count -> new pair
    """
    sig, sr = audio
    if sig.shape[0] == new_channels:
        return audio
    if new_channels == 1:
        new_sig = sig[:1, :]  # just keep the frames of the first channel
    else:
        # duplicate the existing channel (c*f -> 2c*f)
        new_sig = torch.cat([sig, sig], dim=0)
    return [new_sig, sr]
|
5b055d965f35fc4cf0f434b34e8f579f321fee89
| 25,354 |
from typing import Union
from typing import Callable
def is_less_than(maximum: Union[int, float, Decimal]) -> Callable[[Union[int, float, Decimal]], bool]:
"""
:param maximum: A number
:return: A predicate that checks if a value is less than the given number
"""
def predicate(i: Union[int, float, Decimal]):
"""
:param i: A number
:return: Whether the number is less than the maximum
"""
return i < maximum
predicate.__name__ = f'_{is_less_than.__name__}_{maximum}'
return predicate
|
9f381ae1901581f24af1f3b51f9d64a3b976d6e9
| 25,355 |
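# Usage sketch (not part of the original dataset entry), assuming is_less_than
# from the entry above is in scope: build reusable, self-describing predicates.
under_ten = is_less_than(10)
print(under_ten(3), under_ten(12.5))  # True False
print(under_ten.__name__)             # _is_less_than_10
print(list(filter(is_less_than(5), [1, 4, 7, 9])))  # [1, 4]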
def a_function(my_arg, another):
"""
This is the brief description of my function.
This is a more complete example of my function. It can include doctest,
code blocks or any other reST structure.
>>> a_function(10, [MyClass('a'), MyClass('b')])
20
:param int my_arg: The first argument of the function. Just a number.
:param another: The other argument of the important function.
:type another: A list of :class:`MyClass`
:return: The length of the second argument times the first argument.
:rtype: int
"""
return my_arg * len(another)
|
8624edfe3ec06b53e065a6672c3b21682cdefe06
| 25,356 |
from datetime import datetime
from werkzeug.utils import secure_filename
def _safe_filename(filename):
"""
Generates a safe filename that is unlikely to collide with existing objects
in Google Cloud Storage.
``filename.ext`` is transformed into ``filename-YYYY-MM-DD-HHMMSS.ext``
"""
filename = secure_filename(filename)
    date = datetime.utcnow().strftime("%Y-%m-%d-%H%M%S")
basename, extension = filename.rsplit('.', 1)
return "{0}-{1}.{2}".format(basename, date, extension)
|
2ea94c9d18240cf1f1c4dadd19f2bd87b39e6538
| 25,358 |
import collections
def _make_with_custom_variables(func, variables):
"""Calls func and replaces any trainable variables.
This returns the output of func, but whenever `get_variable` is called it
will replace any trainable variables with the tensors in `variables`, in the
same order. Non-trainable variables will re-use any variables already
created.
Args:
func: Function to be called.
variables: A list of tensors replacing the trainable variables.
Returns:
The return value of func is returned.
"""
variables = collections.deque(variables)
def custom_getter(getter, name, **kwargs):
if kwargs["trainable"]:
return variables.popleft()
else:
kwargs["reuse"] = True
return getter(name, **kwargs)
return _wrap_variable_creation(func, custom_getter)
|
a36923223d22b6b09d9697da3a072c3eef4e739a
| 25,360 |
def create_loss_and_learner(
model, labels, learning_rate,
momentum_coef=0.0, wdecay=0.0, nesterov=False,
gradient_clip_norm=None, gradient_clip_value=None):
"""
Auxiliary function to create loss function (cross entropy and softmax)
and trainer using stochastic gradient descent with momentum.
Arguments:
model - imported model
labels - placeholder for one-hot labels array
learning_rate - learning rate for trainer
        momentum_coef - coefficient of momentum (default 0.0)
        wdecay - amount of weight decay (default 0.0)
        nesterov - use nesterov accelerated gradient (default False)
gradient_clip_norm - target gradient norm (default None)
gradient_clip_value - value to element-wise clip gradients (default None)
Returns:
Loss function (mean for batch)
"""
if model.axes.lengths != labels.axes.lengths:
labels = ng.Transpose(labels)
assert model.axes.lengths == labels.axes.lengths
model = ng.cast_axes(model, axes=labels.axes)
loss = ng.cross_entropy_multi(ng.softmax(model), labels)
optimizer = GradientDescentMomentum(
learning_rate, momentum_coef, wdecay,
gradient_clip_norm, gradient_clip_value, nesterov
)
return ng.sequential([optimizer(loss), ng.mean(loss, out_axes=())])
|
49669aa109f748b13b3c52efea7cbadb94d492b7
| 25,361 |
def mesh_subdivide_tri(mesh, k=1):
"""Subdivide a mesh using simple insertion of vertices.
Parameters
----------
mesh : Mesh
The mesh object that will be subdivided.
k : int
Optional. The number of levels of subdivision. Default is ``1``.
Returns
-------
Mesh
A new subdivided mesh.
Examples
--------
>>> box = Box.from_corner_corner_height([0.0, 0.0, 0.0], [1.0, 1.0, 0.0], 1.0)
>>> mesh = Mesh.from_shape(box)
>>> k = 2
>>> subd = mesh_subdivide_tri(mesh, k=k)
>>> mesh is subd
False
>>> type(mesh) is type(subd)
True
>>> k1 = sum(len(mesh.face_vertices(fkey)) for fkey in mesh.faces())
>>> subd.number_of_faces() == (k1 if k == 1 else k1 * 3 ** (k - 1))
True
"""
cls = type(mesh)
subd = mesh_fast_copy(mesh)
for _ in range(k):
for fkey in list(subd.faces()):
subd.insert_vertex(fkey)
return cls.from_data(subd.data)
|
54912f1777dc1d508a9ab55289e90bcb28b9586f
| 25,363 |
import cartopy.crs as ccrs
def make_projection(proj_params):
"""
turn a set of proj4 parameters into a cartopy laea projection
introduced in read_resample.ipynb
Parameters
----------
proj_params: dict
dictionary with parameters lat_0, lon_0 datum and ellps
Returns
-------
cartopy projection object
"""
globe_w = ccrs.Globe(datum=proj_params["datum"],ellipse=proj_params['ellps'])
projection_w=ccrs.LambertAzimuthalEqualArea(central_latitude=float(proj_params['lat_0']),
central_longitude= float(proj_params['lon_0']),globe=globe_w)
return projection_w
|
1852115888107b5ae9353dd746d56fb3896c1992
| 25,364 |
import pandas
import logging
def _shape(df):
""" Return DataFrame shape even if is not a Pandas dataframe."""
if type(df) == pandas.DataFrame or type(df) == pandas.Series:
return df.shape
try:
shape = (len(df), len(df.columns))
except Exception as e:
logging.error(e)
raise e
return shape
|
d5af0e3f92ee649091d9fc8b904e60931fb0f2f7
| 25,365 |
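# Usage sketch (not part of the original dataset entry), assuming _shape from
# the entry above is in scope: works for both DataFrame and Series inputs.
import pandas
df = pandas.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
print(_shape(df))       # (3, 2)
print(_shape(df["a"]))  # (3,)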
from typing import List
from pathlib import Path
import pandas as pd
import holoviews as hv
import hvplot
import hvplot.pandas  # registers the .hvplot accessor on DataFrames
def hvplot_line(
df, title, x, y: List[str], output_dir: Path, vlines=None, save_figure=True, **kwargs
):
"""Draw line splot with optional vertical lines.
Example:
hvplot_line(
df,
title=col,
x="time", # This is index name
y=col_name,
vlines=outliers,
output_dir=args.output_dir / "single",
save_figure=True,
width=1500,
height=500,
# by="timestamp.month",
# groupby=["timestamp.year", "timestamp.month"],
)
Args:
df ([type]): Input dataframe
title ([type]): Graph title
x ([type]): Column name for x-axis, can be index's name
y (List[str]): Column name for y-axis
output_dir (Path): Output dir for html files
vlines ([type], optional): Vertiline of interest. Defaults to None.
save_figure (bool, optional): True to save html file. Defaults to True.
Returns:
[type]: [description]
"""
output_dir.mkdir(parents=True, exist_ok=True)
p = df.hvplot(
x=x,
y=y,
title=title,
kind="line",
xlabel="Time",
ylabel="Value",
size=10,
grid=True,
legend=True,
fontsize=15,
rot=45,
**kwargs,
)
if vlines is not None:
for x in vlines:
p = p * hv.VLine(pd.to_datetime(x)).opts(color="red", alpha=0.3)
if save_figure:
hvplot.save(p, output_dir / f"{title}.html")
return p
|
1f7400bc12b492648074b2cb2dc52835cb509a1e
| 25,366 |
import numpy as np
def load_zstack(fn):
"""
Returns zstack, [zmin, zmax]
"""
with open(fn, "rb") as f:
d = np.fromfile(f,dtype=header_dtype,count=1,sep="")
version, shape, zrange = d[0]
zstack = np.fromfile(f,dtype='<f4',sep="").reshape(shape)
return zstack, zrange
|
19fd400b0b341569b448e45d482df264310c9cf7
| 25,367 |
def parseFile(path):
"""
Read sections headed by :SectionName into lists by section name in a dictionary
blank lines, line preceeding and ending whitespace and #Comments are stripped
"""
d={}
currentList=None
f = open(pathPrefix()+path, 'r')
for t in f.readlines():
# Remove comments
i=t.find('#')
if i!=-1:
t=t[:i]
# Strip excess whitespace
t=t.strip()
if len(t)>0:
if t[0]==':':
currentList=[]
d[t[1:]]=currentList
else:
if currentList!=None:
currentList.append(t)
return d
|
db5a73b5a46fc1026df775de994da150d33e3aad
| 25,368 |
def setName(name):
"""
Sets the name of the robot.
This is cleared with a power cycle and displayed on the robot screen during idle times
Name will be shortened to 11 characters
Args:
name (any): Name to set for the robot. Will be cast to a string
Returns:
None
"""
name = str(name)[:11]
return _rc.writeAttribute(OPTYPE.ROBOT_NAME, stringToBytes(name) + [0])
|
5f1e12635df0cf4c95b3ce90e97a51492a477914
| 25,369 |
import tensorflow as tf
def twoBodyCMmom(m_0, m_1, m_2):
"""relative momentum for 0 -> 1 + 2"""
M12S = m_1 + m_2
M12D = m_1 - m_2
if hasattr(M12S, "dtype"):
m_0 = tf.convert_to_tensor(m_0, dtype=M12S.dtype)
# m_eff = tf.where(m_0 > M12S, m_0, M12S)
# p = (m_eff - M12S) * (m_eff + M12S) * (m_eff - M12D) * (m_eff + M12D)
# if p is negative, which results from bad data, the return value is 0.0
# print("p", tf.where(p==0), m_0, m_1, m_2)
p = (m_0 - M12S) * (m_0 + M12S) * (m_0 - M12D) * (m_0 + M12D)
zeros = tf.zeros_like(m_0)
ret = tf.where(p > 0, tf.sqrt(p) / (2 * m_0), zeros)
return ret
|
5b8576dc33a4570f976efc1bab67a432032b02ff
| 25,371 |
def svn_repos_post_commit_hook(*args):
"""svn_repos_post_commit_hook(svn_repos_t repos, apr_pool_t pool) -> char"""
return _repos.svn_repos_post_commit_hook(*args)
|
bbb9eb6fe81e80ef4729791448e5abe1d7b6ab12
| 25,372 |
def slot_selection_is_free(effect):
"""
all slots ar selected when participant applies
"""
activity = effect.instance.activity
return activity.slot_selection == 'free'
|
153efa36dda70de02613201540d97e8f22f2bdd9
| 25,374 |
def get_consumption_tax(amount, tax_rate, decimal_type):
"""消費税を取得する。
:param amount:
:param tax_rate:
:param decimal_type:
:return:
"""
if not amount:
return 0
return get_integer(decimal_type, float(amount) * float(tax_rate))
|
d982d2ebc65770477a9ec06e1305115ecf2eab9e
| 25,375 |
def homology(long_sequence, short_sequence):
"""
Cross-compare to find the strand of long sequence with the highest similarity with the short sequence.
:param long_sequence: str
:param short_sequence: str
:return ans: str, the strand of long sequence with the highest similarity with the short sequence
"""
# number of characters in the long sequence
i = len(long_sequence)
# number of characters in the short sequence
j = len(short_sequence)
# number of the same element between long- and short- sequence in a certain part of the long sequence
max_match = 0
# position where the max_match begins in long sequence
max_match_point = 0
ans = ''
# (i - j + 1) = times needed for cross-comparison
for k in range(i - j + 1):
match = 0
for n in range(j):
# if find the same element in the same position of long- and short- sequence, count one
if short_sequence[n] == long_sequence[n+k]:
match += 1
# find the biggest match, and the start position(k) in long sequence
if match > max_match:
max_match = match
max_match_point = k
# the strand of long sequence with the highest similarity with the short sequence
ans = long_sequence[max_match_point:(max_match_point + j)]
return ans
|
1865e7b60cfce3b1ca4e7884377a5a218ecba96a
| 25,376 |
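# Usage sketch (not part of the original dataset entry), assuming homology from
# the entry above is in scope: returns the best-matching window of the long sequence.
print(homology("CGGATTACAGG", "ATTACA"))  # ATTACA
print(homology("AAAAAA", "AG"))           # AA (first window with the highest match count)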
def oneliner_to_phylip(line):
"""Convert one-liner to phylip format."""
seqs = line.strip(";\n").split(',')
    label_seqs = list(zip(seqs[:-1:2], seqs[1::2]))
taxa_count = len(label_seqs)
seq_length = len(label_seqs[0][1])
# pad all names to length of longest name + 1 space
max_name_length = max([len(val) for val in seqs[:-1:2]]) + 1
# add header
header = "%s %s\n" % (taxa_count, seq_length)
alignment = '\n'.join(['%s%s' % (i[0].ljust(max_name_length), i[1]) for i in label_seqs])
return header + alignment
|
783d9e68172d4d30de44564b88001d30af4d8e45
| 25,377 |
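# Usage sketch (not part of the original dataset entry), assuming the Python 3
# oneliner_to_phylip from the entry above is in scope.
line = "taxonA,ACGTACGT,taxonB,ACGTTCGT;\n"
print(oneliner_to_phylip(line))
# 2 8
# taxonA ACGTACGT
# taxonB ACGTTCGT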
import numpy as np
def get_final_histogram(n_states, logfile, temp):
"""
This function analyzes the log file and performs the following tasks:
1. Output the counts of each lambda state at the last time frame (for plotting histogram)
2. Estimate the uncertainty of free energy difference from the final histogram
    Parameters
----------
n_states : int
Number of lambda states
logfile : str
The filename of the log file
Returns
-------
counts : np.array
The counts of each lambda state
Example
-------
>>> get_final_histogram(40, 'solvent_0.log')
[8678. 8437. 8680. 9007. 8606. 7642. 8269. 7878. 7689. 7906. 7451. 7416.
7939. 7470. 7540. 7858. 7664. 7423. 7527. 7322. 7325. 7538. 7173. 7034.
6943. 6910. 6935. 6805. 6463. 6371. 6249. 6425. 6353. 6618. 6789. 6810.
6426. 6408. 6675. 6271.]
"""
f = open(logfile, 'r')
lines = f.readlines()
f.close()
lines.reverse() # from this point, lines has been reverse
line_n = 0
counts = np.zeros(n_states)
for l in lines:
line_n += 1
if 'MC-lambda information' in l:
for i in range(n_states):
# start from lines[line_n - 3]
counts[i] = float(lines[line_n - 3 - i].split()[5])
break
kb = 1.38064852E-23 # Boltzmann constant
Na = 6.0221409E23 # Avogadro's number
error = np.abs(np.log(counts[0] / counts[-1])) # dimensionless error
if temp is None:
print('The uncertainty of the free energy difference is %5.3f kT.' % error)
temp = 298.15 # default
error *= (kb * Na * temp / 1000) * 0.23900573613
print('Or at 298.15K, the uncertainty is %5.3f kcal/mol' % error)
else:
error *= (kb * Na * float(temp) / 1000) * \
0.23900573613 # unit: kcal/mol
print('The uncertainty of the free energy difference is %5.3f kcal/mol.' % error)
return counts
|
a72ecd4aa6b47e2c8d1368a03ea5b5887abc27c2
| 25,378 |
import logging
def create_sql_delete_stmt(del_list, name):
"""
:param del_list: list of records that need to be formatted in SQL delete statement.
:param name: the name of the table
:return: SQL statement for deleting the specific records
"""
sql_list = ", ".join(del_list)
sql_stmt = f"DELETE FROM method_usage.pandas_{name} WHERE {name}_id IN ({sql_list})"
logging.info(f"{len(del_list)} {name} in delete statement")
return sql_stmt
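# Hedged usage sketch (not part of the original source): deleting three rows
# from the hypothetical method_usage.pandas_dataframe table.
create_sql_delete_stmt(["1", "2", "3"], "dataframe")
# -> "DELETE FROM method_usage.pandas_dataframe WHERE dataframe_id IN (1, 2, 3)"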
|
aec744198f1b0dd30836f431ac51a4080911f8ae
| 25,379 |
def parse_track(trackelement):
"""Extract info from every track entry and output to list."""
print(trackelement)
if trackelement.find('artist').getchildren():
#artist info is nested in loved/banned tracks xml
artistname = trackelement.find('artist').find('name').text
artistmbid = trackelement.find('artist').find('mbid').text
else:
artistname = trackelement.find('artist').text
artistmbid = trackelement.find('artist').get('mbid')
if trackelement.find('album') is None:
#no album info for loved/banned tracks
albumname = ''
albummbid = ''
else:
albumname = trackelement.find('album').text
albummbid = trackelement.find('album').get('mbid')
trackname = trackelement.find('name').text
trackmbid = trackelement.find('mbid').text
date = trackelement.find('date').get('uts')
output = [date, trackname, artistname, albumname, trackmbid, artistmbid, albummbid]
for i, v in enumerate(output):
if v is None:
output[i] = ''
return output
|
e5cd49d765885fd0e701864831f9a2958076ca93
| 25,380 |
from typing import Mapping
def _coord_matrix(model, pos, noutp):
"""
Create an array representing inputs and outputs of a simple model.
The array has a shape (noutp, model.n_inputs).
Parameters
----------
model : `astropy.modeling.Model`
model
pos : str
Position of this model in the expression tree.
One of ['left', 'right'].
noutp : int
Number of outputs of the compound model of which the input model
is a left or right child.
"""
if isinstance(model, Mapping):
axes = []
for i in model.mapping:
axis = np.zeros((model.n_inputs,))
axis[i] = 1
axes.append(axis)
m = np.vstack(axes)
mat = np.zeros((noutp, model.n_inputs))
if pos == 'left':
mat[: model.n_outputs, :model.n_inputs] = m
else:
mat[-model.n_outputs:, -model.n_inputs:] = m
return mat
if not model.separable:
# this does not work for more than 2 coordinates
mat = np.zeros((noutp, model.n_inputs))
if pos == 'left':
mat[:model.n_outputs, : model.n_inputs] = 1
else:
mat[-model.n_outputs:, -model.n_inputs:] = 1
else:
mat = np.zeros((noutp, model.n_inputs))
for i in range(model.n_inputs):
mat[i, i] = 1
if pos == 'right':
mat = np.roll(mat, (noutp - model.n_outputs))
return mat
|
841b2ba8df26e424f2fcafc0d3180c3409078896
| 25,381 |
def showp2rev(context, mapping):
"""Integer. The repository-local revision number of the changeset's
second parent, or -1 if the changeset has no second parent."""
ctx = context.resource(mapping, 'ctx')
return ctx.p2().rev()
|
854225fca900e5e46ecd18efeab9fc8d3e1f9168
| 25,382 |
from typing import Optional
def puan_kam(text: str = 'สวัสดี',
first: Optional[bool] = None,
keep_tone: Optional[bool] = None,
all: Optional[bool] = False,
skip_tokenize: Optional[bool] = None):
"""Puan kum (ผวนคำ) is a Thai toung twister, This API convert string into kampuan
Play around with the options to see different results.
-Args:
- **text** (str): Defaults to 'สวัสดี'.
        - input string 'ไปเที่ยว' -> auto tokenization is applied, splitting it into ไป and เที่ยว
        - list of strings, accepted in 3 formats: ['ไป','กิน','ข้าว'] | 'ไป','กิน','ข้าว' | ไป,กิน,ข้าว; list input also skips auto tokenization.
    - **first** (bool, optional): if True, puan the first word together with the last word; otherwise the second word and the last word are used
    (None will let us decide). Defaults to None.
    - **keep_tone** (bool, optional): force whether to keep the tone when doing the puan (None will let us decide). Defaults to None.
    - **all** (bool, optional): if True, provide all 4 puan results. Defaults to False.
    - **skip_tokenize** (bool, optional): if True, skip tokenization and use the user-provided list of words (pure string input forces this to False, i.e. tokenization is not skipped). Defaults to None.
-Returns:
- **results**: List of คำผวน
"""
if not check_thai_ch(text):
raise HTTPException(400, detail=f'Input contains non Thai')
text = process_text_2_list(text)
try:
split_words = kp.puan_kam_preprocess(text, skip_tokenize=skip_tokenize)
except ValueError:
try:
split_words = kp.puan_kam_preprocess(
text, skip_tokenize=not(skip_tokenize))
except ValueError as e:
raise HTTPException(422, detail=f'Input error: {e}')
if all is not None and all:
return {'input': text,
'results': kp.puan_kam_all(text=split_words)}
else:
if first is None and keep_tone is None:
return {'input': text,
'results': kp.puan_kam(text=split_words)}
else:
return {'input': text,
'results': kp.puan_kam_base(text=split_words, keep_tone=keep_tone, use_first=first)}
|
5b49aab2437a906ea23122722a45bb472011bbbb
| 25,383 |
import wx
def get_app_wx(*args, **kwargs):
"""Create a new wx app or return an exiting one."""
app = wx.GetApp()
if app is None:
if 'redirect' not in kwargs:
kwargs['redirect'] = False
app = wx.PySimpleApp(*args, **kwargs)
return app
|
ad4f79e57562e199833d0d948934ec6e9211eea4
| 25,385 |
def do_part_1():
"""
Solve the puzzle.
"""
data = input_lines(1)
total = 0
for line in data:
val, op = interpret_line(line)
total = op(total, val)
print(total)
return total
|
af8c96a3963bf2732b0281a2b425da73a1ab26e5
| 25,386 |
import grpc
from functools import wraps
def handle_rpc_errors(fnc):
"""Decorator to add more context to RPC errors"""
@wraps(fnc)
def wrapper(*args, **kwargs):
try:
return fnc(*args, **kwargs)
except grpc.RpcError as exc:
# lnd might be active, but not possible to contact
# using RPC if the wallet is encrypted. If we get
# an rpc error code Unimplemented, it means that lnd is
# running, but the RPC server is not active yet (only
# WalletUnlocker server active) and most likely this
# is because of an encrypted wallet.
exc.code().value
exc.details()
if exc.code() == grpc.StatusCode.UNIMPLEMENTED:
# raise WalletEncryptedError from None
print("unimplemented")
raise exc
elif exc.code() == grpc.StatusCode.UNAVAILABLE:
print("UNAVAILABLE")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.UNKNOWN and exc.details() == "wallet locked, unlock it to enable full RPC access":
print("WALLET IS LOCKED!")
raise exc
elif exc.code() == grpc.StatusCode.UNKNOWN:
print("unknown")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.NOT_FOUND:
print("NOT FOUND")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.PERMISSION_DENIED:
print("PERMISSION_DENIED")
print(f"ERROR MESSAGE: {exc.details()}")
else:
raise exc
return exc
except Exception as exc:
print("unknown exception")
print(exc)
return wrapper
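# Hedged usage sketch (not part of the original source): `stub` and `ln` are
# assumed to come from lnd's generated gRPC bindings; any grpc.RpcError raised
# by the call is intercepted and reported by the wrapper above.
@handle_rpc_errors
def get_node_info(stub):
    return stub.GetInfo(ln.GetInfoRequest())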
|
4f1cb19918fa5410bef4f14540a35c9f75d113bd
| 25,387 |
import ast
def _check_BoolOp_expr(boolop, t, env):
"""Boolean Operations."""
assert boolop.__class__ is ast.BoolOp
op = boolop.op
es = boolop.values
assert op.__class__ in bool_ops, "%s not in bool ops" % cname(op)
# (BoolOp) assignment rule.
return all(check_expr(e, t, env) for e in es)
|
528f55894a60ec8f781e5600a82f32697382e4bb
| 25,388 |
def get_matproj(dbpath, cutoff, api_key, dataset_properties):
"""
Args:
dbpath (str): path to the local database
cutoff (float): cutoff radius
api_key (str): personal api_key for materialsproject.org
dataset_properties (list): properties of the dataset
Returns:
AtomsData object
"""
return MaterialsProject(dbpath, cutoff, api_key,
properties=dataset_properties)
|
d40b3578a5ac315e1b4d5180113d517d88b5305b
| 25,389 |
def parse_change_values_from_opts(opts):
"""
Convert optparse style options into a dictionary for changing.
:param opts: optparse style options
:returns: a dictonary with change values to filter devices,
supported parameters are ip, port, replication_ip,
replication_port
"""
change_values = {}
for key in ('change_ip', 'change_port', 'change_replication_ip',
'change_replication_port', 'change_device', 'change_meta'):
value = getattr(opts, key, None)
if value:
if key == 'change_ip' or key == 'change_replication_ip':
value = validate_and_normalize_address(value)
change_values[key.replace('change_', '')] = value
return change_values
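# Hedged usage sketch (not part of the original source): optparse-style options
# with only port/device changes, so swift's address validator is not exercised.
from optparse import Values
opts = Values({'change_port': '6001', 'change_device': 'sdb1'})
parse_change_values_from_opts(opts)
# -> {'port': '6001', 'device': 'sdb1'}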
|
d8168163c308b510f12a5cd2baf9dd800b80912a
| 25,390 |
def xr_linear_trends_2D(da, dim_names, with_nans=False):
""" calculate linear trend of 2D field in time
! slow, use xr_2D_trends instead
input:
da .. 3D xr DataArray with (dim_names) dimensions
dim_names .. tuple of 2 strings: e.g. lat, lon dimension names
output:
da_trend .. slope of linear regression
"""
if type(da.time.values[0]) in [np.datetime64, cftime._cftime.Datetime360Day]:
x, time_ = datetime_to_float(da)
# time_to_float = True
def xr_linear_trend_with_nans(x):
""" function to compute a linear trend coefficient of a timeseries """
if np.isnan(x).any():
x = x.dropna(dim='time')
if x.size>1:
pf = np.polynomial.polynomial.polyfit(x.time, x, 1)
else:
pf = np.array([np.nan, np.nan])
else:
pf = np.polynomial.polynomial.polyfit(x.time, x, 1)
return xr.DataArray(pf[1])
(dim1, dim2) = dim_names
# stack lat and lon into a single dimension called allpoints
stacked = da.stack(allpoints=[dim1, dim2])
# apply the function over allpoints to calculate the trend at each point
if with_nans==False:
trend = stacked.groupby('allpoints').apply(xr_linear_trend)
# unstack back to lat lon coordinates
da_trend = trend.unstack('allpoints')
if with_nans==True:
trend = stacked.groupby('allpoints').apply(xr_linear_trend_with_nans)
# unstack back to lat lon coordinates
da_trend = trend.unstack('allpoints')
# if time_to_float: da_trend.time.values = time_
# print(da_trend)
if 'allpoints_level_0' in da_trend.coords.keys():
da_trend = da_trend.rename({'allpoints_level_0':dim1, 'allpoints_level_1':dim2})
return da_trend
|
ba948729cd8e8bde037ad91bde788443cd7eb54c
| 25,392 |
def sanitize_email(email):
"""
Returns an e-mail address in lower-case and strip leading and trailing
whitespaces.
>>> sanitize_email(' [email protected] ')
'[email protected]'
"""
return email.lower().strip()
|
b99e9c38db4fe889e1d0a9175d6535c4790f2f43
| 25,393 |
import numba
def draw_perm_reps(data_1, data_2, func, size=1, args=()):
"""
Generate permutation replicates of `func` from `data_1` and
`data_2`
Parameters
----------
data_1 : array_like
One-dimensional array of data.
data_2 : array_like
One-dimensional array of data.
func : function
Function, with call signature `func(x, y, *args)` to compute
replicate statistic from permutation sample. It must return
a single, scalar value.
size : int, default 1
Number of pairs bootstrap replicates to draw.
args : tuple, default ()
Arguments to be passed to `func`.
Returns
-------
output : ndarray
Permutation replicates.
"""
# Convert to Numpy arrays
data_1 = utils._convert_data(data_1)
data_2 = utils._convert_data(data_2)
if args == ():
if func == diff_of_means:
return _draw_perm_reps_diff_of_means(data_1, data_2, size=size)
elif func == studentized_diff_of_means:
if len(data_1) == 1 or len(data_2) == 1:
raise RuntimeError("Data sets must have at least two entries")
return _draw_perm_reps_studentized_diff_of_means(data_1, data_2, size=size)
# Make a Numba'd function for drawing reps.
f, numba_success = utils._make_two_arg_numba_func(func, args)
if numba_success:
jit = numba.jit
else:
jit = utils._dummy_jit
@jit(nopython=True)
def _draw_perm_reps(data_1, data_2):
n1 = len(data_1)
x = np.concatenate((data_1, data_2))
perm_reps = np.empty(size)
for i in range(size):
np.random.shuffle(x)
perm_reps[i] = f(x[:n1], x[n1:], args)
return perm_reps
return _draw_perm_reps(data_1, data_2)
|
6a5e46e39ace1815297812fbe17acd7db7fb89db
| 25,394 |
def get_attachment_form(parser, token):
"""
Get a (new) form object to upload a new attachment
Syntax::
{% get_attachment_form for [object] as [varname] %}
        {% get_attachment_form for [app].[model] [object_id] as [varname] %}
"""
return AttachmentFormNode.handle_token(parser, token)
|
5ae8c049eef6618f358755e5363520a3bc126780
| 25,396 |
def greet(name):
"""Greet message, formatted differently for johnny."""
if name == "Johnny":
return "Hello, my love!"
return "Hello, {name}!".format(name=name)
|
86efdaccd65a870fd80e402491e9468669cdcd40
| 25,397 |
import numpy
def get_naca_points(naca_digits, number_of_points=100,
sharp_trailing_edge=True,
abscissa_map=lambda x: 0.03*x+0.97*x**2,
verbose=False):
"""
Return a list of coordinates of NACA 4-digit and 5-digit series
airfoils.
"""
if verbose:
def explain(*s):
print(" ".join(str(s_i) for s_i in s))
else:
def explain(*s):
pass
explain("Airfoil: NACA-%s" % naca_digits)
if sharp_trailing_edge:
explain("Sharp trailing edge")
edge_coeff = 0.1036
else:
explain("Blunt trailing edge")
edge_coeff = 0.1015
raw_abscissae = numpy.linspace(0, 1, number_of_points, endpoint=True)
abscissae = numpy.empty_like(raw_abscissae)
for i in range(number_of_points):
abscissae[i] = abscissa_map(raw_abscissae[i])
digits_int = int(naca_digits)
if len(naca_digits) == 4:
thickness = (digits_int % 100)
max_camber_pos = (digits_int % 1000) - thickness
max_camber = (digits_int % 10000) - max_camber_pos - thickness
thickness = thickness / 1e2
max_camber_pos = max_camber_pos / 1e3
max_camber = max_camber / 1e5
explain("Thickness:", thickness)
explain("Position of maximum camber:", max_camber_pos)
explain("Maximum camber:", max_camber)
if max_camber == 0 and max_camber_pos == 0:
explain("Symmetric 4-digit airfoil")
points = FourDigitsSymmetric(thickness, edge_coeff)
elif max_camber != 0 and max_camber_pos != 0:
explain("Cambered 4-digit airfoil")
points = FourDigitsCambered(thickness, max_camber,
max_camber_pos, edge_coeff)
else:
raise NotImplementedError(
"You must decide whether your airfoil shall be cambered or not!")
elif len(naca_digits) == 5:
thickness = (digits_int % 100)
max_camber_pos = (digits_int % 10000) - thickness
thickness = thickness / 1e2
max_camber_pos = max_camber_pos / 2e4
explain("Thickness:", thickness)
explain("Position of maximum camber:", max_camber_pos)
identifier = digits_int // 100
if identifier == 210:
m = 0.058
k1 = 361.4
elif identifier == 220:
m = 0.126
k1 = 51.64
elif identifier == 230:
m = 0.2025
k1 = 15.957
elif identifier == 240:
m = 0.29
k1 = 6.643
elif identifier == 250:
m = 0.391
k1 = 3.23
else:
raise NotImplementedError("5-digit series only implemented for "
"the first three digits in 210, 220, 230, 240, 250!")
explain("5-digit airfoil")
points = FiveDigits(thickness, m, k1, edge_coeff)
else:
raise NotImplementedError(
"Only the 4-digit and 5-digit series are implemented!")
points_upper = numpy.zeros((len(abscissae), 2))
points_lower = numpy.zeros((len(abscissae), 2))
for i in range(len(abscissae)):
points_upper[i] = points(abscissae[i], "upper")
points_lower[i] = points(abscissae[i], "lower")
if sharp_trailing_edge:
return list(points_upper)[1:-1] + list(points_lower[::-1])
else:
return list(points_upper)[1:] + list(points_lower[::-1])
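# Hedged usage sketch (not part of the original source): assumes the
# FourDigits*/FiveDigits helper classes referenced above are importable from
# the same module. NACA 2412 -> 2% camber at 40% chord, 12% thickness.
pts = get_naca_points("2412", number_of_points=120, verbose=True)
xs, ys = zip(*pts)  # chord-normalized airfoil coordinates, upper then lower surface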
|
ec7f4b2c0104639e727febc6ea1a0ab0f1575f9e
| 25,398 |
def get_request_now():
"""
When constructing the SOAP request, the timestamps have to be naive but with localtime values.
E.g. if the current offset is utc+1 and the utc now is 2016/03/30 0:00, the SOAP endpoint expects 2016/03/30 1:00
without tzinfo. That's pretty ugly but ¯\_(ツ)_/¯
In order to do that, this function gets the utc value, translates it into a local one and makes it naive by
deleting the tzinfo.
"""
now = timezone.localtime(timezone.now())
return timezone.make_naive(now)
|
8563eeb9757a7d09b7a0c3253e64cb30443aaa55
| 25,399 |
def _make_decorator(
obj: Wrappable,
to_wrap: tp.Iterable[str]
) -> tp.Callable[[tp.Type[WrapperInjector]], tp.Type[WrapperInjector]]:
"""Makes the decorator function to use for wrapping.
Parameters
----------
obj : :obj:`ModuleType`, :obj:`type` or :obj:`object`
The source object to wrap the `to_wrap` attributes of.
to_wrap : Iterable[str]
The names of the attributes of `obj` to wrap.
Returns
-------
Callable[[Type[WrapperInjector]], Type[WrapperInjector]]
The decorator to use for wrapping a new :obj:`WrapperInjector`
class.
"""
def _wrapper(cls: tp.Type[WrapperInjector]) -> tp.Type[WrapperInjector]:
cls.__wrapped__ = tuple(to_wrap)
        wrapped_attrs = {x: getattr(obj, x) for x in to_wrap}  # don't rebind the closed-over name
        for k, v in wrapped_attrs.items():
if isinstance(v, FunctionType):
setattr(cls, k, cls.__wrap_function(v))
else:
setattr(cls, k, cls.__wrap_method(v))
return cls
return _wrapper
|
33ab2d7670f518f15c55ebafcde3667447c73c4d
| 25,400 |
import numbers
import warnings
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
default_weights = False
else:
default_weights = (class_weight is None)
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
daal_ready = use_daal and solver in ['lbfgs', 'newton-cg'] and not sparse.issparse(X)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if daal_ready:
w0 = np.zeros(n_features + 1, dtype=X.dtype)
y_bin[~mask] = 0.
else:
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
else:
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if solver not in ['sag', 'saga']:
if daal_ready:
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
else:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
if daal_ready:
w0 = np.zeros((classes.size, n_features + 1),
order='C', dtype=X.dtype)
else:
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
if daal_ready:
w0[-coef.size:] = np.roll(coef, 1, -1) if coef.size != n_features else coef
else:
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if daal_ready:
w0[:, -coef.shape[1]:] = np.roll(coef, 1, -1) if coef.shape[1] != n_features else coef
else:
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
C_daal_multiplier = 1
# commented out because this is Py3 feature
#def _map_to_binary_logistic_regression():
# nonlocal C_daal_multiplier
# nonlocal w0
# C_daal_multiplier = 2
# w0 *= 2
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
if daal_ready and classes.size == 2:
w0_saved = w0
w0 = w0[-1:, :]
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_and_grad
else:
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
if daal_ready:
func = _daal4py_loss_and_grad
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
if daal_ready:
daal_extra_args_func = _daal4py_logistic_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
if daal_ready:
extra_args = daal_extra_args_func(classes.size, w0, X, target, 0., 0.5 / C / C_daal_multiplier,
fit_intercept, value=True, gradient=True, hessian=False)
else:
extra_args = (X, target, 1. / C, sample_weight)
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=extra_args,
iprint=iprint, pgtol=tol, maxiter=max_iter)
if daal_ready and C_daal_multiplier == 2:
w0 *= 0.5
if info["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.", ConvergenceWarning)
# In scipy <= 1.0.0, nit may exceed maxiter.
# See https://github.com/scipy/scipy/issues/7854.
n_iter_i = min(info['nit'], max_iter)
elif solver == 'newton-cg':
if daal_ready:
def make_ncg_funcs(f, value=False, gradient=False, hessian=False):
daal_penaltyL2 = 0.5 / C / C_daal_multiplier
_obj_, X_, y_, n_samples = daal_extra_args_func(
classes.size, w0, X, target, 0., daal_penaltyL2, fit_intercept,
value=value, gradient=gradient, hessian=hessian)
_func_ = lambda x, *args: f(x, _obj_, *args)
return _func_, (X_, y_, n_samples, daal_penaltyL2)
loss_func, extra_args = make_ncg_funcs(func, value=True)
grad_func, _ = make_ncg_funcs(grad, gradient=True)
grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
w0, n_iter_i = _newton_cg(grad_hess_func, loss_func, grad_func, w0, args=extra_args,
maxiter=max_iter, tol=tol)
else:
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
if penalty == 'l1':
alpha = 0.
beta = 1. / C
else:
alpha = 1. / C
beta = 0.
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
if daal_ready:
if classes.size == 2:
multi_w0 = w0[np.newaxis, :]
else:
multi_w0 = np.reshape(w0, (classes.size, -1))
else:
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(np.require(multi_w0, requirements='O'))
else:
coefs.append(np.require(w0, requirements='O'))
n_iter[i] = n_iter_i
if daal_ready:
if fit_intercept:
for i, ci in enumerate(coefs):
coefs[i] = np.roll(ci, -1, -1)
else:
for i, ci in enumerate(coefs):
coefs[i] = np.delete(ci, 0, axis=-1)
return coefs, np.array(Cs), n_iter
|
8f42d708532d255f0d61f7e695eab778b5aad99c
| 25,401 |
def merge_labels_below_minsize(labels: np.array,
min_size: int,
connectivity: int = 8) -> np.array:
"""
Takes labels below min_size and merges a label with a connected neighbor
(with respect to the connectivity description). Ignores label 0 as
background.
Parameters
----------
labels : np.array
2d label array. Assumes 0 is background and ignores.
min_size : int
Keeps only segments of at least this size.
connectivity : int
4 or 8 connectivity accepted. Default 8. If
`apply_mask_buffer` was used to compute distance,
then connectivity must be 8.
See: https://en.wikipedia.org/wiki/Pixel_connectivity
Returns
-------
np.array:
Updated 2d label array
Note
----
    Does not recursively update sizes; it simply assigns a label to a
    neighbor based on the initial sizes.
"""
size_features = get_superpixel_area_as_features(labels)
unique_labels = np.arange(0, labels.max() + 1)
labels_to_merge = list(unique_labels[size_features.ravel() < min_size])
neighbor_dict = get_RAG_neighbors(labels,
label_subset=labels_to_merge,
connectivity=connectivity)
def merger(label_arr):
label = label_arr[0]
neighbors = neighbor_dict.get(label)
# Do nothing if label is background or doesn't meet size criterion.
if (label == 0) or (label not in labels_to_merge):
return label
if len(neighbors) > 0:
return neighbors[0]
# If neighbor is isolated then assign it to background
else:
return 0
label_features = apply_func_to_superpixels(merger,
labels,
labels, dtype=int)
labels = get_array_from_features(labels, label_features)
labels, _, _ = relabel_sequential(labels)
return labels
|
9d9af5e1ddcc288149304d8138f8076dccab1ad8
| 25,402 |
def custom_cnn_model(config, labels, model_weights=None):
"""
Convolutional Neural network architecture based on 'Photonic Human Identification based on
Deep Learning of Back Scattered Laser Speckle Patterns' paper.
    :param config: Configuration list of model hyper & learning params
:param labels: List of data labels
:param model_weights: Weights of pre-trained model
:return: CNN model
"""
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(config[1], config[2], input_shape=(config[0], config[0], 1)),
tf.keras.layers.MaxPooling2D(pool_size=(config[3], config[3])),
tf.keras.layers.Dropout(config[4]),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config[5]),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation(activation=tf.nn.relu),
tf.keras.layers.Dropout(config[6]),
tf.keras.layers.Dense(config[7]),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation(activation=tf.nn.relu),
tf.keras.layers.Dropout(config[8]),
tf.keras.layers.Dense(len(labels), activation=tf.nn.softmax)
])
if model_weights is not None:
print('loading pre-trained model')
model.load_weights(model_weights, by_name=True)
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
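# Hedged usage sketch (not part of the original source): a hypothetical
# hyper-parameter list matching the indices used above --
# [input_size, conv_filters, kernel_size, pool_size, dropout1,
#  dense1_units, dropout2, dense2_units, dropout3].
config = [64, 32, 3, 2, 0.25, 128, 0.5, 64, 0.5]
labels = ["subject_a", "subject_b", "subject_c"]
model = custom_cnn_model(config, labels)
model.summary()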
|
1440c1ac911c4fc4dce06e10ced5b94ae97f407b
| 25,403 |
def check_day_crossover(tRxSeconds, tTxSeconds):
"""
Checks time propagation time for day crossover
:param tRxSeconds: received time in seconds of week
:param tTxSeconds: transmitted time in seconds of week
:return: corrected propagation time
"""
tau = tRxSeconds - tTxSeconds
if tau > DAYSEC / 2:
del_sec = round(tau/DAYSEC)*DAYSEC
rho_sec = tau - del_sec
if rho_sec > 10:
tau = 0.0
else:
tau = rho_sec
return tau
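# Hedged worked example (not part of the original source): assumes
# DAYSEC = 86400 seconds per day. A received time-of-week one day ahead of
# the transmit time is folded back to the ~0.07 s propagation delay.
DAYSEC = 86400
print(check_day_crossover(86400.07, 0.0))  # -> ~0.07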
|
f44b5ef90e130fbcbc741f79bea69ee79f55a574
| 25,404 |
def matrix2xyx_extrinsic(rotation_matrices: np.ndarray) -> np.ndarray:
"""
Rx(k3) @ Ry(k2) @ Rx(k1) = [[c2, s1s2, c1s2],
[s2s3, -s1c2s3+c1c3, -c1c2s3-s1c3],
[-s2c3, s1c2c3+c1s3, c1c2c3-s1s3]]
"""
rotation_matrices = rotation_matrices.reshape((-1, 3, 3))
angles_radians = np.zeros((rotation_matrices.shape[0], 3))
# Angle 2 can be taken directly from matrices
angles_radians[:, 1] = np.arccos(rotation_matrices[:, 0, 0])
# Gimbal lock case (s2 = 0)
tolerance = 1e-4
# Find indices where this is the case
gimbal_idx = np.abs(rotation_matrices[:, 0, 2]) < tolerance
# Calculate angle 1 and set angle 3 = 0 for those indices
r23 = rotation_matrices[gimbal_idx, 1, 2]
r22 = rotation_matrices[gimbal_idx, 1, 1]
angles_radians[gimbal_idx, 0] = np.arctan2(-r23, r22)
angles_radians[gimbal_idx, 2] = 0
# Normal case (s2 > 0)
idx = np.invert(gimbal_idx)
r12 = rotation_matrices[idx, 0, 1]
r13 = rotation_matrices[idx, 0, 2]
r21 = rotation_matrices[idx, 1, 0]
r31 = rotation_matrices[idx, 2, 0]
angles_radians[idx, 0] = np.arctan2(r12, r13)
angles_radians[idx, 2] = np.arctan2(r21, -r31)
# convert to degrees
euler_angles = np.rad2deg(angles_radians)
return euler_angles
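# Hedged round-trip check (not part of the original source), building the
# matrix directly from the Rx(k3) @ Ry(k2) @ Rx(k1) layout documented above.
def _rx(a):
    return np.array([[1, 0, 0],
                     [0, np.cos(a), -np.sin(a)],
                     [0, np.sin(a), np.cos(a)]])
def _ry(a):
    return np.array([[np.cos(a), 0, np.sin(a)],
                     [0, 1, 0],
                     [-np.sin(a), 0, np.cos(a)]])
k1, k2, k3 = np.deg2rad([30.0, 45.0, 60.0])
m = _rx(k3) @ _ry(k2) @ _rx(k1)
print(matrix2xyx_extrinsic(m))  # -> approximately [[30., 45., 60.]]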
|
74185b505e54239128e4b22eb709e1d08f50b206
| 25,405 |
def _merge_low_rank_eigendecomposition(S1, V1, S2, V2, rank=None):
"""Private helper function for merging SVD based low rank approximations.
Given factors S1, V1 and S2, V2 of shapes [K1], [M, K1] and [K2], [M, K2]
respectively of singular value decompositions
A1 = U1 @ np.diag(S1) @ V1.T
A2 = U2 @ np.diag(S2) @ V2.T
merge them into factors S, V of shape [K], [M, K] of an approximate
decomposition A = U @ np.diag(S) @ V.T, where A is the concatenation of A1
and A2 along the first axis. This is done without the need of calculating
U1, U2, and U.
This is useful for merging eigendecompositions V @ np.diag(S**2) @ V.T of
autocorrelation (or similarly covariance) matrices A.T @ A that do not
    require U. Truncated singular value decompositions can be used for
    merging low rank approximations.
Parameters
----------
S1 : array
Singular values of first matrix.
V1 : array
Factor of the singular value decomposition of first matrix.
S2 : array
Singular values of second matrix.
V2 : array
Factor of the singular value decomposition of second matrix.
rank : int
Number of singular values to keep after merging. If set to `None`
no truncation will be done, thus rank will equal the sum of
singular values given in S1 and S2.
Returns
-------
S : array
(Truncated) singular values of the singular value decomposition of
concatenated matrix.
V : array
Factor of the singular value decomposition of concatenated matrix.
Notes
-----
The algorithm for combining SVD based low rank approximations is
described in more detail in [1]_.
References
----------
.. [1] Radim, Rehurek,
"Scalability of Semantic Analysis in Natural Language Processing",
2011.
"""
rank1, rank2 = S1.size, S2.size
if not rank or rank > rank1 + rank2:
rank = rank1 + rank2
if rank > min(V1.shape[0], V2.shape[0]):
rank = min(V1.shape[0], V2.shape[0])
Z = np.matmul(V1.T, V2)
Q, R = np.linalg.qr(V2 - np.matmul(V1, Z), mode="reduced")
Zfill = np.zeros([rank2, rank1])
B = np.concatenate(
[
np.concatenate([np.diag(S1), np.matmul(Z, np.diag(S2))], axis=1),
np.concatenate([Zfill, np.matmul(R, np.diag(S2))], axis=1),
],
axis=0,
)
U, S, VT = _truncated_svd(B, rank=rank)
V = np.matmul(V1, U[:rank1, :]) + np.matmul(Q, U[rank1:, :])
return S, V
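# Hedged usage sketch (not part of the original source): merges rank-5
# factors of two row-blocks; assumes this module's _truncated_svd helper is
# available next to the function above.
rng = np.random.default_rng(0)
A1 = rng.normal(size=(100, 20))
A2 = rng.normal(size=(150, 20))
_, S1, V1T = np.linalg.svd(A1, full_matrices=False)
_, S2, V2T = np.linalg.svd(A2, full_matrices=False)
S, V = _merge_low_rank_eigendecomposition(S1[:5], V1T.T[:, :5],
                                           S2[:5], V2T.T[:, :5], rank=5)
# S, V approximate the top-5 right singular structure of np.vstack([A1, A2]).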
|
9a04fb922e87b78ee217890c0094f3cdd8690f60
| 25,406 |
def usgs(path):
"""Reads USGS-formatted ASCII files.
Reads the ascii format spectral data from USGS and returns an object with the mean
and +/- standard deviation. Reference: https://www.sciencebase.gov/catalog/item/5807a2a2e4b0841e59e3a18d
Args:
        path: file path to the USGS spectra text file.
Returns:
s: an earthlib spectralObject with the USGS reflectance data.
"""
# open the file and read header info
with open(path, "r") as f:
x_start = "gibberish"
for line in f:
if x_start in line:
break
if "Name:" in line:
spectrum_name = line.strip().split("Name:")[-1].strip()
if "X Units:" in line:
band_unit = line.strip().split()
band_unit = band_unit[-1].strip("()").capitalize()
if "Y Units:" in line:
refl_unit = line.strip().split()
refl_unit = refl_unit[-1].strip("()").capitalize()
if "First X Value:" in line:
x_start = line.strip().split()[-1]
if "Number of X Values:" in line:
n_values = int(line.strip().split()[-1])
# now that we got our header info, create the arrays
band_centers = _np.empty(n_values)
reflectance = _np.empty(n_values)
line = line.strip().split()
band_centers[0] = float(line[0])
reflectance[0] = float(line[1])
# resume reading through file
i = 1
for line in f:
line = line.strip().split()
band_centers[i] = float(line[0])
reflectance[i] = float(line[1])
i += 1
# some files read last -> first wavelength
if band_centers[0] > band_centers[-1]:
band_centers = band_centers[::-1]
        reflectance = reflectance[::-1]  # keep reflectance aligned with the reversed band centers
# convert units to nanometers and scale 0-1
if band_unit.lower() == "micrometers":
band_centers *= 1000.0
band_unit = "Nanometers"
if refl_unit.lower() == "percent":
reflectance /= 100.0
# create the spectral object
s = spectralObject(
1,
n_values,
band_centers=band_centers,
band_unit=band_unit,
band_quantity="Wavelength",
)
# assign relevant values
s.spectra[0] = reflectance
if spectrum_name:
s.names[0] = spectrum_name
return s
|
329df0fe919cf126ae363f384619c8fc5419b073
| 25,407 |