content stringlengths 35-762k | sha1 stringlengths 40 | id int64 0-3.66M |
---|---|---|
def fbx_mat_properties_from_texture(tex):
"""
    Returns a set of FBX material properties that are affected by the given texture.
    Quite obviously, this is a fuzzy and far-from-perfect mapping! Amounts of influence, for example, are completely lost.
Note tex is actually expected to be a texture slot.
"""
# Mapping Blender -> FBX (blend_use_name, blend_fact_name, fbx_name).
blend_to_fbx = (
# Lambert & Phong...
("diffuse", "diffuse", b"DiffuseFactor"),
("color_diffuse", "diffuse_color", b"DiffuseColor"),
("alpha", "alpha", b"TransparencyFactor"),
("diffuse", "diffuse", b"TransparentColor"), # Uses diffuse color in Blender!
("emit", "emit", b"EmissiveFactor"),
("diffuse", "diffuse", b"EmissiveColor"), # Uses diffuse color in Blender!
("ambient", "ambient", b"AmbientFactor"),
# ("", "", b"AmbientColor"), # World stuff in Blender, for now ignore...
("normal", "normal", b"NormalMap"),
# Note: unsure about those... :/
# ("", "", b"Bump"),
# ("", "", b"BumpFactor"),
# ("", "", b"DisplacementColor"),
# ("", "", b"DisplacementFactor"),
# Phong only.
("specular", "specular", b"SpecularFactor"),
("color_spec", "specular_color", b"SpecularColor"),
# See Material template about those two!
("hardness", "hardness", b"Shininess"),
("hardness", "hardness", b"ShininessExponent"),
("mirror", "mirror", b"ReflectionColor"),
("raymir", "raymir", b"ReflectionFactor"),
)
tex_fbx_props = set()
for use_map_name, name_factor, fbx_prop_name in blend_to_fbx:
# Always export enabled textures, even if they have a null influence...
if getattr(tex, "use_map_" + use_map_name):
tex_fbx_props.add(fbx_prop_name)
return tex_fbx_props
|
363c9f60084a55aa8d9c01c2f06d4d30d5e45993
| 29,590 |
import h5py
import numpy as np
def get_from_STEAD(key=None,
h5file_path='/mnt/GPT_disk/DL_datasets/STEAD/waveforms.hdf5'):
"""
Input:
key, h5file_path
Output:
data, p_t, s_t
"""
HDF5 = h5py.File(h5file_path, 'r')
if key.split('_')[-1] == 'EV':
dataset = HDF5.get('earthquake/local/'+str(key))
p_t = int(dataset.attrs['p_arrival_sample'])
s_t = int(dataset.attrs['s_arrival_sample'])
elif key.split('_')[-1] == 'NO':
dataset = HDF5.get('non_earthquake/noise/'+str(key))
p_t = None
s_t = None
data = np.array(dataset).astype(np.float32)
return data, p_t, s_t
|
9bab2db49eab81abe72cb27e86d3cdf787c4e902
| 29,591 |
import traceback
def _safeFormat(formatter, o):
"""
Helper function for L{safe_repr} and L{safe_str}.
"""
try:
return formatter(o)
except:
io = NativeStringIO()
traceback.print_exc(file=io)
className = _determineClassName(o)
tbValue = io.getvalue()
return "<%s instance at 0x%x with %s error:\n %s>" % (
className, id(o), formatter.__name__, tbValue)
|
610e8063fa91d211e749be829c2d562fa1b86ea6
| 29,592 |
import datetime
import pytz
def now_func():
"""Return current datetime
"""
func = get_now_func()
dt = func()
if isinstance(dt, datetime.datetime):
if dt.tzinfo is None:
return dt.replace(tzinfo=pytz.utc)
return dt
|
c715be9fde2d245c79536d792b775678bc743aaa
| 29,594 |
import itertools
def flatten_search_result(search_result):
"""
Converts all nested objects from the provided search result into non-nested `field->field-value` dicts.
Raw values (such as memory size, timestamps and durations) are transformed into easy-to-read values.
:param search_result: result to flatten
:return: the flattened result
"""
return list(
itertools.chain.from_iterable(
map(
lambda result_entry: transform_definition_result(
definition_id=result_entry[0],
definition_result=result_entry[1]
),
search_result['definitions'].items()
)
)
)
|
380b244bcee0d968532db512b6bf79cc062ef962
| 29,595 |
def xroot(x, mu):
"""The equation of which we must find the root."""
return -x + (mu * (-1 + mu + x))/abs(-1 + mu + x)**3 - ((-1 + mu)*(mu + x))/abs(mu + x)**3
|
5db07cc197f1bc4818c4591597099cd697576df2
| 29,597 |
import random
def spliter(data_dict, ratio=[6, 1, 1], shuffle=True):
"""split dict dataset into train, valid and tests set
Args:
data_dict (dict): dataset in dict
ratio (list): list of ratio for train, valid and tests split
shuffle (bool): shuffle or not
"""
if len(ratio) != 3:
        raise ValueError('ratio must contain exactly three numbers')
train = {'x': list(), 'y': list()}
valid = {'x': list(), 'y': list()}
tests = {'x': list(), 'y': list()}
for _, [samples, labels] in data_dict.items():
samples_lens = len(samples)
train_ratio = round(samples_lens * (ratio[0] / sum(ratio)))
tests_ratio = round(samples_lens * (ratio[2] / sum(ratio)))
valid_ratio = samples_lens - train_ratio - tests_ratio
data = list(zip(samples, labels))
if shuffle:
random.shuffle(data)
x, y = zip(*data)
train['x'].extend(x[:train_ratio])
train['y'].extend(y[:train_ratio])
valid['x'].extend(x[train_ratio:train_ratio + valid_ratio])
valid['y'].extend(y[train_ratio:train_ratio + valid_ratio])
tests['x'].extend(x[-tests_ratio:])
tests['y'].extend(y[-tests_ratio:])
return train, valid, tests
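# A minimal usage sketch (toy data, names are illustrative): two classes of 8 samples
# each, split 6/1/1 per class; shuffle=False keeps the split deterministic.
data = {'cls_a': [[f'a{i}' for i in range(8)], [0] * 8],
        'cls_b': [[f'b{i}' for i in range(8)], [1] * 8]}
train, valid, tests = spliter(data, ratio=[6, 1, 1], shuffle=False)
len(train['x']), len(valid['x']), len(tests['x'])  # -> (12, 2, 2)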
|
793af274e3962d686f2ef56b34ae5bc0a53aac5b
| 29,598 |
import numpy as np
import scipy.signal
def smooth(x, window_len=None, window='flat', method='zeros'):
"""Smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
:param x: the input signal (numpy array)
:param window_len: the dimension of the smoothing window; should be an
odd integer
:param window: the type of window from 'flat', 'hanning', 'hamming',
'bartlett', 'blackman'
flat window will produce a moving average smoothing.
:param method: handling of border effects\n
'zeros': zero padding on both ends (len(smooth(x)) = len(x))\n
'reflect': pad reflected signal on both ends (same)\n
'clip': pad signal on both ends with the last valid value (same)\n
None: no handling of border effects
(len(smooth(x)) = len(x) - len(window_len) + 1)
"""
if window_len is None:
return x
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is one of 'flat', 'hanning', 'hamming',"
"'bartlett', 'blackman'")
if method == 'zeros':
s = np.r_[np.zeros((window_len - 1) // 2), x,
np.zeros(window_len // 2)]
elif method == 'reflect':
s = np.r_[x[(window_len - 1) // 2:0:-1], x,
x[-1:-(window_len + 1) // 2:-1]]
elif method == 'clip':
s = np.r_[x[0] * np.ones((window_len - 1) // 2), x,
x[-1] * np.ones(window_len // 2)]
else:
s = x
if window == 'flat':
w = np.ones(window_len, 'd')
else:
w = getattr(np, window)(window_len)
return scipy.signal.fftconvolve(w / w.sum(), s, mode='valid')
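# A minimal usage sketch (assumes numpy is imported as np, as above): smooth a noisy
# sine wave with a 9-point moving average; with method='zeros' the output length
# matches the input length.
t = np.linspace(0, 4 * np.pi, 200)
noisy = np.sin(t) + 0.2 * np.random.randn(200)
smoothed = smooth(noisy, window_len=9, window='flat', method='zeros')
assert smoothed.shape == noisy.shape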
|
148c1f4b420ce825d3b658e90329dac7b9360c2c
| 29,599 |
from inspect import Parameter, ismethod, signature
from typing import Callable
from typing import List
from typing import Iterable
import forge
def copy_signatures(
target_function: Callable,
template_functions: List[TemplateFunction],
exclude_args: Iterable[str] = None,
) -> Callable:
"""A decorator that copies function signatures from one or more template functions to a
target function.
Args:
target_function: Function to modify
template_functions: Functions containing params to apply to ``target_function``
"""
# Start with 'self' parameter if this is an instance method
fparams = {}
if 'self' in signature(target_function).parameters or ismethod(target_function):
fparams['self'] = forge.self
# Add and combine parameters from all template functions, excluding duplicates, self, and *args
for func in template_functions:
new_fparams = {
k: v
for k, v in forge.copy(func).signature.parameters.items()
if k != 'self' and v.kind != Parameter.VAR_POSITIONAL
}
fparams.update(new_fparams)
# Manually remove any excluded parameters
for key in ensure_list(exclude_args):
fparams.pop(key, None)
fparams = deduplicate_var_kwargs(fparams)
revision = forge.sign(*fparams.values())
return revision(target_function)
|
39f6054002b01433e84a0722c2851fff999eb6cf
| 29,600 |
from flask import render_template
def landing():
"""
displays the landing page.
"""
return render_template('landing.html')
|
b7ea7741f84bbbd2e35522547d24cf7740359b11
| 29,601 |
def dict2pdb(d):
"""Transform an atom dictionary into a valid PDB line."""
(x, y, z) = d['coords']
args = (d['at_type'], d['ser_num'], d['at_name'], d['alt_loc'],
d['res_name'][-3:], d['chain_id'], d['res_id'], d['res_ic'],
x , y , z , d['occupancy'], d['bfactor'], d['seg_id'], d['element'])
return PDB_COORDS_STRING % args
|
0cbcee043f0663bbb4f88b5070760fa079cd6111
| 29,603 |
import pyspark
def spark_collect():
"""Spark's collect
:input RDD data: The RDD to collect.
:output list result: The collected list.
"""
def inner(data: pyspark.rdd.RDD) -> ReturnType[list]:
o = data.collect()
return ReturnEntry(result=o)
return inner
|
99554cba28df7175b7b6e688d526f8b26767b909
| 29,604 |
import torch
def freeze_layers(
model: torch.nn.Sequential,
n_layers_to_train: int
) -> torch.nn.Sequential:
"""
Function to freeze given number of layers for selected model
:param model: Instance of Pytorch model
:param n_layers_to_train: number of layers to train, counting from the last one.
The rest of the layers is going to be frozen.
:return: Model with frozen layers.
"""
n_layers = len(list(model.children()))
for idx, child in enumerate(model.children()):
if idx < (n_layers - n_layers_to_train):
for param in child.parameters():
param.requires_grad = False
return model
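# A minimal usage sketch with a small Sequential model: keep only the last layer trainable.
model = torch.nn.Sequential(torch.nn.Linear(10, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
model = freeze_layers(model, n_layers_to_train=1)
[p.requires_grad for p in model[0].parameters()]  # -> [False, False]  (first layer frozen)
[p.requires_grad for p in model[2].parameters()]  # -> [True, True]    (last layer still trainable)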
|
bfeca127c684de0815493ef621dce790b3a090f3
| 29,605 |
from io import StringIO
import csv
def test_derive_files_CyTOF_analysis():
"""Check that CyTOF analysis CSV is derived as expected."""
ct = load_ct_example("CT_cytof_with_analysis")
artifact_format_specific_data = {
"cell_counts_assignment": {"B Cell (CD27-)": 272727, "B Cell (Memory)": 11111},
"cell_counts_compartment": {"B Cell": 8888, "Granulocyte": 22222},
"cell_counts_profiling": {
"B Cell (CD27-) CD1chi CD38hi": "138hi",
"B Cell (CD27-) CD1chi CD38lo": "138lo",
},
}
def fetch_artifact(url: str, as_string: bool) -> StringIO:
for (ftype, data) in artifact_format_specific_data.items():
if ftype in url:
csv = '"","CellSubset","N"\n'
csv += "\n".join(
f'"{i}","{k}",{v}' for i, (k, v) in enumerate(data.items())
)
return StringIO(csv)
raise Exception(f"Unknown file for url {url}")
result = derive_files(DeriveFilesContext(ct, "cytof_analysis", fetch_artifact))
assert len(result.artifacts) == 3
artifacts = {a.file_type.replace(" ", "_"): a for a in result.artifacts}
    # check that there is one file per `file_type`
assert len(artifacts) == 3
for ar_format, artifact in artifacts.items():
format_specific_truth = artifact_format_specific_data[ar_format]
req_header_fields = list(format_specific_truth.keys())
cimac_ids = sorted(["CTSTP01S2.01", "CTSTP01S1.01"])
dictreader = csv.DictReader(StringIO(artifact.data))
recs = []
for row in dictreader:
recs.append(row)
rec = ",".join(row[f] for f in req_header_fields)
should_be = ",".join(
str(format_specific_truth[f]) for f in req_header_fields
)
assert rec == should_be
assert sorted([r["cimac_id"] for r in recs]) == cimac_ids
assert sorted([r["cimac_participant_id"] for r in recs]) == sorted(
list(map(participant_id_from_cimac, cimac_ids))
)
|
22cfa9b7c96d35b6be4e2f2bc5bc9eb0e4a536ca
| 29,607 |
def get_headers(wsgi_env):
"""
Extracts HTTP_* variables from a WSGI environment as
title-cased header names.
"""
return {
key[5:].replace('_', '-').title(): value
        for key, value in wsgi_env.items() if key.startswith('HTTP_')}
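# A minimal usage sketch with a hand-built WSGI environ dict (values are illustrative):
environ = {'HTTP_USER_AGENT': 'curl/8.0', 'HTTP_X_REQUEST_ID': 'abc123', 'PATH_INFO': '/'}
get_headers(environ)  # -> {'User-Agent': 'curl/8.0', 'X-Request-Id': 'abc123'}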
|
01e7140a670957c691fec01dd90d53bdc29425bd
| 29,608 |
from django.db.models import Q
def get_chats_between_users(user1, user2):
"""
Returns all chat objects between 2 users sorted by date posted.
Note: It does not check if they are a connection
"""
return Chat.objects.filter(Q(sender = user1, receiver = user2) | Q(sender = user2, receiver = user1)).order_by('-date_posted')
|
1f9aba3f10e59776fb3e2eaa1bfcdf55eccf5fd6
| 29,609 |
def merge(source, destination):
"""
Deep-Merge two dictionaries.
Taken from http://stackoverflow.com/a/20666342/1928484.
>>> a = { 'first' : { 'all_rows' : { 'pass' : 'dog', 'number' : '1' } } }
>>> b = { 'first' : { 'all_rows' : { 'fail' : 'cat', 'number' : '5' } } }
>>> merge(b, a) == { 'first' : { 'all_rows' : { 'pass' : 'dog', 'fail' : 'cat', 'number' : '5' } } }
True
"""
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
merge(value, node)
else:
destination[key] = value
return destination
|
43524fcba1bd073311cad9128daac9c31cf84b6c
| 29,610 |
def SequenceToConjunction(node):
"""Transform all SEQUENCE nodes into CONJUNCTION nodes.
Sequences have the same semantic meaning as conjunctions, so we transform them
to conjunctions to make query matching code simpler.
Arguments:
node: Root of the tree to transform.
Returns:
A tree with all SEQUENCE nodes replaced with CONJUNCTION nodes.
"""
return ConvertNodes(
node, QueryParser.SEQUENCE, QueryParser.CONJUNCTION, 'CONJUNCTION')
|
bb7bf69ed8c6329f4aa0eafef67a0925a4818a3d
| 29,611 |
import json
def email_addresses_view_all(self) -> dict:
"""
Retrieve all existing EmailAddresses.
:param self:
:return
{
"ResponseType":"EmailAddresses",
"Version":"1.0",
"EmailAddresses":
[
{
"Version":"1.0",
"Id":"<ID>",
"Mail":"<Mail>",
"Type":<"Type">,
"Description":"<Description>",
"Verified":true|false,
"Person":
{
"Type":("CO"|"Dept"|"Org"|"Organization"),
"Id":"<ID>"
}
"Created":"<CreateTime>",
"Modified":"<ModTime>"
},
{...}
]
}:
Response Format
HTTP Status Response Body Description
200 OK EmailAddress Response EmailAddresses returned
401 Unauthorized Authentication required
500 Other Error Unknown error
"""
url = self._CO_API_URL + '/email_addresses.json'
resp = self._s.get(
url=url
)
if resp.status_code == 200:
return json.loads(resp.text)
else:
resp.raise_for_status()
|
c30fdc8bec641adc723ca924db364c172088bbd8
| 29,612 |
def adjusted_r2_score(ctx, pipeline, initial_metrics, num_rows):
"""Calculates the Adjusted R2 Score.
See: https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2
Parameters:
ctx (RecordingContext): The current context.
pipeline (Pipeline): The pipeline being evaluated.
initial_metrics (dict): A dict of the metrics we've collected so far.
num_rows (int): The number of rows in this segment of the dataset.
Returns:
float: The Adjusted R2 Score.
"""
# Only count root columns that end up in a formula with a nonzero weight.
# (A "root column" in a column that appeared in the initial matrix, before
# any preprocessing, and was used to derive other columns.)
roots = _nonzero_root_columns(ctx, pipeline)
num_cols = len(roots)
ratio = (num_rows - 1) / (num_rows - num_cols - 1)
if ratio > 0:
r2 = initial_metrics['r2_score']
return 1 - (1 - r2) * ratio
raise expected('more rows than columns',
f'{num_rows} rows and {num_cols} columns.')
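# A small worked check of the formula used above (pure arithmetic, no pipeline objects needed):
# with r2 = 0.90, 100 rows and 5 predictor columns,
# adjusted R2 = 1 - (1 - 0.90) * (100 - 1) / (100 - 5 - 1) which is about 0.8947.
r2, num_rows, num_cols = 0.90, 100, 5
adj_r2 = 1 - (1 - r2) * (num_rows - 1) / (num_rows - num_cols - 1)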
|
406b7e0fa4a55bac08e0e7e53000622828013baf
| 29,613 |
import torch
def bilinear_sampling(img, dp):
"""
warp rgbd images using projected depth * pixel and bilinear sampling
"""
b, hw, _ = dp.shape
_, _, h, w = img.shape
dp = dp.reshape(-1, 3) # reshape dp to (b*h*w, 3)
# homogeneous coord (wx, wy, w) -> real coord (x, y, 1)
# restrict depth not to negative value
u = dp[:, 0] / torch.clip(dp[:, 2], 1e-4, 10000)
v = dp[:, 1] / torch.clip(dp[:, 2], 1e-4, 10000)
# in deepvoxel, x, y is opposite (ignore this comment)
u, v = v, u
u0 = u.type(torch.int32)
u1 = u0 + 1
v0 = v.type(torch.int32)
v1 = v0 + 1
# define weights
w1 = (u1 - u) * (v1 - v)
w2 = (u - u0) * (v1 - v)
w3 = (u1 - u) * (v - v0)
w4 = (u - u0) * (v - v0)
# make image coord for all images in batch size
img_coord = torch.divide(torch.arange(b * hw), hw, rounding_mode='floor').type(torch.long)
# find a point that is not in out-of-grid section after warping
not_out = (u >= 0) * (u < h - 1) * (v >= 0) * (v < w - 1) * (dp[:, 2] > 1e-4)
# make out-points to 0 using not_out
u0 = (u0 * not_out).type(torch.long)
u1 = (u1 * not_out).type(torch.long)
v0 = (v0 * not_out).type(torch.long)
v1 = (v1 * not_out).type(torch.long)
w1 = (w1 * not_out)
w2 = (w2 * not_out)
w3 = (w3 * not_out)
w4 = (w4 * not_out)
# bilinear sampling
warped = w1[:, None] * img[img_coord, :, u0, v0] + w2[:, None] * img[img_coord, :, u1, v0] + \
w3[:, None] * img[img_coord, :, u0, v1] + w4[:, None] * img[img_coord, :, u1, v1]
return warped, not_out
|
529f24e80ca2bf785efc95db4d20cbbc13d66e2f
| 29,614 |
def mname(obj):
""" Get the full dotted name of the test method """
mod_name = obj.__class__.__module__.replace('h5py.tests.','')
return "%s.%s.%s" % (mod_name, obj.__class__.__name__, obj._testMethodName)
|
85a4a4f1aec25d57212f31968d5a42f8dc8d39e0
| 29,615 |
def sharpe(simple_returns, riskfree_rate, period=period.MONTHLY):
"""Compute the sharpe ratio of series of returns. Commonly used to measure the performance
of an investment compared to a risk-free asset, after adjusting for its risk.
Return the difference between the returns of the investment and the risk-free return,
divided by the standard deviation of the investment.
Parameters
----------
simple_returns : qp.ReturnDataFrame or qp.ReturnSeries
Input array or object that can be converted to an array.
riskfree_rate: float
        Risk free rate, with the same periodicity as simple returns (e.g. daily, monthly, ...).
period : period, default period.MONTHLY
Defines the periodicity of the 'returns' data for purposes of
annualizing.
Returns
-------
sharpe_ratio : qp.ReturnSeries or np.float64
References
----------
.. [1] "Sharpe Ratio", *Wikipedia*, https://en.wikipedia.org/wiki/Sharpe_ratio.
"""
excess_return = simple_returns.annualized(period) - riskfree_rate
return excess_return / simple_returns.effect_vol(period)
|
abd50018545f8ba2bc34e7d45cfefd2727594c66
| 29,616 |
def get_all_listing_types():
"""
Get all listing types
"""
return models.ListingType.objects.all()
|
cfed714fd52f64680f9eaffdd9068318dfcb1cfe
| 29,617 |
import torch
import logging
def load_model(config_path, model_path):
""" Not knowing exactly what model we may have saved, try to load both types. """
def load_gpt():
try:
mconf = GPTConfig.from_json(config_path)
model = GPT(mconf)
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)
logging.info("Successfully loaded GPT")
return model
except Exception as e:
return None
def load_ffnet():
try:
mconf = FFConfig.from_json(config_path)
model = FFNet(mconf)
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)
logging.info("Successfully loaded FFNet")
return model
except Exception as e:
return None
model = load_gpt() or load_ffnet()
return model
|
0ae334fd6b4e851424b90335d4369cfb1ee4acc5
| 29,618 |
from unittest.mock import Mock
def protocol(monkeypatch):
"""Rflinkprotocol instance with mocked handle_packet."""
monkeypatch.setattr(PacketHandling, "handle_packet", Mock())
return PacketHandling(None)
|
d4808028847a2e52e26eace8ae45ceb366690579
| 29,619 |
from typing import Dict
from typing import Any
def new_from_at_rest(ethereum_network_at_rest: Dict[str, Any]) -> "EthereumNetwork":
"""Instantiate a new EthereumNetwork model from storage
Args:
ethereum_network_at_rest: The dto of the at-rest network from storage
Returns:
Instantiated EthereumNetwork client
Raises:
NotImplementedError: When the version of the dto passed in is unknown
"""
dto_version = ethereum_network_at_rest.get("version")
if dto_version == "1":
return EthereumNetwork(
name=ethereum_network_at_rest["name"],
rpc_address=ethereum_network_at_rest["rpc_address"],
chain_id=ethereum_network_at_rest["chain_id"],
b64_private_key=ethereum_network_at_rest["private_key"],
)
else:
raise NotImplementedError(f"DTO version {dto_version} not supported for ethereum network")
|
03626be090ad5582329e61b6d0d1c4fa000316e6
| 29,620 |
import tweepy
def get_auth_twitter_consumer(hashed_twitter_id: str) -> tweepy.API:
    """auth twitter with saved tokens.
    Args:
        hashed_twitter_id(str):
    Returns:
        tweepy.API: authenticated Tweepy API client
"""
user = user_db_session.query(User.token, User.token_secret).filter(User.hashed_user_id == hashed_twitter_id).one()
auth = get_auth_twitter()
auth.set_access_token(user.token, user.token_secret)
twitter_api = tweepy.API(auth)
return twitter_api
|
3720153629181920b0a7152c5801d5f9fee38023
| 29,621 |
import pandas as pd
def retrieve_object_coordinates_from_db(
con,
object_ids,
key_object,
verbose=False):
"""
object_ids is a pd.DataFrame with rows representing objects and columns
Plate_Name
Image_Metadata_WellID
Image_Metadata_FieldID
<key_object>_Number_Object_Number
Retrieve image information from
<Plate_Name>_Per_<key_object>
Return a DataFrame for each (object, dye) with columns
Plate_Name
ImageNumber
<key_object>_Number_Object_Number
<key_object>_AreaShape_Center_X
<key_object>_AreaShape_Center_Y
"""
required_columns = [
"Plate_Name",
'Image_Metadata_WellID',
'Image_Metadata_FieldID',
f'{key_object}_Number_Object_Number']
for required_column in required_columns:
if required_column not in object_ids.columns:
raise Exception(f"Missing required column {required_column}")
object_coordinates = []
cursor = con.cursor()
for object_index in range(object_ids.shape[0]):
object_params = object_ids.iloc[object_index]
if verbose:
print(f"Getting coordinates for object:")
print(f" Plate_Name: '{object_params['Plate_Name']}'")
print(f" Image_Metadata_WellID: '{object_params['Image_Metadata_WellID']}'")
print(f" Image_Metadata_FieldID: '{object_params['Image_Metadata_FieldID']}'")
print(f" {key_object}_Number_Object_Number: '{object_params[f'{key_object}_Number_Object_Number']}'")
#Object Info
query = f"""
SELECT
key_object.{key_object}_AreaShape_Center_X,
key_object.{key_object}_AreaShape_Center_Y
FROM
{f"{object_params['Plate_Name']}_Per_Image"} AS image,
{f"{object_params['Plate_Name']}_Per_{key_object}"} AS key_object
WHERE
                image.Image_Metadata_WellID = '{object_params['Image_Metadata_WellID']}' AND
                image.Image_Metadata_FieldID = '{object_params['Image_Metadata_FieldID']}' AND
key_object.ImageNumber = image.ImageNumber AND
key_object.{key_object}_Number_Object_Number = {object_params[f'{key_object}_Number_Object_Number']};
"""
if verbose:
print(query)
cursor.execute(query)
values = cursor.fetchone()
object_coordinates.append(dict(
object_params.to_dict(), **{
f"{key_object}_AreaShape_Center_X" : values[0],
f"{key_object}_AreaShape_Center_Y" : values[1]}))
cursor.close()
object_coordinates = pd.DataFrame(object_coordinates)
return object_coordinates
|
bc9996de17197223ca828821f8a89b6881ff934e
| 29,622 |
from numba import njit
def zip(fn):
"""
Higher-order tensor zip function.
fn_zip = zip(fn)
c = fn_zip(a, b)
Args:
fn: function from two floats-to-float to apply
a (:class:`Tensor`): tensor to zip over
b (:class:`Tensor`): tensor to zip over
Returns:
:class:`Tensor` : new tensor data
"""
f = tensor_zip(njit()(fn))
def ret(a, b):
c_shape = shape_broadcast(a.shape, b.shape)
out = a.zeros(c_shape)
f(*out.tuple(), *a.tuple(), *b.tuple())
return out
return ret
|
7e6d990352edac102423c364b2745f4009a0ce44
| 29,623 |
import re
def config_file_has_self_install_dirs(config_file):
"""Config file has self install dirs"""
has_self_install_dirs = False
with open(config_file) as _f:
for line in _f:
if re.search(r'^hard_drive_\d+_label\s*=\s*(amigaosdir|kickstartdir|userpackagesdir)', line, re.I) or \
re.search(r'^(hardfile2|uaehf\d+|filesystem2)=.*[,:](amigaosdir|kickstartdir|userpackagesdir)[,:]', line, re.I):
has_self_install_dirs = True
break
return has_self_install_dirs
|
5e095570ea20156cc3d38cf7379f199b5b8af5bc
| 29,624 |
from scipy.optimize import brentq
def implied_volatility(price, F, K, r, t, flag):
    """Returns the implied volatility of an option under the Black model.
:param price:
:type price: float
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param r: annual risk-free interest rate
:type r: float
:param t: time to expiration in years
:type t: float
:param flag: 'c' or 'p' for call or put.
:type flag: str
:returns: float
>>> F = 101.0
>>> K = 102.0
>>> t = .5
>>> r = .01
>>> flag = 'p'
>>> sigma_in = 0.2
>>> price = black(flag, F, K, t, r, sigma_in)
>>> expected_price = 6.20451158097
>>> abs(expected_price - price) < 0.00001
True
>>> sigma_out = implied_volatility(price, F, K, r, t, flag)
>>> sigma_in == sigma_out or abs(sigma_in - sigma_out) < 0.00001
True
>>> F = 100
>>> K = 100
>>> sigma = .2
>>> flag = 'c'
>>> t = .5
>>> r = .02
>>> discounted_call_price = black(flag, F, K, t, r, sigma)
>>> iv = implied_volatility(discounted_call_price, F, K, r, t, flag)
>>> expected_discounted_call_price = 5.5811067246
>>> expected_iv = 0.2
>>> abs(expected_discounted_call_price - discounted_call_price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
f = lambda sigma: price - black(flag, F, K, t, r, sigma)
return brentq(
f,
a=1e-12,
b=100,
xtol=1e-15,
rtol=1e-15,
maxiter=1000,
full_output=False
)
|
24def9d6b21518574d4aba0e72bde7bf2a723252
| 29,625 |
import numpy as np
from scipy import fft
from scipy.io import wavfile
def cc_audio_offset(audio_file_1, audio_file_2):
"""
Get the audio offset between two WAV files, uses cross correlation
Code influenced by https://github.com/rpuntaie/syncstart/blob/main/syncstart.py
:param audio_file_1:
:param audio_file_2:
:return:
"""
sample_rate_1, audio_1 = wavfile.read(audio_file_1)
sample_rate_2, audio_2 = wavfile.read(audio_file_2)
assert sample_rate_1 == sample_rate_2, "Assert that the sample rate of WAV files is equal"
fs = sample_rate_1 # Sampling frequency
    # If stereo (2-D array), use the first channel
    if audio_1.ndim == 2:
        audio_1 = audio_1[:, 0]
    # If stereo (2-D array), use the first channel
    if audio_2.ndim == 2:
        audio_2 = audio_2[:, 0]
ls1 = len(audio_1)
ls2 = len(audio_2)
pad_size = ls1 + ls2 + 1
pad_size = 2 ** (int(np.log(pad_size) / np.log(2)) + 1)
s1pad = np.zeros(pad_size)
s1pad[:ls1] = audio_1
s2pad = np.zeros(pad_size)
s2pad[:ls2] = audio_2
# Calculate the cross correlation
corr = fft.ifft(fft.fft(s1pad) * np.conj(fft.fft(s2pad)))
ca = np.absolute(corr)
x_max = np.argmax(ca)
if x_max > pad_size // 2:
file, offset = audio_file_2, (pad_size - x_max) / fs
else:
file, offset = audio_file_1, x_max / fs
return file, offset
|
8983d5299cf2b67efa0e6c6dbd37bbdcd872ca09
| 29,627 |
from ase import Atom, Atoms
def add_monoatomic(mof,ads_species,ads_pos):
"""
Add adsorbate to the ASE atoms object
Args:
mof (ASE Atoms object): starting ASE Atoms object of structure
ads_species (string): adsorbate species
ads_pos (numpy array): 1D numpy array for the proposed
adsorption position
Returns:
mof (ASE Atoms object): ASE Atoms object with adsorbate
"""
try:
adsorbate = Atoms([Atom(ads_species,ads_pos)])
    except Exception:
raise ValueError('Unsupported adsorbate: '+ads_species)
mof.extend(adsorbate)
return mof
|
963432ab247ae6b729e60627bad583d1939c483b
| 29,628 |
import pandas as pd
from rdkit import Chem
def df_aidxf_ctl():
"""Example df_aidxf of a molecule with a fragment combination of type connection tripodal linker."""
mol = Chem.MolFromSmiles('C1CC2CC1CCCCC1CCNC3CCCCCCC4CC2CC4CCCC13')
return pd.DataFrame([
['mol_ctl', 'XXX', 'QA', 0, [9, 10, 11, 12, 13, 28], 56.0, mol, Chem.MolFromSmiles('C1CCNCC1')],
['mol_ctl', 'XXX', 'QI', 0, [0, 4, 3, 2, 1, 22, 21, 20, 24, 23], 33.0, mol, Chem.MolFromSmiles('C1CCC(C1)C1CCCC1')],
], columns=['idm', 'inchikey', 'idf', 'idf_idx', '_aidxf', 'mol_perc', 'mol', 'mol_frag']
)
|
66eef548be3e8d61749a61996afb83af2e0ff055
| 29,630 |
def make_dunder_main(manifest):
"""Generate a __main__.py file for the given manifest."""
prelude = manifest.get("prelude_points", [])
main = manifest.get("entry_point")
scripts = prelude + [main]
return MAIN_TEMPLATE.format(**locals())
|
7504eb97c8b83d2a377bd7e00d06b294eed824c6
| 29,631 |
def prefix_exists(bucket, key):
"""ensure_object_exists
:param bucket:
:param key:
"""
objects = S3_CLIENT.list_objects(Bucket=bucket, Prefix=key)
contents = objects.get('Contents')
if contents:
return len(contents) > 0
return False
|
7575449b319b21c69df0e9d1f3a3a88d8404a145
| 29,632 |
import numpy as np
def mae(y, y_pred):
"""mean absolute error for two ranks(encoded as strings)"""
errors = [abs(i - find_position(y, c))
for i,c in enumerate(y_pred) ]
return np.mean(errors)
|
23fa9a51dd80c15c723ac2ad2487ec0f2a1119c6
| 29,633 |
def create_update_model_fn():
    """Return a callable that applies the mask to a model via ``utils.apply_mask``."""
    def update_model(model):
        """Apply the mask to ``model`` (CPU only) and return it."""
utils.apply_mask(model, use_cuda=False)
return model
return update_model
|
85bd6cdcf37ab705119959c1f9385b992f7b2702
| 29,635 |
import numpy as np
def index_select_batch(data, indices):
"""Gather `indices` as batch indices from `data`, which can either be typical nd array or a
list of nd array"""
assert isinstance(indices, (tuple, list)) or (isndarray(indices) and len(indices.shape) == 1)
if isndarray(data):
return data[indices]
assert len(data) > 0 and len(indices) > 0
sample = np.array(data[0]) # Try to convert the first element to a typical nd array.
output = np.empty((len(indices), ) + sample.shape, dtype=sample.dtype)
for i, j in enumerate(indices):
output[i] = data[j]
return output
|
c66e787fcc599628a71569dce77dcda2c175345f
| 29,636 |
import select
import time
from subprocess import PIPE, Popen
def start_process(cmd):
"""
Start BrowserStackLocal foreground process.
Monitor its output to find a specific text that indicates that the process has
successfully established connection to BrowserStack server and ready to serve.
Credits to https://stackoverflow.com/questions/10756383
:param list[str] cmd:
:rtype: subprocess.Popen | None
"""
print("Starting BrowserStackLocal process ", end='', flush=True)
process = Popen(cmd, stdout=PIPE)
poll_obj = select.poll()
poll_obj.register(process.stdout, select.POLLIN)
connected = False
start_time = time.time()
while time.time() - start_time < TIMEOUT and not connected:
poll_result = poll_obj.poll(0)
if poll_result:
line = process.stdout.readline()
if b'*** Error: ' in line:
print('\n' + line[line.index(b'Error:'):].decode().strip())
break
if line == EXPECTED_OUTPUT_MESSAGE:
connected = True
print('.', end='', flush=True)
time.sleep(POLL_PERIOD)
if connected:
print(' Done.', flush=True)
return process
else:
if time.time() - start_time >= TIMEOUT:
print(' Timeout ({:d} sec).'.format(TIMEOUT), flush=True)
print('Terminating the process ...', flush=True)
stop_process(process.pid)
|
7b53d4c0beb9c38068f850a783dbd9df3a377df1
| 29,638 |
import pandas as pd
def tnr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of True Negative Rates
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value indicating the privileged group. Defaults to 1.
Returns:
Number
"""
return difference(
true_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
)
|
a7f266e743d59076ea8fda3b039e11dccba0e68a
| 29,639 |
def call(value):
"""Call is meant to be used with the Method filter. It attempts to call
the method specified.
Usage:
{% load custom_filters %}
{% if foo|method:"has_access"|call %}
This will invoke foo.has_access()
"""
if not callable(value):
return "[%s is not callable]" % value
return value()
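# A minimal usage sketch outside a template (the class name is illustrative):
class Doc:
    def has_access(self):
        return True
call(Doc().has_access)   # -> True
call("not a function")   # -> '[not a function is not callable]'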
|
1bb4217b74bf69b55d4c2cae1c29a396e19f5153
| 29,640 |
def permute_by_indices(list_of_things, list_of_index_transpositions):
    """Given a list_of_things and a list of pairs of transpositions of indices
    [(i, j), (k, m), ...], return the list_of_things with the i-th and j-th
    values swapped, the k-th and m-th values swapped, and so on.
Examples
--------
>>> permute_by_indices(['a', 'b', 'c'], [(0, 1)])
['b', 'a', 'c']
>>> permute_by_indices(['a', 'b', 'c'], [(0, 2), (1, 2)])
['c', 'a', 'b']
"""
result = list_of_things
for i, j in list_of_index_transpositions:
result[j], result[i] = result[i], result[j]
return result
|
31d7f73028fcb4c3a43750d1ade0c27e1b563dbb
| 29,641 |
import re
def _string_to_int(string: str) -> int:
"""
    a helper function that converts a string to an int, e.g. 'S1' -> 1
Args:
string (str): input string
Returns:
        (int): the first integer found in the string, or 0 if none is found
"""
    r = re.findall(r'\d+', string)
if (len(r) > 0):
return int(r[0])
else:
return 0
|
d4dbea658e6092edb27b85154b319e098c588a76
| 29,642 |
from ospgrillage.load import Path
def create_moving_path(**kwargs):
"""
User interface function to create Path object for moving load.
:keyword:
* start_point (`Point`): Start point of path
* end_point (`Point`): End point of path
* increments (`int`): Increment of path steps. Default is 50
* mid_point (`Point`): Default = None
:returns: :class:`~ospgrillage.load.Path` object
"""
return Path(**kwargs)
|
05de795c61e7b3fc4c3f4c2aa14505b4a6fcf986
| 29,643 |
import numpy as np
from numpy import linalg
def np_ortho(shape, random_state, scale=1.):
"""
Builds a numpy variable filled with orthonormal random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 1.)
        default of 1. results in orthonormal random values scaled by 1.
Returns
-------
initialized_ortho, array-like
Array-like of random values the same size as shape parameter
References
----------
Exact solutions to the nonlinear dynamics of learning in deep linear
neural networks
A. Saxe, J. McClelland, S. Ganguli
"""
if type(shape[0]) is tuple:
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
        flat_shp = (shp[0], np.prod(shp[1:]))
else:
shp = shape
flat_shp = shape
g = random_state.randn(*flat_shp)
U, S, VT = linalg.svd(g, full_matrices=False)
res = U if U.shape == flat_shp else VT # pick one with the correct shape
res = res.reshape(shp)
return (scale * res).astype(_type)
|
c5369bee504bf367717de41bca24689211bcd866
| 29,644 |
import ast
def _bind_generic_argument_list(field_list):
# type: (syntax.GenericArgumentList) -> ast.GenericArgumentList
"""Bind a generic argument list."""
ast_field_list = _bind_field_list(field_list, ast.GenericArgumentList)
ast_field_list.fields = [_bind_field_list_entry(f) for f in field_list.fields]
return ast_field_list
|
9958c44b11510f437ddb8b671954d60975edbd14
| 29,645 |
from contextlib import closing
from io import StringIO
import six
from email.generator import Generator
def message_to_string(msg):
"""
Converts python message to string in a proper way.
"""
with closing(StringIO()) as fp:
if six.PY3:
g = Generator(fp, mangle_from_=False, policy=_compat32_crlf)
g.flatten(msg, unixfrom=False)
return fp.getvalue()
g = Generator(fp, mangle_from_=False)
g.flatten(msg, unixfrom=False)
# In Python 2 Generator.flatten uses `print >> ` to write to fp, that
# adds `\n` regardless of generator.NL value. So we resort to a hackish
# way of replacing LF with RFC complaint CRLF.
for i, v in enumerate(fp.buflist):
if v == '\n':
fp.buflist[i] = _CRLF
return fp.getvalue()
|
9824d18c0435db5659fdf261bce78d7c19a57490
| 29,646 |
import numpy as np
def calculate_diag_OR(clusters,clusters_ind,status):
    """Calculate the Odds Ratio for the probability of being diagnosed linked to being in a certain cluster
    Parameters: clusters (dict): dictionary with cluster number as key and list of patients in cluster as value
                clusters_ind (list): indices of cluster to take into account
                status (DataFrame): per-patient table whose "\\13_Status\\" column indicates whether the case is solved
    Returns: OR_diag (dict): dictionary with cluster number as key and the Odds Ratio (OR) for each cluster
             IC (dict): dictionary with cluster number as key and the 95% confidence interval of the OR for each cluster
"""
count_diag_clusters={cluster: 0 for cluster in clusters_ind}
for cluster in clusters_ind:
for patient in clusters[cluster]:
if status.loc[patient]["\\13_Status\\"]=="solved":
count_diag_clusters[cluster]+=1
OR_diag,IC={},{}
def IC_func(sign,OR,a,b,c,d):
if (a==0 or b==0 or c==0 or d==0):
return None
if sign=="up":
return np.exp(np.log(OR)+1.96*np.sqrt(1/a+1/b+1/c+1/d))
else:
return np.exp(np.log(OR)-1.96*np.sqrt(1/a+1/b+1/c+1/d))
for cluster in count_diag_clusters:
count_diag_notin=np.sum([count_diag_clusters[cl] for cl in clusters_ind if not(cl==cluster)])
OR_diag[cluster] = \
(
(count_diag_clusters[cluster]/count_diag_notin)/ \
((len(clusters[cluster])-count_diag_clusters[cluster])/ \
np.sum([len(clusters[cl])-count_diag_clusters[cl] for cl in clusters_ind]))
)
IC[cluster]={"up": IC_func(
"up",OR_diag[cluster],count_diag_clusters[cluster],
(len(clusters[cluster])-count_diag_clusters[cluster]),
count_diag_notin,
np.sum([len(clusters[cl])-count_diag_clusters[cl] for cl in clusters_ind]),
),
"low": IC_func(
"low",OR_diag[cluster],count_diag_clusters[cluster],
(len(clusters[cluster])-count_diag_clusters[cluster]),
count_diag_notin,
np.sum([len(clusters[cl])-count_diag_clusters[cl] for cl in clusters_ind])
)
}
return OR_diag,IC
|
7adf6ccc40b6b45bc5b990498ec35fb8f62b3125
| 29,647 |
def build(obj, parent=None):
"""
    Safely builds the object by calling its method 'build' only if 'obj' possesses a 'build' method.
    Otherwise, will convert it to a string using the 'str' function.
    If a parent is passed, all packages and preamble lines needed by the object will be added to the packages and preamble of the parent.
"""
if isinstance(obj, TexObject):
built_obj = obj.build()
if parent is not None:
for package_name, package in obj.packages.items():
parent.add_package(package_name, *package.options, **package.kwoptions)
for line in obj.preamble:
parent.add_to_preamble(line)
return built_obj
    elif hasattr(obj, 'build'):
        built_obj = obj.build()
        return built_obj
    else:
        return str(obj)
|
9011659825a9b2fce887aa9c74d98d70f4cb7a2f
| 29,648 |
def _no_negative_zero(val):
"""Make sure -0 is never output. Makes diff tests easier."""
if val == 0:
return 0
return val
|
345802e297cc1e1c77a5b1db664715bfc42f3da6
| 29,649 |
import numpy
def weights(basis, X, deriv=None):
"""
Calculates the interpolant value or derivative weights for points X.
:param basis: interpolation function in each direction, eg,
``['L1', 'L1']`` for bilinear.
:type basis: list of strings
:param X: locations to calculate interpolant weights
:type X: list or numpy array (npoints, ndims)
:param deriv: derivative in each dimension, e.g., ``deriv=[1, 1]``
:type deriv: list of integers
:return: basis weights (ndims)
:rtype: numpy array, size: (npoints, nweights)
>>> import numpy
>>> x = numpy.array([[0.13, 0.23], [0.77, 0.06]])
>>> weights(['L1', 'L2'], x, deriv=[0, 1])
array([[-1.8096, -0.2704, 1.8792, 0.2808, -0.0696, -0.0104],
[-0.6348, -2.1252, 0.8096, 2.7104, -0.1748, -0.5852]])
"""
basis_functions, dimensions = _get_basis_functions(basis, deriv)
X = _process_x(X, dimensions)
W = []
for bf in basis_functions:
if bf[0].__name__[0] == 'T':
W.append(bf[0](X[:, bf[1]]))
else:
W.append(bf[0](X[:, bf[1]])[0])
BPInd = _get_basis_product_indices(basis, dimensions, W)
if BPInd is None:
return W[0]
WW = numpy.zeros((X.shape[0], len(BPInd)))
if dimensions == 3:
for ind, ii in enumerate(BPInd):
WW[:, ind] = W[0][:, ii[0]] * W[1][:, ii[1]] * W[2][:, ii[2]]
else:
for ind, ii in enumerate(BPInd):
WW[:, ind] = W[0][:, ii[0]] * W[1][:, ii[1]]
return WW
|
17724a48779852dc3794799ebcb8eed9ee288164
| 29,650 |
def svn_stream_readline(*args):
"""
svn_stream_readline(svn_stream_t stream, svn_stringbuf_t stringbuf, char eol,
svn_boolean_t eof, apr_pool_t pool) -> svn_error_t
"""
    return _core.svn_stream_readline(*args)
|
2edb1fca651078336ac31ff7a54f563c5e7f1846
| 29,651 |
from typing import Any
from typing import Iterable
from typing import Dict
from typing import Hashable
import json
from django.http import StreamingHttpResponse
def stream_json(data: Iterable[Dict[Hashable, Any]]) -> StreamingHttpResponse:
"""
Stream all elements in `data` as JSON array using the StreamingHttpResponse class.
Parameters
----------
    data: Iterable[Dict[Hashable, Any]]
An Iterable of JSON-encodable elements.
Returns
-------
StreamingHttpResponse
A streaming HTTP response class with an iterator as content. Content-type = 'application/json'
Notes
-----
The function is recommended to be used with a large set of transmitted data.
"""
assert isinstance(data, Iterable), "stream_json: data must be of iterable type"
def _iter(ret):
first = True
yield '['
for x in ret:
if first:
first = False
yield json.dumps(x, ensure_ascii=False)
else:
yield ', '
yield json.dumps(x, ensure_ascii=False)
yield ']'
return StreamingHttpResponse(
_iter(data),
content_type="application/json",
)
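# A minimal usage sketch inside a Django view (the view and data are illustrative and
# assume a configured Django project): stream many small dicts without building the
# whole JSON payload in memory.
def export_squares(request):
    rows = ({"n": n, "square": n * n} for n in range(100_000))
    return stream_json(rows)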
|
ba562d71170724e1994567dd70c407123169d126
| 29,652 |
import numpy as np
def data_anscombes_quartet(id):
""" Generate one case for each of the Anscombe's quartet """
if id == 1:
x = [10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0]
y = [8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68]
elif id == 2:
x = [10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0]
y = [9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74]
elif id == 3:
x = [10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0]
y = [7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73]
elif id == 4:
x = [8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 19.0, 8.0, 8.0, 8.0]
y = [6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89]
else:
raise ValueError("Anscombes quartet contains, well... 4 datasets. "
"Invalid i=%s" % id)
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
return Dataset(x, y)
|
cfd1f2ec1fbc2af2c9bc360bcd72f85da271a4a9
| 29,653 |
def rinko_curve_fit_eq(X, a, b, c, d, e, f, g, h):
"""
    Same as rinko_oxy_eq, but in a form that is more suitable for scipy's curve fit routine.
    X contains pressure, temperature, voltage, and OS (the normal arguments for rinko_oxy_eq).
"""
press, temp, oxyvo, os = X
o2_cal = RinkoO2Cal(a, b, c, d, e, f, g, h)
#Calculate pprime
pprime = rinko_pprime_aro_cav(oxyvo,temp,o2_cal)
# Calculate P (DO physical value in %)
#p = rinko_saturation(pprime, o2_cal)
# Correct for pressure * d is pressure in Mpa *
d = press * 0.01
p_corr = rinko_correct_for_pressure(pprime,d,o2_cal)
# Divide by 100 to get percents in the form of 0.xx
p_corr = p_corr / 100
# Multiply by OS to get DO (os can be in either ml/l or umol/kg)
DO = p_corr * os
return DO
|
e811c68341af549dea5f5ad3342fe0f3593695d9
| 29,654 |
def SearchList(items, message="Select an item:", title='FontParts'):
"""
    A dialog to search a given list.
    Optionally a `message` and `title` can be provided.
::
from fontParts.ui import SearchList
result = SearchList(["a", "b", "c"])
print(result)
"""
return dispatcher["SearchList"](items=items, message=message, title=title)
|
eba3139a78748e929ca6f5487deee0379ba383e8
| 29,658 |
import logging
from fastapi import Depends, Query
async def bsi_score_trend(
business_id: int,
time_period: str = Query(
..., enum=DATEFILTER.date_filter),
auth: Depends = Depends(get_current_user),
) -> dict[str, int]:
"""
Shows the media presence of the company on a monthly basis. This is calculated using the ‘Business Sentiment Index’ formula.
- **business_id**: unique id of the business
- **time_period**: Last Year, Last 6 Months or Current Month
\f
:param item: User input.
"""
try:
params = {
'business_id': business_id,
'date': time_period,
}
rate_limit(auth.client_id, CALLS, PERIOD)
return BusinessTrendsService.get_bsi_trend(params)
except Exception as error:
logging.error(error)
raise exception.internal_server_error()
|
a9449c02823b3f36f7c6d4503abeb78d60ff20ef
| 29,659 |
def create_role(party: Party, role_info: dict) -> PartyRole:
"""Create a new party role and link to party."""
party_role = PartyRole(
role=JSON_ROLE_CONVERTER.get(role_info.get('roleType').lower(), ''),
appointment_date=role_info['appointmentDate'],
cessation_date=role_info['cessationDate'],
party=party
)
return party_role
|
8793158c1a7d8ca883da6e12e9b47e57206445f3
| 29,660 |
import struct
# Standard MurmurHash2 mixing constants and 32-bit mask (assumed to be module-level values here).
M = 0x5bd1e995
R = 24
int32 = 0xffffffff
DEFAULT_SEED = 0x9747b28c  # seed commonly used by Kafka's murmur2 partitioner (assumption)
def murmur2_32(byte_data, seed=DEFAULT_SEED):
"""
Creates a murmur2 32 bit integer hash from the given byte_data and seed.
:param bytes byte_data: the bytes to hash
:param int seed: seed to initialize this with
:return int: 32 bit hash
"""
length = len(byte_data)
# Initialize the hash to a 'random' value
h = (seed ^ length) & int32
# Mix 4 bytes at a time into the hash
index = 0
while length >= 4:
k = struct.unpack('<i', byte_data[index:index+4])[0]
k = k * M & int32
k = k ^ (k >> R & int32)
k = k * M & int32
h = h * M & int32
h = (h ^ k) & int32
index += 4
length -= 4
# Handle the last few bytes of the input array
if length >= 3:
h = (h ^ byte_data[index+2] << 16) & int32
if length >= 2:
h = (h ^ byte_data[index+1] << 8) & int32
if length >= 1:
h = (h ^ byte_data[index]) & int32
h = h * M & int32
# Do a few final mixes of the hash to ensure the last few bytes are
# well-incorporated.
h = h ^ (h >> 13 & int32)
h = h * M & int32
h = h ^ (h >> 15 & int32)
return h
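# A minimal usage sketch: hash a byte string with the default seed and confirm the
# result fits in 32 bits.
h = murmur2_32(b"example-key")
assert 0 <= h <= 0xffffffff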
|
34962f0949c7718d4c977d9c3e69e3e86ba78eff
| 29,661 |
import skimage.filters
import skimage.measure
import skimage.morphology
import skimage.segmentation
def cell_segmenter(im, thresh='otsu', radius=20.0, image_mode='phase',
area_bounds=(0,1e7), ecc_bounds=(0, 1)):
"""
This function segments a given image via thresholding and returns
a labeled segmentation mask.
Parameters
----------
im : 2d-array
Image to be segmented. This may be of either float or integer
data type.
thresh : int, float, or 'otsu'
Value used during thresholding operation. This can either be a value
(`int` or `float`) or 'otsu'. If 'otsu', the threshold value will be
determined automatically using Otsu's thresholding method.
radius : float
Radius for gaussian blur for background subtraction. Default value
is 20.
image_mode : 'phase' or 'fluorescence'
Mode of microscopy used to capture the image. If 'phase', objects with
intensity values *lower* than the provided threshold will be selected.
If `fluorescence`, values *greater* than the provided threshold will be
selected. Default value is 'phase'.
area_bounds : tuple of ints.
Range of areas of acceptable objects. This should be provided in units
of square pixels.
ecc_bounds : tuple of floats
Range of eccentricity values of acceptable objects. These values should
range between 0.0 and 1.0.
Returns
-------
im_labeled : 2d-array, int
Labeled segmentation mask.
"""
# Apply a median filter to remove hot pixels.
med_selem = skimage.morphology.square(3)
im_filt = skimage.filters.median(im, selem=med_selem)
# Perform gaussian subtraction
im_sub = bg_subtract(im_filt, radius)
# Determine the thresholding method.
    if thresh == 'otsu':
        thresh = skimage.filters.threshold_otsu(im_sub)
    # Determine the image mode and apply threshold.
    if image_mode == 'phase':
        im_thresh = im_sub < thresh
    elif image_mode == 'fluorescence':
im_thresh = im_sub > thresh
else:
raise ValueError("image mode not recognized. Must be 'phase'"
+ " or 'fluorescence'")
# Label the objects.
im_label = skimage.measure.label(im_thresh)
# Apply the area and eccentricity bounds.
im_filt = area_ecc_filter(im_label, area_bounds, ecc_bounds)
# Remove objects touching the border.
im_border = skimage.segmentation.clear_border(im_filt, buffer_size=5)
# Relabel the image.
im_border = im_border > 0
im_label = skimage.measure.label(im_border)
return im_label
|
f97650c74dd8c7d32b6e1e6851395c988c6056f0
| 29,662 |
def convert_id_to_location(df, locations_df):
"""Converts c3 `id` to covid forecast hub `location_name`.
c3 uses `id` as the PK while covid forecast hub uses `location_name`
(labeled just `location` there instead of `location_name`) as the PK.
locations_df contains `id` and `location_name` mapping.
df is a long form df DataFrame[dates, id; {metric_name}].
"""
assert_long_df(df)
df = df.copy()
df["location_name"] = df.index.get_level_values("id").map(
lambda x: locations_df.set_index("id")["location_name"][x]
)
df = df.set_index("location_name", append=True)
df = df.droplevel("id")
return df
|
fe789c292603003eedb971aee094c4c4919fb7fc
| 29,663 |
def extract_scoring_history_field(aModel, fieldOfInterest, takeFirst=False):
"""
Given a fieldOfInterest that are found in the model scoring history, this function will extract the list
of field values for you from the model.
:param aModel: H2O model where you want to extract a list of fields from the scoring history
:param fieldOfInterest: string representing a field of interest.
:return: List of field values or None if it cannot be found
"""
return extract_from_twoDimTable(aModel._model_json["output"]["scoring_history"], fieldOfInterest, takeFirst)
|
158d668830cadb61ee9affb47d601dbf3a43e55d
| 29,664 |
def get_num_beats(times, voltages):
""" Calculates the number of beats in the sample.
:param times: List of time data
:param voltages: List of voltage data
:return: Int representing the number of detected beats
"""
return get_beats_times(times, voltages).size
|
9bc0a5ea664377c529a9dcecf3f82f72ef6e4fdb
| 29,665 |
def find_all_indexes_r(text, pattern, itext=0, ipattern=0, indices=None):
"""Recursive implementation of find_all_indexes. The time complexity should
be equialent to find all indexs not recursive."""
if indices is None:
indices = []
if len(text) == itext + ipattern:
if len(pattern) == ipattern and len(pattern) != 0:
indices.append(itext)
return indices
elif len(pattern) == 0:
indices.append(itext)
itext += 1
elif len(pattern) == ipattern:
indices.append(itext)
ipattern = 0
itext += 1
elif pattern[ipattern] == text[ipattern + itext]:
ipattern += 1
else:
ipattern = 0
itext += 1
return find_all_indexes_r(text, pattern, itext, ipattern, indices)
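# A minimal usage sketch: indexes of every (possibly overlapping) occurrence of a pattern.
find_all_indexes_r('abcabc', 'abc')  # -> [0, 3]
find_all_indexes_r('aaa', 'aa')      # -> [0, 1]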
|
bcd447140c92d8ffbbe0577e469d2e9e4cc9edad
| 29,666 |
import functools
def image_scale(pts, scale):
"""scale to original image size"""
def __loop(x, y):
return [x[0] * y, x[1] * y]
return list(map(functools.partial(__loop, y=1/scale), pts))
|
943d4f46fb6cd9e9433a9ad53b9baf60e3646273
| 29,667 |
def current_session_view(request):
""" Current SEssion and Term """
if request.method == 'POST':
form = CurrentSessionForm(request.POST)
if form.is_valid():
session = form.cleaned_data['current_session']
term = form.cleaned_data['current_term']
AcademicSession.objects.filter(name=session).update(current=True)
AcademicSession.objects.exclude(name=session).update(current=False)
AcademicTerm.objects.filter(name=term).update(current=True)
AcademicTerm.objects.exclude(name=term).update(current=False)
else:
form = CurrentSessionForm(initial={
"current_session": AcademicSession.objects.get(current=True),
"current_term": AcademicTerm.objects.get(current=True)
})
return render(request, 'corecode/current_session.html', {"form":form})
|
736bdab60ba7f0ef2747ce5203253d8c5dd86738
| 29,670 |
def post_css_transform(doc, url):
"""
User-customizable CSS transform.
Given a CSS document (with URLs already rewritten), returns
modified CSS document.
"""
global config
if config.hack_skin and not config.special_mode:
if config.skin == MONOBOOK_SKIN:
doc = monobook_hack_skin_css(doc, url)
else:
raise ValueError('unknown skin')
return doc
|
73ec165288315ab59cbccf663cd181c836ff8c73
| 29,671 |
import json
async def get_user_ads(context, account, community=None):
"""List all ad posts created by account. If `community` is provided,
it lists all ads submitted to that community and their state."""
db = context['db']
account_id = await get_account_id(db, account)
params = {'account_id': account_id}
if community:
community_id = await get_community_id(db, community)
assert community_id, 'community not found: %s' % community
params['community_id'] = community_id
sql = """SELECT p.title, p.body, p.json, a.type, a.properties,
s.time_units, s.bid_amount, s.bid_token,
s.start_time, s.status, s.mod_notes
FROM hive_ads a
JOIN hive_posts_cache p ON a.post_id = p.post_id
JOIN hive_ads_state s ON a.post_id = s.post_id"""
if community:
sql += """ AND s.community_id = :community_id"""
sql += """ WHERE a.account_id = :account_id"""
res = await db.query_all(sql, **params)
all_ads = None
# compile list of dicts from result
if res:
all_ads = []
for _ad in res:
_json = json.loads(_ad[2])
del _json['native_ad']
entry = {
'title': _ad[0],
'body': _ad[1],
'json': _json,
'ad_type': _ad[3],
'ad_properties': json.loads(_ad[4])
}
if community:
entry['time_units'] = _ad[5]
entry['bid_amount'] = _ad[6]
entry['bid_token'] = _ad[7]
entry['start_time'] = _ad[8]
entry['status'] = _ad[9]
entry['mod_notes'] = _ad[10]
all_ads.append(entry)
return all_ads or None
|
945e8375025ff19d9d48a717739d041c10d975d3
| 29,672 |
def MDP2Trans(MDPs, J, action_in_states = False, combined = True):
"""
Input: a list (len-N) of trajectory [state matrix [T * 3], actions, rewards] - I need to modify evaluate.ipynb
Output: a list of (s,a,s',u) (combined together)
"""
def MDP2Trans_one_traj(i):
obs, actions, utilities = MDPs[i]
T = obs.shape[0]
result = []
for t in range(J - 1, T - 1):
s = ObsAct2State(obs, actions, t, J)
ss = ObsAct2State(obs, actions, t + 1, J)
a = actions[t]
u = utilities[t]
result.append([s, a, ss, u])
return result
r = rep_seeds(MDP2Trans_one_traj, len(MDPs) - 1)
if combined:
        return flatten(r) # put every patient together; not into a matrix
else:
return r
|
ec7e4eb2a5b399126b88dc1f41cfb318b0f565bf
| 29,673 |
def get_pullrequests(creds, repository_owner, repository_name, pullrequest_state_strings, limit):
"""
Return list of pull request information as follows. The list is empty when no pull requests are found.
Or, return None on any errors.
state State of the Pull Request.
number Pull Request number.
title Title of the Pull Request.
description Pull Request description.
submitter Username who submitted the Pull Request.
date Pull Request submission date.
pr_url URL to the Pull Request page.
pr_diff_url URL to the Pull Request (diff) page.
pullrequest_state_strings is a list of none or any combination of 'OPEN', 'MERGED' and 'DECLINED'.
limit is to specify number of pull requests to query.
"""
if len(pullrequest_state_strings) >= 1:
query = '?' + '+'.join('state=' + s for s in pullrequest_state_strings)
else:
query = '?state=OPEN+state=MERGED+state=DECLINED'
return _get_pullrequests(creds, repository_owner, repository_name, query, limit)
|
77ee49a994caa54bba6c480933dfc836859c086d
| 29,674 |
def remove_nuisance_word_hits(result_str):
"""
>>> a = '#@@@the@@@# cat #@@@in@@@# #@@@the@@@# hat #@@@is@@@# #@@@so@@@# smart'
>>> remove_nuisance_word_hits(a)
'the cat in the hat is so smart'
"""
    ret_val = rcx_remove_nuisance_words.sub(r"\g<word>", result_str)
return ret_val
|
56123d09f08221d8e61d8a0bc9a60d8a493e0d2b
| 29,675 |
from math import asin, cos, radians, sin, sqrt
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
r = 3956 # radius of earth in miles mean of poles and equator radius
return c * r
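# A minimal usage sketch: approximate great-circle distance from New York City to
# Los Angeles (coordinates rounded; the result is in miles because r = 3956 above).
haversine(-74.0060, 40.7128, -118.2437, 34.0522)  # roughly 2,450 miles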
|
04963a9059bc2440da630adad12b9c560319f605
| 29,676 |
from typing import List
from typing import Union
from typing import Optional
import pandas as pd
from sklearn.metrics import mean_squared_error, r2_score
def regression(
method,
x_train_cols: List[Union[str, int]],
y_train_col: List[Union[str, int]],
x_pred_cols: Optional[List[Union[str, int]]] = None,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
print_input=False,
por=False,
):
"""Regression of data."""
for to in y_train_col:
for fro in x_train_cols:
if to == fro:
raise ValueError(
tsutils.error_wrapper(
f"""
You can't have columns in both "x_train_cols", and "y_train_col"
keywords. Instead you have "{to}" in both.
"""
)
)
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
por=por,
)
if print_input is True:
ntsd = tsd.copy()
else:
ntsd = tsd
ntsd = tsutils.asbestfreq(ntsd)
testfreqstr = ntsd.index.freqstr.lstrip("0123456789")
if testfreqstr[0] == "A":
ntsd[ntsd.index.name + "_"] = ntsd.index.year - ntsd.index[0].year
elif testfreqstr[0] == "M":
ntsd[ntsd.index.name + "_"] = (ntsd.index.year - ntsd.index[0].year) * 12 + (
ntsd.index.month - ntsd.index[0].month
)
else:
try:
# In case ntsd.index.freqstr is a multiple, for example "15T".
ntsd[ntsd.index.name + "_"] = (ntsd.index - ntsd.index[0]) // pd.Timedelta(
ntsd.index.freqstr
)
except ValueError:
ntsd[ntsd.index.name + "_"] = (ntsd.index - ntsd.index[0]) // pd.Timedelta(
"1" + ntsd.index.freqstr
)
if x_pred_cols is None:
nx_pred_cols = x_train_cols
else:
nx_pred_cols = x_pred_cols
x_train_cols = tsutils.make_iloc(ntsd.columns, x_train_cols)
y_train_col = tsutils.make_iloc(ntsd.columns, y_train_col)
wtsd = ntsd.iloc[:, x_train_cols + y_train_col]
# Train on 'any' dropna rows
wtsddna = wtsd.dropna()
# Train on last column
y_train = wtsddna.iloc[:, -1].values
# with all other columns
x_train = wtsddna.iloc[:, :-1].values
regr = _FUNCS[method]()
regr.fit(x_train, y_train)
if x_pred_cols is None:
x_pred = x_train
else:
nx_pred_cols = tsutils.make_iloc(ntsd.columns, x_pred_cols)
x_pred = ntsd.iloc[:, nx_pred_cols].dropna()
y_pred = regr.predict(x_pred)
if x_pred_cols is None:
if method == "RANSAC":
regr = regr.estimator_
rdata = []
rdata.append(["Coefficients", regr.coef_])
rdata.append(["Intercept", regr.intercept_])
rdata.append(["Mean squared error", mean_squared_error(y_train, y_pred)])
rdata.append(["Coefficient of determination", r2_score(y_train, y_pred)])
return rdata
result = pd.DataFrame(y_pred, index=x_pred.index)
result = result.reindex(index=wtsd.index)
return tsutils.return_input(print_input, tsd, result)
|
980b462c01698892b858720259d037bc039edf18
| 29,677 |
import secrets
def get_password(length: int, exclude_punctuation: bool = False) -> str:
"""
Return password.
:param int length: password length
:param bool exclude_punctuation: generate password without special chars
:return: password
:rtype: str
"""
validate_length(length)
alphabet = ALPHABET_WO_PUNCTUATION if exclude_punctuation else ALPHABET
sequences = (
REQUIRED_SEQUENCES_WO_PUNCTUATION if exclude_punctuation else REQUIRED_SEQUENCES
)
password = []
for _ in range(0, length):
password.append(secrets.choice(alphabet))
    idx_list = list(range(length))
for sequence in sequences:
idx = secrets.choice(idx_list)
idx_list.remove(idx)
password[idx] = secrets.choice(sequence)
return "".join(password)
|
87b172db33d26e96619eb533ededb9e5eec7b84d
| 29,679 |
def _read_image(filepath, image_size=None):
"""Read an image and optionally resize it (size must be: height, width)."""
cv2 = tfds.core.lazy_imports.cv2
with tf.io.gfile.GFile(filepath, 'rb') as f:
        image = cv2.imdecode(
            np.frombuffer(f.read(), dtype=np.uint8), flags=cv2.IMREAD_GRAYSCALE)
if image_size:
        # Note: cv2.resize expects the size as (width, height), not (height, width).
image = cv2.resize(image, (image_size[1], image_size[0]))
assert image.shape == image_size
return image
|
9d0ee02af36ad452d842a651cccae9f7c30e3d8e
| 29,680 |
def get_txt_version():
"""Get version string from version.txt."""
try:
with open("version.txt", "r") as fp:
return fp.read().strip()
except IOError:
return None
|
62951a878bfb52ae6b00543e1816b9ff298bb907
| 29,681 |
def clear_outliers(points: np.ndarray, confidence:np.ndarray, iterations=1, to_dump=0.05):
"""
clears outliers based on horizontal distance between point and regression line
:param points: array of points in (x,y) form
:param confidence: confidence of points corresponding to points array
:param iterations: number of iterations to clear outliers
:param to_dump: ratio of points to dump (will not go below min points for straight lane)
:return: updated points and confidence arrays with outliers removed
"""
for _ in range(iterations):
# number of points to delete, length of points times proportion plus 1 (round up)
number_deleted = int(len(points) * to_dump) + 1
if len(points) <= MIN_STRAIGHT_LANE_LENGTH:
            # don't reduce below the minimum number of points for a straight lane (MIN_STRAIGHT_LANE_LENGTH)
return points, confidence
# recalculate distances each time to get the updated distances
gradient, y_intercept = linear_regression_matrix(points, confidence)
point_distance = point_distance_wrapper((gradient, y_intercept))
# get all the distances for the points form the current regression line
distances = np.apply_along_axis(point_distance, 1, points)
# sort by distance, need mergesort instead of unstable quicksort to ensure all indices are
# represented, sorted in ascending order, so last points are to be deleted
sorted_distances = np.argsort(distances, kind="mergesort")
points = np.delete(points, sorted_distances[-number_deleted:], 0)
confidence = np.delete(confidence, sorted_distances[-number_deleted:], 0)
return points, confidence
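# A minimal usage sketch with synthetic lane points; assumes the module-level helpers referenced
# above (linear_regression_matrix, point_distance_wrapper, MIN_STRAIGHT_LANE_LENGTH) exist.
import numpy as np

rng = np.random.default_rng(0)
xs = np.arange(50, dtype=float)
pts = np.stack([xs, 2.0 * xs + rng.normal(0.0, 0.5, xs.size)], axis=1)
pts[10] = [10.0, 120.0]                      # inject one gross outlier
conf = np.ones(len(pts))
pts_clean, conf_clean = clear_outliers(pts, conf, iterations=2, to_dump=0.05)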
|
47a7eddfe16ef072036f20e4c0895dfae5ab61d2
| 29,684 |
def parse(content_type):
"""parse the Content-Type header.
"""
return (
Ok(content_type)
.then(lex)
.then(label)
.then(parse_lexeme)
)
|
e0d045d660eb1501be1e658696f4798088cdb201
| 29,685 |
def _getter(path):
"""Loads configparser config"""
config = _configparser()
# ok if config file does not exist
config.read(path, encoding=OPEN['encoding'])
# return dict(config.items())
# behaves like dict
return config
|
98e233865d001a1ce4aeb7c815be433817b1734f
| 29,686 |
def unexpected_exception_handler(request: Request, exc: Exception) -> JSONResponse:
"""Обработчик любых непредвиденных и необработанных ошибок"""
service_code = request.app.service_code
classify_exception_and_send_to_sentry(exc)
error_message = "Возникла непредвиденная ошибка. Пожалуйста, обратитесь к администратору."
content = ExceptionModel(error_message=error_message, service_code=service_code)
return JSONResponse(content.dict(), status_code=400)
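# A minimal registration sketch, assuming a FastAPI application; ExceptionModel and
# classify_exception_and_send_to_sentry are module-level helpers defined elsewhere, and
# service_code is an attribute the surrounding application sets on the app object.
from fastapi import FastAPI

app = FastAPI()
app.service_code = "my-service"  # hypothetical value; the handler reads request.app.service_code
app.add_exception_handler(Exception, unexpected_exception_handler)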
|
dd3254cd3f5073fe6d7222b6c69787302986fbc2
| 29,687 |
import scipy.sparse as sp
def chebyshev_polynomial(X, k):
    # Returns a list of sparse matrices
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
T_k = list()
T_k.append(sp.eye(X.shape[0]).tocsr()) # T0(X) = I
T_k.append(X) # T1(X) = L~
    # Define the Chebyshev recurrence relation
def chebyshev_recurrence(T_k_minus_one, T_k_minus_two, X):
"""
:param T_k_minus_one: T(k-1)(L~)
:param T_k_minus_two: T(k-2)(L~)
:param X: L~
:return: Tk(L~)
"""
        # Convert the input to a CSR (compressed sparse row) matrix
        X_ = sp.csr_matrix(X, copy=True)
        # Recurrence: T_k(L~) = 2 * L~ * T_(k-1)(L~) - T_(k-2)(L~)
return 2 * X_.dot(T_k_minus_one) - T_k_minus_two
for i in range(2, k + 1):
T_k.append(chebyshev_recurrence(T_k[-1], T_k[-2], X))
    # Return the list of Chebyshev polynomials
return T_k
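# A minimal usage sketch: for a small scaled-Laplacian-like matrix, T0 = I, T1 = X and
# T2 = 2*X@X - I by the recurrence above.
import numpy as np

X_demo = sp.csr_matrix(np.array([[0.0, 0.5], [0.5, 0.0]]))
T_demo = chebyshev_polynomial(X_demo, 2)
assert len(T_demo) == 3
print(T_demo[2].toarray())  # equals (2 * X_demo @ X_demo - identity).toarray()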
|
6ced4e56f3351987676161d8265f197025d1bafe
| 29,688 |
def PIsCompatable (in1, in2):
"""
Tells if two CArrays have compatable geometry
returns True or False
* in1 = first input Python CArray
* in2 = second input Python CArray
"""
################################################################
# Checks
if not PIsA(in1):
print("Actually ",in1.__class__)
raise TypeError("in1 MUST be a Python Obit CArray")
if not PIsA(in2):
print("Actually ",in2.__class__)
raise TypeError("in2 MUST be a Python Obit CArray")
return Obit.CArrayIsCompatable(in1.me, in2.me)!=0
# end PIsCompatable
|
5a62a549d5a3a6384fdd4759556ad7d5b9b81f4d
| 29,689 |
from .sqs.connection import AsyncSQSConnection
from typing import Any
def connect_sqs(
aws_access_key_id: str | None = None,
aws_secret_access_key: str | None = None,
**kwargs: Any
) -> AsyncSQSConnection:
"""Return async connection to Amazon SQS."""
return AsyncSQSConnection(
aws_access_key_id, aws_secret_access_key, **kwargs
)
|
35d43e3a5be93846207cea6f410799304bdd8032
| 29,690 |
import json
def load_json_from_string(string):
"""Load schema from JSON string"""
try:
json_data = json.loads(string)
except ValueError as e:
raise ValueError('Given string is not valid JSON: {}'.format(e))
else:
return json_data
|
66f96373a8e02bf69289e5e4594ac319906475f5
| 29,691 |
def advance_searches(string, pos=0, has_strikethrough=False, has_math=False):
"""
These tokens are special cases,
because they start and end with the same character
therefore, we need to re-search as we progress, to reset the opening character
"""
code_match = code_pattern.search(string, pos)
strike_match = math_match = None
if has_strikethrough:
strike_match = Strikethrough.pattern.search(string, pos)
if has_math:
math_match = Math.pattern.search(string, pos)
return code_match, strike_match, math_match
|
c276a8a26892d7b59356defa9c2b7da309ee8568
| 29,692 |
from scipy import stats
import numpy as np
def two_sample_ttest(arr1, arr2):
"""Performs independent two-sample t-test between two arrays"""
# Remove nan if any
arr1 = arr1[~np.isnan(arr1)]
arr2 = arr2[~np.isnan(arr2)]
tval, pval = stats.ttest_ind(arr1, arr2, nan_policy='omit')
sig = get_sig(pval) # print out asterisk
degree_of_freedom = len(arr1) + len(arr2) - 2
msg1= f"t({degree_of_freedom}) = {tval : 3f}"
if pval < 0.001: # mark significance
msg2 = 'p < 0.001'
else:
msg2 = (f"p={pval :.3f}")
msg = msg1 + ', ' + msg2
return tval, pval, msg, sig
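# A minimal usage sketch; assumes the module-level helper get_sig (mapping a p-value to an
# asterisk string) is defined elsewhere.
rng = np.random.default_rng(42)
group_a = rng.normal(0.0, 1.0, 30)
group_b = rng.normal(0.8, 1.0, 30)
tval, pval, msg, sig = two_sample_ttest(group_a, group_b)
print(msg, sig)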
|
6175130dccb5bedcad90c6ac918282043bb47ae1
| 29,693 |
import typing
def parse(instructions: typing.List[str]) -> Wire:
"""Calculate the points upon a wire and create a class from them."""
points = [Point(0, 0)]
for instruction in instructions:
dir = instruction[:1]
dist = int(instruction[1:])
for _ in range(dist):
if dir == "R":
next_point = points[-1].right()
elif dir == "L":
next_point = points[-1].left()
elif dir == "U":
next_point = points[-1].up()
else:
next_point = points[-1].down()
points.append(next_point)
return Wire(points)
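# A minimal usage sketch; Point and Wire are assumed to be defined elsewhere in the module,
# with Point(x, y) exposing right/left/up/down single-step methods.
wire = parse(["R3", "U2", "L1"])
# The resulting Wire wraps the visited points:
# (0,0), (1,0), (2,0), (3,0), (3,1), (3,2), (2,2)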
|
fedd7a49b182e2e9650e512a60ec5d49f71cb03d
| 29,694 |
def one_shot_gamma_alpha_matrix(k, t, U):
"""Assume U is a sparse matrix and only tested on one-shot experiment"""
Kc = np.clip(k, 0, 1 - 1e-3)
gamma = -(np.log(1 - Kc) / t)
alpha = U.multiply((gamma / k)[:, None])
return gamma, alpha
|
c73ba2ae67a80ac1b16d415cea043baccea5bb7c
| 29,695 |
import logging
def generate_device_watchdog_answer(diameter_request,
origin_host,
origin_realm):
"""
Method used with the purpose of handling DWR requests
and sending DWA responses.(Device Watchdog)
Builds the DWA message, the header and AVPs list separately,
then create a Diameter response based on them.
"""
logging.info("Responding to Device Watchdog Request ...")
# Generating a standard Diameter response
generic_response = generate_generic_diameter_message(diameter_request,
origin_host,
origin_realm)
dwa_header = generic_response['header']
dwa_avps = generic_response['avps']
# Customizing it for Device Watchdog Answer
dwa_avps.append(encodeAVP(
        'Result-Code', diameter_base.result_codes['DIAMETER_SUCCESS']))
dwa_avps.append(encodeAVP(
'Origin-State-Id', diameter_request.avps['Origin-State-Id']))
# Create the Diameter response message by joining the header and the AVPs
dwa_message = createRes(dwa_header, dwa_avps)
return dwa_message
|
43ef49f5091d994134e2025c013b90bfb121f1ed
| 29,696 |
from typing import Optional
from typing import Dict
from typing import Union
from typing import Any
def apply(tree: ProcessTree, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> EventLog:
"""
Generate a log by a playout operation
Parameters
---------------
tree
Process tree
parameters
Parameters of the algorithm, including:
- Parameters.NO_TRACES: number of traces of the playout
Returns
--------------
log
Simulated log
"""
if parameters is None:
parameters = {}
no_traces = exec_utils.get_param_value(Parameters.NO_TRACES, parameters, 1000)
log = semantics.generate_log(tree, no_traces=no_traces)
return log
|
26e2c4b476bc3d5f5481f5c4bb193897e276facd
| 29,697 |
import aiohttp
import urllib
async def parse(url: str, session: ClientSession, **kwargs) -> set:
"""Find HREFs in the HTML of `url`."""
found = set()
try:
html = await fetch_html(url=url, session=session, **kwargs)
except (
aiohttp.ClientError,
aiohttp.http_exceptions.HttpProcessingError,
) as e:
logger.error(
"aiohttp exception for %s [%s]: %s",
url,
getattr(e, "status", None),
getattr(e, "message", None),
)
return found
except Exception as e:
logger.exception(
"Non-aiohttp exception occured: %s", getattr(e, "__dict__", {})
)
return found
else:
for link in HREF_RE.findall(html):
try:
abslink = urllib.parse.urljoin(url, link)
except (urllib.error.URLError, ValueError):
logger.exception("Error parsing URL: %s", link)
pass
else:
found.add(abslink)
logger.info("Found %d links for %s", len(found), url)
return found
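# A minimal driver sketch; fetch_html, HREF_RE and logger are assumed to be defined at module
# level as the code above implies.
import asyncio

async def crawl_one(url: str) -> None:
    async with aiohttp.ClientSession() as session:
        links = await parse(url, session)
        print(f"{len(links)} links found on {url}")

# asyncio.run(crawl_one("https://example.com"))  # uncomment to actually fetch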
|
daef240220bb3a4482b306b4322ff9d08a0b2872
| 29,699 |
import logging
def auth(options) -> bool:
"""Set up auth and nothing else."""
objectStore = FB_ObjectStore(options)
if options.get("test", False):
# If we only care if it's valid, just check and leave
auth_token = objectStore.get_cached_auth_token()
# Validate and return cached auth token.
if auth_token and objectStore.validate_auth_token(auth_token):
logging.getLogger("pantri").info("Auth token is valid.")
return True
logging.getLogger("pantri").info("Auth token is invalid.")
return False
objectStore.get_auth_token()
logging.getLogger("pantri").info("Auth token is valid.")
return True
|
be2acdd2564ee05e746ba99fbb10a50e9dc92eb1
| 29,700 |
def _parse_detector(detector):
"""
Check and fix detector name strings.
Parameters
----------
detector : `str`
The detector name to check.
"""
oklist = ['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9',
'n10', 'n11']
altlist = [str(i) for i in range(12)]
if detector in oklist:
return detector
elif detector in altlist:
return 'n' + detector
else:
raise ValueError('Detector string could not be interpreted')
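# A minimal usage sketch: bare numeric strings are normalised to the 'n'-prefixed form.
assert _parse_detector("n3") == "n3"
assert _parse_detector("3") == "n3"
# _parse_detector("b0") raises ValueError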
|
f78d7eb5004b3cb6d3276b0c701263c71668e36e
| 29,701 |
def mobius_area_correction_spherical(tria, mapping):
"""
Find an optimal Mobius transformation for reducing the area distortion of a spherical conformal parameterization
using the method in [1].
Input:
tria : TriaMesh (vertices, triangle) of genus-0 closed triangle mesh
mapping: nv x 3 vertex coordinates of the spherical conformal parameterization
Output:
map_mobius: nv x 3 vertex coordinates of the updated spherical conformal parameterization
x: the optimal parameters for the Mobius transformation, where
f(z) = \frac{az+b}{cz+d}
= ((x(1)+x(2)*1j)*z+(x(3)+x(4)*1j))/((x(5)+x(6)*1j)*z+(x(7)+x(8)*1j))
If you use this code in your own work, please cite the following paper:
[1] G. P. T. Choi, Y. Leung-Liu, X. Gu, and L. M. Lui,
"Parallelizable global conformal parameterization of simply-connected surfaces via partial welding."
SIAM Journal on Imaging Sciences, 2020.
Adopted by Martin Reuter from Matlab code at
https://github.com/garyptchoi/spherical-conformal-map
with this
Copyright (c) 2019-2020, Gary Pui-Tung Choi
https://scholar.harvard.edu/choi
and has been distributed with the Apache 2 License
"""
# Compute the tria areas with normalization
area_t = tria.tria_areas()
area_t = area_t / area_t.sum()
# Project the sphere onto the plane
z = stereographic(mapping)
def area_map(xx):
v = inverse_stereographic(((xx[0]+xx[1]*1j)*z+(xx[2]+xx[3]*1j))/((xx[4]+xx[5]*1j)*z+(xx[6]+xx[7]*1j)))
area_v = TriaMesh(v, tria.t).tria_areas()
return area_v / area_v.sum()
# objective function: mean(abs(log(area_map/area_t)))
def d_area(xx):
a = np.abs(np.log(area_map(xx)/area_t))
return (a[np.isfinite(a)]).mean()
# Optimization setup
x0 = np.array([1, 0, 0, 0, 0, 0, 1, 0]) # initial guess
# lower and upper bounds
bnds = ((-100, 100), (-100, 100), (-100, 100), (-100, 100),
(-100, 100), (-100, 100), (-100, 100), (-100, 100))
# Optimization (may further supply gradients for better result, not yet implemented)
# options = optimoptions('fmincon','Display','iter');
# x = fmincon(d_area,x0,[],[],[],[],lb,ub,[],options);
options = {"disp": True}
result = minimize(d_area, x0, bounds=bnds, options=options)
x = result.x
# obtain the conformal parameterization with area distortion corrected
fz = ((x[0]+x[1]*1j)*z+(x[2]+x[3]*1j))/((x[4]+x[5]*1j)*z+(x[6]+x[7]*1j))
map_mobius = inverse_stereographic(fz)
return map_mobius, result
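# A small self-contained check of the Mobius parameterisation used above: the initial guess
# x0 = [1, 0, 0, 0, 0, 0, 1, 0] reduces f(z) = (1*z + 0)/(0*z + 1) to the identity, so the
# spherical mapping starts out unchanged.
import numpy as np

z_demo = np.array([0.5 + 0.25j, -1.0 + 2.0j])
x_id = [1, 0, 0, 0, 0, 0, 1, 0]
fz_demo = ((x_id[0] + x_id[1] * 1j) * z_demo + (x_id[2] + x_id[3] * 1j)) / (
    (x_id[4] + x_id[5] * 1j) * z_demo + (x_id[6] + x_id[7] * 1j))
assert np.allclose(fz_demo, z_demo)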
|
08d58e2e2ff13533ac733ef49a4f57d4e1d6f41d
| 29,702 |
def rel_ordered(x1,x2,x3,x4):
"""
    given 4 collinear points, return True if the direction
    from x1->x2 is the same as x3->x4
requires x1!=x2, and x3!=x4
"""
if x1[0]!=x2[0]:
i=0 # choose a coordinate which is varying
else:
i=1
assert x1[i]!=x2[i]
assert x3[i]!=x4[i]
return (x1[i]<x2[i]) == (x3[i]<x4[i])
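# A minimal usage sketch with four collinear points on the line y = x.
assert rel_ordered((0, 0), (1, 1), (2, 2), (3, 3)) is True
assert rel_ordered((0, 0), (1, 1), (3, 3), (2, 2)) is False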
|
2649250e2ea2619c7f6c21b8dd2cebaeec10647b
| 29,703 |
def min_rl(din):
"""
A MIN function should "go high" when any of its
inputs arrives. Thus, OR gates are all that is
needed for its implementation.
Input: a list of 1-bit WireVectors
Output: a 1-bit WireVector
"""
if len(din) == 1:
dout = din[0]
else:
dout = din[0] | min_rl(din[1:])
return dout
|
06f0bbce664367307669ddb28c60c65b79de91d3
| 29,704 |
def fetch(u, data, indices, indptr, lmbda):
"""
"""
is_skip = 1
if lmbda > 0:
u0, u1 = indptr[u], indptr[u + 1]
val, ind = data[u0:u1], indices[u0:u1]
if u1 > u0:
is_skip = 0
else:
val = np.empty(0, dtype=data.dtype)
ind = np.empty(0, dtype=np.int32)
return val, ind, is_skip
|
cd1d9ffe7ead7711b5e6cd1f2601c1456ce1baaa
| 29,705 |
def pgrrec(CONST_STRING, lon, lat, alt, re, f):
"""pgrrec(ConstSpiceChar * CONST_STRING, SpiceDouble lon, SpiceDouble lat, SpiceDouble alt, SpiceDouble re, SpiceDouble f)"""
return _cspyce0.pgrrec(CONST_STRING, lon, lat, alt, re, f)
|
a1a9889e8c9e2d4b34e480688aeb8a26cb7699b7
| 29,707 |