content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import hmac
from hashlib import sha1
def get_url(request):
"""
    Build a signed PTV Timetable API URL from a request path (e.g. '/v3/...'),
    using the developer id (DEV_ID) and key (KEY) to compute an HMAC-SHA1 signature.
"""
devId = DEV_ID
key = KEY
request = request + ('&' if ('?' in request) else '?')
raw = request + f"devid={devId}"
raw = raw.encode()
    # The HMAC key must be bytes; encode it first if it is stored as a string.
    hashed = hmac.new(key if isinstance(key, bytes) else key.encode(), raw, sha1)
signature = hashed.hexdigest()
raw = raw.decode()
return 'http://timetableapi.ptv.vic.gov.au'+raw+f'&signature={signature}' | 57e6d8dc6c0f282b227559aed5cd9c1f96f7d5b7 | 16,635 |
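# Example usage (a sketch): DEV_ID and KEY are assumed to be module-level
# credentials issued by PTV, and the route below is purely illustrative.
url = get_url('/v3/routes?route_types=0')
print(url)  # http://timetableapi.ptv.vic.gov.au/v3/routes?route_types=0&devid=...&signature=...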
def _is_mapped_class(cls):
"""Return True if the given object is a mapped class,
:class:`.Mapper`, or :class:`.AliasedClass`."""
if isinstance(cls, (AliasedClass, mapperlib.Mapper)):
return True
if isinstance(cls, expression.ClauseElement):
return False
if isinstance(cls, type):
manager = attributes.manager_of_class(cls)
return manager and _INSTRUMENTOR in manager.info
return False | 7f09c1f4908bb62977de07ad4366fb8e6cc84cc2 | 16,636 |
from bs4 import BeautifulSoup
def get_all_links_in_catalog(html) -> list:
"""Получает список всех ссылок на пункты из каталога."""
_soup = BeautifulSoup(html, 'html.parser')
_items = _soup.find('div', class_='catalog_section_list').find_all('li', class_='name')
links_list = []
for item in _items:
links_list.append(item.find('a', class_='dark_link').get('href'))
return links_list | 53e4fd9aaad8755ddd19328ae5d5f972cfbcdc3c | 16,637 |
def digitize(n):
"""Convert a number to a reversed array of digits."""
l = list(str(n))
n_l = []
for d in l:
n_l.append(int(d))
n_l.reverse()
return n_l | e4355b68da41e4be87ce18b53afb2a406eb120c7 | 16,638 |
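# Worked example: the digits come back least-significant first.
print(digitize(8675309))  # [9, 0, 3, 5, 7, 6, 8]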
def _available_algorithms():
"""Verify which algorithms are supported on the current machine.
This is done by verifying that the required modules and solvers are available.
"""
available = []
for algorithm in ALGORITHM_NAMES:
if "gurobi" in algorithm and not abcrules_gurobi.gb:
continue
if algorithm == "gmpy2-fractions" and not mpq:
continue
available.append(algorithm)
return available | cd9310cb78d780154c56763cdf14573bc67ae7b5 | 16,640 |
import re
def symbols(*names, **kwargs):
"""
Emulates the behaviour of sympy.symbols.
"""
shape=kwargs.pop('shape', ())
s = names[0]
if not isinstance(s, list):
        s = re.split(r'\s|,', s)
res = []
for t in s:
# skip empty strings
if not t:
continue
sym = Symbol(t, shape, **kwargs)
res.append(sym)
res = tuple(res)
if len(res) == 0: # var('')
res = None
elif len(res) == 1: # var('x')
res = res[0]
# otherwise var('a b ...')
return res | bcaf1827ccee67098e619c3ec825f3b1aeb3f798 | 16,641 |
def create_intent(intent, project_id, language_code):
"""Create intent in dialogflow
:param intent: dict, intent for api
:param project_id: str, secret project id
    :param language_code: str, language code of the intent (e.g. 'en')
:return:
"""
client = dialogflow.IntentsClient()
parent = client.project_agent_path(project_id)
response = client.create_intent(parent, intent, language_code=language_code)
return response | 59a150d4456d26f4cd8fa93a2cbfc131278d3ba0 | 16,642 |
from typing import List
def construct_object_types(list_of_oids: List[str]) -> List[hlapi.ObjectType]:
"""Builds and returns a list of special 'ObjectType'
from pysnmp"""
object_types: List[hlapi.ObjectType] = []
for oid in list_of_oids:
object_types.append(hlapi.ObjectType(hlapi.ObjectIdentity(oid)))
return object_types | 24eeb7dbd0de49e702acc574c9264d3e7bcdf904 | 16,643 |
def base_sampler(models, nevents, floating_params=None):
"""
Creates samplers from models.
Args:
models (list(model)): models to sample
        nevents (list(int)): number of events in each sampler
        floating_params (list(parameter), optional): floating parameters in the samplers
Returns:
Samplers
"""
assert all(is_valid_pdf(m) for m in models)
assert len(nevents) == len(models)
if floating_params:
floating_params_names = [f.name for f in floating_params]
samplers = []
fixed_params = []
for m in models:
def to_fix(p):
if floating_params:
return p.name in floating_params_names
else:
return False
fixed = [p for p in m.get_params() if not to_fix(p)]
fixed_params.append(fixed)
for i, (m, p) in enumerate(zip(models, fixed_params)):
sampler = m.create_sampler(n=nevents[i], fixed_params=p)
samplers.append(sampler)
return samplers | af575d4a175239c2af4fe0e61658005a12225e5a | 16,644 |
def menu_maker():
"""Top Menu Maker In each html page
"""
result = "<center>"
for i,item in enumerate(page_name):
if item == "Home":
targets_blank = ""
else:
            targets_blank = ' target="_blank"'
# Hyper Link To Each Page In HTML File
result += '\t<a href="' \
+ actual_name[i] + '.html"' + targets_blank + '>' + name_standard(item) + "</a>\n"
result += " \n"
result += "</center>"
result = result + "\t\t" + break_line # Add Break line to End Of The Menu
return result | 6f9b38926d3eab31d1e5d32a49564f083df4f3cc | 16,645 |
import http
def project_generate_private_link_post(auth, node, **kwargs):
""" creata a new private link object and add it to the node and its selected children"""
node_ids = request.json.get('node_ids', [])
name = request.json.get('name', '')
anonymous = request.json.get('anonymous', False)
if node._id not in node_ids:
node_ids.insert(0, node._id)
nodes = [AbstractNode.load(node_id) for node_id in node_ids]
try:
new_link = new_private_link(
name=name, user=auth.user, nodes=nodes, anonymous=anonymous
)
except ValidationError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
return new_link | bd006f64d02bf36509297b1a0778e3488093c682 | 16,646 |
def access_token_old_api(authen_code):
"""
    Obtain the identity of the logged-in user via this endpoint (appears to be a legacy API).
:param authen_code:
:return:
"""
    # First obtain the app_access_token
app_access_token = _get_app_access_token()
if not app_access_token:
return None
access_token_old_url = cfg.access_token_old_url
headers = {"Content-Type": "application/json"}
payload = {
"app_id": cfg.app_id,
"app_secret": cfg.app_secret,
"app_access_token": app_access_token,
"grant_type": "authorization_code",
"code": authen_code,
}
result = post_http_request(access_token_old_url, headers=headers, payload=payload)
return result | efb34044bc07aee817050ef39e8d8a72da7611fd | 16,647 |
import cv2
def denoising(image):
    """Improve image quality by removing noise and unimportant details."""
denoised = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
return denoised | b5407c1fcd84b49afe5c17e6a221d9da423444f6 | 16,648 |
def teams():
"""Redirect the to the Slack team authentication url."""
return redirect(auth.get_redirect('team')) | 7ea84c5319c7f64a24c7ae42bd0b7467934d8cba | 16,649 |
def _clarans(metric):
"""Clustering Large Applications based on RANdomized Search."""
# choose which implementation to use, hybrid or cpu
get_clusters = _get_clusters(metric, method='cpu')
@jit(nopython=True)
def clarans(data, k, numlocal, maxneighbor):
"""Clustering Large Applications based on RANdomized Search.
Parameters
----------
data : (n,) ndarray
Data set.
k : int
Number of desired clusters.
metric : function
Function to compute pairwise distances.
numlocal : int
Number of times to repeat the search for other local minima.
maxneighbor : int
Maximum number of the neighbors to look at.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object
was assigned, where the cluster number is defined as the object
number of the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Algorithm
---------
1. Choose an arbitrary node from the data set.
2. Consider a random neighbor of the current node.
3. If the random neighbor has a lower error than the current node, set
it as the current node.
4. Repeat step 2-3 ``maxneighbor`` times.
5. Repeat step 1-4 ``numlocal`` times and retain the best clustering.
Notes
-----
The best way to explain CLARANS is via a graph abstraction. In fact,
the process of finding k medoids can be viewed abstractly as searching
through a certain graph. In this graph, a set of k objects is called
node. Two nodes are neighbors if their sets differ by only one object.
        Since a node represents a collection of k objects, they can be seen as
medoids and hence induce a clustering.
Each node can be assigned an error that is defined to be the total
dissimilarity (i.e. sum of distances) between every object and the
medoid of its cluster.
References
----------
.. R.T. Ng, Jiawei Han, "CLARANS: a method for clustering objects for
spatial data mining"
"""
n = data.shape[0]
choices = np.arange(n)
best_medoids = np.empty(k, dtype=np.uint32)
best_error = np.inf
min_dist = 0
for _ in range(numlocal):
# step 1
# choose an arbitrary node as starting medoids and compute its
# error
medoids = np.empty(k, dtype=np.uint32)
for i in range(k):
np.random.shuffle(choices)
medoids[i] = choices[-1]
choices = choices[:-1]
error = 0
for i in range(n):
min_dist = np.inf
for med in medoids:
dist = metric(data[i], data[med])
if dist < min_dist:
min_dist = dist
error += min_dist
for _ in range(maxneighbor):
# step 2
# find a random neighbor, i.e. change only one of the medoids
# with a random object (that is not already a medoid) of the
# whole data set
random_neigh = np.copy(medoids)
np.random.shuffle(choices)
non_med = choices[-1]
non_med_i = np.random.choice(k)
random_neigh[non_med_i] = non_med
# step 3
# compute the error of the random neighbor and compare it with
# the current node (i.e. current medoids)
new_error = 0
for i in range(n):
min_dist = np.inf
for med in random_neigh:
dist = metric(data[i], data[med])
if dist < min_dist:
min_dist = dist
new_error += min_dist
# choose the induced clustering with lower error
if new_error < error:
error = new_error
choices[-1] = medoids[non_med_i]
medoids = random_neigh
# retain the clustering solution with the lowest error
if error < best_error:
best_error = error
best_medoids = medoids
return get_clusters(data, best_medoids)
return clarans | a56321ba094b78eaa6df18917b7c3ad32a3a6bec | 16,650 |
def create_outlier_mask(df, target_var, number_of_stds, grouping_cols=None):
"""
Create a row-wise mask to filter-out outliers based on target_var.
Optionally allows you to filter outliers by group for hier. data.
"""
def flag_outliers_within_groups(df, target_var,
grouping_cols, number_of_stds):
groups = df.groupby(grouping_cols)
means = groups[target_var].transform('mean')
stds = groups[target_var].transform('std')
upper_bound = means + stds * number_of_stds
lower_bound = means - stds * number_of_stds
return df[target_var].between(lower_bound, upper_bound)
def flag_outliers_without_groups(df, target_var, number_of_stds):
mean_val = df[target_var].mean()
std_val = df[target_var].std()
upper_bound = (mean_val + (std_val * number_of_stds))
lower_bound = (mean_val - (std_val * number_of_stds))
return (df[target_var] > lower_bound) & (df[target_var] < upper_bound)
if grouping_cols:
mask = flag_outliers_within_groups(
df=df, target_var=target_var,
number_of_stds=number_of_stds, grouping_cols=grouping_cols
)
else:
mask = flag_outliers_without_groups(
df=df, target_var=target_var,
number_of_stds=number_of_stds
)
return mask | 95a7e3e5a0cb8dcc4aa3da1af7e9cb4111cf6b81 | 16,651 |
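# Illustrative use on a toy frame (column names here are just examples): keep rows
# whose `value` lies within 2 standard deviations of its group mean.
import pandas as pd

toy = pd.DataFrame({"group": ["a"] * 6 + ["b"] * 6,
                    "value": [10, 11, 9, 10, 10, 100, 5, 6, 5, 4, 5, 5]})
mask = create_outlier_mask(toy, target_var="value", number_of_stds=2,
                           grouping_cols=["group"])
print(toy[mask])  # the 100 in group "a" is flagged as an outlier and dropped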
import contextlib
def closing_all(*args):
"""
Return a context manager closing the passed arguments.
"""
return contextlib.nested(*[contextlib.closing(f) for f in args]) | 075056e1a92c63d5c1db0cda68d7cb447868653b | 16,652 |
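# Note: contextlib.nested was removed in Python 3. A rough equivalent using
# contextlib.ExitStack (a sketch, not the original implementation) could be:
import contextlib

@contextlib.contextmanager
def closing_all_py3(*args):
    """Context manager that closes all passed objects on exit."""
    with contextlib.ExitStack() as stack:
        for f in args:
            stack.enter_context(contextlib.closing(f))
        yield args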
import numpy as np
def _non_max_suppression(objects, threshold):
"""Returns a list of indexes of objects passing the NMS.
Args:
objects: result candidates.
threshold: the threshold of overlapping IoU to merge the boxes.
Returns:
    A list of indexes containing the objects that pass the NMS.
"""
if len(objects) == 1:
return [0]
if len(objects) == 0:
return []
boxes = np.array([o.bbox for o in objects])
xmins = boxes[:, 0]
ymins = boxes[:, 1]
xmaxs = boxes[:, 2]
ymaxs = boxes[:, 3]
areas = (xmaxs - xmins) * (ymaxs - ymins)
scores = [o.score for o in objects]
idxs = np.argsort(scores)
selected_idxs = []
while idxs.size != 0:
selected_idx = idxs[-1]
selected_idxs.append(selected_idx)
overlapped_xmins = np.maximum(xmins[selected_idx], xmins[idxs[:-1]])
overlapped_ymins = np.maximum(ymins[selected_idx], ymins[idxs[:-1]])
overlapped_xmaxs = np.minimum(xmaxs[selected_idx], xmaxs[idxs[:-1]])
overlapped_ymaxs = np.minimum(ymaxs[selected_idx], ymaxs[idxs[:-1]])
w = np.maximum(0, overlapped_xmaxs - overlapped_xmins)
h = np.maximum(0, overlapped_ymaxs - overlapped_ymins)
intersections = w * h
unions = areas[idxs[:-1]] + areas[selected_idx] - intersections
ious = intersections / unions
idxs = np.delete(
idxs, np.concatenate(([len(idxs) - 1], np.where(ious > threshold)[0]))
)
return selected_idxs | 9952386f5a6c6f11b1fdbd37eaca6c273ea4b506 | 16,653 |
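# Minimal usage sketch: the snippet only assumes each object has a `bbox`
# (x_min, y_min, x_max, y_max) and a `score`, so a simple namedtuple works
# as a stand-in for the real detection type.
import collections

Detection = collections.namedtuple("Detection", ["bbox", "score"])
candidates = [
    Detection(bbox=(0, 0, 10, 10), score=0.9),
    Detection(bbox=(1, 1, 10, 10), score=0.8),   # heavily overlaps the first box
    Detection(bbox=(20, 20, 30, 30), score=0.7),
]
keep = _non_max_suppression(candidates, threshold=0.5)
print(keep)  # [0, 2]: the overlapping lower-score box is suppressed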
def binary_search(x,l):
""" Esse algorítmo é o algorítmo de busca binária, mas ele retorna
qual o índice o qual devo colocar o elemento para que a lista
permaneça ordenada.
Input: elemento x e lista l
Output: Índice em que o elemento deve ser inserido para manter a ordenação da lista
"""
lo = 0 # Cota inferior inicial (Lower bound)
up = len(l) # Cota superior inicial (Upper bound)
while lo < up:
mid = int((lo+up)/2) #Ponto Médio
if l[mid] < x:
lo = mid + 1
else:
up = mid
return up | 457c403ffeb2eb5529c2552bdbe8d7beee9199f2 | 16,654 |
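# Quick check (illustrative): for a sorted list the returned insertion index
# matches bisect.bisect_left from the standard library.
import bisect
sample = [1, 3, 3, 7, 9]
assert binary_search(3, sample) == bisect.bisect_left(sample, 3)
assert binary_search(8, sample) == bisect.bisect_left(sample, 8)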
def check_abrp(config):
"""Check for geocodio options and return"""
try:
abrpOptions = config.abrp.as_dict()
except:
return {}
options = {}
abrp_keys = ["enable", "api_key", "token"]
for key in abrp_keys:
if key not in abrpOptions.keys():
_LOGGER.error(f"Missing required '{key}' option in 'abrp' settings")
return {}
options[key] = abrpOptions.get(key, None)
return options | fa9c0f1643ae2793cf66498dbb8f27a033edeafd | 16,655 |
import click
def connect(config, job, attach):
"""
Connect to job.
JOB may be specified by name or ID, but ID is preferred.
"""
jobs = config.trainml.run(config.trainml.client.jobs.list())
found = search_by_id_name(job, jobs)
if None is found:
raise click.UsageError("Cannot find specified job.")
if found.type != "notebook":
try:
if attach:
config.trainml.run(found.connect(), found.attach())
return config.trainml.run(found.disconnect())
else:
return config.trainml.run(found.connect())
except:
try:
config.trainml.run(found.disconnect())
except:
pass
raise
else:
if found.status == "waiting for data/model download":
try:
if attach:
config.trainml.run(found.connect(), found.attach())
config.trainml.run(found.disconnect())
click.echo("Launching...", file=config.stdout)
browse(found.notebook_url)
else:
return config.trainml.run(found.connect())
except:
try:
config.trainml.run(found.disconnect())
except:
pass
raise
else:
config.trainml.run(found.wait_for("running"))
click.echo("Launching...", file=config.stdout)
browse(found.notebook_url) | 8a572a92eb9a0cd31af05218dec3ab369109cb31 | 16,656 |
def convert_magicc7_to_openscm_variables(variables, inverse=False):
"""
Convert MAGICC7 variables to OpenSCM variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
If True, convert the other way i.e. convert OpenSCM variables to MAGICC7
variables
Returns
-------
``type(variables)``
Set of converted variables
"""
if inverse:
return apply_string_substitutions(
variables, OPENSCM_TO_MAGICC7_VARIABLES_MAPPING
)
else:
return apply_string_substitutions(
variables, MAGICC7_TO_OPENSCM_VARIABLES_MAPPING
) | 952bca9f07f8e032b33328c1b03470fd3150eabd | 16,657 |
import asyncio
import aiohttp
import pandas as pd
async def fetch_disclosure(start, end):
"""期间沪深二市所有类型的公司公告
Args:
        start (date like): start date
        end (date like): end date
Returns:
list: list of dict
"""
start, end = pd.Timestamp(start), pd.Timestamp(end)
start_str = start.strftime(r'%Y-%m-%d')
end_str = end.strftime(r'%Y-%m-%d')
sem = asyncio.BoundedSemaphore(MAX_WORKER)
tasks = []
async with aiohttp.ClientSession() as session:
for column in COLUMNS.keys():
tasks.append(
_fetch_disclosure(sem, session, column, start_str, end_str))
data = await asyncio.gather(*tasks)
res = []
for d in data:
res.extend(parse_data(d))
return res | efb6b7706ed73c09c65e5d05567b3fdf38aee887 | 16,658 |
from torchvision import transforms as T
from torch.utils import data
def get_loader(
image_dir,
attr_path,
selected_attrs,
crop_size=178,
image_size=128,
batch_size=16,
dataset="CelebA",
mode="train",
affectnet_emo_descr="emotiw",
num_workers=1,
):
"""Build and return a data loader."""
transform = []
if mode == "train":
transform.append(T.RandomHorizontalFlip())
transform.append(T.CenterCrop(crop_size))
transform.append(T.Resize(image_size))
transform.append(T.ToTensor())
transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
transform = T.Compose(transform)
if dataset == "CelebA":
dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode)
elif dataset == "RaFD":
dataset = ImageFolder(image_dir, transform)
elif dataset == "AffectNet":
dataset = AffectNet(image_dir, affectnet_emo_descr, transform, mode)
data_loader = data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=(mode == "train"),
num_workers=num_workers,
)
return data_loader | 082d1b81b73df7c817fad024911fe431f8cf4a74 | 16,659 |
import json
def remove_samples(request, product_id):
"""Removes passed samples from product with passed id.
"""
parent_product = Product.objects.get(pk=product_id)
for temp_id in request.POST.keys():
if temp_id.startswith("product") is False:
continue
temp_id = temp_id.split("-")[1]
remove_sample(product=parent_product, sample_id=temp_id)
# This isn't necessary but it cleans the cache. See lfs.cache listeners
# for more
parent_product.save()
html = [["#samples-inline", manage_samples_inline(request, product_id, as_string=True)]]
result = json.dumps({
"html": html,
"message": _(u"Samples have been removed.")
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json') | e9d0f112f17af463cfe7ddba2bd606d78fb50b3f | 16,660 |
def csu_to_field(field, radar, units='unitless',
long_name='Hydrometeor ID',
standard_name='Hydrometeor ID',
dz_field='ZC'):
"""
Adds a newly created field to the Py-ART
radar object. If reflectivity is a masked array,
make the new field masked the same as reflectivity.
"""
fill_value = -32768
masked_field = np.ma.asanyarray(field)
masked_field.mask = masked_field == fill_value
if hasattr(radar.fields[dz_field]['data'], 'mask'):
setattr(masked_field, 'mask',
np.logical_or(masked_field.mask,
radar.fields[dz_field]['data'].mask))
fill_value = radar.fields[dz_field]['_FillValue']
field_dict = {'data': masked_field,
'units': units,
'long_name': long_name,
'standard_name': standard_name,
'_FillValue': fill_value}
return field_dict | c8052f51bbed2c16c744201b862fa43868d7d527 | 16,661 |
def calculate_com(structure):
"""
Calculates center of mass of the structure (ligand or protein).
Parameters
----------
structure : biopython Structure object
PDB of choice loaded into biopython (only chains of interest).
Returns
-------
A list defining center of mass of the structure.
"""
structure_mass = 0.0
com = np.zeros(3)
for atom in structure.get_atoms():
com = com + np.array(list(atom.get_vector())) * atom.mass
structure_mass += atom.mass
com = com / structure_mass
return com | 35d6ed62d3943dff0aa1ef0c3a0d04b9235b84ac | 16,663 |
def generate_config(context):
""" Generate the deployment configuration. """
resources = []
name = context.properties.get('name', context.env['name'])
resources = [
{
'name': name,
'type': 'appengine.v1.version',
'properties': context.properties
}
]
outputs = [
{
'name': 'name',
'value': '$(ref.{}.name)'.format(name)
},
{
'name': 'createTime',
'value': '$(ref.{}.createTime)'.format(name)
},
{
'name': 'versionUrl',
'value': '$(ref.{}.versionUrl)'.format(name)
}
]
return {'resources': resources, 'outputs': outputs} | 9a997b87a8d4d8f46edbbb9d2da9f523e5e2fdc6 | 16,664 |
def check_regs(region_df, chr_name=None, start_name=None, stop_name=None,
strand_name=None, sample_name=None):
""" Modifies a region dataframe to be coherent with the GMQL data model
:param region_df: a pandas Dataframe of regions that is coherent with the GMQL data model
:param chr_name: (optional) which column of :attr:`~.region_df` is the chromosome
:param start_name: (optional) which column of :attr:`~.region_df` is the start
:param stop_name: (optional) which column of :attr:`~.region_df` is the stop
    :param strand_name: (optional) which column of :attr:`~.region_df` is the strand
    :param sample_name: (optional) which column of :attr:`~.region_df` is the sample id
:return: a modified pandas Dataframe
"""
if sample_name is None:
region_df.index = np.repeat(default_id_sample, len(region_df))
else:
region_df = search_column(region_df, id_sample_aliases,
id_sample_types, 'id_sample', sample_name)
region_df = region_df.set_index("id_sample", drop=True)
region_df = region_df.sort_index()
region_df = search_column(region_df, chr_aliases, chr_types, 'chr', chr_name)
region_df = search_column(region_df, start_aliases, start_types, 'start', start_name)
region_df = search_column(region_df, stop_aliases, stop_types, 'stop', stop_name)
region_df = search_column(region_df, strand_aliases, strand_types, 'strand', strand_name)
return region_df | ea00a9b755c8dc2943717254ecdb3390bbefe288 | 16,665 |
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union
from typing import Dict
from typing import Any
from typing import cast
def build_assets_job(
name: str,
assets: List[OpDefinition],
source_assets: Optional[Sequence[Union[ForeignAsset, OpDefinition]]] = None,
resource_defs: Optional[Dict[str, ResourceDefinition]] = None,
description: Optional[str] = None,
config: Union[ConfigMapping, Dict[str, Any], PartitionedConfig] = None,
tags: Optional[Dict[str, Any]] = None,
) -> JobDefinition:
"""Builds a job that materializes the given assets.
The dependencies between the ops in the job are determined by the asset dependencies defined
in the metadata on the provided asset nodes.
Args:
name (str): The name of the job.
assets (List[OpDefinition]): A list of assets or multi-assets - usually constructed using
the :py:func:`@asset` or :py:func:`@multi_asset` decorator.
source_assets (Optional[Sequence[Union[ForeignAsset, OpDefinition]]]): A list of assets
that are not materialized by this job, but that assets in this job depend on.
resource_defs (Optional[Dict[str, ResourceDefinition]]): Resource defs to be included in
this job.
description (Optional[str]): A description of the job.
Examples:
.. code-block:: python
@asset
def asset1():
return 5
@asset
def asset2(asset1):
                return asset1 + 1
my_assets_job = build_assets_job("my_assets_job", assets=[asset1, asset2])
Returns:
JobDefinition: A job that materializes the given assets.
"""
check.str_param(name, "name")
check.list_param(assets, "assets", of_type=OpDefinition)
check.opt_list_param(source_assets, "source_assets", of_type=(ForeignAsset, OpDefinition))
check.opt_str_param(description, "description")
source_assets_by_key = build_source_assets_by_key(source_assets)
op_defs = build_op_deps(assets, source_assets_by_key.keys())
root_manager = build_root_manager(source_assets_by_key)
return GraphDefinition(
name=name,
node_defs=cast(List[NodeDefinition], assets),
dependencies=op_defs,
description=description,
input_mappings=None,
output_mappings=None,
config=None,
).to_job(
resource_defs=merge_dicts(resource_defs or {}, {"root_manager": root_manager}),
config=config,
tags=tags,
) | 8e2353677e5085f0c1eb53ee24687e020912b2e5 | 16,666 |
def createBinarySearchTree(vs):
"""
Generate a balanced binary search tree based on the given array.
Args:
vs - an integer array
{4, 5, 5, 7, 2, 1, 3}
4
/ \
2 5
/ \ / \
1 3 5 7
"""
def _helper(vs, left, right):
if left > right:
return None
mid = (left + right) >> 1
node = TreeNode(vs[mid])
node.left = _helper(vs, left, mid - 1)
if node.left:
node.left.parent = node
node.right = _helper(vs, mid + 1, right)
if node.right:
node.right.parent = node
return node
vs = sorted(vs)
root = _helper(vs, 0, len(vs) - 1)
return root | 5e1f7723a4b218d980d7d72ca8f949160ff8042d | 16,667 |
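# The snippet above assumes a TreeNode class with `left`, `right` and `parent`
# links; a minimal sketch compatible with createBinarySearchTree (attribute
# names inferred from the usage above) could be:
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
        self.parent = None

root = createBinarySearchTree([4, 5, 5, 7, 2, 1, 3])
print(root.val)  # 4 for this balanced tree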
def remove_end_same_as_start_transitions(df, start_col, end_col):
"""Remove rows corresponding to transitions where start equals end state.
Millington 2009 used a methodology where if a combination of conditions
didn't result in a transition, this would be represented in the model by
specifying a transition with start and end state being the same, and a
transition time of 0 years.
AgroSuccess will handle 'no transition' rules differently, so these dummy
transitions should be excluded.
"""
def start_different_to_end(row):
if row[start_col] == row[end_col]:
return False
else:
return True
return df[df.apply(start_different_to_end, axis=1)] | f4b3ddca74e204ed22c75a4f635845869ded9988 | 16,668 |
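# Illustrative use on a toy table (column names here are just examples):
import pandas as pd
toy = pd.DataFrame({"start": ["pine", "oak", "shrub"],
                    "end":   ["pine", "shrub", "oak"]})
print(remove_end_same_as_start_transitions(toy, "start", "end"))
# keeps only the oak->shrub and shrub->oak rows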
def sieve(iterable, inspector, *keys):
"""Separates @iterable into multiple lists, with @inspector(item) -> k for k in @keys defining the separation.
e.g., sieve(range(10), lambda x: x % 2, 0, 1) -> [[evens], [odds]]
"""
s = {k: [] for k in keys}
for item in iterable:
k = inspector(item)
if k not in s:
raise KeyError(f"Unexpected key <{k}> found by inspector in sieve.")
        s[k].append(item)
return [s[k] for k in keys] | 6ebb76dfb3131342e08a0be4127fba242d126130 | 16,670 |
def get_model(config: BraveConfig) -> embedding_model.MultimodalEmbeddingModel:
"""Construct a model implementing BraVe.
Args:
config: Configuration for BraVe.
Returns:
A `MultimodalEmbeddingModel` to train BraVe.
"""
init_fn, parameterized_fns = _build_parameterized_fns(config)
loss_fn = _build_loss_fn(config, parameterized_fns)
forward_fns = {
'broad_video': parameterized_fns.broad_video_embedding,
'broad_audio': parameterized_fns.broad_audio_embedding,
'narrow_video': parameterized_fns.narrow_video_embedding,
}
return embedding_model.MultimodalEmbeddingModel(
init_fn=init_fn,
forward_fns=forward_fns,
loss_fn=loss_fn,
evaluate_fn=_build_eval_fn(forward_fns),
train_dataset_builder_fn=_train_dataset_builder(config),
) | eceff13cf9ec5bd5cdd126af52bbd4eb6fad6ebe | 16,671 |
def upilab6_1_5 () :
"""
    6.1.5. Exercise UpyLaB 6.2 - Green blue red path
    (Based on an idea by Jacky Trinh, 19/02/2018)
    Mr Germain is a very old person. He would like to prepare a shopping list in advance. Having a
    rather tight budget, he wants his shopping list to stay within his means. His only small problem
    is that his eyesight is very poor, so he cannot read the price associated with each product in
    the shopping catalogue.
    Write a function calcul_prix(produits, catalogue) where:
    produits is a dictionary whose keys are the products Mr Germain wants and whose values are the
    desired quantity of each of them,
    catalogue is a dictionary containing all the products of the shop with their associated price.
    The function returns the total amount of Mr Germain's purchases.
    Example: the following call of the function:
    calcul_prix({"brocoli":2, "mouchoirs":5, "bouteilles d'eau":6},
                {"brocoli":1.50, "bouteilles d'eau":1, "bière":2,
                 "savon":2.50, "mouchoirs":0.80})
    must return: 13.0
"""
def calcul_prix(produits, catalogue):
somme = 0
for p in produits:
somme += catalogue[p] * produits[p]
return somme
test = [({'pack de fruits': 1, 'poisson': 2, 'jambon': 1, 'citron': 1, 'tomate': 1, 'pâtes': 1, 'sucre': 1,
'pack de légumes': 1, 'café': 1, 'brocoli': 1, 'déodorant': 1, 'bière': 1},
{'confiture': 3.15, 'vin': 6.3, 'poisson': 6.45, 'jambon': 2.1, 'pain': 1.25, 'shampooing': 2.5,
"bouteilles d'eau": 1, 'tomate': 0.75, 'yaourts': 2.85, 'sucre': 0.65, 'pack de légumes': 4.2,
'café': 4.75, 'brocoli': 1.5, 'riz': 3.1, 'jus de fruits': 2.25, 'déodorant': 2.2, 'dentifrice': 1.95,
'fromage': 2.65, 'chocolats': 3.2, 'pack de fruits': 3.3, 'viande': 5.2, 'petits gâteaux': 4.35,
'citron': 0.9, 'mouchoirs': 0.8, 'frites': 3.55, 'farine': 0.95, 'pâtes': 1.1, 'savon': 1.9,
'bière': 2, 'huile': 1.65}),
({'chocolats': 1, 'jambon': 1, 'citron': 1, 'fromage': 2, 'yaourts': 1, 'pâtes': 2, 'savon': 1,
'pack de légumes': 1, 'café': 2, 'brocoli': 1, 'riz': 2, 'mouchoirs': 1},
{'confiture': 3.15, 'vin': 6.3, 'poisson': 6.45, 'jambon': 2.1, 'pain': 1.25, 'shampooing': 2.5,
"bouteilles d'eau": 1, 'tomate': 0.75, 'yaourts': 2.85, 'sucre': 0.65, 'pack de légumes': 4.2,
'café': 4.75, 'brocoli': 1.5, 'riz': 3.1, 'jus de fruits': 2.25, 'déodorant': 2.2, 'dentifrice': 1.95,
'fromage': 2.65, 'chocolats': 3.2, 'pack de fruits': 3.3, 'viande': 5.2, 'petits gâteaux': 4.35,
'citron': 0.9, 'mouchoirs': 0.8, 'frites': 3.55, 'farine': 0.95, 'pâtes': 1.1, 'savon': 1.9, 'bière': 2,
'huile': 1.65})]
reponse =[36.35, 40.650000000000006]
    for produits, catalogue in test:
        print("Grandpa needs: ")
        for article in produits:
            print(produits[article], " x item: ", article)
        cout = calcul_prix(produits, catalogue)
        print("this will cost", cout)
        print("test passed? : ", cout == reponse[test.index((produits, catalogue))]) | 198a11e4059c39550bb398a473711073677a41d4 | 16,672 |
import torch
def construct_filtering_input_data(xyz_s, xyz_t, data, overlapped_pair_tensors, dist_th=0.05, mutuals_flag=None):
"""
Prepares the input dictionary for the filtering network
Args:
xyz_s (torch tensor): coordinates of the sampled points in the source point cloud [b,n,3]
        xyz_t (torch tensor): coordinates of the correspondences from the target point cloud [b,n,3]
data (dict): input data from the data loader
dist_th (float): distance threshold to determine if the correspondence is an inlier or an outlier
mutuals (torch tensor): torch tensor of the mutually nearest neighbors (can be used as side information to the filtering network)
Returns:
filtering_data (dict): input data for the filtering network
"""
filtering_data = {}
Rs, ts = extract_transformation_matrices(data['T_global_0'], overlapped_pair_tensors)
ys = transformation_residuals(xyz_s, xyz_t, Rs, ts)
xs = torch.cat((xyz_s,xyz_t),dim=-1) # [b, n, 6]
if mutuals_flag is not None:
xs = torch.cat((xs,mutuals_flag.reshape(-1,1)), dim=-1) # [b, n, 7]
    # Threshold ys based on the distance threshold
ys_binary = (ys < dist_th).type(xs.type())
# Construct the data dictionary
filtering_data['xs'] = xs
filtering_data['ys'] = ys
filtering_data['ts'] = ts
filtering_data['Rs'] = Rs
return filtering_data | ca316834cc87e1527e4563407138aa92a46b92a3 | 16,673 |
import numpy as np
def rmean(x, N):
    """Running mean of x over a window of size N, cutting off the edge effects."""
s = int(N-1)
return np.convolve(x, np.ones((N,))/N)[s:-s] | eb34bd21523e685184155e65ccddc34e2eb6a428 | 16,674 |
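# Illustrative use: smooth a noisy series with a 5-sample running mean.
x = np.sin(np.linspace(0, 6, 200)) + 0.1 * np.random.randn(200)
smoothed = rmean(x, 5)
print(len(x), len(smoothed))  # the smoothed series is shorter because the edges are cut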
def add_variant_to_existing_lines(group, variant, total_quantity):
"""
Adds variant to existing lines with same variant.
Variant is added by increasing quantity of lines with same variant,
as long as total_quantity of variant will be added
or there is no more lines with same variant.
Returns quantity that could not be fulfilled with existing lines.
"""
# order descending by lines' stock available quantity
lines = group.lines.filter(
product=variant.product, product_sku=variant.sku,
stock__isnull=False).order_by(
F('stock__quantity_allocated') - F('stock__quantity'))
quantity_left = total_quantity
for line in lines:
quantity = (
line.stock.quantity_available
if quantity_left > line.stock.quantity_available
else quantity_left)
line.quantity += quantity
line.save()
Stock.objects.allocate_stock(line.stock, quantity)
quantity_left -= quantity
if quantity_left == 0:
break
return quantity_left | 1e958db4c684f0bf3f2d821fc06f422cc60d0168 | 16,675 |
def calculate_position(c, t):
"""
Calculates a position given a set of quintic coefficients and a time.
Args
c: List of coefficients generated by a quintic polynomial
trajectory generator.
t: Time at which to calculate the position
Returns
Position
"""
return c[0] * t**5 + c[1] * t**4 + c[2] * t**3 + c[3] * t**2 + c[4] * t + c[5] | 927737b41006df13e7bf751b06756eea02542491 | 16,676 |
def get_dqa(df):
"""Method to get DQA issues."""
try:
df0 = df[(df.dob == '') | (df.dqa_sex != 'OK') |
(df.dqa_age != 'OK') | (df.case_status == 'Pending')]
df1 = df0[['cpims_id', 'child_names', 'age', 'case_category',
'dqa_sex', 'dqa_dob', 'dqa_age', 'case_status',
'case_date']].drop_duplicates()
# print(df1)
except Exception as e:
print('Error getting data frame - %s' % (e))
brdf = Blank()
brdf.index = []
return brdf
else:
return df1 | f2c30e87937ce4fac1dd00cd597ee52946d80d07 | 16,677 |
import pickle
def get_3C_coords(name):
"""
Formatted J2000 right ascension and declination and IAU name
Returns the formatted J2000 right ascension and declination and IAU name
given the 3C name.
Example
>>> ra,dec,iau = get_3C_coords('3C286')
>>> print ra,dec,iau
13h31m08.287984s 30d30'32.958850" 1331+305
@param name : 3C name, like 3C123
@return: ra, dec, IAU_name
"""
dbfile = open(cal_dir+'3C_VLA_cals','r')
data = pickle.load(dbfile)
dbfile.close()
return data[name] | 1e48ca0535c6cdb5eb2330f3dcfd666e40eef33f | 16,678 |
import json
def get(player):
"""Get the cipher that corresponding to the YouTube player version.
Args:
player (dict): Contains the 'sts' value and URL of the YouTube player.
Note:
If the cipher is missing in known ciphers, then the 'update' method will be used.
"""
if DIR.exists() and CIPHERS.exists():
try:
with CIPHERS.open('r') as file:
ciphers = json.load(file)
cipher = ciphers.get(player['sts'])
if cipher is not None:
return cipher
else:
return update(player)
except json.decoder.JSONDecodeError:
return update(player)
else:
return update(player) | dd658d8aad775fa7871e3efa642b0aad89f8f801 | 16,679 |
def divide(x, y):
"""A version of divide that also rounds."""
return round(x / y) | 1bf9e5859298886db7c928613f459f163958ca7b | 16,680 |
def create_root_ca_cert(root_common_name, root_private_key, days=365):
"""
This method will create a root ca certificate.
:param root_common_name: The common name for the certificate.
:param root_private_key: The private key for the certificate.
:param days: The number of days for which the certificate is valid. The default is 1 year or 365 days.
:return: The root certificate.
:rtype: :class:`x509.Certificate`
"""
file_root_certificate = "demoCA/newcerts/ca_cert.pem"
root_public_key = root_private_key.public_key()
subject = x509.Name(
[x509.NameAttribute(NameOID.COMMON_NAME, str.encode(root_common_name).decode("utf-8"))]
)
builder = create_cert_builder(
subject=subject, issuer_name=subject, public_key=root_public_key, days=days, is_ca=True
)
root_cert = builder.sign(
private_key=root_private_key, algorithm=hashes.SHA256(), backend=default_backend()
)
with open(file_root_certificate, "wb") as f:
f.write(root_cert.public_bytes(serialization.Encoding.PEM))
return root_cert | 5bf83b8ba56c6dde9f6c2ed022c113350425aa33 | 16,681 |
def hist1d(arr, bins=None, amp_range=None, weights=None, color=None, show_stat=True, log=False,\
figsize=(6,5), axwin=(0.15, 0.12, 0.78, 0.80),\
title=None, xlabel=None, ylabel=None, titwin=None):
"""Makes historgam from input array of values (arr), which are sorted in number of bins (bins) in the range (amp_range=(amin,amax))
"""
#print 'hist1d: title=%s, size=%d' % (title, arr.size)
if arr.size==0: return None, None, None
fig = plt.figure(figsize=figsize, dpi=80, facecolor='w', edgecolor='w', frameon=True)
if titwin is not None: fig.canvas.set_window_title(titwin)
elif title is not None: fig.canvas.set_window_title(title)
axhi = fig.add_axes(axwin)
hbins = bins if bins is not None else 100
hi = axhi.hist(arr.ravel(), bins=hbins, range=amp_range, weights=weights, color=color, log=log) #, log=logYIsOn)
if amp_range is not None: axhi.set_xlim(amp_range) # axhi.set_autoscale_on(False) # suppress autoscailing
if title is not None: axhi.set_title(title, color='k', fontsize=20)
if xlabel is not None: axhi.set_xlabel(xlabel, fontsize=14)
if ylabel is not None: axhi.set_ylabel(ylabel, fontsize=14)
if show_stat:
weights, bins, patches = hi
add_stat_text(axhi, weights, bins)
return fig, axhi, hi | c74771de0df0e9f4d65490a09346d2af18d53cc7 | 16,682 |
def format_validate_parameter(param):
"""
Format a template parameter for validate template API call
Formats a template parameter and its schema information from the engine's
internal representation (i.e. a Parameter object and its associated
Schema object) to a representation expected by the current API (for example
to be compatible to CFN syntax).
"""
# map of Schema object types to API expected types
schema_to_api_types = {
param.schema.STRING: api.PARAM_TYPE_STRING,
param.schema.NUMBER: api.PARAM_TYPE_NUMBER,
param.schema.LIST: api.PARAM_TYPE_COMMA_DELIMITED_LIST,
param.schema.MAP: api.PARAM_TYPE_JSON,
param.schema.BOOLEAN: api.PARAM_TYPE_BOOLEAN
}
res = {
api.PARAM_TYPE: schema_to_api_types.get(param.schema.type,
param.schema.type),
api.PARAM_DESCRIPTION: param.description(),
api.PARAM_NO_ECHO: 'true' if param.hidden() else 'false',
api.PARAM_LABEL: param.label()
}
if param.has_value():
res[api.PARAM_DEFAULT] = param.value()
constraint_description = []
# build constraints
for c in param.schema.constraints:
if isinstance(c, constr.Length):
if c.min is not None:
res[api.PARAM_MIN_LENGTH] = c.min
if c.max is not None:
res[api.PARAM_MAX_LENGTH] = c.max
elif isinstance(c, constr.Range):
if c.min is not None:
res[api.PARAM_MIN_VALUE] = c.min
if c.max is not None:
res[api.PARAM_MAX_VALUE] = c.max
elif isinstance(c, constr.AllowedValues):
res[api.PARAM_ALLOWED_VALUES] = list(c.allowed)
elif isinstance(c, constr.AllowedPattern):
res[api.PARAM_ALLOWED_PATTERN] = c.pattern
elif isinstance(c, constr.CustomConstraint):
res[api.PARAM_CUSTOM_CONSTRAINT] = c.name
if c.description:
constraint_description.append(c.description)
if constraint_description:
res[api.PARAM_CONSTRAINT_DESCRIPTION] = " ".join(
constraint_description)
return res | 4ed21c80bf567beca448065089bfe22fef6cfb17 | 16,683 |
import string
def get_template(name):
"""Retrieve the template by name
Args:
name: name of template
Returns:
:obj:`string.Template`: template
"""
file_name = "{name}.template".format(name=name)
data = resource_string("pyscaffoldext.beeproject.templates", file_name)
return string.Template(data.decode("UTF-8")) | 933e597b48b5ed01a29d191fd0fe04371b1baeb6 | 16,684 |
def box3d_overlap(boxes, qboxes, criterion=-1, z_axis=1, z_center=1.0):
"""kitti camera format z_axis=1.
"""
bev_axes = list(range(7))
bev_axes.pop(z_axis + 3)
bev_axes.pop(z_axis)
# t = time.time()
# rinc = box_np_ops.rinter_cc(boxes[:, bev_axes], qboxes[:, bev_axes])
rinc = rotate_iou_gpu_eval(boxes[:, bev_axes], qboxes[:, bev_axes], 2)
# print("riou time", time.time() - t)
box3d_overlap_kernel(boxes, qboxes, rinc, criterion, z_axis, z_center)
return rinc | 45aa39e9f55f8198ccbe5faf6a00cf27279057fa | 16,685 |
def _apply_graph_transform_tool_rewrites(g, input_node_names,
output_node_names):
# type: (gde.Graph, List[str], List[str]) -> tf.GraphDef
"""
Use the [Graph Transform Tool](
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/
graph_transforms/README.md)
to perform a series of pre-deployment rewrites.
Args:
g: GDE representation of the core graph.
input_node_names: Names of placeholder nodes that are used as inputs to
the graph for inference. Placeholders NOT on this list will be
considered dead code.
output_node_names: Names of nodes that produce tensors that are outputs
of the graph for inference purposes. Nodes not necessary to produce
these tensors will be considered dead code.
Returns: GraphDef representation of rewritten graph.
"""
# Invoke the Graph Transform Tool using the undocumented Python APIs under
# tensorflow.tools.graph_transforms
after_tf_rewrites_graph_def = graph_transforms.TransformGraph(
g.to_graph_def(),
inputs=input_node_names,
outputs=output_node_names,
# Use the set of transforms recommended in the README under "Optimizing
# for Deployment"
transforms=['strip_unused_nodes(type=float, shape="1,299,299,3")',
'remove_nodes(op=Identity, op=CheckNumerics)',
'fold_constants(ignore_errors=true)',
'fold_batch_norms',
'fold_old_batch_norms']
)
return after_tf_rewrites_graph_def | 15d9609357d45fd164fd1569d35669148e66acd8 | 16,686 |
def big_bcast(comm, objs, root=0, return_split_info=False, MAX_BYTES=INT_MAX):
"""
Broadcast operation that can exceed the MPI limit of ~4 GiB.
See documentation on :meth:`big_gather` for details.
Parameters
----------
comm: mpi4py.MPI.Intracomm
MPI communicator to use.
objs: objects
        Data to broadcast from the root process.
root: int
        Rank of the process that holds the data to broadcast.
return_split_info: bool
On root process, also a return a dictionary describing
how the data were split. Used for testing.
MAX_BYTES: int
Maximum bytes per chunk.
Defaults to the INT_MAX of 32 bit integers. Used for testing.
Returns
-------
    object:
        The broadcast data. After this call, every process holds a full copy of
        `objs`.
dict:
If return_split_info, the root process also gets a dictionary containing:
- ranges: A list of tuples, giving the start and end byte of each chunk.
- MAX_BYTES: The size limit that was used.
Notes
-----
Running this on MPI.COMM_WORLD means that every process gets a full copy of
`objs`, potentially using up available memory. This function is currently used
to send large data once to each node, to be put in shared memory.
"""
bufsize = None
nopickle = False
shape = None
dtype = None
if comm.rank == root:
if isinstance(objs, np.ndarray):
shape = objs.shape
dtype = objs.dtype
buf = objs.tobytes()
nopickle = True
else:
buf = dumps(objs)
bufsize = len(buf)
# Sizes of send buffers to be sent from each rank.
bufsize = comm.bcast(bufsize, root=root)
nopickle = comm.bcast(nopickle, root=root)
if nopickle:
shape = comm.bcast(shape, root=root)
dtype = comm.bcast(dtype, root=root)
if comm.rank != root:
buf = np.empty(bufsize, dtype=bytes)
# Ranges of output bytes for each chunk.
start = 0
end = 0
ranges = []
while end < bufsize:
end = min(start + MAX_BYTES, bufsize)
ranges.append((start, end))
start += MAX_BYTES
for start, end in ranges:
comm.Bcast([buf[start:end], MPI.BYTE], root=root)
if nopickle:
result = np.frombuffer(buf, dtype=dtype)
result = result.reshape(shape)
else:
result = loads(buf)
split_info_dict = {'MAX_BYTES': MAX_BYTES, 'ranges': ranges}
if return_split_info:
return result, split_info_dict
return result | 341591b207ef793b32e6b727f14533dbe119312d | 16,687 |
def get_task(appname, taskqueue, identifier):
"""Gets identified task in a taskqueue
Request
-------
```
GET http://asynx.host/apps/:appname/taskqueues/:taskqueue/tasks/:identifier
```
Parameters:
- appname: url param, string, the application name
under which the queue lies
- taskqueue: url param, string, the name of the taskqueue
in which the task belongs
- identifier: url param, string, the identifier to the task.
the identifier can be:
- id, form: {integer} or id:{integer};
- uuid, form: uuid:{string}
- cname, form: cname:{string}
Request body:
Do not supply a request body with this method
Response
--------
Task resource same as `insert_task`.
"""
try:
kind, kind_id = validate(forms.identifier_form, identifier)
except MultipleInvalid as e:
raise IdentifierNotFound(str(e))
tq = TaskQueue(appname, taskqueue)
if kind == 'id':
task = tq.get_task(kind_id)
elif kind == 'uuid':
task = tq.get_task_by_uuid(kind_id)
elif kind == 'cname':
task = tq.get_task_by_cname(kind_id)
return jsonify(task) | c11aadab178776a6246163f2146e9a91d949e3bc | 16,688 |
def assign_style_props(df, color=None, marker=None, linestyle=None,
cmap=None):
"""Assign the style properties for a plot
Parameters
----------
df : pd.DataFrame
data to be used for style properties
"""
if color is None and cmap is not None:
        raise ValueError('`color` must be provided when using the `cmap` argument')
# determine color, marker, and linestyle for each line
n = len(df[color].unique()) if color in df.columns else \
len(df[list(set(df.columns) & set(IAMC_IDX))].drop_duplicates())
defaults = default_props(reset=True, num_colors=n, colormap=cmap)
props = {}
rc = run_control()
kinds = [('color', color), ('marker', marker), ('linestyle', linestyle)]
for kind, var in kinds:
rc_has_kind = kind in rc
if var in df.columns:
rc_has_var = rc_has_kind and var in rc[kind]
props_for_kind = {}
for val in df[var].unique():
if rc_has_var and val in rc[kind][var]:
props_for_kind[val] = rc[kind][var][val]
# cycle any way to keep defaults the same
next(defaults[kind])
else:
props_for_kind[val] = next(defaults[kind])
props[kind] = props_for_kind
# update for special properties only if they exist in props
if 'color' in props:
d = props['color']
values = list(d.values())
# find if any colors in our properties corresponds with special colors
# we know about
overlap_idx = np.in1d(values, list(PYAM_COLORS.keys()))
if overlap_idx.any(): # some exist in our special set
keys = np.array(list(d.keys()))[overlap_idx]
values = np.array(values)[overlap_idx]
# translate each from pyam name, like AR6-SSP2-45 to proper color
# designation
for k, v in zip(keys, values):
d[k] = PYAM_COLORS[v]
# replace props with updated dict without special colors
props['color'] = d
return props | 93bd50e81a988594a42bce26a48d9d24e0e9c6ba | 16,690 |
def to_dbtext(text):
"""Helper to turn a string into a db.Text instance.
Args:
text: a string.
Returns:
A db.Text instance.
"""
if isinstance(text, unicode):
# A TypeError is raised if text is unicode and an encoding is given.
return db.Text(text)
else:
try:
return db.Text(text, encoding='utf-8')
except UnicodeDecodeError:
return db.Text(text, encoding='latin-1') | 74704f42e8cb05be24df3b32e8964382da9c488e | 16,691 |
import zmq
import time
def zmq_init(pub_port, sub_port_list):
"""
Initialize the ZeroMQ publisher and subscriber.
`My` publisher publishes `my` data to the neighbors. `My` subscriber listen
to the ports of other neighbors. `sub_port_list` stores all the possible
neighbors' TCP ports.
The data packs are wrapped as an XBee interface, compatable with the XBee
transmission and reception functions in this module.
Args:
pub_port(str/int): TCP port for the publisher.
        sub_port_list(dict): mapping whose values are the TCP ports for the subscriber to listen to.
Returns:
list: `my` publisher and `my` subscriber (i.e. listener).
"""
pub = zmq.Context().socket(zmq.PUB)
pub.bind('tcp://*:%s' % pub_port)
sub = zmq.Context().socket(zmq.SUB)
for port in sub_port_list:
if sub_port_list[port] != pub_port:
sub.connect('tcp://127.0.0.1:%s' % sub_port_list[port])
time.sleep(0.05)
sub.setsockopt(zmq.SUBSCRIBE, 'XBEE')
return [pub, sub] | fcde81e7387d49e99cd864cea233b1ba02ac679c | 16,692 |
def dot(u, v):
"""
Returns the dot product of the two vectors.
>>> u1 = Vec([1, 2])
>>> u2 = Vec([1, 2])
>>> u1*u2
5
>>> u1 == Vec([1, 2])
True
>>> u2 == Vec([1, 2])
True
"""
assert u.size == v.size
    total = 0
    for compv, compu in zip(u.store, v.store):
        total = total + compv * compu
    return total | e431800750c8f7c14d7412753814e2498fdd3c09 | 16,694 |
def isvalid(number, numbers, choices=2):
"""Meh
>>> isvalid(40, (35, 20, 15, 25, 47))
True
>>> isvalid(62, (20, 15, 25, 47, 40))
True
>>> isvalid(127, (182, 150, 117, 102, 95))
False
"""
return number in sums(numbers, choices) | c32ee0fe1509c0c1f48bdf8f6b9f8fe5b00fb8f8 | 16,695 |
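# isvalid() relies on a sums() helper that is not shown in the snippet; a
# plausible sketch (assuming it returns the sums of all `choices`-sized
# combinations) could be:
from itertools import combinations

def sums(numbers, choices=2):
    return {sum(c) for c in combinations(numbers, choices)}

print(isvalid(40, (35, 20, 15, 25, 47)))       # True
print(isvalid(127, (182, 150, 117, 102, 95)))  # False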
def from_rotation_matrix(rotation_matrix: type_alias.TensorLike,
name: str = "quaternion_from_rotation_matrix"
) -> tf.Tensor:
"""Converts a rotation matrix representation to a quaternion.
Warning:
This function is not smooth everywhere.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two
dimensions represent a rotation matrix.
name: A name for this op that defaults to "quaternion_from_rotation_matrix".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `rotation_matrix` is not supported.
"""
with tf.name_scope(name):
rotation_matrix = tf.convert_to_tensor(value=rotation_matrix)
shape.check_static(
tensor=rotation_matrix,
tensor_name="rotation_matrix",
has_rank_greater_than=1,
has_dim_equals=((-1, 3), (-2, 3)))
rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized(
rotation_matrix)
trace = tf.linalg.trace(rotation_matrix)
eps_addition = asserts.select_eps_for_addition(rotation_matrix.dtype)
rows = tf.unstack(rotation_matrix, axis=-2)
entries = [tf.unstack(row, axis=-1) for row in rows]
def tr_positive():
sq = tf.sqrt(trace + 1.0) * 2. # sq = 4 * qw.
qw = 0.25 * sq
qx = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
qy = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
qz = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_1():
sq = tf.sqrt(1.0 + entries[0][0] - entries[1][1] - entries[2][2] +
eps_addition) * 2. # sq = 4 * qx.
qw = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
qx = 0.25 * sq
qy = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
qz = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_2():
sq = tf.sqrt(1.0 + entries[1][1] - entries[0][0] - entries[2][2] +
eps_addition) * 2. # sq = 4 * qy.
qw = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
qx = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
qy = 0.25 * sq
qz = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_3():
sq = tf.sqrt(1.0 + entries[2][2] - entries[0][0] - entries[1][1] +
eps_addition) * 2. # sq = 4 * qz.
qw = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
qx = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
qy = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
qz = 0.25 * sq
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_idx(cond):
cond = tf.expand_dims(cond, -1)
cond = tf.tile(cond, [1] * (rotation_matrix.shape.ndims - 2) + [4])
return cond
where_2 = tf.where(
cond_idx(entries[1][1] > entries[2][2]), cond_2(), cond_3())
where_1 = tf.where(
cond_idx((entries[0][0] > entries[1][1])
& (entries[0][0] > entries[2][2])), cond_1(), where_2)
quat = tf.where(cond_idx(trace > 0), tr_positive(), where_1)
return quat | 2eab1984206c57ec64c4be2b3652008773d9c037 | 16,696 |
def mark_text(text):
"""Compact rules processor"""
attrs = {}
rules = []
weight = 0
attrs['len'] = len(text)
text = text.replace('.', ' ').replace(',', ' ').replace(u'№', ' ').strip().lower()
words = text.split()
textjunk = []
spaced = 0
attrs['wl'] = len(words)
attrs['junkl'] = 0
attrs['mwords'] = []
for w in words:
n = len(w)
curw = 0
# is spaced
if len(w) == 1:
if w.isdigit():
if n > 3:
curw +=1
if 'SP' not in rules: rules.append('SP')
spaced = 0
else:
spaced += 1
else:
if spaced > 3:
curw +=1
if 'SP' not in rules: rules.append('SP')
spaced = 0
# is misspelled ?
if n in MISSPELL_WORDS.keys():
if w in MISSPELL_WORDS[n]:
curw += 1
if 'MS' not in rules: rules.append('MS')
# is latin word
pat, latweight = is_latin_word(w)
if latweight > 0:
curw += latweight
if 'LT' not in rules: rules.append('LT')
junk = 0
# is this text junk
if curw > 0:
junk = 1
else:
if n in ALLDICT_WORDS.keys():
if w in ALLDICT_WORDS[n]:
junk = 1
elif len(w) < 3 or w.isdigit():
junk = 1
attrs['junkl'] += junk
if junk == 0:
attrs['mwords'].append(w)
weight += curw
if spaced > 3:
if 'SP' not in rules: rules.append('SP')
weight += 1
isjunk = attrs['wl'] == attrs['junkl']
attrs['junksh'] = attrs['junkl'] * 100.0 / attrs['wl'] if attrs['wl'] > 0 else 0
# for junk in textjunk:
# if not junk: isjunk = False
if isjunk:
weight += 10
rules.append('JU')
return weight, rules, attrs | 7287535d3a9c3bb302f9cc98ca6e7fa2ec4c9a40 | 16,697 |
def model_flux(t_dec,B,P_max,R,Ne,d_l,z,mp,me,e,c,sigma_t,time,nu,Gamma,E_k,
n,eps_b,eps_e,p,j_ang):
""" Function for deriving the flux for the spectrum or light curve at
given times and frequencies """
# calculate lorentz factors, characteristic frequencies and
# jet break time
gamma_m = Gamma*eps_e*((p-2)/(p-1))*(mp/me)
gamma_c = (6*np.pi*me*c)/(sigma_t*Gamma*B**2*time)
gamma_crit = (6*np.pi*me*c)/(sigma_t*Gamma*B**2*t_dec)
t_jb = 86400*(((1/0.057)*j_ang*((1+z)/2)**(3/8)*(E_k/1e53)**(1/8)*
(n/0.1)**(-1/8))**(8/3))
nu_m0 = (gamma_m**2*Gamma*e*B)/(2*np.pi*me*c)
nu_c0 = (gamma_c**2*Gamma*e*B)/(2*np.pi*me*c)
flux_max = (Ne*P_max*1e26)/(4*np.pi*d_l**2)
# At times smaller than the deceleration timescale
if time <= t_dec:
flux_n = spec_flux(flux_max,time,nu,p,nu_m0,nu_c0)
flux_n = flux_n*(time/t_dec)**3
return flux_n
# At times greater than the deceleration timescale
if time > t_dec:
if p > 2:
nu_m = nu_m0*(time/t_dec)**(-3/2)
nu_c = nu_c0*(time/t_dec)**(-1/2)
if p < 2:
nu_m = nu_m0*(time/t_dec)**((-3*(p+2))/(8*(p-1)))
nu_c = nu_c0*(time/t_dec)**(-1/2)
if time > t_jb:
nu_c = nu_c0*(t_jb/t_dec)**(-1/2)
flux_max = flux_max*(time/t_jb)**(-1)
if p > 2:
nu_m = nu_m0*(t_jb/t_dec)**(-3/2)*(time/t_jb)**(-2)
if p < 2:
nu_m = (nu_m0*(t_jb/t_dec)**((-3*(p+2))/(8*(p-1)))*(time/t_jb)
**(-(p+2)/(2*(p-1))))
flux_n = spec_flux(flux_max,time,nu,p,nu_m,nu_c)
return flux_n | 15658d57ae5d837d416731427e1227eb304b4b75 | 16,699 |
def fix_lng_degrees(lng: float) -> float:
"""
For a lng degree outside [-180;180] return the appropriate
degree assuming -180 = 180°W and 180 = 180°E.
"""
sign = 1 if lng > 0 else -1
lng_adj = (abs(lng) % 360) * sign
if lng_adj > 180:
return (lng_adj % 180) - 180
elif lng_adj < -180:
return lng_adj % 180
return lng_adj | bde58152883874095b15ec38cfb24ea68d73c188 | 16,700 |
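# A few illustrative values (degrees east are positive):
print(fix_lng_degrees(170))    # 170  (already in range)
print(fix_lng_degrees(190))    # -170 (190°E wraps to 170°W)
print(fix_lng_degrees(-190))   # 170  (190°W wraps to 170°E)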
def create_code(traits):
"""Assign bits to list of traits.
"""
code = 1
result = {INVALID: code}
if not traits:
return result
for trait in traits:
code = code << 1
result[trait] = code
return result | cfc7b1662edaf7f3e3763009a460157f7ec677bb | 16,701 |
from typing import List
from typing import Dict
from typing import Any
from typing import Optional
import pandas as pd
def get_current_table(grid_id: str) -> List[Dict[Any, Any]]:
""" Get current Data from the grid
Args:
grid_id: Grid ID to retrieve data from.
Returns:
        list: Existing grid data.
"""
current_table: Optional[List[dict]] = demisto.incidents()[0].get("CustomFields", {}).get(grid_id)
if current_table is None:
raise ValueError(f"The grid id isn't valid: {grid_id}")
return pd.DataFrame(current_table) | d1a8c21398aa2aca54ca587aa577c8ff50d8d46f | 16,702 |
import numpy as np
import networkx as nx
def read_graph(filepath):
"""Creates a graph based on the content of the file at given filepath.
Parameters
----------
    filepath : str
Path to a file containing an adjacency matrix.
"""
g_data = np.loadtxt(open(filepath, "rb"), delimiter=",")
return nx.from_numpy_matrix(g_data) | 74e0b687c6cf9e404d9446505799a84b5680c5b3 | 16,703 |
def get_seed(seed=None):
"""Get valid Numpy random seed value"""
# https://groups.google.com/forum/#!topic/briansupport/9ErDidIBBFM
random = np.random.RandomState(seed)
return random.randint(0, 2147483647) | 5ac1280a30265518edcf8bb07a03cfe5fb0ae21d | 16,704 |
import typing
import inspect
def resolve_lookup(
context: dict, lookup: str, call_functions: bool = True
) -> typing.Any:
"""
Helper function to extract a value out of a context-dict.
A lookup string can access attributes, dict-keys, methods without parameters and indexes by using the dot-accessor (e.g. ``person.name``)
This is based on the implementation of the variable lookup of the django template system:
https://github.com/django/django/blob/master/django/template/base.py
"""
current = context
for bit in lookup.split("."):
try:
current = current[bit]
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try:
current = getattr(current, bit)
except (TypeError, AttributeError):
# Reraise if the exception was raised by a @property
if not isinstance(current, dict) and bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (
IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError,
): # unsubscriptable object
return None
# raise LookupError(
# "Failed lookup for key " "[%s] in %r", (bit, current)
# ) # missing attribute
if callable(current) and call_functions:
try: # method call (assuming no args required)
current = current()
except TypeError:
signature = inspect.signature(current) # type: ignore
try:
signature.bind()
except TypeError: # arguments *were* required
pass # but we continue because we might use an attribute on the object instead of calling it
else:
raise
return current | a2090f2488ee10f7c11684952fd7a2498f6d4979 | 16,706 |
def check_actions_tool(tool):
"""2.2.x to 2.3.0 upgrade step checker
"""
atool = getToolByName(tool, 'portal_actions')
try:
atool['user']['change_password']
except KeyError:
return True
try:
atool['global']['members_register']
except KeyError:
return True
try:
atool['global']['search_form']
except KeyError:
return True
try:
atool['global']['search']
except KeyError:
return True
try:
atool['global']['syndication']
except KeyError:
return True
return False | 2ecc6064cd26aa670743c25018dd27e2ce0f41ca | 16,707 |
def integer_byte_length(number):
"""
    Number of bytes needed to represent an integer, excluding any prefix 0 bytes.
:param number:
Integer value. If num is 0, returns 0.
:returns:
The number of bytes in the integer.
"""
quanta, remainder = divmod(integer_bit_length(number), 8)
if remainder:
quanta += 1
return quanta | 0de5828117107461e23e36cf3c38bab0850b7203 | 16,708 |
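# integer_byte_length() calls an integer_bit_length() helper that is not shown
# here; a minimal sketch using int.bit_length() could be:
def integer_bit_length(number):
    return abs(number).bit_length()

print(integer_byte_length(0))      # 0
print(integer_byte_length(255))    # 1
print(integer_byte_length(256))    # 2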
import tensorflow as tf
def ones(input_dim, output_dim, name=None):
    """All ones."""
    initial = tf.ones((input_dim, output_dim), dtype=tf.float32)
return tf.Variable(initial, name=name) | 02867b278e224e436e470a9eaeac32b44e99a99a | 16,709 |
import numpy as np
from scipy.stats import poisson
def enrichment_score2(mat, idx, line_width, norm_factors, distance_range=(20, 40), window_size=10,
stats_test_log=({}, {})):
"""
Calculate the enrichment score of a stripe given its location, width and the contact matrix
Parameters:
----------
mat: np.array (2D)
Contact matrix generated with strata2horizontal() or strata2vertical()
idx: int
The location (index) of the candidate stripe
line_width: int
Stripe width (# of bins)
norm_factors: np.array (1D)
The vector of normalization factors of the contact map.
distance_range: tuple
The distance range (# of bins) for the diagonal for calculating the scores
window_size: int
Window size (# of bins)
stats_test_log: tuple of dict
Previous log for accelerating statistical tests
Returns
----------
new_mat: np.array (1D)
The enrichment score of each pixel along the candidate stripe
"""
_calculated_values, _poisson_stats = stats_test_log
half = int(line_width // 2)
x1, x2 = idx - half, idx - half + line_width
if x1 == x2:
x2 += 1
new_mat = np.zeros((distance_range[1] - distance_range[0],))
for j in range(distance_range[0], distance_range[1]):
y = j - distance_range[0]
_min_temp = subsetNpMatrix(mat, (x1, x2), (j - window_size - half, j + window_size + half + 1))
line_min = np.median([_min_temp])
# print(_min_temp, line_min)
_inner_neighbor = subsetNpMatrix(mat, (idx - half - window_size, x1),
(j - window_size - half, j + window_size + half + 1))
_outer_neighbor = subsetNpMatrix(mat, (x2 + 1, idx + half + window_size + 1),
(j - window_size - half, j + window_size + half + 1))
if _outer_neighbor.size == 0 or _inner_neighbor.size == 0:
continue
neighbor_mean = max(np.mean(_inner_neighbor), np.mean(_outer_neighbor))
# There should be a lower bound for the expected value,
# otherwise situations like (exp=0.01 and obs=0.02) would also be significant
# Currently we can set this to 0 until KR norm factors can be loaded
lower_b = 1 / norm_factors[idx] # This should be (1 / KR_norm_factors) if we refer to JuiceTools HICCUPS
_exp = max(neighbor_mean, lower_b)
_obs = int(line_min) # the same as floor function when line_min > 0
# _calculated_values: store all calculated exp-obs pairs in dictionary, in which keys are obs since
# they are always integers. Each _calculated_values[obs] is a binary tree for quick searching,
# and each tree leaf is a exp value corresponding to the obs value. Since exp values are float,
# there is also an integer index attached for searching the exp-obs in dictionary _poisson_stats
# (float cannot be dict keys).
# _poisson_stats: record all calculated result in a dict. It should be
# _poisson_stats[(_exp, _obs)] = -log10(p). But _exp is a float and cannot be a dict key, we give
# each _exp a unique index and use the index.
# stats_log: record all p value calculation. Just for benchmarking. Delete this when publishing.
# global _calculated_values, _poisson_stats # , stats_log
tolerance = 0.02
# check if obs is a value calculated before
if _obs in _calculated_values:
# Find the nearest _exp values which were calculated before
# One larger, one smaller
(_upper, _lower) = _calculated_values[_obs].search(_exp)
# If _upper is close enough to _exp, directly use the p value from (_upper-_obs) pair
if _upper is not None and (_upper.key - _exp) < tolerance * _exp:
_exp = _upper.key
_exp_idx = _upper.val # The integer index for _upper (float cannot be dict keys!)
mlog_p_val = _poisson_stats[(_exp_idx, _obs)]
else:
# Else, calculate p value for _obs-_exp pair and store them in _calculated_values and _poisson_stats
_exp_idx = _calculated_values[_obs].insert(_exp) # insert to the binary tree and return an index
Poiss = poisson(_exp)
p_val = 1 - Poiss.cdf(_obs)
if 0 < p_val < 1:
mlog_p_val = - np.log10(p_val)
else: # Some p values are too small, -log(0) will return an error, so we use -1 to temporarily replace
mlog_p_val = -1
_poisson_stats[(_exp_idx, _obs)] = mlog_p_val
# stats_log.append([_exp, _obs, mlog_p_val])
else: # If _obs is not used before, generate a new binary tree _calculated_values[_obs]
_calculated_values[_obs] = AVLTree()
_exp_idx = _calculated_values[_obs].insert(_exp)
# calculate p value for _obs-_exp pair and store them in _calculated_values and _poisson_stats
Poiss = poisson(_exp)
p_val = 1 - Poiss.cdf(_obs)
if 0 < p_val < 1:
mlog_p_val = - np.log10(p_val)
else: # Some p values are too small, -log(0) will return an error, so we use -1 to temporarily replace
mlog_p_val = -1
_poisson_stats[(_exp_idx, _obs)] = mlog_p_val
# stats_log.append([_exp, _obs, mlog_p_val])
# Store enrichment score in new_mat
new_mat[y] = mlog_p_val
new_mat[new_mat < 0] = np.max(new_mat) # Replace all "-1"s with the largest -log(p)
return new_mat | bfb987bd2e2d0770d81f811ba2486893b62d269d | 16,710 |
def paginate(data, page=1, per_page=None):
"""Create a paginated response of the given query set.
Arguments:
data -- A flask_mongoengine.BaseQuerySet instance
"""
per_page = app.config['DEFAULT_PER_PAGE'] if not per_page else per_page
pagination_obj = data.paginate(page=page, per_page=per_page)
return {
'data': build_pagination_data(pagination_obj),
'meta': build_pagination_metadata(pagination_obj),
} | c5a692067e5f58a971762316c83bcfe6f75051bf | 16,711 |
import numpy as np
def compute_mean_wind_dirs(res_path, dset, gids, fracs):
"""
Compute mean wind directions for given dset and gids
"""
with Resource(res_path) as f:
wind_dirs = np.radians(f[dset, :, gids])
sin = np.mean(np.sin(wind_dirs) * fracs, axis=1)
cos = np.mean(np.cos(wind_dirs) * fracs, axis=1)
mean_wind_dirs = np.degrees(np.arctan2(sin, cos))
mask = mean_wind_dirs < 0
mean_wind_dirs[mask] += 360
return mean_wind_dirs | bd3f91cc0f4b05f630d252f6026e3f27c56cd134 | 16,712 |
import numpy
from matplotlib import pyplot as plt
def plot_area_and_score(samples: SampleList, compound_name: str, include_none: bool = False):
"""
Plot the peak area and score for the compound with the given name
:param samples: A list of samples to plot on the chart
:param compound_name:
:param include_none: Whether samples where the compound was not found
should be plotted.
"""
peak_areas, scores = samples.get_areas_and_scores(compound_name, include_none)
fig, ax1 = plt.subplots()
y_positions = numpy.arange(len(peak_areas))
y_positions = [x * 1.5 for x in y_positions]
bar_width = 0.5
offset = bar_width / 2
area_y_pos = [x + offset for x in y_positions]
area_bar = ax1.barh(
area_y_pos,
list(peak_areas.values()),
label="Peak Area",
color="tab:orange",
height=bar_width,
)
ax1.set_xscale("log")
ax1.set_xlabel("Log10(Peak Area)")
ax2 = ax1.twiny()
score_scatter = ax2.scatter(list(scores.values()), area_y_pos, label="Score", color="tab:blue")
ax2.set_xlabel("Score")
ax1.barh([], [], label="Score", color="tab:blue", height=bar_width)
ax1.set_yticks(y_positions)
ax1.set_yticklabels(list(peak_areas.keys()))
fig.suptitle(f"Peak Area and Score for {compound_name}\n")
fig.set_size_inches(A4_landscape)
plt.tight_layout()
plt.subplots_adjust(top=0.9)
ax1.legend()
return fig, ax1, ax2 | cce2dd3c3fca742627dca5c893f498d83e0d7840 | 16,713 |
def get_strides(fm: NpuFeatureMap) -> NpuShape3D:
"""Calculates STRIDE_C/Y/X"""
if fm.strides is not None:
return fm.strides
elem_size = fm.data_type.size_in_bytes()
if fm.layout == NpuLayout.NHWC:
stride_c = elem_size
stride_x = fm.shape.depth * stride_c
stride_y = fm.shape.width * stride_x
else:
stride_x = 16 * elem_size
stride_c = stride_x * fm.shape.width
stride_y = elem_size * fm.shape.width * numeric_util.round_up(fm.shape.depth, 16)
return NpuShape3D(depth=stride_c, height=stride_y, width=stride_x) | e933fd3b06fb53e44b81bcb28341137a14990dec | 16,714 |
def gram_linear(x):
"""Compute Gram (kernel) matrix for a linear kernel.
Args:
x: A num_examples x num_features matrix of features.
Returns:
A num_examples x num_examples Gram matrix of examples.
"""
return x.dot(x.T) | f0a625d3ca6b846396c3c7c723b1bc8130a6c140 | 16,715 |
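A small usage sketch with a toy feature matrix:
import numpy as np
x = np.array([[1.0, 0.0],
              [0.0, 1.0],
              [1.0, 1.0]])
print(gram_linear(x))  # 3x3 Gram matrix: [[1, 0, 1], [0, 1, 1], [1, 1, 2]]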
import shapely.geometry
def to_feature(shape, properties={}):
"""
Create a GeoJSON Feature object for the given shapely.geometry :shape:.
Optionally give the Feature a :properties: dict.
"""
collection = to_feature_collection(shape)
feature = collection["features"][0]
feature["properties"] = properties
    # remove some unnecessary and redundant data
if "id" in feature:
del feature["id"]
if isinstance(shape, shapely.geometry.Point) and "bbox" in feature:
del feature["bbox"]
return dict(feature) | 39d8e7658ae2043c081d137f0a69ddd4344876fc | 16,716 |
def read_responses(file):
"""
Read dialogs from file
:param file: str, file path to the dataset
    :return: list, a list of dialogues (contexts) contained in the file
"""
with open(file, 'r') as f:
samples = f.read().split('<|endoftext|>')
samples = samples[1:] # responses = [i.strip() for i in f.readlines() if len(i.strip()) != 0]
return samples | e654a075622f04c3eca6c18e3d092593387ef237 | 16,717 |
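A usage sketch with a hypothetical responses.txt file (path and contents are made up for the example):
with open("responses.txt", "w") as f:
    f.write("<|endoftext|>Hello there.<|endoftext|>How are you?")
print(read_responses("responses.txt"))  # ['Hello there.', 'How are you?']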
def build_parametric_ev(data, onset, name, value, duration=None,
center=None, scale=None):
"""Make design info for a multi-column constant-value ev.
Parameters
----------
data : DataFrame
Input data; must have "run" column and any others specified.
onset : string
Column name containing event onset information.
name : string
Condition name to use for this ev.
value : string
Column name containing event amplitude information.
duration : string, float, or ``None``
Column name containing event duration information, or a value
to use for all events, or ``None`` to model events as impulses.
center : float, optional
Value to center the ``value`` column at before scaling. If absent,
center at the mean across runs.
scale : callable, optional
Function to scale the centered value column with.
Returns
-------
ev : DataFrame
Returned DataFrame will have "run", "onset", "duration", "value",
and "condition" columns.
"""
ev = data[["run", onset, value]].copy()
ev.columns = ["run", "onset", "value"]
# Center the event amplitude
if center is None:
ev["value"] -= ev.value.mean()
else:
ev["value"] = ev.value - center
# (Possibly) scale the event amplitude
if scale is not None:
ev["value"] = scale(ev["value"])
# Set a condition name for all events
ev["condition"] = name
# Determine the event duration
ev = _add_duration_information(data, ev, duration)
return ev | 47400052e2b2f4bf8217d9eaf71a83257180f5c4 | 16,718 |
import operator
import bisect
def time_aware_indexes(t, train_size, test_size, granularity, start_date=None):
"""Return a list of indexes that partition the list t by time.
Sorts the list of dates t before dividing into training and testing
partitions, ensuring a 'history-aware' split in the ensuing classification
task.
Args:
t (np.ndarray): Array of timestamp tags.
train_size (int): The training window size W (in τ).
test_size (int): The testing window size Δ (in τ).
granularity (str): The unit of time τ, used to denote the window size.
Acceptable values are 'year|quarter|month|week|day'.
        start_date (date): The date to begin partitioning from (e.g. to align with
the start of the year).
Returns:
(list, list):
Indexing for the training partition.
List of indexings for the testing partitions.
"""
# Order the dates as well as their original positions
with_indexes = zip(t, range(len(t)))
ordered = sorted(with_indexes, key=operator.itemgetter(0))
# Split out the dates from the indexes
dates = [tup[0] for tup in ordered]
indexes = [tup[1] for tup in ordered]
# Get earliest date
start_date = utils.resolve_date(start_date) if start_date else ordered[0][0]
# Slice out training partition
boundary = start_date + get_relative_delta(train_size, granularity)
to_idx = bisect.bisect_left(dates, boundary)
train = indexes[:to_idx]
tests = []
# Slice out testing partitions
while to_idx < len(indexes):
boundary += get_relative_delta(test_size, granularity)
from_idx = to_idx
to_idx = bisect.bisect_left(dates, boundary)
tests.append(indexes[from_idx:to_idx])
return train, tests | 96e27c7a3f7284476d615a8d03f7c365f0406187 | 16,719 |
def send_invite_mail(invite, request):
"""
Send an email invitation to user not yet registered in the system.
:param invite: ProjectInvite object
:param request: HTTP request
    :return: Number of emails sent (int)
"""
invite_url = build_invite_url(invite, request)
message = get_invite_body(
project=invite.project,
issuer=invite.issuer,
role_name=invite.role.name,
invite_url=invite_url,
date_expire_str=localtime(invite.date_expire).strftime(
'%Y-%m-%d %H:%M'
),
)
message += get_invite_message(invite.message)
message += get_email_footer()
subject = get_invite_subject(invite.project)
return send_mail(subject, message, [invite.email], request) | 4554bb6bea20e03749739026583d6215714febbf | 16,720 |
import numpy as np
def binary_n(total_N, min_n=50):
"""
Creates a list of values by successively halving the total length total_N
until the resulting value is less than min_n.
Non-integer results are rounded down.
Args:
total_N (int):
total length
Kwargs:
min_n (int):
minimal length after division
Returns:
list of integers:
total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n
"""
max_exp = np.log2(1.0 * total_N / min_n)
max_exp = int(np.floor(max_exp))
return [int(np.floor(1.0 * total_N / (2**i))) for i in range(1, max_exp + 1)] | 240296c6024243da5750cb5aa7e64bea45ae91ca | 16,722 |
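A usage sketch showing the successive halving:
print(binary_n(1000))              # [500, 250, 125, 62]
print(binary_n(1000, min_n=200))   # [500, 250]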
import numpy as np
def thresholding(pred, label, thres):
    """ For each threshold in thres, binarize pred and label (1 if > threshold, 0 otherwise) and accumulate the confusion counts. """
conf =[]
for i in thres:
pr_th,lab_th = (pred>i),(label>i)
conf += confusion(pr_th,lab_th)
return np.array(conf).reshape(-1,4) | 97727a75b4f7648c82a095c7804709e9a52f13ed | 16,723 |
def unicode_test(request, oid):
"""Simple view to test funky characters from the database."""
funky = News.objects.using('livewhale').get(pk=oid)
return render(request, 'bridge/unicode.html', {'funky': funky}) | 8357d76bfc22fdc3f12176332a4b19fd3bfb79c9 | 16,724 |
from typing import Optional
from typing import Dict
from typing import Any
def _field_to_schema_object(field: BaseType, apistrap: Optional[Apistrap]) -> Optional[Dict[str, Any]]:
"""
Convert a field definition to OpenAPI 3 schema.
:param field: the field to be converted
:param apistrap: the extension used for adding reusable schema definitions
:return: a schema
"""
if isinstance(field, ModelType):
return _model_field_to_schema_object(field, apistrap)
elif isinstance(field, ListType):
if isinstance(field.field, ModelType):
return _model_array_to_schema_object(field, apistrap)
elif isinstance(field.field, BaseType):
return _primitive_array_to_schema_object(field)
elif isinstance(field, DictType):
if isinstance(field.field, ModelType):
return _model_dict_to_schema_object(field, apistrap)
elif isinstance(field.field, UnionType):
return _union_dict_to_schema_object(field, apistrap)
elif isinstance(field.field, ListType) and isinstance(field.field.field, ModelType):
return _dict_of_model_lists_to_schema_object(field, apistrap)
elif isinstance(field.field, BaseType):
return _primitive_dict_to_schema_object(field)
elif isinstance(field, StringType):
return _string_field_to_schema_object(field, apistrap)
elif isinstance(field, AnyType):
return {}
elif isinstance(field, UnionType):
return _union_field_to_schema_object(field, apistrap)
elif isinstance(field, DiscriminatedModelType):
return _discriminated_model_field_to_schema_object(field, apistrap)
elif isinstance(field, PolyModelType):
return _poly_model_field_to_schema_object(field, apistrap)
elif isinstance(field, BaseType):
return _primitive_field_to_schema_object(field)
return None | 1451f8795dc39d3168c141fd0ad8dd2615903163 | 16,725 |
from typing import Dict
from typing import Any
def drop_test(robot, *, z_rot: float, min_torque: bool, initial_height: float = 1.) -> Dict[str, Any]:
"""Params which have been tested for this task:
nfe = 20, total_time = 1.0, vary_timestep_with=(0.8,1.2), 5 mins for solving
if min_torque is True, quite a bit more time is needed as IPOPT refines things
"""
nfe = len(robot.m.fe)
ncp = len(robot.m.cp)
tested_models = ('3D monoped', '3D biped',
'3D quadruped', '3D prismatic monoped')
if not robot.name in tested_models:
visual.warn(
f'This robot configuration ("{robot.name}") hasn\'t been tested!\n'
f'Tested models are: {tested_models}')
body = robot['base_B'] if robot.name == '3D quadruped' else robot['base']
# start at the origin
body['q'][1, ncp, 'x'].fix(0)
body['q'][1, ncp, 'y'].fix(0)
body['q'][1, ncp, 'z'].fix(initial_height)
# fix initial angle
for link in robot.links:
for ang in ('phi', 'theta'):
link['q'][1, ncp, ang].fix(0)
link['q'][1, ncp, 'psi'].fix(z_rot)
# start stationary
for link in robot.links:
for q in link.pyomo_sets['q_set']:
link['dq'][1, ncp, q].fix(0)
# init to y plane
for link in robot.links:
for ang in ('phi', 'theta'):
link['q'][:, :, ang].value = 0
link['q'][:, :, 'psi'].value = z_rot
# legs slightly forward at the end
uplopairs = (('upper', 'lower'),) if robot.name == '3D monoped' \
else (('UL', 'LL'), ('UR', 'LR')) if robot.name == '3D biped' \
else (('UFL', 'LFL'), ('UFR', 'LFR'), ('UBL', 'LBL'), ('UBR', 'LBR')) if robot.name == '3D quadruped' \
else tuple() # <- iterating over this will result in the body not being evaluated
for upper, lower in uplopairs:
ang = 0.01 if not (
robot.name == '3D quadruped' and upper[1] == 'B') else -0.01
robot[upper]['q'][nfe, ncp, 'theta'].setlb(ang)
robot[lower]['q'][nfe, ncp, 'theta'].setub(-ang)
# but not properly fallen over
body['q'][nfe, ncp, 'z'].setlb(0.2)
# objective: reduce CoT, etc
utils.remove_constraint_if_exists(robot.m, 'cost')
torque_cost = torque_squared_penalty(robot)
pen_cost = feet_penalty(robot)
robot.m.cost = Objective(expr=(torque_cost if min_torque else 0)
+ 1000*pen_cost)
return {'torque': torque_cost, 'penalty': pen_cost} | e6a070fd52356a314e5d2992d03fa61ead40f950 | 16,727 |
from typing import List
from typing import Union
from datetime import date, datetime
from typing import Dict
from typing import Any
import httpx
from typing import cast
def get_user_list(
*, client: Client, an_enum_value: List[AnEnum], some_date: Union[date, datetime],
) -> Union[
List[AModel], HTTPValidationError,
]:
""" Get a list of things """
url = "{}/tests/".format(client.base_url)
headers: Dict[str, Any] = client.get_headers()
json_an_enum_value = []
for an_enum_value_item_data in an_enum_value:
an_enum_value_item = an_enum_value_item_data.value
json_an_enum_value.append(an_enum_value_item)
    json_some_date = some_date.isoformat()
params: Dict[str, Any] = {
"an_enum_value": json_an_enum_value,
"some_date": json_some_date,
}
response = httpx.get(url=url, headers=headers, params=params,)
if response.status_code == 200:
return [AModel.from_dict(item) for item in cast(List[Dict[str, Any]], response.json())]
if response.status_code == 422:
return HTTPValidationError.from_dict(cast(Dict[str, Any], response.json()))
else:
raise ApiResponseError(response=response) | 77ca30fafb6c29f4cb04d25b52b7cca37e3ede04 | 16,728 |
def old_func5(self, x):
"""Summary.
Bizarre indentation.
"""
return x | 5bc9cdbc406fa49960613578296e81bdd4eeb771 | 16,729 |
def get_dotenv_variable(var_name: str) -> str:
""" """
try:
return config.get(var_name)
except KeyError:
error_msg = f"{var_name} not found!\nSet the '{var_name}' environment variable"
raise ImproperlyConfigured(error_msg) | e3a06f3a439f5eb238688805985fb54eea7221e4 | 16,731 |
from torchvision import transforms
from torchvision.datasets import ImageFolder
def load_dataset():
"""
Create a PyTorch Dataset for the images.
Notes
-----
- See https://discuss.pytorch.org/t/computing-the-mean-and-std-of-dataset/34949
"""
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.9720, 0.9720, 0.9720),
(0.1559, 0.1559, 0.1559)) # Normalize with the mean and std of the whole dataset
])
dataset = ImageFolder(root='images', transform=transform)
return dataset | 204e33cb7cb79ef81349b083e21ed6779c04dad0 | 16,732 |
import numpy as np
def vote(pred1, pred2, pred3=None):
    """Hard (majority) voting across two or three sets of binary predictions."""
vote_ = []
index = []
if pred3 is None:
mean = np.mean([pred1, pred2], axis=0)
for s, x in enumerate(mean):
if x == 1 or x == 0:
vote_.append(int(x))
else:
vote_.append(pred2[s])
index.append(s)
else:
mean = np.mean([pred1, pred2, pred3], axis=0)
for s, x in enumerate(mean):
if x == 1 or x == 0:
vote_.append(int(x))
elif x > 0.5:
vote_.append(1)
index.append(s)
else:
vote_.append(0)
index.append(s)
return mean, vote_, index | a24858c0f14fe51a70ff2c0146a4e1aa3a2afdeb | 16,734 |
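A usage sketch with two toy prediction vectors; ties fall back to pred2 and their indices are reported:
p1 = [1, 0, 1, 1]
p2 = [1, 1, 0, 0]
mean, votes, undecided = vote(p1, p2)
print(votes)      # [1, 1, 0, 0]
print(undecided)  # [1, 2, 3] -- positions where the two models disagreed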
from pathlib import Path
def generate_master_flat(
science_frame : CCDData,
bias_path : Path,
dark_path : Path,
flat_path : Path,
use_cache : bool=True
) -> CCDData:
"""
"""
cache_path = generate_cache_path(science_frame, flat_path) / 'flat'
if use_cache and cache_path.is_dir():
flat_frames = ccdp.ImageFileCollection(location=cache_path)
else:
cache_path.mkdir(parents=True, exist_ok=True)
flat_frames = calibrate_flat(science_frame=science_frame, bias_path=bias_path, dark_path=dark_path, flat_path=flat_path, output_path=cache_path)
ccd = select_flat_frame(science_frame=science_frame, flat_frames=flat_frames)
return ccd | 5448e6f95e1d2c9249ac9419c767e675dc424c5b | 16,735 |
def natrix_mqttclient(client_id):
"""Generate a natrix mqtt client.
This function encapsulates all configurations about natrix mqtt client.
Include:
- client_id
The unique id about mqtt connection.
- username & password
Username is device serial number which used to identify who am I;
:return:
"""
client = NatrixMQTTClient(client_id)
return client | 20960873f265068aff035ec554b880fa93c49e32 | 16,736 |
import numpy as np
def insert_singletons(words, singletons, p=0.5):
"""
Replace singletons by the unknown word with a probability p.
"""
new_words = []
for word in words:
if word in singletons and np.random.uniform() < p:
new_words.append(0)
else:
new_words.append(word)
return new_words | c99e9ed38287c175cd97f9cec0c0f8fb8f3629a7 | 16,737 |
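A usage sketch with toy word ids; with p=1.0 every singleton is (almost surely) masked, with p=0.0 nothing changes:
print(insert_singletons([12, 3, 7, 3], {7}, p=1.0))  # [12, 3, 0, 3]
print(insert_singletons([12, 3, 7, 3], {7}, p=0.0))  # [12, 3, 7, 3]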
def talk(text, is_yelling=False, trim=False, verbose=True):
"""
Prints text
is_yelling capitalizes text
trim - trims whitespace from both ends
verbose - if you want to print something on screen
returns transformed text
"""
if trim:
text = text.strip()
if is_yelling:
text = text.upper()
if verbose:
print(text) # printing is considered a side effect inside a function
return text | 22728a877460b4504653e2a0ea9ecdf81fa422f9 | 16,738 |
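A usage sketch:
result = talk("  hello world  ", is_yelling=True, trim=True, verbose=False)
print(repr(result))  # 'HELLO WORLD'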
import math
import numpy as np
def getNorthPoleAngle(target, position, C, B, camera):
"""
Get angle north pole of target makes with image y-axis, in radians.
"""
# get target spin axis
# the last row of the matrix is the north pole vector, *per spice docs*
# seems correct, as it's nearly 0,0,1
Bz = B[2]
    print('Bz=north pole spin axis', Bz)
# get target radius, km
nvalues, radii = spice.bodvrd(target, 'RADII', 3)
targetRadiusEquator = (radii[0] + radii[1]) / 2
targetRadiusPoles = radii[2]
targetRadius = sum(radii) / 3
# flatteningCoefficient = (targetRadiusEquator - targetRadiusPoles) / targetRadiusEquator
# print 'target radius in km', targetRadius
# get north pole location
positionNP = position + targetRadius * Bz
    print('positionNP=north pole in world coords', positionNP)
# get target position in camera space
c = np.dot(C, position)
cNP = np.dot(C, positionNP)
    print('c=position in camera space', c)
    print('cNP=north pole in camera space', cNP)
# get camera fov and focal length
fovDegrees = config.cameraFOVs[camera] # 0.424 or 3.169 deg
fovRadians = fovDegrees * math.pi / 180
f = 1.0 / math.tan(fovRadians/2) # focal length (relative to screen halfwidth of 1.0)
    print('f=focal length', f)
# get camera-to-screen matrix S
cz = c[2]
fz = f/cz
# print 'fz=f/cz',fz
S = np.array([[fz,0,0],[0,fz,0]])
# get screen coordinate (-1 to 1, -1 to 1)
s = np.dot(S, c)
sNP = np.dot(S, cNP)
# ie sx=cx*f/cz; sy=cy*f/cz
    print('s=screen space (-1 to 1)', s)
    print('sNP=screen space north pole (-1 to 1)', sNP)
# get angle between north pole and image y-axis
npDelta = sNP-s
npRadians = math.atan(npDelta[0]/npDelta[1])
npAngle = npRadians * 180/math.pi
    print('npAngle', npAngle)
return npRadians | cf6d79ef3af005a170694d5fe00b93e9dd2665dd | 16,739 |
def ray_casting(polygon, ray_line):
""" checks number of intersection a ray makes with polygon
parameters: Polygon, ray (line)
output: number of intersection
"""
vertex_num = polygon.get_count()
ray_casting_result = [False] * vertex_num
''' count for vertices that is colinear and intersects with ray '''
vertex_colinear_intersect_with_ray = 0
cursor = polygon.head
for index in range(vertex_num):
edge = LineSegment(cursor.data, cursor.next.data)
ray_casting_result[index] = does_lines_intersect(edge, ray_line)
cursor = cursor.next
''' added to check whether vertex is colinear with ray '''
if is_vertex_colinear(ray_line, cursor.data) and ray_casting_result[index]:
vertex_colinear_intersect_with_ray = vertex_colinear_intersect_with_ray + 1
# print(ray_casting_result)
# print(vertex_colinear_intersect_with_ray)
''' adjusted for colinear vertices '''
return ray_casting_result.count(True) - vertex_colinear_intersect_with_ray | 004c73fbef35bec5af35b6b93cc5b2bdb2e40f33 | 16,740 |
def ErrorWrapper(err, resource_name):
"""Wraps http errors to handle resources names with more than 4 '/'s.
Args:
err: An apitools.base.py.exceptions.HttpError.
resource_name: The requested resource name.
Returns:
A googlecloudsdk.api_lib.util.exceptions.HttpException.
"""
exc = exceptions.HttpException(err)
if exc.payload.status_code == 404:
# status_code specific error message
exc.error_format = ('{{api_name}}: {resource_name} not found.').format(
resource_name=resource_name)
else:
# override default error message
exc.error_format = ('Unknown error. Status code {status_code}.')
return exc | ebcce6241f88d0fa4f093f6823d0ccb9ae1bd431 | 16,743 |
def get_str_cmd(cmd_lst):
"""Returns a string with the command to execute"""
params = []
for param in cmd_lst:
if len(param) > 12:
params.append('"{p}"'.format(p=param))
else:
params.append(param)
return ' '.join(params) | a7cc28293eb381604112265a99b9c03e762c2f2c | 16,744 |
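A usage sketch; arguments longer than 12 characters are quoted:
print(get_str_cmd(['git', 'commit', '-m', 'fix the flaky integration test']))
# git commit -m "fix the flaky integration test"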
def calculate_score(arr):
"""Inside calculate_score() check for a blackjack (a hand with only 2 cards: ace + 10) and return 0 instead of the actual score. 0 will represent a blackjack in our game.
It check for an 11 (ace). If the score is already over 21, remove the 11 and replace it with a 1"""
if sum(arr) == 21 and len(arr) == 2:
return 0 # represents blackjack
if sum(arr) > 21 and 11 in arr:
arr.remove(11)
arr.append(1)
return sum(arr) | 0890c55068b8a92d9f1f577ccf2c5a770f7887d4 | 16,745 |
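A usage sketch covering the blackjack and soft-ace cases:
print(calculate_score([11, 10]))    # 0 -- blackjack
print(calculate_score([11, 9, 5]))  # 15 -- the 11 is downgraded to a 1
print(calculate_score([10, 6, 3]))  # 19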
def tt_true(alpha):
"""Is the propositional sentence alpha a tautology? (alpha will be
coerced to an expr.)
>>> tt_true(expr("(P >> Q) <=> (~P | Q)"))
True
"""
return tt_entails(TRUE, expr(alpha)) | 91ca0d445407f50d4b985e16428dfeb7f1e1b5a2 | 16,746 |
import configparser
def show_config_data_by_section(data:configparser.ConfigParser, section:str):
"""Print a section's data by section name
Args:
data (configparser.ConfigParser): Data
section (str): Section name
"""
if not _check_data_section_ok(data, section):
return None
val = data[section]
print("[{}]".format(section))
for k, v in val.items():
print("{} = {}".format(k, v))
print() | 620abac7791a9e34707236ea6186b4e77591a393 | 16,748 |