filename | text
---|---|
the-stack_106_24596 | # Import functions
from cohortextractor import (
StudyDefinition,
patients,
codelist_from_csv,
codelist,
Measure
)
# Import codelists
from codelists import *
from datetime import date
start_date = "2020-12-07"
end_date = "2021-02-01"
# Specify study definition
study = StudyDefinition(
default_expectations={
"date": {"earliest": start_date, "latest": end_date},
"rate": "exponential_increase",
"incidence": 0.1,
},
population=patients.registered_as_of(start_date),
practice=patients.registered_practice_as_of(
start_date,
returning="pseudo_id",
return_expectations={
"int": {"distribution": "normal", "mean": 25, "stddev": 5}, "incidence": 0.5}
),
)
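# A minimal sketch of how the imported Measure class is typically used alongside a
# study definition. The measure id and the "event" numerator column below are
# placeholders for illustration only; this study does not define such a column.
# measures = [
#     Measure(
#         id="event_rate_by_practice",
#         numerator="event",
#         denominator="population",
#         group_by="practice",
#     ),
# ]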
|
the-stack_106_24600 | import inspect
import hashlib
import logging
from django.core.cache import caches
from django.conf import settings
log = logging.getLogger(__name__)
class cache_types(object):
NONE = None
DEFAULT = 'default'
SIMPLE = 'simple' # Stores queryset objects directly in cache
PK_LIST = 'pk_list' # Stores queryset pks in cache for later expansion back to queryset
try:
CACHE_BACKEND = settings.DATATABLEVIEW_CACHE_BACKEND
except AttributeError:
CACHE_BACKEND = 'default'
try:
CACHE_PREFIX = settings.DATATABLEVIEW_CACHE_PREFIX
except AttributeError:
CACHE_PREFIX = 'datatableview_'
try:
DEFAULT_CACHE_TYPE = settings.DATATABLEVIEW_DEFAULT_CACHE_TYPE
except AttributeError:
DEFAULT_CACHE_TYPE = cache_types.SIMPLE
try:
CACHE_KEY_HASH = settings.DATATABLEVIEW_CACHE_KEY_HASH
except AttributeError:
CACHE_KEY_HASH = True
try:
CACHE_KEY_HASH_LENGTH = settings.DATATABLEVIEW_CACHE_KEY_HASH_LENGTH
except AttributeError:
CACHE_KEY_HASH_LENGTH = None
cache = caches[CACHE_BACKEND]
hash_slice = None
if CACHE_KEY_HASH:
hash_slice = slice(None, CACHE_KEY_HASH_LENGTH)
def _hash_key_component(s):
return hashlib.sha1(s).hexdigest()[hash_slice]
def get_cache_key(datatable_class, view=None, user=None, **kwargs):
"""
Returns a cache key unique to the current table, and (if available) the request user.
The ``view`` argument should be the class reference itself, since it is easily obtainable
in contexts where the instance is not available.
"""
datatable_name = datatable_class.__name__
if datatable_name.endswith('_Synthesized'):
datatable_name = datatable_name[:-12]
datatable_id = '%s.%s' % (datatable_class.__module__, datatable_name)
if CACHE_KEY_HASH:
datatable_id = _hash_key_component(datatable_id)
cache_key = 'datatable_%s' % (datatable_id,)
if view:
if not inspect.isclass(view):
# Reduce view to its class
view = view.__class__
view_id = '%s.%s' % (view.__module__, view.__name__)
if CACHE_KEY_HASH:
view_id = _hash_key_component(view_id)
cache_key += '__view_%s' % (view_id,)
if user and user.is_authenticated():
cache_key += '__user_%s' % (user.pk,)
# All other kwargs are used directly to create a hashed suffix
# Order the kwargs by key name, then convert them to their repr() values.
items = sorted(kwargs.items(), key=lambda item: item[0])
values = []
for k, v in items:
values.append('%r:%r' % (k, v))
if values:
kwargs_id = '__'.join(values)
kwargs_id = _hash_key_component(kwargs_id)
cache_key += '__kwargs_%s' % (kwargs_id,)
log.debug("Cache key derived for %r: %r (from kwargs %r)", datatable_class, cache_key, values)
return cache_key
def get_cached_data(datatable, **kwargs):
""" Returns the cached object list under the appropriate key, or None if not set. """
cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs))
data = cache.get(cache_key)
log.debug("Reading data from cache at %r: %r", cache_key, data)
return data
def cache_data(datatable, data, **kwargs):
""" Stores the object list in the cache under the appropriate key. """
cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs))
log.debug("Setting data to cache at %r: %r", cache_key, data)
cache.set(cache_key, data)
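# A minimal usage sketch, assuming a datatable class and view named MyDatatable and
# MyView (placeholders): get_cache_key() derives the key, while the two helpers above
# wrap reads and writes against the configured cache backend.
#   key = get_cache_key(MyDatatable, view=MyView, user=request.user, search='foo')
#   data = get_cached_data(datatable, user=request.user)
#   if data is None:
#       data = build_object_list()  # placeholder for the expensive queryset work
#       cache_data(datatable, data, user=request.user)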
|
the-stack_106_24601 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFgsea(RPackage):
"""Fast Gene Set Enrichment Analysis.
The package implements an algorithm for fast gene set enrichment
analysis. Using the fast algorithm makes it possible to run more permutations and
get more fine-grained p-values, which allows the use of accurate standard
approaches to multiple hypothesis correction."""
homepage = "https://bioconductor.org/packages/fgsea"
git = "https://git.bioconductor.org/packages/fgsea.git"
version('1.10.1', commit='fb06a6ebfb4a195e77e37226d100a2148b90c5f3')
version('1.8.0', commit='bb2898aca9fb23e90770671a83fe23f79bb1841b')
version('1.6.0', commit='52b801b7c2dfd8238fa8f2b402fddb4fda60271d')
version('1.4.1', commit='73de5ff364e520ac99507a9ee5a61a0d23d3c44e')
version('1.2.1', commit='99b04eef664204d0dca4b9f8027cd7eefb006b72')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
depends_on('r-data-table', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-gridextra', type=('build', 'run'))
depends_on('r-fastmatch', type=('build', 'run'))
depends_on('r-matrix', when='@1.6.0:', type=('build', 'run'))
depends_on('r-bh', when='@1.10.1:', type=('build', 'run'))
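# Typical usage of this recipe, assuming a working Spack installation:
#   spack install [email protected]
#   spack load r-fgsea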
|
the-stack_106_24606 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" CircuitSampler Class """
from typing import Optional, Dict, List, Union, cast, Any
import logging
from functools import partial
from qiskit.providers import BaseBackend
from qiskit.circuit import ParameterExpression, ParameterVector
from qiskit import QiskitError
from qiskit.aqua import QuantumInstance
from qiskit.aqua.utils.backend_utils import is_aer_provider, is_statevector_backend
from qiskit.aqua.operators.operator_base import OperatorBase
from qiskit.aqua.operators.operator_globals import Zero
from qiskit.aqua.operators.list_ops.list_op import ListOp
from qiskit.aqua.operators.state_fns.state_fn import StateFn
from qiskit.aqua.operators.state_fns.circuit_state_fn import CircuitStateFn
from qiskit.aqua.operators.converters.converter_base import ConverterBase
logger = logging.getLogger(__name__)
class CircuitSampler(ConverterBase):
"""
The CircuitSampler traverses an Operator and converts any CircuitStateFns into
approximations of the state function by a DictStateFn or VectorStateFn using a quantum
backend. Note that in order to approximate the value of the CircuitStateFn, it must 1) send
the state function through a depolarizing channel, which will destroy all phase information and
2) replace the sampled frequencies with **square roots** of the frequency, rather than the raw
probability of sampling (which would be the equivalent of sampling the **square** of the
state function, per the Born rule).
The CircuitSampler aggressively caches transpiled circuits to handle re-parameterization of
the same circuit efficiently. If you are converting multiple different Operators,
you are better off using a different CircuitSampler for each Operator to avoid cache thrashing.
"""
def __init__(self,
backend: Union[BaseBackend, QuantumInstance] = None,
statevector: Optional[bool] = None,
param_qobj: bool = False,
attach_results: bool = False) -> None:
"""
Args:
backend: The quantum backend or QuantumInstance to use to sample the circuits.
statevector: If backend is a statevector backend, whether to replace the
CircuitStateFns with DictStateFns (from the counts) or VectorStateFns (from the
statevector). ``None`` will set this argument automatically based on the backend.
param_qobj: (TODO, not yet available) Whether to use Aer's parameterized Qobj
capability to avoid re-assembling the circuits.
attach_results: Whether to attach the data from the backend ``Results`` object for
a given ``CircuitStateFn`` to an ``execution_results`` field added to the converted
``DictStateFn`` or ``VectorStateFn``.
Raises:
ValueError: Set statevector or param_qobj True when not supported by backend.
"""
self._quantum_instance = backend if isinstance(backend, QuantumInstance) else\
QuantumInstance(backend=backend)
self._statevector = statevector if statevector is not None \
else self.quantum_instance.is_statevector
self._param_qobj = param_qobj
self._attach_results = attach_results
self._check_quantum_instance_and_modes_consistent()
# Object state variables
self._last_op = None
self._reduced_op_cache = None
self._circuit_ops_cache = {} # type: Dict[int, CircuitStateFn]
self._transpiled_circ_cache = None # type: Optional[List[Any]]
self._transpile_before_bind = True
self._binding_mappings = None
def _check_quantum_instance_and_modes_consistent(self) -> None:
""" Checks whether the statevector and param_qobj settings are compatible with the
backend
Raises:
ValueError: statevector or param_qobj are True when not supported by backend.
"""
if self._statevector and not is_statevector_backend(self.quantum_instance.backend):
raise ValueError('Statevector mode for circuit sampling requires statevector '
'backend, not {}.'.format(self.quantum_instance.backend))
if self._param_qobj and not is_aer_provider(self.quantum_instance.backend):
raise ValueError('Parameterized Qobj mode requires Aer '
'backend, not {}.'.format(self.quantum_instance.backend))
@property
def backend(self) -> BaseBackend:
""" Returns the backend.
Returns:
The backend used by the CircuitSampler
"""
return self.quantum_instance.backend
@backend.setter
def backend(self, backend: BaseBackend):
""" Sets backend without additional configuration. """
self.set_backend(backend)
def set_backend(self, backend: BaseBackend, **kwargs) -> None:
""" Sets backend with configuration.
Raises:
ValueError: statevector or param_qobj are True when not supported by backend.
"""
self.quantum_instance = QuantumInstance(backend)
self.quantum_instance.set_config(**kwargs)
@property
def quantum_instance(self) -> QuantumInstance:
""" Returns the quantum instance.
Returns:
The QuantumInstance used by the CircuitSampler
"""
return self._quantum_instance
@quantum_instance.setter
def quantum_instance(self, quantum_instance: Union[QuantumInstance, BaseBackend]) -> None:
""" Sets the QuantumInstance.
Raises:
ValueError: statevector or param_qobj are True when not supported by backend.
"""
if isinstance(quantum_instance, BaseBackend):
quantum_instance = QuantumInstance(quantum_instance)
self._quantum_instance = quantum_instance
self._check_quantum_instance_and_modes_consistent()
# pylint: disable=arguments-differ
def convert(self,
operator: OperatorBase,
params: Optional[Dict[Union[ParameterExpression, ParameterVector],
Union[float, List[float], List[List[float]]]]] = None
) -> OperatorBase:
r"""
Converts the Operator to one in which the CircuitStateFns are replaced by
DictStateFns or VectorStateFns. Extracts the CircuitStateFns out of the Operator,
caches them, calls ``sample_circuits`` below to get their converted replacements,
and replaces the CircuitStateFns in operator with the replacement StateFns.
Args:
operator: The Operator to convert
params: A dictionary mapping parameters to either single binding values or lists of
binding values. The dictionary can also contain pairs of ParameterVectors with
lists of parameters or lists of lists of parameters to bind to them.
Returns:
The converted Operator with CircuitStateFns replaced by DictStateFns or VectorStateFns.
"""
if self._last_op is None or id(operator) != id(self._last_op):
# Clear caches
self._last_op = operator
self._reduced_op_cache = None
self._circuit_ops_cache = None
self._transpiled_circ_cache = None
self._transpile_before_bind = True
if not self._reduced_op_cache:
operator_dicts_replaced = operator.to_circuit_op()
self._reduced_op_cache = operator_dicts_replaced.reduce()
if not self._circuit_ops_cache:
self._circuit_ops_cache = {}
self._extract_circuitstatefns(self._reduced_op_cache)
if params:
p_0 = list(params.values())[0] # type: ignore
num_parameterizations = len(cast(List, p_0))
param_bindings = [{param: value_list[i] # type: ignore
for (param, value_list) in params.items()}
for i in range(num_parameterizations)]
else:
param_bindings = None
num_parameterizations = 1
# Don't pass circuits if we already have them in the cache; the sampling function knows to use the cache
circs = list(self._circuit_ops_cache.values()) if not self._transpiled_circ_cache else None
p_b = cast(List[Dict[ParameterExpression, List[float]]], param_bindings)
sampled_statefn_dicts = self.sample_circuits(circuit_sfns=circs,
param_bindings=p_b)
def replace_circuits_with_dicts(operator, param_index=0):
if isinstance(operator, CircuitStateFn):
return sampled_statefn_dicts[id(operator)][param_index]
elif isinstance(operator, ListOp):
return operator.traverse(partial(replace_circuits_with_dicts,
param_index=param_index))
else:
return operator
if params:
return ListOp([replace_circuits_with_dicts(self._reduced_op_cache, param_index=i)
for i in range(num_parameterizations)])
else:
return replace_circuits_with_dicts(self._reduced_op_cache, param_index=0)
def _extract_circuitstatefns(self, operator: OperatorBase) -> None:
r"""
Recursively extract the ``CircuitStateFns`` contained in operator into the
``_circuit_ops_cache`` field.
"""
if isinstance(operator, CircuitStateFn):
self._circuit_ops_cache[id(operator)] = operator
elif isinstance(operator, ListOp):
for op in operator.oplist:
self._extract_circuitstatefns(op)
def sample_circuits(self,
circuit_sfns: Optional[List[CircuitStateFn]] = None,
param_bindings: Optional[List[Dict[ParameterExpression,
List[float]]]] = None
) -> Dict[int, Union[StateFn, List[StateFn]]]:
r"""
Samples the CircuitStateFns and returns a dict associating their ``id()`` values to their
replacement DictStateFn or VectorStateFn. If param_bindings is provided,
the CircuitStateFns are broken into their parameterizations, and a list of StateFns is
returned in the dict for each circuit ``id()``. Note that param_bindings is provided here
in a different format than in ``convert``: lists of parameters within the dict are not
supported, and only binding dicts which are valid to be passed into Terra can be included
in this list.
Args:
circuit_sfns: The list of CircuitStateFns to sample.
param_bindings: The parameterizations to bind to each CircuitStateFn.
Returns:
The dictionary mapping ids of the CircuitStateFns to their replacement StateFns.
"""
if circuit_sfns or not self._transpiled_circ_cache:
if self._statevector:
circuits = [op_c.to_circuit(meas=False) for op_c in circuit_sfns]
else:
circuits = [op_c.to_circuit(meas=True) for op_c in circuit_sfns]
try:
self._transpiled_circ_cache = self.quantum_instance.transpile(circuits)
except QiskitError:
logger.debug(r'CircuitSampler failed to transpile circuits with unbound '
r'parameters. Attempting to transpile only when circuits are bound '
r'now, but this can hurt performance due to repeated transpilation.')
self._transpile_before_bind = False
self._transpiled_circ_cache = circuits
else:
circuit_sfns = list(self._circuit_ops_cache.values())
if param_bindings is not None:
if self._param_qobj:
ready_circs = self._transpiled_circ_cache
self._prepare_parameterized_run_config(param_bindings)
else:
ready_circs = [circ.assign_parameters(binding)
for circ in self._transpiled_circ_cache
for binding in param_bindings]
else:
ready_circs = self._transpiled_circ_cache
results = self.quantum_instance.execute(ready_circs,
had_transpiled=self._transpile_before_bind)
# Wipe parameterizations, if any
# self.quantum_instance._run_config.parameterizations = None
sampled_statefn_dicts = {}
for i, op_c in enumerate(circuit_sfns):
# Taking square root because we're replacing a statevector
# representation of probabilities.
reps = len(param_bindings) if param_bindings is not None else 1
c_statefns = []
for j in range(reps):
circ_index = (i * reps) + j
circ_results = results.data(circ_index)
if 'expval_measurement' in circ_results.get('snapshots', {}).get(
'expectation_value', {}):
snapshot_data = results.data(circ_index)['snapshots']
avg = snapshot_data['expectation_value']['expval_measurement'][0]['value']
if isinstance(avg, (list, tuple)):
# Aer versions before 0.4 use a list snapshot format
# which must be converted to a complex value.
avg = avg[0] + 1j * avg[1]
# Will be replaced with just avg when eval is called later
num_qubits = circuit_sfns[0].num_qubits
result_sfn = (Zero ^ num_qubits).adjoint() * avg
elif self._statevector:
result_sfn = StateFn(op_c.coeff * results.get_statevector(circ_index))
else:
shots = self.quantum_instance._run_config.shots
result_sfn = StateFn({b: (v * op_c.coeff / shots) ** .5
for (b, v) in results.get_counts(circ_index).items()})
if self._attach_results:
result_sfn.execution_results = circ_results
c_statefns.append(result_sfn)
sampled_statefn_dicts[id(op_c)] = c_statefns
return sampled_statefn_dicts
# TODO build Aer re-parameterized Qobj.
def _prepare_parameterized_run_config(self,
param_bindings: List[Dict[Any, List[float]]]) -> None:
raise NotImplementedError
# Wipe parameterizations, if any
# self.quantum_instance._run_config.parameterizations = None
# if not self._binding_mappings:
# phony_binding = {k: str(k) for k in param_bindings[0].keys()}
# phony_bound_circuits = [circ.bind_parameters(phony_binding)
# for circ in self._transpiled_circ_cache]
# qobj = self.quantum_instance.assemble(phony_bound_circuits)
# # for circ in qobj:
# # mapping = None
# # for
#
# # self.quantum_instance._run_config.parameterizations = [params_circ]
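# A minimal usage sketch, assuming Aer's qasm_simulator is available; `qc` is a
# placeholder parameterized QuantumCircuit and `theta` one of its parameters.
#   from qiskit import Aer
#   sampler = CircuitSampler(Aer.get_backend('qasm_simulator'))
#   sampled = sampler.convert(StateFn(qc), params={theta: [0.0, 0.5, 1.0]})
#   # with params given, `sampled` is a ListOp with one replacement StateFn per binding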
|
the-stack_106_24609 | # Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = Client(account_sid, auth_token)
worker = client.taskrouter.workspaces(workspace_sid).workers.create(
friendly_name='Support Worker 1', attributes='{"type":"support"}'
)
print(worker.friendly_name)
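# The environment variables read above are typically exported in the shell beforehand;
# the values below are placeholders in the same style as the workspace SID above:
#   export TWILIO_ACCOUNT_SID=ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#   export TWILIO_AUTH_TOKEN=your_auth_token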
|
the-stack_106_24610 | import logging
import os
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from api.utilities.basic_utils import get_with_retry
from api.runners import get_runner
from api.storage_backends.google_cloud import GoogleBucketStorage
logger = logging.getLogger(__name__)
def get_instance_region():
try:
zone = get_instance_zone() # a string like us-east4-c
region = '-'.join(zone.split('-')[:2]) # now like us-east4
return region
except Exception as ex:
# if we could not get the region of the instance, return None for the region
#return None
raise ex
def get_instance_zone():
try:
response = get_with_retry(
'http://metadata/computeMetadata/v1/instance/zone',
headers={'Metadata-Flavor': 'Google'}
)
# zone_str is something like 'projects/{project ID number}/zones/us-east4-c'
zone_str = response.text
zone = zone_str.split('/')[-1] # now like us-east4-c
return zone
except Exception as ex:
# if we could not get the region of the instance, return None for the region
#return None
raise ex
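# Example of the two helpers above with an illustrative project number: the metadata
# server returns 'projects/123456789/zones/us-east4-c', so get_instance_zone() yields
# 'us-east4-c' and get_instance_region() yields 'us-east4'.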
def startup_check():
logger.info('Checking that everything is set for running MEV'
' in the Google Cloud environment.'
)
# get the location of our application
region = get_instance_region()
if settings.ENABLE_REMOTE_JOBS:
logger.info('Remote jobs were enabled. Check that the runners are ready.')
for job_runner in settings.REQUESTED_REMOTE_JOB_RUNNERS:
runner_class = get_runner(name=job_runner)
runner = runner_class()
try:
runner.check_if_ready()
except ImproperlyConfigured as ex:
logger.info('Runner was not ready.')
raise ex
except Exception as ex:
logger.info('Unexpected error upon checking if runners were ready.')
raise ex
else:
logger.info('Remote jobs disabled.')
# check that the bucket region and the VM region are the same.
# we could technically permit them to be different, but this can cause
# issues pushing data between regions.
if not settings.STORAGE_LOCATION == settings.LOCAL:
logger.info('Since storage is not local, have to check regions.')
gbs = GoogleBucketStorage()
try:
bucket_location = gbs.get_bucket_region(gbs.BUCKET_NAME)
except Exception as ex:
logger.info('Could not locate the expected storage bucket ({name}).'
' Make sure this bucket already exists.'.format(name=gbs.BUCKET_NAME)
)
raise ex
bucket_location = bucket_location.lower()
if bucket_location != region:
raise ImproperlyConfigured('The storage bucket ({b})'
' should be in the same region ({r}) as the host machine.'.format(
b = gbs.BUCKET_NAME,
r = region
)
)
else:
logger.info('Bucket region matched the instance region.')
|
the-stack_106_24612 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import concurrent.futures
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from taskflow import engines as tf_engines
from octavia.common import base_taskflow
import octavia.tests.unit.base as base
MAX_WORKERS = 1
_engine_mock = mock.MagicMock()
class TestBaseTaskFlowEngine(base.TestCase):
def setUp(self):
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="task_flow", max_workers=MAX_WORKERS)
conf.config(group="task_flow", engine='TESTENGINE')
conf.config(group="task_flow", disable_revert=True)
super(TestBaseTaskFlowEngine, self).setUp()
@mock.patch('concurrent.futures.ThreadPoolExecutor',
return_value='TESTEXECUTOR')
@mock.patch('taskflow.engines.load',
return_value=_engine_mock)
def test_taskflow_load(self,
mock_tf_engine_load,
mock_ThreadPoolExecutor):
# Test __init__
base_taskflow_engine = base_taskflow.BaseTaskFlowEngine()
concurrent.futures.ThreadPoolExecutor.assert_called_once_with(
max_workers=MAX_WORKERS)
# Test _taskflow_load
base_taskflow_engine._taskflow_load('TEST')
tf_engines.load.assert_called_once_with(
'TEST',
engine='TESTENGINE',
executor='TESTEXECUTOR',
never_resolve=True)
_engine_mock.compile.assert_called_once_with()
_engine_mock.prepare.assert_called_once_with()
|
the-stack_106_24614 | import theano
import theano.tensor as T
import numpy as np
from collections import OrderedDict
from functools import reduce
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from lasagne.utils import floatX
__all__ = [
'softmin',
'join',
'lsum',
'joinc',
'ldot',
'lmean',
'log_barrier',
'make_copy',
'as_shared',
'make_uniform',
'make_normal',
'get_srng',
'border_mask'
]
join = lambda xs: reduce(lambda a, b: a + b, xs)
lsum = join
ldot = lambda xs, ys: join([ T.sum(x * y) for x, y in zip(xs, ys) ])
def joinc(xs, cs=None):
if cs is None and len(xs) == 1:
return xs[0]
elif cs is None:
return join(xs)
else:
return join([x * c for x, c in zip(xs, cs)])
def lmean(xs, cs = None):
if len(xs) == 1:
return xs[0]
elif cs is None:
return join(xs) / len(xs)
else:
return joinc(xs, cs)
def get_srng(srng):
if srng is None:
# from theano.sandbox.cuda.rng_curand import CURAND_RandomStreams as RandomStreams
return RandomStreams(seed=np.random.randint(2**30))
else:
return srng
def softmin(xs, alpha=1.0):
alpha = np.float32(alpha)
if hasattr(xs, '__len__'):
exp_xs = [ T.exp(-x * alpha) for x in xs ]
n = join(exp_xs)
return [ ex / n for ex in exp_xs ]
else:
return T.nnet.softmax(-xs * alpha)
def log_barrier(v, bounds):
return -(T.log(v - bounds[0]) + T.log(bounds[1] - v))
def make_copy(shared):
value = shared.get_value(borrow=True)
return theano.shared(
np.zeros(value.shape, dtype=value.dtype),
broadcastable=shared.broadcastable
)
def as_shared(var):
return theano.shared(
np.zeros(shape=(0, ) * var.ndim, dtype=var.dtype),
broadcastable=var.broadcastable
)
def make_uniform(shared, a, b, srng=None):
srng = get_srng(srng)
return srng.uniform(
low=a, high=b,
size=shared.get_value(borrow=True).shape,
ndim=shared.ndim, dtype=shared.dtype
)
def make_normal(shared, srng):
srng = get_srng(srng)
return srng.normal(
size=shared.get_value(borrow=True).shape,
ndim=shared.ndim, dtype=shared.dtype
)
def border_mask(exclude_borders, img_shape, dtype='float32'):
if img_shape is None:
raise Exception('With non-zero border exclusion `img_shape` argument must be defined!')
mask = np.ones(
shape=tuple(img_shape[-2:]),
dtype=dtype
)
n = exclude_borders
mask[:n, :] = 0
mask[-n:, :] = 0
mask[:, :n] = 0
mask[:, -n:] = 0
return mask
def masked(exclude_borders, img_shape, dtype='float32'):
if exclude_borders > 0:
M = border_mask(exclude_borders, img_shape, dtype)
def m(X):
return X * M[None, None, :, :]
return m
else:
M = None
return lambda X: X
|
the-stack_106_24615 | import curses
import collections
import tempfile
import subprocess
import multidict
from metaindex import shared
import metaindex.cache
import metaindex.indexer
from cursedspace import Key, InputLine, ShellContext
from metaindexmanager import command
from metaindexmanager import utils
from metaindexmanager.utils import logger
from metaindexmanager.panel import ListPanel
from metaindexmanager.detailpanel import DetailPanel
from metaindexmanager.docpanel import DocPanel
from metaindexmanager.filepanel import FilePanel
Change = collections.namedtuple("Change", ['index', 'new_value', 'prefix', 'tag', 'old_value'])
Insert = collections.namedtuple("Insert", ['prefix', 'tag', 'value'])
Delete = collections.namedtuple("Delete", ['index', 'prefix', 'tag', 'value'])
GroupedChange = collections.namedtuple("GroupedChange", ['changes'])
Line = collections.namedtuple("Line", ['group', 'prefix', 'tag', 'value'])
Header = collections.namedtuple("Header", ['group', 'title', 'prefix', 'tag', 'value'], defaults=['', '', ''])
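# Illustrative change records (tag names and values are placeholders):
#   Insert('extra', 'title', 'My Document')                   # add a new extra tag
#   Change(3, 'New title', 'extra', 'title', 'My Document')   # edit the item at index 3
#   Delete(3, 'extra', 'title', 'New title')                  # drop that tag/value pair
#   GroupedChange([...])                                      # several edits as one undo step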
class EditorLine(InputLine):
def __init__(self, panel, *args, text=None, **kwargs):
y = panel.pos[0] + panel.cursor - panel.offset + 1
x = panel.pos[1] + panel.columns[0] + 1
if text is None:
text = panel.selected_line.value or ''
super().__init__(panel.app, panel.columns[1], (y, x), text=text, background='░') # background='░.')
self.item = panel.selected_line
self.parent = panel
self.app.previous_focus = panel
self.app.current_panel = self
self.original_text = self.text
logger.debug(f"Enter tag edit mode for {self.item} (text: '{text}')")
def handle_key(self, key):
if key in [Key.ESCAPE, "^C"]:
self.destroy()
elif key in [Key.RETURN, "^S"]:
self.destroy()
if self.text != self.original_text:
idx = self.parent.items.index(self.item)
self.parent.changed(Change(idx, self.text,
self.item.prefix,
self.item.tag,
self.item.value))
else:
super().handle_key(key)
def destroy(self):
super().destroy()
self.parent.editor = None
self.app.current_panel = self.parent
self.parent.paint_item(self.parent.cursor)
class EditorPanel(ListPanel):
"""Metadata editing panel"""
SCOPE = 'editor'
SPACING = 3
CONFIG_ICON_MULTILINE = 'multiline-indicator'
CONFIG_ICON_CUTOFF = 'cutoff-indicator'
def __init__(self, filepath, *args, **kwargs):
super().__init__(*args, **kwargs)
self.item = filepath
self.columns = []
# the history of changes
self.changes = []
# the pointer into the history of changes,
# usually points beyond the end of the list of changes
self.change_ptr = 0
# list of items directly after reload
self.unchanged_items = []
self.editor = None
self.metadata = multidict.MultiDict()
self._multiline_icon = ' '
self._cutoff_icon = ' '
self.configuration_changed()
self.reload()
self.cursor = 1
@property
def selected_path(self):
return self.item
@property
def selected_paths(self):
return [self.item]
@property
def selected_item(self):
return str(self.item)
@property
def selected_line(self):
if 0 <= self.cursor < len(self.items):
return self.items[self.cursor]
return None
def open_selected(self):
if self.item is None:
return
self.app.open_file(self.item)
def changed(self, change):
logger.debug(f"Added change to stack: {change}")
self.changes = self.changes[:self.change_ptr]
self.changes.append(change)
self.change_ptr = len(self.changes)
self.rebuild_items()
self.rebuild_columns()
self.scroll()
self.paint(True)
def paint(self, clear=False):
super().paint(clear)
if self.change_ptr > 0:
self.win.addstr(0, 1, " Modified ")
self.win.noutrefresh()
def title(self):
return str(self.item)
def focus(self):
y, x = super().focus()
if self.editor is not None:
self.editor.focus()
else:
self.win.move(y, x+self.columns[0])
self.win.noutrefresh()
def multiline_edit(self):
logger.debug(f"start multi-line editing {self.selected_line}")
item = self.selected_line
if not isinstance(item, Line):
return
original = item.value
new_content = original
can_edit = item.prefix == 'extra'
editor = self.app.get_text_editor(True)
if editor is None:
return
with tempfile.NamedTemporaryFile("w+t", encoding="utf-8", suffix='.txt') as fh:
fh.write(original)
fh.flush()
with ShellContext(self.app.screen):
subprocess.run(editor + [fh.name])
self.app.paint(True)
# only actually apply changes when editing the 'extra' tags
if can_edit:
fh.flush()
fh.seek(0)
new_content = fh.read()
logger.debug(f"Can change? {can_edit} -- has changes? {new_content != original}")
if can_edit and new_content != original:
self.changed(Change(self.cursor, new_content, item.prefix, item.tag, original))
def start_edit(self, text=None):
logger.debug(f"start editing {self.selected_line}")
if self.editor is not None:
self.editor.destroy()
del self.editor
if (text is not None and '\n' in text) or (text is None and '\n' in self.selected_line.value):
self.multiline_edit()
else:
self.editor = EditorLine(self, text=text)
self.app.paint(True)
def cancel_edit(self):
if self.editor is None:
return
self.editor.destroy()
del self.editor
self.editor = None
self.paint(True)
def resize(self, *args):
super().resize(*args)
self.rebuild_columns()
def reset(self):
self.changes = []
self.change_ptr = 0
self.reload()
self.paint(True)
def save(self, blocker):
logger.debug(f"The file is {self.selected_path.name}")
if blocker is not None:
blocker.title(f"Saving changes to {self.selected_path.name}")
# Read in the current sidecar file, if it exists
collection_extra = None
sidecar_file, is_collection, store = \
self.app.metaindexconf.resolve_sidecar_for(self.selected_path)
if sidecar_file is None:
self.app.error("No usable metadata storage available")
return
if sidecar_file.exists():
if is_collection:
collection_extra = store.get_for_collection(sidecar_file, prefix='')
collection_extra = \
utils.collection_meta_as_writable(collection_extra, sidecar_file.parent)
logger.debug("Read collection metadata: %s", collection_extra)
if self.selected_path.name in collection_extra:
extra = collection_extra.pop(self.selected_path.name)
else:
extra = multidict.MultiDict()
logger.debug("Extra for %s is: %s", self.selected_path.name, extra)
else:
extra = store.get(sidecar_file, prefix='')
# remove control data
extra.popall(shared.IS_RECURSIVE, [])
else:
if is_collection:
collection_extra = multidict.MultiDict()
extra = multidict.MultiDict()
# apply all changes to the extra metadata
for change in self.expand_changes():
logger.debug(f" ... processing {change}")
prefix = ''
if change.prefix != 'extra':
prefix = change.prefix + '.'
if isinstance(change, Change):
values = extra.popall(prefix + change.tag, None)
if values is None:
values = []
applied = False
for value in values:
if value == change.old_value and not applied:
extra.add(prefix + change.tag, change.new_value)
applied = True
else:
extra.add(prefix + change.tag, value)
if not applied:
logger.info(f"Change to {prefix + change.tag} is actually an insert because of different sources")
extra.add(prefix + change.tag, change.new_value)
elif isinstance(change, Insert):
extra.add(prefix + change.tag, change.value)
elif isinstance(change, Delete):
values = extra.popall(prefix + change.tag, None)
if values is None:
logger.warning(f"Skipping deletion of {prefix + change.tag}: not found")
continue
if change.value in values:
values.remove(change.value)
for value in values:
extra.add(prefix + change.tag, value)
# 'store' will not remove the 'extra.' prefix per tag,
# so we have to do that here
for key in set(extra.keys()):
if not key.startswith('extra.'):
continue
values = extra.popall(key)
_, key = key.split('.', 1)
for value in values:
extra.add(key, value)
logger.debug(f"Writing new metadata: {extra}")
# save the extra metadata to the sidecar file
if is_collection:
assert collection_extra is not None
collection_extra[self.selected_path.name] = extra
logger.debug(f"Updating collection sidecar to {collection_extra}")
store.store(collection_extra, sidecar_file)
else:
store.store(extra, sidecar_file)
# reload the cache
with ShellContext(self.app.screen):
self.app.cache.refresh(self.selected_path)
self.app.cache.wait_for_reload()
self.app.paint(True)
# reset
self.reset()
def reload(self):
logger.debug("Reloading %s", self.item)
metadata = [entry.metadata
for entry in self.app.cache.get(self.item)
if entry.path == self.item]
if len(metadata) == 0:
self.metadata = multidict.MultiDict({'filename': str(self.selected_path.name)})
else:
self.metadata = metadata[0]
self.rebuild_items()
self.rebuild_columns()
def rebuild_columns(self):
if len(self.items) > 1:
self.columns = [max([1 if isinstance(row, Header) else len(str(row.tag))+self.SPACING
for row in self.items]), 0]
self.columns[1] = self.dim[1] - self.columns[0] - 2
else:
half_w = self.dim[1]//2
self.columns = [half_w, self.dim[1] - half_w]
def rebuild_items(self):
self.items = []
self.unchanged_items = []
if len(self.metadata) == 0:
self.unchanged_items = []
self.cursor = 0
return
keys = list(set(self.metadata.keys()))
keys.sort(key=lambda k: [not k.startswith('extra.'), '.' in k, k.lower()])
# prepare the unchanged items
for key in keys:
displaykey = key
prefix = 'general'
if '.' in key:
prefix, displaykey = key.split('.', 1)
for value in self.metadata.getall(key, []):
self.unchanged_items.append(Line(prefix, prefix, displaykey, value))
# apply all changes in order to the point where we are
self.items = self.unchanged_items[:]
for change in [None] + self.expand_changes():
if isinstance(change, Change):
original = self.items[change.index]
self.items[change.index] = Line(original.group,
original.prefix,
original.tag,
change.new_value)
elif isinstance(change, Insert):
group = change.prefix
if len(group) == 0:
group = 'general'
self.items.append(Line(group, change.prefix, change.tag, change.value))
elif isinstance(change, Delete):
self.items = self.items[:change.index] + self.items[change.index+1:]
self.items = [i for i in self.items if isinstance(i, Line)]
self.items += [Header(g, g.title())
for g in set([i.group for i in self.items])]
self.items.sort(key=lambda k: [k.group != 'extra',
k.group,
isinstance(k, Line),
k.tag.lower(),
self.app.humanize(k.value).lower()])
def do_paint_item(self, y, x, maxwidth, is_selected, item):
if isinstance(item, Header):
self.win.addstr(y, x, item.title[:self.dim[1]-x-2], curses.A_BOLD)
else:
for colidx, text in enumerate([item.tag, item.value]):
self.win.addstr(y, x, " "*self.columns[colidx])
maxlen = self.dim[1]-x-2
if text is None:
text = ''
if colidx == 1 and is_selected and self.editor is not None:
self.editor.paint()
else:
# make it a human-readable string
text = self.app.humanize(text)
# multi-line is special
is_multiline = '\r' in text or '\n' in text
text = utils.first_line(text)
if is_multiline:
text += ' ' + self._multiline_icon
# shorten the text to visible width
shortened = text[:maxlen]
if len(shortened) < len(text):
icon = self._cutoff_icon
if is_multiline:
icon = self._multiline_icon
shortened = shortened[:-1-len(icon)] + ' ' + icon
self.win.addstr(y, x, shortened[:maxlen])
x += self.columns[colidx]
def configuration_changed(self, name=None):
super().configuration_changed(name)
changed = False
if name is None or name == self.CONFIG_ICON_MULTILINE:
new_value = self.app.configuration.get(self.SCOPE, self.CONFIG_ICON_MULTILINE, '…')
changed = self._multiline_icon != new_value
self._multiline_icon = new_value
if name is None or name == self.CONFIG_ICON_CUTOFF:
new_value = self.app.configuration.get(self.SCOPE, self.CONFIG_ICON_CUTOFF, '→')
changed = self._multiline_icon != new_value
self._cutoff_icon = new_value
if changed:
if self.win is not None:
self.scroll()
self.paint(True)
def add_tag(self, name):
if self.editor is not None:
self.cancel_edit()
self.changed(Insert('extra', name, ''))
for nr, item in enumerate(self.items):
if not isinstance(item, Line):
continue
if item.prefix == 'extra' and item.tag == name and item.value == '':
self.cursor = nr
self.scroll()
self.paint(True)
def remove_tag(self, line):
if line not in self.items or isinstance(line, str):
return
if self.editor is not None:
self.cancel_edit()
idx = self.items.index(line)
self.changed(Delete(idx, line.prefix, line.tag, line.value))
def undo(self):
if self.change_ptr <= 0:
return
self.change_ptr -= 1
self.rebuild_items()
self.rebuild_columns()
self.scroll()
self.paint(True)
def redo(self):
if self.change_ptr >= len(self.changes):
return
self.change_ptr += 1
self.rebuild_items()
self.rebuild_columns()
self.scroll()
self.paint(True)
def expand_changes(self):
if len(self.changes[:self.change_ptr]) == 0:
return []
return sum([change.changes if isinstance(change, GroupedChange) else [change]
for change in self.changes[:self.change_ptr]], start=[])
@command.registered_command
class EditMetadata(command.Command):
"""Edit metadata of the selected file"""
NAME = 'edit-metadata'
ACCEPT_IN = (DocPanel, FilePanel, DetailPanel)
def execute(self, context):
if context.panel.is_busy:
return
item = context.panel.selected_path
if not item.is_file():
context.application.error(f"{item.stem} is not a file")
return
panel = EditorPanel(item, context.application)
context.application.add_panel(panel)
context.application.activate_panel(panel)
@command.registered_command
class EnterEditMode(command.Command):
"""Start editing the metadata"""
NAME = 'edit-mode'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
item = context.panel.selected_line
if not isinstance(item, Line):
return
if item.prefix == 'extra' or \
(isinstance(item.value, str) and ('\n' in item.value or '\r' in item.value)):
context.panel.start_edit()
@command.simple_command("edit-multiline", (EditorPanel,))
def edit_multiline_command(context):
"""Edit the tag value in an external editor"""
target = context.panel
if target.is_busy:
return
context.panel.multiline_edit()
@command.registered_command
class AddTagCommand(command.Command):
"""Add a new metadata field"""
NAME = 'add-tag'
ACCEPT_IN = (EditorPanel,)
def completion_options(self, context, *args):
text = "" if len(args) == 0 else args[0]
keys = {key.split('.', 1)[1]
for key in context.application.cache.keys()
if key.startswith('extra.')}
keys |= shared.DUBLINCORE_TAGS
return list(key for key in sorted(keys) if key.startswith(text))
def execute(self, context, name=None):
if context.panel.is_busy:
return
if name is None:
context.application.error("Usage: add-attr name")
return
context.panel.add_tag(name)
@command.registered_command
class AddValueForAttribute(command.Command):
"""Add a new metadata value for this field"""
NAME = 'add-value'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
item = context.panel.selected_line
if not isinstance(item, Line) or item.prefix != 'extra':
return
context.panel.add_tag(item.tag)
@command.registered_command
class ReplaceValueForAttribute(command.Command):
"""Replace the selected metadata value"""
NAME = 'replace-value'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
item = context.panel.selected_line
if not isinstance(item, Line) or item.prefix != 'extra':
return
context.panel.start_edit(text='')
@command.registered_command
class RemoveAttribute(command.Command):
"""Remove the selected metadata field"""
NAME = 'del-tag'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
item = context.panel.selected_line
if isinstance(item, Header):
context.application.error("Selected field cannot be deleted")
return
if item.prefix != 'extra':
# TODO support the null override of values
context.application.error("Selected field cannot be deleted")
return
context.panel.remove_tag(context.panel.selected_line)
@command.registered_command
class ResetEdits(command.Command):
"""Reset all unsaved changes"""
NAME = 'reset'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
context.panel.reset()
@command.registered_command
class SaveChanges(command.Command):
"""Save metadata changes"""
NAME = 'write'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
context.panel.run_blocking(context.panel.save)
context.application.paint(True)
@command.registered_command
class UndoChange(command.Command):
"""Undo the previous change"""
NAME = 'undo-change'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
context.panel.undo()
@command.registered_command
class RedoChange(command.Command):
"""Redo the next change (i.e. undo the undo)"""
NAME = 'redo-change'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
context.panel.redo()
@command.registered_command
class UndoAllChanges(command.Command):
"""Undo all changes"""
NAME = 'undo-all-changes'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
if context.panel.change_ptr <= 0:
return
context.panel.change_ptr = 1
context.panel.undo()
@command.simple_command("copy-tag", (EditorPanel,))
def copy_tag_command(context, clipboard=None):
"""Copy the selected tag and value to clipboard"""
source = context.panel
if source.is_busy:
return
context.application.clear_clipboard(clipboard)
line = source.selected_line
if line is None or isinstance(line, Header):
return
context.application.append_to_clipboard((line,), clipboard)
@command.simple_command("copy-append-tag", (EditorPanel,))
def copy_append_tag_command(context, clipboard=None):
"""Add the selected tag and value to clipboard"""
source = context.panel
if source.is_busy:
return
line = source.selected_line
if line is None or isinstance(line, Header):
return
context.application.append_to_clipboard((line,), clipboard)
@command.simple_command("paste-tag", (EditorPanel,))
def paste_tag_command(context, clipboard=None):
"""Paste tag and value from clipboard"""
target = context.panel
if target.is_busy:
return
content = context.application.get_clipboard_content(clipboard)
if content is None:
return
items = [item for item in content if isinstance(item, (Line,))]
if len(items) == 0:
return
changes = [Insert('extra', line.tag, line.value) for line in items]
grouped = GroupedChange(changes)
target.changed(grouped)
@command.registered_command
class RunRules(command.Command):
"""Run tag rules on this document"""
NAME = 'rules'
ACCEPT_IN = (EditorPanel,)
def execute(self, context):
if context.panel.is_busy:
return
if metaindex.indexer.get('rule-based') is None:
context.application.error("Rule based indexer not found")
return
path = context.panel.selected_path
context.panel.run_blocking(self.run_rules, context, path)
context.application.paint(True)
def run_rules(self, blocker, context, path):
blocker.title(f"Running rules on {path.name}")
base = context.application.cache.get(path, False)
if len(base) == 0:
info = metaindex.cache.Cache.Entry(path)
else:
info = base[0]
fulltext = shared.get_all_fulltext(info.metadata)
if len(fulltext) == 0:
# this will also run the rule-based indexer
logger.debug(f"No fulltext available, running indexer on {path}")
results = metaindex.indexer.index_files([path],
1,
metaindex.ocr.TesseractOCR(True),
True,
context.application.metaindexconf)
if len(results) == 0:
logger.debug("Indexers returned no results")
return
_, success, base = results[0]
info = metaindex.cache.Cache.Entry(path, base, shared.get_last_modified(path))
else:
# there is some fulltext, just rerun the rules
logger.debug(f"Fulltext is already here: {len(fulltext)}")
cache = metaindex.indexer.IndexerCache(metaindex.ocr.Dummy(),
False,
context.application.metaindexconf,
{},
info)
indexer = metaindex.indexer.get('rule-based')(cache)
success, extra = indexer.run(path, info.metadata.copy(), info)
if not success:
logger.debug(f"Indexer did not succeed")
return
# extend the cached metadata with the newly indexed data
new_info = False
for key in set(extra.keys()):
for value in extra.getall(key):
if value in info.metadata.getall(key, []):
continue
info.metadata.add(key, value)
new_info = True
if not new_info:
logger.debug("Nothing new here")
return
context.application.cache.insert(path, info.metadata)
context.application.callbacks.put((context.panel,
context.panel.reload))
|
the-stack_106_24616 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
import click
import pip.index
import pip.download
import progress.bar
@click.group()
@click.help_option('--help', '-h')
def cli():
"""
pymigrate: download externally hosted files for PyPI projects
"""
click.echo("pymigrate")
@cli.command('download')
@click.help_option('--help', '-h')
@click.argument("projects", nargs=-1, required=True)
@click.option('--indexurl', '-i', help='Base URL of Python Package Index (default: https://pypi.python.org/simple/)')
def download(projects, indexurl):
session = pip.download.PipSession()
session.timeout = 10
session.auth.prompting = False
if indexurl:
index_url = indexurl
else:
index_url = "https://pypi.python.org/simple/"
finder = pip.index.PackageFinder(
[],
[index_url],
allow_all_external=True,
allow_unverified=projects,
allow_all_prereleases=True,
session=session,
)
for project in progress.bar.Bar("Downloading").iter(projects):
try:
all_versions = finder._find_all_versions(project)
except Exception:
click.echo("Skipping {}, an error occured.".format(project))
continue
seen = set()
urls = set()
for version in all_versions:
if version.version not in seen:
seen.add(version.version)
if not version.location.internal:
urls.add(version.location.url)
for url in urls:
try:
resp = session.get(url)
resp.raise_for_status()
o = urllib_parse.urlparse(url)
try:
os.makedirs("dist")
except Exception:
pass
path = os.path.join("dist", os.path.basename(o.path))
with open(path, "wb") as fp:
fp.write(resp.content)
except Exception:
click.echo("Skipping {}, an error occured.".format(url))
continue
click.echo(
"Downloaded all externally hosted files, upload to PyPI using `twine "
"upload --skip-existing dist/*`"
)
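# Typical invocation, assuming this module is exposed as a `pymigrate` entry point
# (project names below are placeholders):
#   pymigrate download some-project another-project -i https://pypi.python.org/simple/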
|
the-stack_106_24617 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark backprop on small fake data set."""
import sys
import time
import numpy as np
from benchmarks import benchmark_utils
from pycoral.learn.backprop.softmax_regression import SoftmaxRegression
def _benchmark_for_training(num_classes, feature_dim):
"""Measures training time for given data set parameters.
Args:
num_classes: int, number of classes.
feature_dim: int, dimension of the feature vector.
Returns:
float, training time.
"""
num_train = 1024
num_val = 256
num_total = num_train + num_val
class_sizes = (num_total // num_classes) * np.ones(num_classes, dtype=int)
print('Preparing data set for num_classes=%d, feature_dim=%d' %
(num_classes, feature_dim))
np.random.seed(12345)
all_data = np.random.rand(num_total, feature_dim).astype(np.float32)
all_labels = np.tile(np.arange(num_classes), class_sizes[0])
np.random.shuffle(all_labels)
dataset = {}
dataset['data_train'] = all_data[0:num_train]
dataset['labels_train'] = all_labels[0:num_train]
dataset['data_val'] = all_data[num_train:]
dataset['labels_val'] = all_labels[num_train:]
model = SoftmaxRegression(feature_dim, num_classes)
# Train with SGD.
num_iter = 500
learning_rate = 0.01
batch_size = 100
print('Start backprop')
start_time = time.perf_counter()
model.train_with_sgd(
dataset, num_iter, learning_rate, batch_size, print_every=-1)
training_time = time.perf_counter() - start_time
print('Backprop time: ', training_time, 's')
return training_time
def main():
print('Python version: ', sys.version)
machine = benchmark_utils.machine_info()
benchmark_utils.check_cpu_scaling_governor_status()
# cases are defined by parameter pairs [num_classes, feature_dim].
cases = [[4, 256], [16, 256], [4, 1024], [16, 1024]]
results = [('CASE', 'TRAINING_TIME(s)')]
for params in cases:
num_classes = params[0]
feature_dim = params[1]
print('-------- num_classes=%d / feature_dim=%d --------' %
(num_classes, feature_dim))
results.append((':'.join(str(i) for i in params),
_benchmark_for_training(num_classes, feature_dim)))
benchmark_utils.save_as_csv(
'softmax_regression_benchmarks_%s_%s.csv' %
(machine, time.strftime('%Y%m%d-%H%M%S')), results)
if __name__ == '__main__':
main()
|
the-stack_106_24619 | import socket
import time
from io import BytesIO
from random import randint
from unittest import TestCase
from block import Block
from helper import (
hash256,
decode_base58,
encode_varint,
int_to_little_endian,
little_endian_to_int,
read_varint,
)
from tx import Tx
TX_DATA_TYPE = 1
BLOCK_DATA_TYPE = 2
FILTERED_BLOCK_DATA_TYPE = 3
COMPACT_BLOCK_DATA_TYPE = 4
NETWORK_MAGIC = b'\xf9\xbe\xb4\xd9'
TESTNET_NETWORK_MAGIC = b'\x0b\x11\x09\x07'
class NetworkEnvelope:
def __init__(self, command, payload, testnet=False):
self.command = command
self.payload = payload
if testnet:
self.magic = TESTNET_NETWORK_MAGIC
else:
self.magic = NETWORK_MAGIC
def __repr__(self):
return '{}: {}'.format(
self.command.decode('ascii'),
self.payload.hex(),
)
@classmethod
def parse(cls, s, testnet=False):
'''Takes a stream and creates a NetworkEnvelope'''
# check the network magic
magic = s.read(4)
if magic == b'':
raise RuntimeError('Connection reset!')
if testnet:
expected_magic = TESTNET_NETWORK_MAGIC
else:
expected_magic = NETWORK_MAGIC
if magic != expected_magic:
raise RuntimeError('magic is not right {} vs {}'.format(magic.hex(), expected_magic.hex()))
# command 12 bytes, strip the trailing 0's using .strip(b'\x00')
command = s.read(12).strip(b'\x00')
# payload length 4 bytes, little endian
payload_length = little_endian_to_int(s.read(4))
# checksum 4 bytes, first four of hash256 of payload
checksum = s.read(4)
# payload is of length payload_length
payload = s.read(payload_length)
# verify checksum
calculated_checksum = hash256(payload)[:4]
if calculated_checksum != checksum:
raise RuntimeError('checksum does not match')
return cls(command, payload, testnet=testnet)
def serialize(self):
'''Returns the byte serialization of the entire network message'''
# add the network magic
result = self.magic
# command 12 bytes, fill leftover with b'\x00' * (12 - len(self.command))
result += self.command + b'\x00' * (12 - len(self.command))
# payload length 4 bytes, little endian
result += int_to_little_endian(len(self.payload), 4)
# checksum 4 bytes, first four of hash256 of payload
result += hash256(self.payload)[:4]
# payload
result += self.payload
return result
def stream(self):
'''Returns a stream for parsing the payload'''
return BytesIO(self.payload)
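# Worked example: the verack envelope used in the tests below,
# f9beb4d976657261636b000000000000000000005df6e0e2, breaks down as
#   f9beb4d9                   network magic (mainnet)
#   76657261636b + 6 x 00      b'verack' padded to 12 bytes
#   00000000                   payload length 0, little-endian
#   5df6e0e2                   first 4 bytes of hash256(b'') (the checksum)
# followed by the (empty) payload itself.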
class NetworkEnvelopeTest(TestCase):
def test_parse(self):
msg = bytes.fromhex('f9beb4d976657261636b000000000000000000005df6e0e2')
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.command, b'verack')
self.assertEqual(envelope.payload, b'')
msg = bytes.fromhex('f9beb4d976657273696f6e0000000000650000005f1a69d2721101000100000000000000bc8f5e5400000000010000000000000000000000000000000000ffffc61b6409208d010000000000000000000000000000000000ffffcb0071c0208d128035cbc97953f80f2f5361746f7368693a302e392e332fcf05050001')
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.command, b'version')
self.assertEqual(envelope.payload, msg[24:])
def test_serialize(self):
msg = bytes.fromhex('f9beb4d976657261636b000000000000000000005df6e0e2')
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.serialize(), msg)
msg = bytes.fromhex('f9beb4d976657273696f6e0000000000650000005f1a69d2721101000100000000000000bc8f5e5400000000010000000000000000000000000000000000ffffc61b6409208d010000000000000000000000000000000000ffffcb0071c0208d128035cbc97953f80f2f5361746f7368693a302e392e332fcf05050001')
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.serialize(), msg)
class VersionMessage:
command = b'version'
def __init__(self, version=70015, services=0, timestamp=None,
receiver_services=0,
receiver_ip=b'\x00\x00\x00\x00', receiver_port=8333,
sender_services=0,
sender_ip=b'\x00\x00\x00\x00', sender_port=8333,
nonce=None, user_agent=b'/programmingblockchain:0.1/',
latest_block=0, relay=True):
self.version = version
self.services = services
if timestamp is None:
self.timestamp = int(time.time())
else:
self.timestamp = timestamp
self.receiver_services = receiver_services
self.receiver_ip = receiver_ip
self.receiver_port = receiver_port
self.sender_services = sender_services
self.sender_ip = sender_ip
self.sender_port = sender_port
if nonce is None:
self.nonce = int_to_little_endian(randint(0, 2**64), 8)
else:
self.nonce = nonce
self.user_agent = user_agent
self.latest_block = latest_block
self.relay = relay
def serialize(self):
'''Serialize this message to send over the network'''
# version is 4 bytes little endian
result = int_to_little_endian(self.version, 4)
# services is 8 bytes little endian
result += int_to_little_endian(self.services, 8)
# timestamp is 8 bytes little endian
result += int_to_little_endian(self.timestamp, 8)
# receiver services is 8 bytes little endian
result += int_to_little_endian(self.receiver_services, 8)
# IPV4 is 10 00 bytes and 2 ff bytes then receiver ip
result += b'\x00' * 10 + b'\xff\xff' + self.receiver_ip
# receiver port is 2 bytes, little endian
result += int_to_little_endian(self.receiver_port, 2)
# sender services is 8 bytes little endian
result += int_to_little_endian(self.sender_services, 8)
# IPV4 is 10 00 bytes and 2 ff bytes then sender ip
result += b'\x00' * 10 + b'\xff\xff' + self.sender_ip
# sender port is 2 bytes, little endian
result += int_to_little_endian(self.sender_port, 2)
# nonce
result += self.nonce
# useragent is a variable string, so varint first
result += encode_varint(len(self.user_agent))
result += self.user_agent
# latest block is 4 bytes little endian
result += int_to_little_endian(self.latest_block, 4)
# relay is 00 if false, 01 if true
if self.relay:
result += b'\x01'
else:
result += b'\x00'
return result
class VersionMessageTest(TestCase):
def test_serialize(self):
v = VersionMessage(timestamp=0, nonce=b'\x00' * 8)
self.assertEqual(v.serialize().hex(), '7f11010000000000000000000000000000000000000000000000000000000000000000000000ffff000000008d20000000000000000000000000000000000000ffff000000008d2000000000000000001b2f70726f6772616d6d696e67626c6f636b636861696e3a302e312f0000000001')
class VerAckMessage:
command = b'verack'
def __init__(self):
pass
@classmethod
def parse(cls, s):
return cls()
def serialize(self):
return b''
class PingMessage:
command = b'ping'
def __init__(self, nonce):
self.nonce = nonce
@classmethod
def parse(cls, s):
nonce = s.read(8)
return cls(nonce)
def serialize(self):
return self.nonce
class PongMessage:
command = b'pong'
def __init__(self, nonce):
self.nonce = nonce
    @classmethod
    def parse(cls, s):
nonce = s.read(8)
return cls(nonce)
def serialize(self):
return self.nonce
class GetHeadersMessage:
command = b'getheaders'
def __init__(self, version=70015, num_hashes=1, start_block=None, end_block=None):
self.version = version
self.num_hashes = num_hashes
if start_block is None:
raise RuntimeError('a start block is required')
self.start_block = start_block
if end_block is None:
self.end_block = b'\x00' * 32
else:
self.end_block = end_block
def serialize(self):
'''Serialize this message to send over the network'''
# protocol version is 4 bytes little-endian
result = int_to_little_endian(self.version, 4)
# number of hashes is a varint
result += encode_varint(self.num_hashes)
# start block is in little-endian
result += self.start_block[::-1]
# end block is also in little-endian
result += self.end_block[::-1]
return result
class GetHeadersMessageTest(TestCase):
def test_serialize(self):
block_hex = '0000000000000000001237f46acddf58578a37e213d2a6edc4884a2fcad05ba3'
gh = GetHeadersMessage(start_block=bytes.fromhex(block_hex))
self.assertEqual(gh.serialize().hex(), '7f11010001a35bd0ca2f4a88c4eda6d213e2378a5758dfcd6af437120000000000000000000000000000000000000000000000000000000000000000000000000000000000')
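# Illustrative sketch only: framing a getheaders request by hand. The start
# hash reuses the block id from the test above and stands in for whatever
# chain tip a caller would actually have.
def _example_getheaders_envelope():
    start = bytes.fromhex(
        '0000000000000000001237f46acddf58578a37e213d2a6edc4884a2fcad05ba3')
    getheaders = GetHeadersMessage(start_block=start)
    envelope = NetworkEnvelope(getheaders.command, getheaders.serialize(), testnet=False)
    return envelope.serialize()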
class HeadersMessage:
command = b'headers'
def __init__(self, headers):
self.headers = headers
@classmethod
def parse(cls, s):
# number of headers is in a varint
num_headers = read_varint(s)
# initialize the headers array
headers = []
# loop through number of headers times
for _ in range(num_headers):
# add a header to the headers array by parsing the stream
headers.append(Block.parse_header(s))
# read the next varint (num_txs)
num_txs = read_varint(s)
# num_txs should be 0 or raise a RuntimeError
if num_txs != 0:
raise RuntimeError('number of txs not 0')
# return a class instance
return cls(headers)
def is_valid(self):
'''Return whether the headers satisfy proof-of-work and are sequential'''
last_block = None
for h in self.headers:
if not h.check_pow():
return False
if last_block and h.prev_block != last_block:
return False
last_block = h.hash()
return True
class HeadersMessageTest(TestCase):
def test_parse(self):
hex_msg = '0200000020df3b053dc46f162a9b00c7f0d5124e2676d47bbe7c5d0793a500000000000000ef445fef2ed495c275892206ca533e7411907971013ab83e3b47bd0d692d14d4dc7c835b67d8001ac157e670000000002030eb2540c41025690160a1014c577061596e32e426b712c7ca00000000000000768b89f07044e6130ead292a3f51951adbd2202df447d98789339937fd006bd44880835b67d8001ade09204600'
stream = BytesIO(bytes.fromhex(hex_msg))
headers = HeadersMessage.parse(stream)
self.assertEqual(len(headers.headers), 2)
for b in headers.headers:
self.assertEqual(b.__class__, Block)
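# Illustrative sketch only: how a caller might combine parse() and is_valid()
# to reject bad header batches before storing them.
def _example_validate_headers(stream):
    headers_message = HeadersMessage.parse(stream)
    if not headers_message.is_valid():
        raise RuntimeError('received headers that fail PoW or do not chain')
    return headers_message.headers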
class GetDataMessage:
command = b'getdata'
def __init__(self):
self.data = []
def add_data(self, data_type, identifier):
self.data.append((data_type, identifier))
def serialize(self):
# start with the number of items as a varint
result = encode_varint(len(self.data))
for data_type, identifier in self.data:
# data type is 4 bytes little endian
result += int_to_little_endian(data_type, 4)
# identifier needs to be in little endian
result += identifier[::-1]
return result
class GetDataMessageTest(TestCase):
def test_serialize(self):
hex_msg = '020300000030eb2540c41025690160a1014c577061596e32e426b712c7ca00000000000000030000001049847939585b0652fba793661c361223446b6fc41089b8be00000000000000'
get_data = GetDataMessage()
block1 = bytes.fromhex('00000000000000cac712b726e4326e596170574c01a16001692510c44025eb30')
get_data.add_data(FILTERED_BLOCK_DATA_TYPE, block1)
block2 = bytes.fromhex('00000000000000beb88910c46f6b442312361c6693a7fb52065b583979844910')
get_data.add_data(FILTERED_BLOCK_DATA_TYPE, block2)
self.assertEqual(get_data.serialize().hex(), hex_msg)
class GenericMessage:
def __init__(self, command, payload):
self.command = command
self.payload = payload
def serialize(self):
return self.payload
class SimpleNode:
def __init__(self, host, port=None, testnet=False, logging=False):
if port is None:
if testnet:
port = 18333
else:
port = 8333
self.testnet = testnet
self.logging = logging
# connect to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
# create a stream that we can use with the rest of the library
self.stream = self.socket.makefile('rb', None)
def handshake(self):
'''Do a handshake with the other node. Handshake is sending a version message and getting a verack back.'''
# create a version message
version = VersionMessage()
# send the command
self.send(version)
# wait for a verack message
self.wait_for(VerAckMessage)
def send(self, message):
'''Send a message to the connected node'''
# create a network envelope
envelope = NetworkEnvelope(
message.command, message.serialize(), testnet=self.testnet)
if self.logging:
print('sending: {}'.format(envelope))
# send the serialized envelope over the socket using sendall
self.socket.sendall(envelope.serialize())
def read(self):
'''Read a message from the socket'''
envelope = NetworkEnvelope.parse(self.stream, testnet=self.testnet)
if self.logging:
print('receiving: {}'.format(envelope))
return envelope
def wait_for(self, *message_classes):
'''Wait for one of the messages in the list'''
# initialize the command we have, which should be None
command = None
command_to_class = {m.command: m for m in message_classes}
# loop until the command is in the commands we want
while command not in command_to_class.keys():
# get the next network message
envelope = self.read()
# set the command to be evaluated
command = envelope.command
# we know how to respond to version and ping, handle that here
if command == VersionMessage.command:
# send verack
self.send(VerAckMessage())
elif command == PingMessage.command:
# send pong
self.send(PongMessage(envelope.payload))
# return the envelope parsed as a member of the right message class
return command_to_class[command].parse(envelope.stream())
def is_tx_accepted(self, tx_obj):
'''Returns whether a transaction has been accepted on the network'''
# create a GetDataMessage
get_data = GetDataMessage()
# ask for the tx
get_data.add_data(TX_DATA_TYPE, tx_obj.hash())
# send the GetDataMessage
self.send(get_data)
# now wait for a response
got_tx = self.wait_for(Tx)
if got_tx.id() == tx_obj.id():
return True
def get_filtered_txs(self, block_hashes):
'''Returns transactions that match the bloom filter'''
from merkleblock import MerkleBlock
# create a getdata message
getdata = GetDataMessage()
# for each block request the filtered block
for block_hash in block_hashes:
# add_data (FILTERED_BLOCK_DATA_TYPE, block_hash) to request the block
getdata.add_data(FILTERED_BLOCK_DATA_TYPE, block_hash)
# send the getdata message
self.send(getdata)
# initialize the results array we'll send back
results = []
# for each block hash
for block_hash in block_hashes:
# wait for the merkleblock command
mb = self.wait_for(MerkleBlock)
# check that the merkle block's hash is the same as the block hash
if mb.hash() != block_hash:
raise RuntimeError('Wrong block sent')
# check that the merkle block is valid
if not mb.is_valid():
raise RuntimeError('Merkle Proof is invalid')
# loop through the proved transactions from the Merkle block
for tx_hash in mb.proved_txs():
# wait for the tx command
tx_obj = self.wait_for(Tx)
# check that the hash matches
if tx_obj.hash() != tx_hash:
raise RuntimeError('Wrong tx sent {} vs {}'.format(tx_hash.hex(), tx_obj.id()))
# add to the results
results.append(tx_obj)
# return the results
return results
class SimpleNodeTest(TestCase):
def test_handshake(self):
node = SimpleNode('tbtc.programmingblockchain.com', testnet=True)
node.handshake()
def test_get_filtered_txs(self):
from bloomfilter import BloomFilter
bf = BloomFilter(30, 5, 90210)
h160 = decode_base58('mseRGXB89UTFVkWJhTRTzzZ9Ujj4ZPbGK5')
bf.add(h160)
node = SimpleNode('tbtc.programmingblockchain.com', testnet=True)
node.handshake()
node.send(bf.filterload())
block_hash = bytes.fromhex('00000000000377db7fde98411876c53e318a395af7304de298fd47b7c549d125')
txs = node.get_filtered_txs([block_hash])
self.assertEqual(txs[0].id(), '0c024b9d3aa2ae8faae96603b8d40c88df2fc6bf50b3f446295206f70f3cf6ad')
self.assertEqual(txs[1].id(), '0886537e27969a12478e0d33707bf6b9fe4fdaec8d5d471b5304453b04135e7e')
self.assertEqual(txs[2].id(), '23d4effc88b80fb7dbcc2e6a0b0af9821c6fe3bb4c8dc3b61bcab7c45f0f6888')
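# Illustrative sketch only (host name and network access are assumptions taken
# from the tests above): a typical session is connect, handshake, then
# exchange messages through send()/wait_for().
def _example_download_headers(start_block, host='tbtc.programmingblockchain.com'):
    node = SimpleNode(host, testnet=True)
    node.handshake()
    node.send(GetHeadersMessage(start_block=start_block))
    headers_message = node.wait_for(HeadersMessage)
    return headers_message.headers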
|
the-stack_106_24622 | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Build defs for TF/NumPy/JAX-variadic libraries & tests."""
NO_REWRITE_NEEDED = [
"internal:all_util",
"internal:docstring_util",
"internal:reparameterization",
"layers",
"platform_google",
]
REWRITER_TARGET = "//tensorflow_probability/substrates/meta:rewrite"
RUNFILES_ROOT = "tensorflow_probability/"
def _substrate_src(src, substrate):
"""Rewrite a single src filename for the given substrate."""
# When src's are sourced from a different package we cut away the package
# name.
parts = src.split(":")
return "_{}/_generated_{}".format(substrate, parts[-1])
def _substrate_srcs(srcs, substrate):
"""Rewrite src filenames for the given substrate."""
return [_substrate_src(src, substrate) for src in srcs]
def _substrate_dep(dep, substrate):
"""Convert a single dep to one appropriate for the given substrate."""
dep_to_check = dep
if dep.startswith(":"):
dep_to_check = "{}{}".format(native.package_name(), dep)
for no_rewrite in NO_REWRITE_NEEDED:
if no_rewrite in dep_to_check:
return dep
if "tensorflow_probability/" in dep or dep.startswith(":"):
if "internal/backend" in dep:
return dep
if ":" in dep:
return "{}.{}".format(dep, substrate)
return "{}:{}.{}".format(dep, dep.split("/")[-1], substrate)
return dep
def _substrate_deps(deps, substrate):
"""Convert deps to those appropriate for the given substrate."""
new_deps = [_substrate_dep(dep, substrate) for dep in deps]
backend_dep = "//tensorflow_probability/python/internal/backend/{}".format(substrate)
if backend_dep not in new_deps:
new_deps.append(backend_dep)
return new_deps
def _resolve_omit_dep(dep):
"""Resolves a `substrates_omit_deps` item to full target."""
if ":" not in dep:
dep = "{}:{}".format(dep, dep.split("/")[-1])
if dep.startswith(":"):
dep = "{}{}".format(native.package_name(), dep)
return dep
def _substrate_runfiles_symlinks_impl(ctx):
"""A custom BUILD rule to generate python runfiles symlinks.
A custom build rule which adds runfiles symlinks for files matching a
substrate genrule file pattern, i.e. `'_jax/_generated_normal.py'`.
This rule will aggregate and pass along deps while adding the given
symlinks to the runfiles structure.
Build rule attributes:
- substrate: One of 'jax' or 'numpy'; which substrate this applies to.
- deps: A list of py_library labels. These are passed along.
Args:
ctx: Rule analysis context.
Returns:
Info objects to propagate deps and add runfiles symlinks.
"""
# Aggregate the depset inputs to resolve transitive dependencies.
transitive_sources = []
uses_shared_libraries = []
imports = []
has_py2_only_sources = []
has_py3_only_sources = []
cc_infos = []
for dep in ctx.attr.deps:
if PyInfo in dep:
transitive_sources.append(dep[PyInfo].transitive_sources)
uses_shared_libraries.append(dep[PyInfo].uses_shared_libraries)
imports.append(dep[PyInfo].imports)
has_py2_only_sources.append(dep[PyInfo].has_py2_only_sources)
has_py3_only_sources.append(dep[PyInfo].has_py3_only_sources)
# if PyCcLinkParamsProvider in dep: # DisableOnExport
# cc_infos.append(dep[PyCcLinkParamsProvider].cc_info) # DisableOnExport
if CcInfo in dep:
cc_infos.append(dep[CcInfo])
# Determine the set of symlinks to generate.
transitive_sources = depset(transitive = transitive_sources)
runfiles_dict = {}
substrate = ctx.attr.substrate
file_substr = "_{}/_generated_".format(substrate)
for f in transitive_sources.to_list():
if "tensorflow_probability/python/" in f.dirname and file_substr in f.short_path:
pre, post = f.short_path.split("/python/")
out_path = "{}/substrates/{}/{}".format(
pre,
substrate,
post.replace(file_substr, ""),
)
runfiles_dict[RUNFILES_ROOT + out_path] = f
elif "tensorflow_probability/google" in f.dirname and file_substr in f.short_path:
pre, post = f.short_path.split("/google/")
out_path = "{}/substrates/{}/google/{}".format(
pre,
substrate,
post.replace(file_substr, ""),
)
runfiles_dict[RUNFILES_ROOT + out_path] = f
# Construct the output structures to pass along Python srcs/deps/etc.
py_info = PyInfo(
transitive_sources = transitive_sources,
uses_shared_libraries = any(uses_shared_libraries),
imports = depset(transitive = imports),
has_py2_only_sources = any(has_py2_only_sources),
has_py3_only_sources = any(has_py3_only_sources),
)
py_cc_link_info = cc_common.merge_cc_infos(cc_infos = cc_infos)
py_runfiles = depset(
transitive = [depset(transitive = [
dep[DefaultInfo].data_runfiles.files,
dep[DefaultInfo].default_runfiles.files,
]) for dep in ctx.attr.deps],
)
runfiles = DefaultInfo(runfiles = ctx.runfiles(
transitive_files = py_runfiles,
root_symlinks = runfiles_dict,
))
return py_info, py_cc_link_info, runfiles
# See documentation at:
# https://docs.bazel.build/versions/3.4.0/skylark/rules.html
substrate_runfiles_symlinks = rule(
implementation = _substrate_runfiles_symlinks_impl,
attrs = {
"substrate": attr.string(),
"deps": attr.label_list(),
},
)
def multi_substrate_py_library(
name,
srcs = [],
deps = [],
substrates_omit_deps = [],
jax_omit_deps = [],
numpy_omit_deps = [],
srcs_version = "PY3",
**kwargs):
"""A TFP `py_library` for each of TF, NumPy, and JAX.
Args:
name: The TF `py_library` name. NumPy and JAX libraries have '.numpy' and
'.jax' appended.
srcs: As with `py_library`. A `genrule` is used to rewrite srcs for NumPy
and JAX substrates.
deps: As with `py_library`. The list is rewritten to depend on
substrate-specific libraries for substrate variants.
substrates_omit_deps: List of deps to omit if those libraries are not
rewritten for the substrates.
jax_omit_deps: List of deps to omit for the JAX substrate.
numpy_omit_deps: List of deps to omit for the NumPy substrate.
srcs_version: As with `py_library`.
**kwargs: Additional keyword arguments for targets.
"""
if srcs_version != "PY3":
fail("Must use PY3 for srcs_version", srcs_version)
native.py_library(
name = name,
srcs = srcs,
deps = deps,
srcs_version = srcs_version,
**kwargs
)
remove_deps = [
"//third_party/py/tensorflow",
"//third_party/py/tensorflow:tensorflow",
]
trimmed_deps = [dep for dep in deps if (dep not in substrates_omit_deps and
dep not in remove_deps)]
resolved_omit_deps_numpy = [
_resolve_omit_dep(dep)
for dep in substrates_omit_deps + numpy_omit_deps
]
for src in srcs:
native.genrule(
name = "rewrite_{}_numpy".format(src.replace(".", "_")),
srcs = [src],
outs = [_substrate_src(src, "numpy")],
cmd = "$(location {}) $(SRCS) --omit_deps={} > $@".format(
REWRITER_TARGET,
",".join(resolved_omit_deps_numpy),
),
exec_tools = [REWRITER_TARGET],
)
native.py_library(
name = "{}.numpy.raw".format(name),
srcs = _substrate_srcs(srcs, "numpy"),
deps = _substrate_deps(trimmed_deps, "numpy"),
srcs_version = srcs_version,
**kwargs
)
# Add symlinks under tfp/substrates/numpy.
substrate_runfiles_symlinks(
name = "{}.numpy".format(name),
substrate = "numpy",
deps = [":{}.numpy.raw".format(name)],
**kwargs
)
resolved_omit_deps_jax = [
_resolve_omit_dep(dep)
for dep in substrates_omit_deps + jax_omit_deps
]
jax_srcs = _substrate_srcs(srcs, "jax")
for src in srcs:
native.genrule(
name = "rewrite_{}_jax".format(src.replace(".", "_")),
srcs = [src],
outs = [_substrate_src(src, "jax")],
cmd = "$(location {}) $(SRCS) --omit_deps={} --numpy_to_jax > $@".format(
REWRITER_TARGET,
",".join(resolved_omit_deps_jax),
),
exec_tools = [REWRITER_TARGET],
)
native.py_library(
name = "{}.jax.raw".format(name),
srcs = jax_srcs,
deps = _substrate_deps(trimmed_deps, "jax"),
srcs_version = srcs_version,
**kwargs
)
# Add symlinks under tfp/substrates/jax.
substrate_runfiles_symlinks(
name = "{}.jax".format(name),
substrate = "jax",
deps = [":{}.jax.raw".format(name)],
**kwargs
)
def multi_substrate_py_test(
name,
size = "small",
jax_size = None,
numpy_size = None,
srcs = [],
main = None,
deps = [],
jax_extra_deps = [],
tags = [],
numpy_tags = [],
jax_tags = [],
tf_tags = [],
disabled_substrates = [],
srcs_version = "PY3",
python_version = "PY3",
timeout = None,
shard_count = None,
args = []):
"""A TFP `py_test` for each of TF, NumPy, and JAX.
Args:
name: Name of the `test_suite` which covers TF, NumPy and JAX variants
of the test. Each substrate will have a dedicated `py_test`
suffixed with '.tf', '.numpy', or '.jax' as appropriate.
size: As with `py_test`.
jax_size: A size override for the JAX target.
numpy_size: A size override for the numpy target.
srcs: As with `py_test`. These will have a `genrule` emitted to rewrite
NumPy and JAX variants, writing the test file into a subdirectory.
main: As with `py_test`. If this does not match "{name}.py", then we
suppress the genrule that rewrites "{name}.py", since the typical
use-case of the `main` argument is a secondary, i.e. GPU, test.
deps: As with `py_test`. The list is rewritten to depend on
substrate-specific libraries for substrate variants.
jax_extra_deps: Extra dependencies for the JAX substrate.
tags: Tags global to this test target. NumPy also gets a `'tfp_numpy'`
tag, and JAX gets a `'tfp_jax'` tag. A `f'_{name}'` tag is used
to produce the `test_suite`.
numpy_tags: Tags specific to the NumPy test. (e.g. `"notap"`).
jax_tags: Tags specific to the JAX test. (e.g. `"notap"`).
tf_tags: Tags specific to the TF test. (e.g. `"notap"`).
disabled_substrates: Iterable of substrates to disable, items from
["numpy", "jax"].
srcs_version: As with `py_test`.
python_version: As with `py_test`.
timeout: As with `py_test`.
shard_count: As with `py_test`.
args: As with `py_test`.
"""
tags = [t for t in tags]
tags.append("multi_substrate")
test_targets = []
native.py_test(
name = "{}.tf".format(name),
size = size,
srcs = srcs,
main = main or "{}.py".format(name),
deps = deps,
tags = tags + tf_tags,
srcs_version = srcs_version,
python_version = python_version,
timeout = timeout,
shard_count = shard_count,
args = args,
)
test_targets.append(":{}.tf".format(name))
if "numpy" not in disabled_substrates:
numpy_srcs = _substrate_srcs(srcs, "numpy")
if main == None or main == "{}.py".format(name):
native.genrule(
name = "rewrite_{}_numpy".format(name),
srcs = srcs,
outs = numpy_srcs,
cmd = "$(location {}) $(SRCS) > $@".format(REWRITER_TARGET),
exec_tools = [REWRITER_TARGET],
)
native.py_test(
name = "{}.numpy".format(name),
size = numpy_size or size,
srcs = numpy_srcs,
main = _substrate_src(main or "{}.py".format(name), "numpy"),
deps = _substrate_deps(deps, "numpy"),
tags = tags + ["tfp_numpy"] + numpy_tags,
srcs_version = srcs_version,
python_version = "PY3",
timeout = timeout,
shard_count = shard_count,
args = args,
)
test_targets.append(":{}.numpy".format(name))
if "jax" not in disabled_substrates:
jax_srcs = _substrate_srcs(srcs, "jax")
if main == None or main == "{}.py".format(name):
native.genrule(
name = "rewrite_{}_jax".format(name),
srcs = srcs,
outs = jax_srcs,
cmd = "$(location {}) $(SRCS) --numpy_to_jax > $@".format(REWRITER_TARGET),
exec_tools = [REWRITER_TARGET],
)
jax_deps = _substrate_deps(deps, "jax") + jax_extra_deps
# [internal] Add JAX build dep
native.py_test(
name = "{}.jax".format(name),
size = jax_size or size,
srcs = jax_srcs,
main = _substrate_src(main or "{}.py".format(name), "jax"),
deps = jax_deps,
tags = tags + ["tfp_jax"] + jax_tags,
srcs_version = srcs_version,
python_version = "PY3",
timeout = timeout,
shard_count = shard_count,
args = args,
)
test_targets.append(":{}.jax".format(name))
native.test_suite(
name = name,
tests = test_targets,
)
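# Illustrative usage sketch only (target names, the dep path and the load
# label are hypothetical, not existing targets): a BUILD file would load these
# macros and define one library plus its test for all three substrates, e.g.
#
#   load(
#       "//tensorflow_probability/python:build_defs.bzl",
#       "multi_substrate_py_library",
#       "multi_substrate_py_test",
#   )
#
#   multi_substrate_py_library(
#       name = "my_distribution",
#       srcs = ["my_distribution.py"],
#       deps = ["//tensorflow_probability/python/internal:dtype_util"],
#   )
#
#   multi_substrate_py_test(
#       name = "my_distribution_test",
#       srcs = ["my_distribution_test.py"],
#       deps = [":my_distribution"],
#   )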
|
the-stack_106_24624 | # Copyright 2019 Baidu Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras model wrapper for SSD300 object detection."""
from __future__ import absolute_import
import numpy as np
import logging
import os
from perceptron.models.base import DifferentiableModel
from perceptron.zoo.ssd_300.keras_ssd300 import SSD300
from perceptron.zoo.ssd_300.layers_loss_bbox.keras_ssd_loss import SSDLoss
import keras
from keras import backend as K
from keras.models import load_model
from keras.optimizers import Adam
from matplotlib import pyplot as plt
from timeit import default_timer as timer
from PIL import Image, ImageFont, ImageDraw
import colorsys
from collections import defaultdict
class KerasSSD300Model(DifferentiableModel):
"""Create a :class:`Model` instance from a `DifferentiableModel` model.
Parameters
----------
model : `keras.model.Model`
The `Keras` model that are loaded.
bounds : tuple
Tuple of lower and upper bound for the pixel values, usually
(0, 1) or (0, 255).
model_image_shape : tuple
Tuple of the model input shape in format (height, width).
num_classes : int
Number of classes for which the model will output predictions.
channel_axis : int
The index of the axis that represents color channels.
max_boxes : int
The maximum number of boxes allowed in the prediction output.
score : float
The score threshold for considering a box as containing objects.
iou : float
The intersection over union (IoU) threshold.
preprocessing: 2-element tuple with floats or numpy arrays
        Elementwise preprocessing of the input; we first subtract the first
element of preprocessing from the input and then divide the input
by the second element.
class_names : list
Class names for ground truth labels
"""
def __init__(self,
model,
bounds,
model_image_shape=(300, 300),
num_classes=20,
channel_axis=3,
max_boxes=20,
score=0.3,
iou=0.5,
preprocessing=(0, 1),
class_names=['background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']):
super(KerasSSD300Model, self).__init__(bounds=bounds,
channel_axis=channel_axis,
preprocessing=preprocessing)
self._model = model
self._th_conf = score
self._min_overlap = iou
self._img_height = model_image_shape[0]
self._img_width = model_image_shape[1]
self._class_names = class_names
self._num_classes = num_classes
self._task = 'det'
def num_classes(self):
"""Return the number of classes."""
return self._num_classes
def class_names(self):
"""Return the class names as list."""
return self._class_names
def get_class(self):
return self.class_names()
    def model_task(self):
        """Return the task of the model: classification or detection."""
return self._task
def batch_predictions(self, images):
"""Batch prediction of images.
Parameters
----------
images : `numpy.ndarray`
            The input images in [b, h, w, c] ndarray format.
Returns
-------
list
            List of batch prediction results.
            Each element is a dictionary containing:
            {'boxes', 'scores', 'classes'}
"""
import cv2
images_res = []
for image in images:
            # cv2.resize expects (width, height)
            image_res = cv2.resize(image, (self._img_width, self._img_height))
images_res.append(image_res)
images_res = np.array(images_res)
y_preds = self._model.predict(images_res)
results = []
for y_pred in y_preds:
result = {}
out_boxes = []
out_scores = []
out_classes = []
for temp_pred in y_pred:
if temp_pred[1] >= self._th_conf:
temp_bbox = temp_pred[2:]
temp_bbox = np.array(
[temp_bbox[1], temp_bbox[0], temp_bbox[3], temp_bbox[2]])
out_boxes.append(temp_bbox)
out_scores.append(temp_pred[1])
out_classes.append(int(temp_pred[0]))
result['boxes'] = out_boxes
result['scores'] = out_scores
result['classes'] = out_classes
results.append(result)
return results
def predictions_and_gradient(self, image, criterion):
"""Returns both predictions and gradients, and
potentially loss w.r.t. to certain criterion.
"""
pass
def backward(self, target_class, image):
"""Get gradient with respect to the image."""
pass
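# Illustrative sketch only (the SSD300 Keras model itself is assumed to be
# built and loaded elsewhere, with decoded detections as its output): wrapping
# it and running detection on a random batch just to show the call pattern.
def _example_batch_predictions(ssd_keras_model):
    wrapper = KerasSSD300Model(ssd_keras_model, bounds=(0, 255), score=0.3)
    images = np.random.uniform(0, 255, size=(2, 300, 300, 3)).astype('float32')
    results = wrapper.batch_predictions(images)
    for result in results:
        print(result['classes'], result['scores'], result['boxes'])
    return results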
|
the-stack_106_24625 | """Support for fetching Vulcan data."""
async def get_lessons(client, date_from=None, date_to=None):
"""Support for fetching Vulcan lessons."""
changes = {}
list_ans = []
async for lesson in await client.data.get_changed_lessons(
date_from=date_from, date_to=date_to
):
temp_dict = {}
_id = str(lesson.id)
temp_dict["id"] = lesson.id
temp_dict["number"] = lesson.time.position if lesson.time is not None else None
temp_dict["lesson"] = (
lesson.subject.name if lesson.subject is not None else None
)
temp_dict["room"] = lesson.room.code if lesson.room is not None else None
temp_dict["changes"] = lesson.changes
temp_dict["note"] = lesson.note
temp_dict["reason"] = lesson.reason
temp_dict["event"] = lesson.event
temp_dict["group"] = lesson.group
temp_dict["teacher"] = (
lesson.teacher.display_name if lesson.teacher is not None else None
)
temp_dict["from_to"] = (
lesson.time.displayed_time if lesson.time is not None else None
)
changes[str(_id)] = temp_dict
async for lesson in await client.data.get_lessons(
date_from=date_from, date_to=date_to
):
temp_dict = {}
temp_dict["id"] = lesson.id
temp_dict["number"] = lesson.time.position
temp_dict["time"] = lesson.time
temp_dict["date"] = lesson.date.date
temp_dict["lesson"] = (
lesson.subject.name if lesson.subject is not None else None
)
if lesson.room is not None:
temp_dict["room"] = lesson.room.code
else:
temp_dict["room"] = "-"
temp_dict["visible"] = lesson.visible
temp_dict["changes"] = lesson.changes
temp_dict["group"] = lesson.group
temp_dict["reason"] = None
temp_dict["teacher"] = (
lesson.teacher.display_name if lesson.teacher is not None else None
)
temp_dict["from_to"] = (
lesson.time.displayed_time if lesson.time is not None else None
)
if temp_dict["changes"] is None:
temp_dict["changes"] = ""
elif temp_dict["changes"].type == 1:
temp_dict["lesson"] = f"Lekcja odwołana ({temp_dict['lesson']})"
temp_dict["changes_info"] = f"Lekcja odwołana ({temp_dict['lesson']})"
if str(temp_dict["changes"].id) in changes:
temp_dict["reason"] = changes[str(temp_dict["changes"].id)]["reason"]
elif temp_dict["changes"].type == 2:
temp_dict["lesson"] = f"{temp_dict['lesson']} (Zastępstwo)"
            if str(temp_dict["changes"].id) in changes:
                temp_dict["teacher"] = changes[str(temp_dict["changes"].id)]["teacher"]
                temp_dict["reason"] = changes[str(temp_dict["changes"].id)]["reason"]
elif temp_dict["changes"].type == 4:
temp_dict["lesson"] = f"Lekcja odwołana ({temp_dict['lesson']})"
if str(temp_dict["changes"].id) in changes:
temp_dict["reason"] = changes[str(temp_dict["changes"].id)]["reason"]
if temp_dict["visible"]:
list_ans.append(temp_dict)
return list_ans
async def get_student_info(client, student_id):
"""Support for fetching Student info by student id."""
student_info = {}
for student in await client.get_students():
if str(student.pupil.id) == str(student_id):
student_info["first_name"] = student.pupil.first_name
if student.pupil.second_name:
student_info["second_name"] = student.pupil.second_name
student_info["last_name"] = student.pupil.last_name
student_info[
"full_name"
] = f"{student.pupil.first_name} {student.pupil.last_name}"
student_info["id"] = student.pupil.id
student_info["class"] = student.class_
student_info["school"] = student.school.name
student_info["symbol"] = student.symbol
return student_info
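async def _example_print_todays_lessons(client):
    """Illustrative sketch only: `client` is assumed to be an authenticated
    vulcan API client exposing the `data` and `get_students` interfaces used
    above, and to accept plain `datetime.date` objects for the date range."""
    from datetime import date
    today = date.today()
    lessons = await get_lessons(client, date_from=today, date_to=today)
    for lesson in lessons:
        print(lesson["number"], lesson["lesson"], lesson["from_to"])
    return lessons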
|
the-stack_106_24626 | from flask import Blueprint, Response, request
from bson.json_util import dumps
from config import db
groups_routes = Blueprint('groups', __name__, url_prefix = '/groups')
@groups_routes.route('')
def getGroups():
try:
groups = db.groups.find()
return Response(
dumps(groups),
status = 200,
content_type = 'application/json'
)
except Exception as error:
return 'Error: %s' % (error)
@groups_routes.route('', methods = ['POST'])
def postGroup():
members = []
try:
group = request.get_json()
        # iterate over a copy since nonexistent members are removed in the loop
        for member in list(group['members']):
user = db.users.find_one(member)
if not user:
members.append(member)
group['members'].remove(member)
db.groups.insert_one(
{
'name': group['name'],
'members': group['members']
}
)
response = {
'message': 'Group %s created successfully!' % (group['name']),
'nonexistent_members': members
}
return Response(
dumps(response),
status = 201,
content_type = 'application/json'
)
except Exception as error:
return 'Error: %s' % (error)
@groups_routes.route('', methods = ['PATCH'])
def changeName():
try:
group = request.get_json()
updated = db.groups.update_one(
{
'name': group['name']
},
{'$set': {
'name': group['new_name']
}
}
)
if updated.modified_count:
response = {
'message': 'Name changed successfully!'
}
return Response(
dumps(response),
status = 200,
content_type = 'application/json'
)
if updated.matched_count:
response = {
'message': 'Group %s found, but not modified!' % (group['name'])
}
return Response(
dumps(response),
status = 400,
content_type = 'application/json'
)
else:
response = {
'message': 'Group %s not found!' % (group['name'])
}
return Response(
dumps(response),
status = 404,
content_type = 'application/json'
)
except Exception as error:
return 'Error: %s' % (error)
@groups_routes.route('/insert', methods = ['PATCH'])
def insert_member():
try:
group = request.get_json()
user = db.users.find_one({'email': group['member']})
if user:
user = db.groups.find_one({"name": group['name'], "members": {"email": group['member']}})
if user:
response = {
'message': 'The user %s is already part of the group.' % (group['member'])
}
return Response(
dumps(response),
status = 200,
content_type = 'application/json'
)
else:
inserted = db.groups.update_one(
{
'name': group['name']
},
{'$push': {
'members': {
'email': group['member']
}
}
}
)
if inserted.modified_count:
response = {
'message': 'User %s added successfully!' % (group['member'])
}
return Response(
dumps(response),
status = 200,
content_type = 'application/json'
)
if inserted.matched_count:
response = {
'message': 'Group %s found, but not modified!' % (group['name'])
}
return Response(
dumps(response),
status = 400,
content_type = 'application/json'
)
else:
response = {
'message': 'Group %s not found!' % (group['name'])
}
return Response(
dumps(response),
status = 404,
content_type = 'application/json'
)
else:
response = {
'message': 'User %s not found!' % (group['member'])
}
return Response(
dumps(response),
status = 404,
content_type = 'application/json'
)
except Exception as error:
return 'Error: %s' % (error)
@groups_routes.route('/remove', methods = ['PATCH'])
def remove_member():
try:
group = request.get_json()
user = db.groups.find_one({"name": group['name'], "members": {"email": group['member']}})
if user:
removed = db.groups.update_one(
{
'name': group['name']
},
{
'$pull': {
'members': {
'email': group['member']
}
}
}
)
if removed.modified_count:
response = {
'message': 'User %s deleted successfully!' % (group['member'])
}
return Response(
dumps(response),
status = 200,
content_type = 'application/json'
)
if removed.matched_count:
response = {
'message': 'Group %s found, but user has not been removed!' % (group['name'])
}
return Response(
dumps(response),
status = 400,
content_type = 'application/json'
)
else:
response = {
'message': 'Group %s not found!' % (group['name'])
}
return Response(
dumps(response),
status = 404,
content_type = 'application/json'
)
else:
response = {
'message': 'User %s not found in the group %s!' % (group['member'], group['name'])
}
return Response(
dumps(response),
status = 404,
content_type = 'application/json'
)
except Exception as error:
return 'Error: %s' % (error)
@groups_routes.route('', methods = ['DELETE'])
def deleteGroup():
try:
group = request.get_json()
deleted = db.groups.delete_one(
{
'name': group['name']
}
)
if deleted.deleted_count:
response = {
'message': 'Group %s deleted successfully!' % (group['name'])
}
return Response(
dumps(response),
status = 200,
content_type = 'application/json'
)
else:
response = {
'message': 'Group %s has not been deleted!' % (group['name'])
}
return Response(
dumps(response),
status = 404,
content_type = 'application/json'
)
except Exception as error:
return 'Error: %s' % (error)
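def _example_create_app():
    """Illustrative sketch only: mounting this blueprint on a Flask app makes
    the /groups endpoints above reachable; `config.db` must already point at a
    MongoDB database with `users` and `groups` collections."""
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(groups_routes)
    return app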
|
the-stack_106_24627 | #############################################################################
# Copyright (C) 2020-2021 German Aerospace Center (DLR-SC)
#
# Authors:
#
# Contact: Martin J. Kuehn <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import unittest
import memilio.simulation.secir as secir
import memilio.simulation as mio
import numpy as np
class Test_Migration(unittest.TestCase):
def test_params(self):
coeffs = mio.MigrationCoefficientGroup(1, 8)
coeffs[0] = mio.MigrationCoefficients(np.ones(8))
coeffs[0].add_damping(mio.MigrationDamping(0.5 * np.ones(8), t=1.0))
params = mio.MigrationParameters(coeffs)
self.assertTrue(
(params.coefficients.get_matrix_at(0) == np.ones(8)).all())
self.assertTrue((params.coefficients.get_matrix_at(2)
== 0.5 * np.ones(8)).all())
def test_params_graph(self):
graph = secir.SecirModelGraph()
graph.add_node(0, secir.SecirModel(1))
graph.add_node(1, secir.SecirModel(1))
graph.add_edge(0, 1, np.ones(8))
self.assertEqual(graph.num_nodes, 2)
self.assertEqual(graph.num_edges, 1)
self.assertEqual(graph.get_num_out_edges(0), 1)
self.assertEqual(graph.get_num_out_edges(1), 0)
def test_sim_graph(self):
graph = secir.MigrationGraph()
graph.add_node(0, secir.SecirModel(1), 0, 0.1)
graph.add_node(1, secir.SecirModel(1), 0)
graph.add_edge(0, 1, np.ones(8))
self.assertEqual(graph.num_nodes, 2)
self.assertEqual(graph.num_edges, 1)
self.assertEqual(graph.get_num_out_edges(0), 1)
self.assertEqual(graph.get_num_out_edges(1), 0)
def test_migration_sim(self):
graph = secir.MigrationGraph()
graph.add_node(0, secir.SecirModel(1), 0, 0.1)
graph.add_node(1, secir.SecirModel(1), 0)
graph.add_edge(0, 1, np.ones(8))
sim = secir.MigrationSimulation(graph, t0=0.0)
sim.advance(2)
# integration does adaptive time steps so exact count is unknown
self.assertGreaterEqual(sim.graph.get_node(
0).property.result.get_num_time_points(), 3)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_24628 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Unit tests for the flight star-identification routines.
"""
################################
#LOAD LIBRARIES
################################
from unittest import TestCase
import flight
import numpy as np
class TestFlight(TestCase):
def test_nchoosek(self):
self.assertEqual(flight.nchoosek(1, 1), 1)
self.assertEqual(flight.nchoosek(5, 2), 10)
self.assertEqual(flight.nchoosek(5, 1), 5)
self.assertEqual(flight.nchoosek(5, 10), 0)
self.assertEqual(flight.nchoosek(50, 5), 2118760)
self.assertEqual(flight.nchoosek(50, 10), 10272278170)
self.assertEqual(flight.nchoosek(0, 1), 0)
self.assertEqual(flight.nchoosek(0, 0), 1)
self.assertEqual(flight.nchoosek(1, 0), 1)
self.assertEqual(flight.nchoosek(-1, 1), 0)
self.assertEqual(flight.nchoosek(1, -10), 0)
# self.assertIsNone(flight.nchoosek(0, 1))
# self.assertIsNone(flight.nchoosek(1, 0))
# self.assertIsNone(flight.nchoosek(-1, 1))
# self.assertIsNone(flight.nchoosek(1, -10))
#
def test_interstar_angle(self):
arr = np.array([[1, 0, 0, 0, 1, 0]])
# flight.interstar_angle(arr, axis=None)
self.assertEqual(
flight.interstar_angle(arr, axis=1), np.pi/2)
arr = np.array([[1], [0], [0], [1], [0], [0]])
self.assertEqual(
flight.interstar_angle(arr, axis=0), 0)
# add tests for corner cases and random vectors and vector arrays
with self.assertRaises(ValueError):
arr = np.array([[1, 0, 0, 0, 0]])
flight.interstar_angle(arr, axis=None)
arr = np.array([[1], [0], [0], [0], [0], [0], [0]])
flight.interstar_angle(arr, axis=None)
arr = np.array([[1], [0], [0], [0], [0], [0]])
flight.interstar_angle(arr, axis=2)
def test_enhanced_pattern_shifting(self):
arr = np.arange(0, 4, 1)
arr_check = np.array([[0, 1, 2],
[1, 2, 3],
[0, 1, 3],
[0, 2, 3]])
self.assertIsNone(
np.testing.assert_array_equal(
flight.enhanced_pattern_shifting(arr), arr_check))
arr = np.arange(0, 3, 1)
arr_check = np.array([[0, 1, 2]])
self.assertIsNone(
np.testing.assert_array_equal(
flight.enhanced_pattern_shifting(arr), arr_check))
arr = np.arange(0, 6, 1)
arr_check = np.array([[0, 1, 2],
[3, 4, 5],
[1, 2, 3],
[2, 3, 4],
[0, 1, 3],
[1, 2, 4],
[2, 3, 5],
[0, 1, 4],
[1, 2, 5],
[0, 1, 5],
[0, 2, 3],
[1, 3, 4],
[2, 4, 5],
[0, 2, 4],
[1, 3, 5],
[0, 2, 5],
[0, 3, 4],
[1, 4, 5],
[0, 3, 5],
[0, 4, 5]])
self.assertIsNone(
np.testing.assert_array_equal(
flight.enhanced_pattern_shifting(arr), arr_check))
arr = np.array([])
self.assertIsNone(flight.enhanced_pattern_shifting(arr))
arr = np.array([0])
self.assertIsNone(flight.enhanced_pattern_shifting(arr))
arr = np.array([10, 11])
self.assertIsNone(flight.enhanced_pattern_shifting(arr))
arr = np.array([10, 11, 12, 11, 15, 20])
self.assertIsNone(flight.enhanced_pattern_shifting(arr))
def test_kvec_values(self):
# import ground
arr = np.arange(0, 1000, 100).astype(float)/10.0
# m, q = flight.kvec_values(istar_angle, istar_idx)
# print("{}\n{}".format(m, q))
m_test, q_test = (10.000000000000005, -10.000000000000025)
self.assertAlmostEqual(flight.kvec_values(arr)[0], m_test)
self.assertAlmostEqual(flight.kvec_values(arr)[1], q_test)
arr = np.arange(0.0, 6.0, 1)
m_test, q_test = (1.0000000000000004, -1.0000000000000016)
self.assertAlmostEqual(flight.kvec_values(arr)[0], m_test)
self.assertAlmostEqual(flight.kvec_values(arr)[1], q_test)
arr = np.linspace(0.3, 0.6, num=6)
arr[0] = 0.3247375897416183
arr[-1] = 0.6320437092965802
m_test, q_test = (0.06146122391099242, 0.26327636583062575)
self.assertAlmostEqual(flight.kvec_values(arr)[0], m_test)
self.assertAlmostEqual(flight.kvec_values(arr)[1], q_test)
# def test_ksearch(self):
# self.fail()
#
def test_full_obs_match(self):
# TODO: check for input types and incorrect data handling
x_obs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x_cat = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
isa_thresh = 0.0001
nmatch_test = 2
idmatch_test = np.array([[-1], [0], [2]])
self.assertIsNone(
np.testing.assert_array_equal(
flight.full_obs_match(
x_obs, x_cat, isa_thresh)[0], idmatch_test))
self.assertIsNone(
np.testing.assert_array_equal(
flight.full_obs_match(
x_obs, x_cat, isa_thresh)[1], nmatch_test))
#
def test_attitude_svd(self):
# TODO: check for input types and incorrect data handling
x_obs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x_cat = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
t_test = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
# t = flight.attitude_svd(x_obs, x_cat)
self.assertIsNone(
np.testing.assert_array_equal(
flight.attitude_svd(x_obs, x_cat), t_test))
#
# def test_triangle_isa_id(self):
# self.fail()
#
# def test_calculate_center_intensity(self):
# self.fail()
# img = np.zeros((128, 256))
# stats = None
# min_star_area = 5
# max_star_area = 30
# coi, intensities = flight.calculate_center_intensity(
# img, stats, min_star_area, max_star_area)
#
# def test_undistort_image(self):
# self.fail()
#
# def test_input_parser(self):
# self.fail()
if __name__ == '__main__':
import unittest
unittest.main()
|
the-stack_106_24632 | """An async GitHub API library"""
__version__ = "5.1.0.dev"
import http
from typing import Any, Optional
class GitHubException(Exception):
"""Base exception for this library."""
class ValidationFailure(GitHubException):
"""An exception representing failed validation of a webhook event."""
# https://docs.github.com/en/free-pro-team@latest/developers/webhooks-and-events/securing-your-webhooks#validating-payloads-from-github
class HTTPException(GitHubException):
"""A general exception to represent HTTP responses."""
def __init__(self, status_code: http.HTTPStatus, *args: Any) -> None:
self.status_code = status_code
if args:
super().__init__(*args)
else:
super().__init__(status_code.phrase)
class RedirectionException(HTTPException):
"""Exception for 3XX HTTP responses."""
class BadRequest(HTTPException):
"""The request is invalid.
Used for 4XX HTTP errors.
"""
# https://docs.github.com/en/free-pro-team@latest/rest/overview/resources-in-the-rest-api#client-errors
class BadRequestUnknownError(BadRequest):
"""A bad request whose response body is not JSON."""
def __init__(self, response: str) -> None:
self.response = response
super().__init__(http.HTTPStatus.UNPROCESSABLE_ENTITY)
class RateLimitExceeded(BadRequest):
"""Request rejected due to the rate limit being exceeded."""
# Technically rate_limit is of type gidgethub.sansio.RateLimit, but a
# circular import comes about if you try to properly declare it.
def __init__(self, rate_limit: Any, *args: Any) -> None:
self.rate_limit = rate_limit
if not args:
super().__init__(http.HTTPStatus.FORBIDDEN, "rate limit exceeded")
else:
super().__init__(http.HTTPStatus.FORBIDDEN, *args)
class InvalidField(BadRequest):
"""A field in the request is invalid.
Represented by a 422 HTTP Response. Details of what fields were
invalid are stored in the errors attribute.
"""
def __init__(self, errors: Any, *args: Any) -> None:
"""Store the error details."""
self.errors = errors
super().__init__(http.HTTPStatus.UNPROCESSABLE_ENTITY, *args)
class ValidationError(BadRequest):
"""A request was unable to be completed.
Represented by a 422 HTTP response. Details of what went wrong
are stored in the *errors* attribute.
"""
def __init__(self, errors: Any, *args: Any) -> None:
"""Store the error details."""
self.errors = errors
super().__init__(http.HTTPStatus.UNPROCESSABLE_ENTITY, *args)
class GitHubBroken(HTTPException):
"""Exception for 5XX HTTP responses."""
class GraphQLException(GitHubException):
"""Base exception for the GraphQL v4 API."""
def __init__(self, message: str, response: Any) -> None:
self.response = response
super().__init__(message)
class BadGraphQLRequest(GraphQLException):
"""A 4XX HTTP response."""
def __init__(self, status_code: http.HTTPStatus, response: Any) -> None:
assert 399 < status_code < 500
self.status_code = status_code
super().__init__(response["message"], response)
class GraphQLAuthorizationFailure(BadGraphQLRequest):
"""401 HTTP response to a bad oauth token."""
def __init__(self, response: Any) -> None:
super().__init__(http.HTTPStatus(401), response)
class QueryError(GraphQLException):
"""An error occurred while attempting to handle a GraphQL v4 query."""
def __init__(self, response: Any) -> None:
super().__init__(response["errors"][0]["message"], response)
class GraphQLResponseTypeError(GraphQLException):
"""The GraphQL response has an unexpected content type."""
def __init__(self, content_type: Optional[str], response: Any) -> None:
super().__init__(
f"Response had an unexpected content-type: '{content_type!r}'", response
)
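async def _example_handle_errors(gh):
    """Illustrative sketch only, not part of the public API: `gh` is assumed
    to be a GitHubAPI instance from one of the gidgethub client backends."""
    try:
        return await gh.getitem("/rate_limit")
    except RateLimitExceeded as exc:
        # exc.rate_limit carries the parsed rate-limit details.
        return None
    except BadRequest:
        # Any other 4XX response, e.g. InvalidField or ValidationError.
        raise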
|
the-stack_106_24633 | """File controls the application while it is running"""
import time
from components.loding_screen import *
from firebase_manager.handler import Handler
from keys import keys
def run() -> None:
start_time = time.time()
threshold_time = 15 * 60 # 15 minutes in secs
handler = Handler(keys.keys_auth, keys.keys_database)
curr_view = LoadingView(handler=handler)
while curr_view:
current_time = time.time()
time_diff = current_time - start_time
if threshold_time < time_diff:
handler.refresh()
start_time = time.time()
curr_view = curr_view.present()
if __name__ == '__main__':
run()
|
the-stack_106_24635 | import datetime
import logging
import os
from learners.fasttext_learner import FasttextLearner
from learners.spacy_learner import SpacyLearner
class TrainerProcessor:
"""A TrainerProcessor class is in charge of consuming the learning task.
"""
def consume(self, task):
"""This method should be invoked by the workers on a pool to actually train
        models in parallel.
Notice that this method just shelters the actual code that will be executed,
so any internal asynchronous exception will stay trapped here and logged explicitly.
Args:
task (dict): The task to be consumed.
"""
# Tries to consume the task
try:
logging.info('Sending task to worker in the pool ...')
# Actually consumes the task
self._invoke_consume(task)
logging.info('Worker has finished the task.')
# If an exception has happened, logs it
except Exception as e:
logging.error('An exception has happened.')
logging.exception(e)
def _invoke_consume(self, task):
"""Runs the actual learning job.
This method runs on multiple parallel executors, which can be either threads or processes.
Anything that this method returns should be pickable (including possible
cython sub-objects).
Args:
task (dict): The task to be consumed.
"""
logging.info(f"Consuming a `{task['type']}` task ...")
# Checks if the task's type is from Spacy
if task['type'] == 'spacy':
# Creates a SpacyLearner
l = SpacyLearner()
# Checks if the task's type is from Fasttext
elif task['type'] == 'fasttext':
# Creates a FasttextLearner
l = FasttextLearner()
# Learns a new model
model_path = l.fit(task['language'], task['samples'], task['hyperparams'])
# Adding the model's path to the callback
task['callback']['path'] = model_path
# Adding the time when the task has ended
task['callback']['end_time'] = datetime.datetime.utcnow().isoformat()
# Checks if model has been properly trained
if model_path is None:
# Adding an error status to the callback
task['callback']['status'] = 'error'
# Notify someone that the model has not been trained (callback)
logging.info('Sending callback ...')
# Raises a RuntimeError warning that model could not been properly trained
raise RuntimeError('Model could not been properly trained.')
# Adding a success status to the callback
task['callback']['status'] = 'success'
logging.debug(f"Task callback: {task['callback']}")
# Uploads model to AWS
# logging.info('Uploading model ...')
# Deleting model from disk
# logging.info('Deleting model from local disk ...')
# os.remove(model_path)
# Notify someone that the model has been trained (callback)
# logging.info('Sending callback ...')
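# Illustrative sketch only: a minimal task dict in the shape _invoke_consume
# expects. The sample and hyperparameter values are hypothetical and depend on
# what the chosen learner accepts.
def _example_consume():
    task = {
        'type': 'fasttext',
        'language': 'en',
        'samples': ['__label__greeting hello there'],
        'hyperparams': {'epoch': 5},
        'callback': {},
    }
    TrainerProcessor().consume(task)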
|
the-stack_106_24638 | import tensorflow as tf
from tensorflow.python.client import timeline
a = tf.random_normal([2000, 5000])
b = tf.random_normal([5000, 1000])
res = tf.matmul(a, b)
with tf.Session() as sess:
    # Options to record a full trace of this session run
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
sess.run(res, options=options, run_metadata=run_metadata)
    # Create a Timeline object and write it out as a JSON trace file
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open('trace/timeline_1_simple.json', 'w') as f:
f.write(chrome_trace)
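# The generated JSON trace can be opened at chrome://tracing (or in Perfetto)
# to inspect per-op timings on the timeline.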
|
the-stack_106_24640 | """Read/Write image files using ITK
"""
# Copyright (c) 2013-2018 Erling Andersen, Haukeland University Hospital, Bergen, Norway
import os.path
import logging
import tempfile
import itk
import numpy as np
import imagedata.formats
import imagedata.axis
from imagedata.formats.abstractplugin import AbstractPlugin
logger = logging.getLogger(__name__)
class ImageTypeError(Exception):
"""
Thrown when trying to load or save an image of unknown type.
"""
pass
class DependencyError(Exception):
"""
Thrown when a required module could not be loaded.
"""
pass
# noinspection PyUnresolvedReferences
class ITKPlugin(AbstractPlugin):
"""Read/write ITK files."""
name = "itk"
description = "Read and write ITK files."
authors = "Erling Andersen"
version = "1.1.0"
url = "www.helse-bergen.no"
def __init__(self, name=None, description=None,
authors=None, version=None, url=None):
if name is not None:
self.name = name
if description is not None:
self.description = description
if authors is not None:
self.authors = authors
if version is not None:
self.version = version
if url is not None:
self.url = url
super(ITKPlugin, self).__init__(self.name, self.description,
self.authors, self.version, self.url)
self.shape = None
self.slices = None
self.spacing = None
self.imagePositions = None
self.transformationMatrix = None
# self.orientation = si.orientation
self.tags = None
self.origin = None
self.orientation = None
self.normal = None
self.output_sort = None
def _read_image(self, f, opts, hdr):
"""Read image data from given file handle
Args:
self: format plugin instance
f: file handle or filename (depending on self._need_local_file)
opts: Input options (dict)
hdr: Header dict
        Returns:
            Tuple of (info, si), while hdr is updated in place:
                - info: internal data for the plugin;
                  None if the given file should not be included (e.g. raw file)
                - si: numpy array (multi-dimensional)
"""
logger.debug("itkplugin._read_image")
logger.debug("itkplugin._read_image filehandle {}".format(f))
if f.endswith('.raw'):
return None, None
try:
# https://blog.kitware.com/itk-python-image-pixel-types/
reader = itk.imread(f)
# imagetype = itk.Image[itk.F, 3]
# reader = itk.ImageFileReader[imagetype].New()
# reader.SetFileName(f)
# reader.Update()
# img = itk.GetArrayFromImage(reader.GetOutput())
img = itk.GetArrayFromImage(reader)
self._reduce_shape(img)
logger.info("Data shape read ITK: {}".format(img.shape))
o = reader
except imagedata.formats.NotImageError as e:
logger.error('itkplugin._read_image: inner exception {}'.format(e))
raise imagedata.formats.NotImageError('{} does not look like a ITK file'.format(f))
# Color image?
hdr.photometricInterpretation = 'MONOCHROME2'
hdr.color = False
if o.GetNumberOfComponentsPerPixel() == 3:
logger.debug('ITKPlugin._read_image: RGB color')
hdr.photometricInterpretation = 'RGB'
hdr.color = True
return o, img
def _need_local_file(self):
"""Do the plugin need access to local files?
Returns:
Boolean. True: The plugin need access to local filenames. False: The plugin can access files given by an open file handle
"""
return True
def _set_tags(self, image_list, hdr, si):
"""Set header tags.
Args:
self: format plugin instance
image_list: list with (info,img) tuples
hdr: Header dict
si: numpy array (multi-dimensional)
Returns:
hdr: Header dict
"""
# def transformMatrix(direction, origin):
# matrix = itk.GetArrayFromMatrix(direction)
# A = np.array([[matrix[2,2], matrix[1,2], matrix[0,2], origin[2]],
# [matrix[2,1], matrix[1,1], matrix[0,1], origin[1]],
# [matrix[2,0], matrix[1,0], matrix[0,0], origin[0]],
# [ 0, 0, 0, 1]])
# return A
# orientation = self.orientation
# rotation = np.zeros([3,3])
# # X axis
# rotation[0,0] = orientation[0]
# rotation[0,1] = orientation[1]
# rotation[0,2] = orientation[2]
# # Y axis
# rotation[1,0] = orientation[3]
# rotation[1,1] = orientation[4]
# rotation[1,2] = orientation[5]
# # Z axis = X cross Y
# rotation[2,0] = orientation[1]*orientation[5]-orientation[2]*orientation[4]
# rotation[2,1] = orientation[2]*orientation[3]-orientation[0]*orientation[5]
# rotation[2,2] = orientation[0]*orientation[4]-orientation[1]*orientation[3]
# logger.debug(rotation)
#
# # Set direction by modifying default orientation in place
# d=image.GetDirection()
# dv=d.GetVnlMatrix()
# for col in range(3):
# v=itk.vnl_vector.D()
# v.set_size(3)
# v.put(0, rotation[col,0])
# v.put(1, rotation[col,1])
# v.put(2, rotation[col,2])
# dv.set_column(col,v)
o, img = image_list[0]
spacing = o.GetSpacing()
origin = o.GetOrigin()
direction = o.GetDirection()
# Set spacing
v = spacing.GetVnlVector()
logger.debug('ITKPlugin._set_tags: hdr {}'.format(hdr))
logger.debug('ITKPlugin._set_tags: spacing {} {} {}'.format(v.get(2), v.get(1), v.get(0)))
hdr.spacing = (float(v.get(2)), float(v.get(1)), float(v.get(0)))
if v.size() > 3:
dt = float(v.get(3))
else:
dt = 1.0
# Set imagePositions for first slice
v = origin.GetVnlVector()
hdr.imagePositions = {}
hdr.imagePositions[0] = np.array([v.get(2), v.get(1), v.get(0)])
# Do not calculate transformationMatrix here. Will be calculated by Series() when needed.
# self.transformationMatrix = transformMatrix(direction, hdr['imagePositions'][0])
# hdr['transformationMatrix'] = self.transformationMatrix
# logger.debug('ITKPlugin._set_tags: transformationMatrix=\n{}'.format(self.transformationMatrix))
# Set image orientation
iop = self._orientation_from_vnl_matrix(direction)
logger.debug('ITKPlugin._set_tags: iop=\n{}'.format(iop))
hdr.orientation = np.array((iop[2], iop[1], iop[0],
iop[5], iop[4], iop[3]))
# Set tags
axes = list()
_actual_shape = si.shape
_color = False
if hdr.color:
_actual_shape = si.shape[:-1]
_color = True
logger.debug('ITKPlugin.read: color')
_actual_ndim = len(_actual_shape)
nt = nz = 1
axes.append(imagedata.axis.UniformLengthAxis(
'row',
hdr.imagePositions[0][1],
_actual_shape[-2],
hdr.spacing[1])
)
axes.append(imagedata.axis.UniformLengthAxis(
'column',
hdr.imagePositions[0][2],
_actual_shape[-1],
hdr.spacing[2])
)
if _actual_ndim > 2:
nz = _actual_shape[-3]
axes.insert(0, imagedata.axis.UniformLengthAxis(
'slice',
hdr.imagePositions[0][0],
nz,
hdr.spacing[0])
)
if _actual_ndim > 3:
nt = _actual_shape[-4]
axes.insert(0, imagedata.axis.UniformLengthAxis(
imagedata.formats.input_order_to_dirname_str(hdr.input_order),
0,
nt,
dt)
)
times = np.arange(0, nt * dt, dt)
tags = {}
for slice in range(nz):
tags[slice] = np.array(times)
if _color:
axes.append(
imagedata.axis.VariableAxis(
'rgb',
['r', 'g', 'b']
)
)
hdr.axes = axes
hdr.tags = tags
logger.info("Data shape read DCM: {}".format(imagedata.formats.shape_to_str(si.shape)))
def write_3d_numpy(self, si, destination, opts):
"""Write 3D numpy image as ITK file
Args:
self: ITKPlugin instance
si: Series array (3D or 4D), including these attributes:
- slices
- spacing
- imagePositions
- transformationMatrix
- orientation
- tags
destination: dict of archive and filenames
opts: Output options (dict)
"""
if si.color:
raise imagedata.formats.WriteNotImplemented(
"Writing color ITK images not implemented.")
logger.debug('ITKPlugin.write_3d_numpy: destination {}'.format(destination))
archive = destination['archive']
filename_template = 'Image_%05d.mha'
if len(destination['files']) > 0 and len(destination['files'][0]) > 0:
filename_template = destination['files'][0]
self.shape = si.shape
self.slices = si.slices
self.spacing = si.spacing
self.imagePositions = si.imagePositions
self.transformationMatrix = si.transformationMatrix
# self.orientation = si.orientation
self.tags = si.tags
self.origin, self.orientation, self.normal = si.get_transformation_components_xyz()
logger.info("Data shape write: {}".format(imagedata.formats.shape_to_str(si.shape)))
# if si.ndim == 2:
# si.shape = (1,) + si.shape
# if si.ndim == 3:
# si.shape = (1,) + si.shape
# assert si.ndim == 4, "write_3d_series: input dimension %d is not 3D." % (si.ndim-1)
# if si.shape[0] != 1:
# raise ValueError("Attempt to write 4D image ({}) using write_3d_numpy".format(si.shape))
assert si.ndim == 2 or si.ndim == 3, "write_3d_series: input dimension %d is not 2D/3D." % si.ndim
# slices = si.shape[1]
# if slices != si.slices:
# raise ValueError("write_3d_series: slices of dicom template ({}) differ from input array ({}).".format(si.slices, slices))
# if not os.path.isdir(directory_name):
# os.makedirs(directory_name)
try:
filename = filename_template % 0
except TypeError:
filename = filename_template
# filename = os.path.join(directory_name, filename)
self.write_numpy_itk(si, archive, filename)
def write_4d_numpy(self, si, destination, opts):
"""Write 4D numpy image as ITK files
Args:
self: ITKPlugin instance
si[tag,slice,rows,columns]: Series array, including these attributes:
- slices
- spacing
- imagePositions
- transformationMatrix
- orientation
- tags
destination: dict of archive and filenames
opts: Output options (dict)
"""
if si.color:
raise imagedata.formats.WriteNotImplemented(
"Writing color ITK images not implemented.")
logger.debug('ITKPlugin.write_4d_numpy: destination {}'.format(destination))
archive = destination['archive']
filename_template = 'Image_%05d.mha'
if len(destination['files']) > 0 and len(destination['files'][0]) > 0:
filename_template = destination['files'][0]
self.shape = si.shape
self.slices = si.slices
self.spacing = si.spacing
self.imagePositions = si.imagePositions
self.transformationMatrix = si.transformationMatrix
# self.orientation = si.orientation
self.tags = si.tags
self.origin, self.orientation, self.normal = si.get_transformation_components_xyz()
# Defaults
self.output_sort = imagedata.formats.SORT_ON_SLICE
if 'output_sort' in opts:
self.output_sort = opts['output_sort']
# Should we allow to write 3D volume?
# if si.ndim == 3:
# si.shape = (1,) + si.shape
if si.ndim != 4:
raise ValueError("write_4d_numpy: input dimension {} is not 4D.".format(si.ndim))
logger.debug("write_4d_numpy: si dtype {}, shape {}, sort {}".format(
si.dtype, si.shape,
imagedata.formats.sort_on_to_str(self.output_sort)))
steps = si.shape[0]
slices = si.shape[1]
if steps != len(si.tags[0]):
raise ValueError(
"write_4d_series: tags of dicom template ({}) differ from input array ({}).".format(len(si.tags[0]),
steps))
if slices != si.slices:
raise ValueError(
"write_4d_series: slices of dicom template ({}) differ from input array ({}).".format(si.slices,
slices))
# if not os.path.isdir(directory_name):
# os.makedirs(directory_name)
logger.debug('write_4d_numpy: si[0,0,0,0]={}'.format(
si[0, 0, 0, 0]))
if self.output_sort == imagedata.formats.SORT_ON_TAG:
for slice in range(slices):
filename = filename_template % slice
# filename = os.path.join(directory_name, filename)
self.write_numpy_itk(si[:, slice, ...], archive, filename)
else: # default: imagedata.formats.SORT_ON_SLICE:
for tag in range(steps):
filename = filename_template % tag
# filename = os.path.join(directory_name, filename)
self.write_numpy_itk(si[tag, ...], archive, filename)
def write_numpy_itk(self, si, archive, filename):
"""Write single volume to file
Args:
self: ITKPlugin instance, including these attributes:
- slices (not used)
- spacing
- imagePositions
- transformationMatrix
- orientation (not used)
- tags (not used)
si: numpy 3D array [slice,row,column]
archive: archive object
            filename: file name, possibly without extension
"""
if si.ndim != 2 and si.ndim != 3:
raise ValueError("write_numpy_itk: input dimension %d is not 2D/3D." % si.ndim)
if np.issubdtype(si.dtype, np.floating):
arr = np.float32(np.nan_to_num(si))
else:
arr = si.copy()
if arr.dtype == np.int32:
logger.debug("write_numpy_itk: arr {}".format(arr.dtype))
arr = arr.astype(np.float32)
# arr=arr.astype(np.uint16)
if arr.dtype == np.complex64 or arr.dtype == np.complex128:
arr = np.absolute(arr)
if arr.dtype == np.double:
arr = arr.astype(np.float32)
logger.debug("write_numpy_itk: arr {}".format(arr.dtype))
# Write it
logger.debug("write_numpy_itk: arr {} {}".format(arr.shape, arr.dtype))
image = itk.GetImageFromArray(arr)
from_image_type = self._get_image_type(image)
image = self.get_image_from_numpy(image)
logger.debug("write_numpy_itk: imagetype {} filename {}".format(from_image_type, filename))
if len(os.path.splitext(filename)[1]) == 0:
filename = filename + '.mha'
ext = os.path.splitext(filename)[1]
logger.debug('write_numpy_itk: ext %s' % ext)
f = tempfile.NamedTemporaryFile(
suffix=ext, delete=False)
logger.debug('write_numpy_itk: write local file %s' % f.name)
itk.imwrite(image, f.name)
f.close()
logger.debug('write_numpy_itk: written local file %s' % f)
logger.debug('write_numpy_itk: copy to archive %s as %s' % (
archive, filename))
archive.add_localfile(f.name, filename)
os.unlink(f.name)
@staticmethod
def _orientation_from_vnl_matrix(direction):
tr = direction.GetVnlMatrix()
arr = []
for c in range(2):
for r in range(3):
arr.append(float(tr.get(r, c)))
return arr
def set_direction_from_dicom_header(self, image):
orientation = self.orientation
rotation = np.zeros([3, 3])
# X axis
rotation[0, 0] = orientation[2]
rotation[0, 1] = orientation[1]
rotation[0, 2] = orientation[0]
# Y axis
rotation[1, 0] = orientation[5]
rotation[1, 1] = orientation[4]
rotation[1, 2] = orientation[3]
# Z axis = X cross Y
rotation[2, 0] = orientation[1] * orientation[3] - orientation[0] * orientation[4]
rotation[2, 1] = orientation[0] * orientation[5] - orientation[2] * orientation[3]
rotation[2, 2] = orientation[2] * orientation[4] - orientation[1] * orientation[5]
logger.debug('set_direction_from_dicom_header: rotation:\n{}'.format(rotation))
# Set direction by modifying default orientation in place
d = image.GetDirection()
dv = d.GetVnlMatrix()
for col in range(3):
v = itk.vnl_vector.D()
v.set_size(3)
v.put(0, rotation[col, 0])
v.put(1, rotation[col, 1])
v.put(2, rotation[col, 2])
dv.set_column(col, v)
def set_direction_from_transformation_matrix(self, image):
m = self.transformationMatrix
# Set direction by modifying default orientation in place
d = image.GetDirection()
dv = d.GetVnlMatrix()
for col in range(3):
v = itk.vnl_vector.D()
v.set_size(3)
v.put(0, m[2 - col, 2])
v.put(1, m[2 - col, 1])
v.put(2, m[2 - col, 0])
dv.set_column(col, v)
def get_image_from_numpy(self, image):
"""Returns an itk Image created from the supplied scipy ndarray.
If the image_type is supported, will be automatically transformed to that type,
otherwise the most suitable is selected.
Note: always use this instead of directly the itk.PyBuffer, as that
object transposes the image axes.
Args:
image an array, type image np.ndarray
Returns:
an instance of itk.Image holding the array's data, type itk.Image (instance)
"""
def itkMatrix_from_orientation(orientation, normal):
o_t = orientation.reshape((2, 3)).T
colr = o_t[:, 0].reshape((3, 1))
colc = o_t[:, 1].reshape((3, 1))
coln = normal.reshape((3, 1))
if len(self.shape) < 3:
m = np.hstack((colr[:2], colc[:2])).reshape((2, 2))
else:
m = np.hstack((colr, colc, coln)).reshape((3, 3))
itk_m = itk.GetMatrixFromArray(m)
return itk_m
image.SetDirection(
itkMatrix_from_orientation(
self.orientation, self.normal))
z, y, x = self.imagePositions[0]
logger.debug("get_image_from_numpy: (z,y,x)=({},{},{}) ({})".format(z, y, x, type(z)))
if isinstance(z, np.int64):
logger.debug("get_image_from_numpy: SetOrigin int")
if len(self.shape) < 3:
image.SetOrigin([int(x), int(y)])
else:
image.SetOrigin([int(x), int(y), int(z)])
else:
logger.debug("get_image_from_numpy: SetOrigin float")
if len(self.shape) < 3:
image.SetOrigin([float(x), float(y)])
else:
image.SetOrigin([float(x), float(y), float(z)])
logger.debug("get_image_from_numpy: SetSpacing float")
dz, dy, dx = self.spacing
dx = float(dx)
dy = float(dy)
dz = float(dz)
if len(self.shape) < 3:
image.SetSpacing([dx, dy])
else:
image.SetSpacing([dx, dy, dz])
return image
@staticmethod
def _get_image_type(image):
"""Returns the image type of the supplied image as itk.Image template.
Args:
image: an instance of itk.Image
Returns:
a template of itk.Image, type itk.Image
"""
try:
return itk.Image[itk.template(image)[1][0],
itk.template(image)[1][1]]
except IndexError:
            raise NotImplementedError('The python wrappers of ITK define no template class for this data type.')
|
the-stack_106_24641 | # coding=utf-8
import json
import os.path
import pickle
import requests
import time
import threading
# noinspection PyPackageRequirements
import websocket
# noinspection PyPackageRequirements
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import chatcommunicate
import metasmoke
from globalvars import GlobalVars
import datahandling
from helpers import log
from parsing import fetch_post_id_and_site_from_url, to_protocol_relative
from tasks import Tasks
# noinspection PyClassHasNoInit,PyBroadException,PyMethodParameters
class DeletionWatcher:
next_request_time = time.time() - 1
def __init__(self):
DeletionWatcher.update_site_id_list()
self.posts = {}
try:
self.socket = websocket.create_connection("wss://qa.sockets.stackexchange.com/")
except websocket.WebSocketException:
log('error', 'DeletionWatcher failed to create a websocket connection')
return
if os.path.exists("deletionIDs.p"):
with open("deletionIDs.p", "rb") as fh:
for post in DeletionWatcher._check_batch(pickle.load(fh)):
self.subscribe(post, pickle=False)
self._save()
threading.Thread(name="deletion watcher", target=self._start, daemon=True).start()
def _start(self):
while True:
msg = self.socket.recv()
if msg:
msg = json.loads(msg)
action = msg["action"]
if action == "hb":
self.socket.send("hb")
else:
data = json.loads(msg["data"])
if data["a"] == "post-deleted":
try:
post_id, _, post_type, post_url, callbacks = self.posts[action]
del self.posts[action]
if not post_type == "answer" or ("aId" in data and str(data["aId"]) == post_id):
self.socket.send("-" + action)
Tasks.do(metasmoke.Metasmoke.send_deletion_stats_for_post, post_url, True)
for callback, max_time in callbacks:
if not max_time or time.time() < max_time:
callback()
except KeyError:
pass
def subscribe(self, post_url, callback=None, pickle=True, timeout=None):
post_id, post_site, post_type = fetch_post_id_and_site_from_url(post_url)
if post_site not in GlobalVars.site_id_dict:
log("warning", "unknown site {} when subscribing to {}".format(post_site, post_url))
return
if post_type == "answer":
question_id = datahandling.get_post_site_id_link((post_id, post_site, post_type))
if question_id is None:
return
else:
question_id = post_id
site_id = GlobalVars.site_id_dict[post_site]
action = "{}-question-{}".format(site_id, question_id)
max_time = (time.time() + timeout) if timeout else None
if action not in self.posts:
self.posts[action] = (post_id, post_site, post_type, post_url, [(callback, max_time)] if callback else [])
try:
self.socket.send(action)
except websocket.WebSocketException:
log('error', 'DeletionWatcher failed on sending {}'.format(action))
elif callback:
_, _, _, _, callbacks = self.posts[action]
callbacks.append((callback, max_time))
else:
return
if pickle:
Tasks.do(self._save)
def _save(self):
pickle_output = {}
for post_id, post_site, _, _, _ in self.posts.values():
if post_site not in pickle_output:
pickle_output[post_site] = [post_id]
else:
pickle_output[post_site].append(post_id)
with open("deletionIDs.p", "wb") as pickle_file:
pickle.dump(pickle_output, pickle_file)
@staticmethod
def _check_batch(saved):
if time.time() < DeletionWatcher.next_request_time:
time.sleep(DeletionWatcher.next_request_time - time.time())
for site, posts in saved.items():
ids = ";".join(post_id for post_id in posts if not DeletionWatcher._ignore((post_id, site)))
uri = "https://api.stackexchange.com/2.2/posts/{}".format(ids)
params = {
'site': site,
'key': 'IAkbitmze4B8KpacUfLqkw(('
}
res = requests.get(uri, params=params)
json = res.json()
if "items" not in json:
log('warning',
'DeletionWatcher API request received no items in response (code {})'.format(res.status_code))
log('warning', res.text)
return
if 'backoff' in json:
DeletionWatcher.next_request_time = time.time() + json['backoff']
for post in json['items']:
if time.time() - post["creation_date"] < 7200:
yield to_protocol_relative(post["link"]).replace("/q/", "/questions/")
@staticmethod
def _ignore(post_site_id):
return datahandling.is_false_positive(post_site_id) or datahandling.is_ignored_post(post_site_id) or \
datahandling.is_auto_ignored_post(post_site_id)
@staticmethod
def update_site_id_list():
soup = BeautifulSoup(requests.get("https://meta.stackexchange.com/topbar/site-switcher/site-list").text,
"html.parser")
site_id_dict = {}
for site in soup.findAll("a", attrs={"data-id": True}):
site_name = urlparse(site["href"]).netloc
site_id = site["data-id"]
site_id_dict[site_name] = site_id
GlobalVars.site_id_dict = site_id_dict
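# Usage sketch (added for illustration, not part of the original module): subscribe to
# deletion notifications for a post. The URL and callback are hypothetical, and a fully
# configured SmokeDetector runtime (GlobalVars, Redis, websocket access) is assumed.
def _example_watch_post():
    watcher = DeletionWatcher()
    watcher.subscribe("//stackoverflow.com/questions/12345678",
                      callback=lambda: log("info", "watched post was deleted"),
                      timeout=7200)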
|
the-stack_106_24642 | def params2name(params):
params_str = []
for k, v in params.items():
try:
param_str = '{0}-{1:g}'.format(k,v)
except ValueError:
param_str = '{0}-{1}'.format(k,v)
params_str.append(param_str)
return '_'.join(params_str)
def name2params(name):
params = {}
params_str = name.split('_')
for param_str in params_str:
param = param_str.split('-',maxsplit=1)
key = param[0]
try:
params[key] = float( param[1] )
except ValueError:
params[key] = param[1]
return params
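# Illustrative round-trip (added; the parameter names and values are made up):
# params2name and name2params are inverses for simple flat dictionaries.
if __name__ == '__main__':
    _params = {'lr': 0.01, 'model': 'cnn'}
    _name = params2name(_params)  # 'lr-0.01_model-cnn'
    assert name2params(_name) == _params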
|
the-stack_106_24644 | """
Split an image into small square pieces of side `SIZE`
and save them to disk individually.
"""
from PIL import Image
import numpy as np
import os
import glob
# -------------------------------------------
# Edit these parameters as required
INFILES = glob.glob('test/4J7A0146_*.jpg')
CREATE_DIRS = True
SIZE = 128
OFFSETS = [(0, 0) for f in INFILES]
# -------------------------------------------
if __name__ == '__main__' :
if not INFILES :
print("Warning : No files were processed")
for addr, off in zip(INFILES, OFFSETS):
print(addr)
fname, ext = os.path.splitext(addr)
loc, fname = os.path.split(fname)
if CREATE_DIRS :
prf = os.path.join(loc,fname)
if not os.path.isdir(prf) :
os.mkdir(prf)
OUT_PREFIX = f"./{prf}/"
else :
OUT_PREFIX = f"./{fname}_"
src = Image.open(addr)
for ix, x in enumerate(range(off[0], src.width-SIZE+1, SIZE)):
for iy, y in enumerate(range(off[1], src.height-SIZE+1, SIZE)):
section = src.crop((x, y, x+SIZE, y+SIZE))
# if np.array(section).max() > 180 :
# section.save(OUT_PREFIX + f"{ix}_{iy}" + ext)
section.save(OUT_PREFIX + f"{ix}_{iy}" + ext)
|
the-stack_106_24645 | from gateway_test import GATEWAY_URL
import unittest
import common
GATEWAY_URL = "http://localhost/v0"
class TestData(unittest.TestCase):
def test_data(self):
users = 10
forms_per_user = 10
responses_per_form = 100
for u in range(users):
user = common.generate_user()
auth = common.make_user(GATEWAY_URL, user)
for f in range(forms_per_user):
form = common.make_form(GATEWAY_URL, auth, common.generate_form())
print(f"User: {u}, form: {f}")
for r in range(responses_per_form):
resp = common.generate_response()
common.post_form_user(GATEWAY_URL, form["id"], resp)
if __name__ == "__main__":
unittest.main() |
the-stack_106_24647 | from django.conf.urls import include, url
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from . import apiv1, apiv2
from . import views
from .models import LastUpdated
admin.autodiscover()
class IndexView(TemplateView):
template_name = "index.html"
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
try:
last_updated = LastUpdated.objects.all().order_by('-update_date')[0]
context['last_updated'] = last_updated
context['price_parameters'] = settings.PRICE_PARAMETERS
except:
context['last_updated'] = u"Never"
return context
urlpatterns = [
url(r'^$', IndexView.as_view(template_name="index.html"), name="home"),
url(r"^admin/", include(admin.site.urls)),
url(r"^api/related$", views.RelatedProductsView.as_view(), name="api_related_products"),
url(r"^api/detail$", views.ProductDetailView.as_view(), name="api_product_detail"),
url(r"^api/search/by_product$", views.SearchByProductView.as_view(), name="api_search_by_product"),
url(r"^api/search/by_ingredient$", views.SearchByIngredientView.as_view(), name="api_search_by_ingredient"),
url(r"^api/search$", views.SearchView.as_view(), name="api_search"),
url(r"^api/search-lite$", views.SearchLiteView.as_view(), name="api_search_lite"),
url(r"^api/dump$", views.DumpView.as_view(), name="api_dump"),
url(r"^api/v1/related$", views.RelatedProductsView.as_view(), name="api1_related_products"),
url(r"^api/v1/detail$", views.ProductDetailView.as_view(), name="api1_product_detail"),
url(r"^api/v1/search/by_product$", views.SearchByProductView.as_view(), name="api1_search_by_product"),
url(r"^api/v1/search/by_ingredient$", views.SearchByIngredientView.as_view(), name="api1_search_by_ingredient"),
url(r"^api/v1/search$", views.SearchView.as_view(), name="api1_search"),
url(r"^api/v1/search-lite$", views.SearchLiteView.as_view(), name="api1_search_lite"),
url(r"^api/v1/dump$", views.DumpView.as_view(), name="api1_dump"),
url(r"^api/v2/related$", views.V2RelatedProductsView.as_view(), name="api2_related_products"),
url(r"^api/v2/detail$", views.V2ProductDetailView.as_view(), name="api2_product_detail"),
url(r"^api/v2/search/by_product$", views.V2SearchByProduct.as_view(), name="api2_search_by_product"),
url(r"^api/v2/search$", views.V2SearchView.as_view(), name="api2_search"),
url(r"^api/v2/search-lite$", views.V2SearchLiteView.as_view(), name="api2_search_lite"),
url(r"^api/v2/last-updated$", views.LastUpdatedView.as_view(), name="api2_last_updated"),
# currently still using v1 methods since there hasn't been a change
url(r"^api/v2/dump$", views.DumpView.as_view(), name="api2_dump"),
url(r"^api/v2/search/by_ingredient$", views.SearchByIngredientView.as_view(), name="api2_search_by_ingredient"),
# API V3 - using some old api calls as they haven't changed
url(r"^api/v3/related$", views.V2RelatedProductsView.as_view(), name="api3_related_products"),
url(r"^api/v3/detail$", views.V3ProductDetailView.as_view(), name="api3_product_detail"),
url(r"^api/v3/search/by_product$", views.V2SearchByProduct.as_view(), name="api3_search_by_product"),
url(r"^api/v3/search$", views.V2SearchView.as_view(), name="api3_search"),
url(r"^api/v3/search-lite$", views.V2SearchLiteView.as_view(), name="api3_search_lite"),
url(r"^api/v3/last-updated$", views.LastUpdatedView.as_view(), name="api3_last_updated"),
# currently still using v1 methods since there hasn't been a change
url(r"^api/v3/dump$", views.DumpView.as_view(), name="api3_dump"),
url(r"^api/v3/search/by_ingredient$", views.SearchByIngredientView.as_view(), name="api3_search_by_ingredient"),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
the-stack_106_24649 | '''
Created on 29 Aug 2017
@author: igoroya
'''
class TreeNode(object):
def __init__(self, name=None):
'''
A very simple node of a tree, all what is needed to work with trees in the exercises
'''
self.name = name
self.children = []
def __repr__(self):
return "node: {} children {}".format(self.name.__repr__(), [child.name.__repr__() for child in self.children])
class BinaryTreeNode(object):
def __init__(self, name=None):
'''
A very simple node of a binary tree, all what is needed to work with trees in the exercises
'''
self.name = name
self.left = None
self.right = None
def __repr__(self):
if self.left is None:
left = 'None'
else:
left = self.left.name.__repr__()
if self.right is None:
right = 'None'
else:
right = self.right.name.__repr__()
return "node: {} l {}, r {}".format(self.name.__repr__(), left, right)
|
the-stack_106_24652 | """Data Provider module for providing data blocks made from similar stocks over a set time period, but separated.
This data provider is not intended to be used outside of this module, instead, upon import, this module will create an
instance of a SplitBlockProvider and register it with the global DataProviderRegistry. To register a consumer to
receive data from this provider, use the id provided by data_provider_static_names.SPLIT_BLOCK_PROVIDER.
The separation, or split, referred to by this module is that the data block for one cluster is not combined with
the data block from others into a large training set. This is in contrast to the ClusteredBlockProvider, which
combines its cluster's blocks into a larger data set.
A detailed argument list that is required by this provider can be found in the generate_data method.
"""
from datetime import datetime as dt, timedelta as td
import configparser
from data_providing_module import configurable_registry
from data_providing_module import data_provider_registry
from data_providing_module.data_providers import data_provider_static_names
from stock_data_analysis_module.data_processing_module import stock_cluster_data_manager
from general_utils.config import config_util
ENABLED_CONFIG_ID = "enabled"
class SplitBlockProvider(data_provider_registry.DataProviderBase):
"""Data Provider that provides data constructed by clustering stocks, but keeping the cluster's data separate
The organization of these clusters is handled according to the specifications established in the
StockClusterDataManager, and will operate on the time frame [start_date, end_date]. This time frame is currently
fixed where end_date is the current date, and start_date is 52 * 4 weeks ago (approximately four years).
Additionally this provider supports configuration of certain parameters through the configuration file. These
parameters are listed in the Configurable Parameters section.
Configurable Parameters:
enabled: Whether this provider is enabled for consumers to receive data from.
"""
def generate_prediction_data(self, *args, **kwargs):
"""Generates data that consumers will use to make predictions for the next trading day.
Currently there is no implementation for this, and calling the method will result in a NotImplementedError
"""
raise NotImplementedError()
def __init__(self):
"""Initializes a SplitBlockProvider and registers it to the global DataProviderRegistry
"""
super(SplitBlockProvider, self).__init__()
configurable_registry.config_registry.register_configurable(self)
def write_default_configuration(self, section: "configparser.SectionProxy"):
"""Writes default configuration values into the SectionProxy provided.
For more details see abstract class documentation.
"""
section[ENABLED_CONFIG_ID] = "True"
def load_configuration(self, parser: "configparser.ConfigParser"):
"""Attempts to load the configurable parameters for this provider from the provided parser.
For more details see abstract class documentation.
"""
section = config_util.create_type_section(parser, self)
if not parser.has_option(section.name, ENABLED_CONFIG_ID):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, ENABLED_CONFIG_ID)
if enabled:
data_provider_registry.registry.register_provider(data_provider_static_names.SPLIT_BLOCK_PROVIDER_ID, self)
def generate_data(self, *args, **kwargs):
"""Generates data for Consumers to use by clustering together stocks in a time period,
The time period for cluster creation is a period of 52 * 4 weeks (approximately 4 years).
Consumers requiring data from this provider are expected to provide the arguments specified in the
*args entry of the Arguments section
The split portion of this data provider is that the data returned is split into different entries in a
dictionary, keyed off of the root stock's ticker. The root stock is the stock that the cluster is based around
and all other data in the cluster is deemed as being similar to the root stock's data.
Arguments:
*args:
List of arguments that are expected to be in the following order, with the specified types
train_columns: List[str]
List of names of columns from a StockDataTable. These will be used to retrieve data
from the database and construct the returned data blocks
expectation_columns: List[int]
List of integers representing the indices of the columns to be used as the target data
in the generation of the data blocks
Returns:
See StockClusterDataManager.retrieve_training_data_movement_targets_split
"""
if len(args) < 1:
raise ValueError('Expected at least the first argument from the following list;' +
' train_columns: List["str"], expectation_columns: List["int"]')
columns = args[0]
expectation_columns = None
if len(args) == 2:
expectation_columns = args[1]
start_date = dt.now() - td(weeks=(52 * 4))
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = dt.now().isoformat()[:10].replace('-', '/')
data_retriever = stock_cluster_data_manager.StockClusterDataManager(start_date, end_date, column_list=columns)
return data_retriever.retrieveTrainingDataMovementTargetsSplit(expectation_columns=expectation_columns)
provider = SplitBlockProvider()
|
the-stack_106_24653 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import subprocess
import time
import re
import logging
from resource_management.core.base import Fail
from resource_management.core.providers import Provider
from resource_management.core.logger import Logger
from resource_management.core.utils import suppress_stdout
from resource_management.core import shell
PACKAGE_MANAGER_LOCK_ACQUIRED_MSG = "Cannot obtain lock for Package manager. Retrying after {0} seconds. Reason: {1}"
PACKAGE_MANAGER_REPO_ERROR_MSG = "Cannot download the package due to repository unavailability. Retrying after {0} seconds. Reason: {1}"
class PackageProvider(Provider):
def __init__(self, *args, **kwargs):
super(PackageProvider, self).__init__(*args, **kwargs)
  def install_package(self, name, use_repos=None, skip_repos=None):
    raise NotImplementedError()
  def remove_package(self, name):
    raise NotImplementedError()
  def upgrade_package(self, name, use_repos=None, skip_repos=None):
    raise NotImplementedError()
def action_install(self):
package_name = self.get_package_name_with_version()
self.install_package(package_name, self.resource.use_repos, self.resource.skip_repos)
def action_upgrade(self):
package_name = self.get_package_name_with_version()
self.upgrade_package(package_name, self.resource.use_repos, self.resource.skip_repos)
def action_remove(self):
package_name = self.get_package_name_with_version()
self.remove_package(package_name)
def get_package_name_with_version(self):
if self.resource.version:
return self.resource.package_name + '-' + self.resource.version
else:
return self.resource.package_name
def is_locked_output(self, out):
return False
def is_repo_error_output(self, out):
return False
def get_logoutput(self):
return self.resource.logoutput==True and Logger.logger.isEnabledFor(logging.INFO) or self.resource.logoutput==None and Logger.logger.isEnabledFor(logging.DEBUG)
def call_with_retries(self, cmd, **kwargs):
return self._call_with_retries(cmd, is_checked=False, **kwargs)
def checked_call_with_retries(self, cmd, **kwargs):
return self._call_with_retries(cmd, is_checked=True, **kwargs)
def _call_with_retries(self, cmd, is_checked=True, **kwargs):
func = shell.checked_call if is_checked else shell.call
for i in range(self.resource.retry_count):
is_last_time = (i == self.resource.retry_count - 1)
try:
code, out = func(cmd, **kwargs)
except Fail as ex:
# non-lock error
if not self._is_handled_error(str(ex), is_last_time) or is_last_time:
raise
self._notify_about_handled_error(str(ex), is_last_time)
else:
# didn't fail or failed with non-lock error.
if not code or not self._is_handled_error(out, is_last_time):
break
self._notify_about_handled_error(str(out), is_last_time)
time.sleep(self.resource.retry_sleep)
return code, out
def _is_handled_error(self, output, is_last_time):
if self.resource.retry_on_locked and self.is_locked_output(output):
return True
elif self.resource.retry_on_repo_unavailability and self.is_repo_error_output(output):
return True
return False
def _notify_about_handled_error(self, output, is_last_time):
if is_last_time:
return
if self.resource.retry_on_locked and self.is_locked_output(output):
Logger.info(PACKAGE_MANAGER_LOCK_ACQUIRED_MSG.format(self.resource.retry_sleep, str(output)))
elif self.resource.retry_on_repo_unavailability and self.is_repo_error_output(output):
Logger.info(PACKAGE_MANAGER_REPO_ERROR_MSG.format(self.resource.retry_sleep, str(output)))
def yum_check_package_available(self, name):
"""
Does the same as rpm_check_package_avaiable, but faster.
However need root permissions.
"""
import yum # Python Yum API is much faster then other check methods. (even then "import rpm")
yb = yum.YumBase()
name_regex = re.escape(name).replace("\\?", ".").replace("\\*", ".*") + '$'
regex = re.compile(name_regex)
with suppress_stdout():
package_list = yb.rpmdb.simplePkgList()
for package in package_list:
if regex.match(package[0]):
return True
return False
def rpm_check_package_available(self, name):
import rpm # this is faster then calling 'rpm'-binary externally.
ts = rpm.TransactionSet()
packages = ts.dbMatch()
name_regex = re.escape(name).replace("\\?", ".").replace("\\*", ".*") + '$'
regex = re.compile(name_regex)
for package in packages:
if regex.match(package['name']):
return True
return False
|
the-stack_106_24654 |
# Description:
# Author:
# Update Date:
import docx, copy, os,utils
# global variable
hidden_style = None
normal_style = None
#resource
text = utils.get_heyheyhey()
def run():
#global
global text
# open docs return obj
doc = docx.Document("./test.docx")
normal_style = utils.get_style("./style.docx", "正文")
hidden_style = utils.get_style("./style.docx", "嘿嘿嘿")
for p0 in doc.paragraphs:
p1 = copy.deepcopy(p0)
p0.clear()
for r in p1.runs:
for t in r.text:
p0.add_run(t, normal_style)
p0.add_run(utils.insert(text), hidden_style)
doc.save("done.docx")
def test():
#global
global text
# open docs return obj
doc = docx.Document("./test.docx")
normal_style = utils.get_style("./style.docx", "正文")
hidden_style = utils.get_style("./style.docx", "嘿嘿嘿")
print(normal_style)
print(hidden_style)
# ot = []
# it = "法律的概念其实很模糊法律所主张的正义依赖于判决而不是法律条文本身得益于消费者保护运动的"
# for p in doc.paragraphs:
# for r in p.runs:
# for t in r.text:
# ot.append(t)
# for p0 in doc.paragraphs:
# # p = p0.clear()
# for r in p0.runs:
# for t1 in r.text:
# p1.add_run(t1, normal_style)
# p1.add_run(insert(), hidden_style)
# break
# break
for p0 in doc.paragraphs:
p1 = copy.deepcopy(p0)
p0.clear()
for r in p1.runs:
for t in r.text:
p0.add_run(t, normal_style)
# p0.add_run(insert(text), hidden_style)
# print(type(p.text))
# print(p.text[1])
# if "嘿嘿" in p.text:
# for r in p.runs:
# print(r.style)
# hidden_style = r.style
# if "正文" in p.text:
# for r in p.runs:
# normal_style = r.style
# for p in doc.paragraphs:
# p.add_run("正文插入", normal_style)
# p.add_run("隐藏插入", hidden_style)
# doc.save("1.docx")
run()
# test() |
the-stack_106_24655 | import sys
##################################################
# python3 create_config.py d_min d_max num_proc #
# #
# @arg d_min - minimum delay #
# @arg d_max - maximum delay #
# @arg num_proc - number of processes #
##################################################
def main():
delay1 = sys.argv[1]
delay2 = sys.argv[2]
num_processes = int(sys.argv[3])
filename = "multicast.config"
file = open(filename, "w")
file.write(str(delay1) + " " + str(delay2) + "\n")
for i in range(num_processes):
file.write(str(i)+ " 127.0.0.1 " + str(4250+i) + "\n")
if __name__ == "__main__":
main()
|
the-stack_106_24657 | from abc import ABC, abstractmethod
from typing import List
from boatsandjoy_api.core.data_adapters import DjangoDataAdapter
from . import domain, models
from .exceptions import BoatNotFound
class BoatsRepository(ABC):
@classmethod
@abstractmethod
def filter(
cls,
obj_id: int = None,
name: str = None,
active: bool = None
) -> List[domain.Boat]:
pass
@classmethod
@abstractmethod
def get(
cls,
obj_id: int = None,
name: str = None,
active: bool = None
) -> domain.Boat:
pass
class DjangoBoatsRepository(BoatsRepository):
DATA_ADAPTER = DjangoDataAdapter
@classmethod
def filter(
cls,
obj_id: int = None,
name: str = None,
active: bool = None
) -> List[domain.Boat]:
django_filters = cls.DATA_ADAPTER.transform(
obj_id=obj_id,
name=name,
active=active
)
boats = models.Boat.objects.filter(**django_filters)
return [cls.get_boat_domain_object(boat) for boat in boats]
@classmethod
def get(
cls,
obj_id: int = None,
name: str = None,
active: bool = None
) -> domain.Boat:
django_filters = cls.DATA_ADAPTER.transform(
obj_id=obj_id,
name=name,
active=active
)
try:
boat = models.Boat.objects.get(**django_filters)
except models.Boat.DoesNotExist as e:
raise BoatNotFound(f'Boat not found: {e}')
return cls.get_boat_domain_object(boat)
@classmethod
def get_boat_domain_object(cls, boat: models.Boat) -> domain.Boat:
return domain.Boat(
id=boat.id,
created=boat.created,
name=boat.name,
active=boat.active
)
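# Usage sketch (added for illustration): fetching a single active boat through the
# repository. A configured Django environment and an existing Boat row are assumed;
# the boat name used here is hypothetical.
def _example_get_active_boat():
    try:
        return DjangoBoatsRepository.get(name='Joy', active=True)
    except BoatNotFound:
        return None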
|
the-stack_106_24658 | import os
from tarfile import TarFile
import pytest
from PIL import Image, ImageDraw
from aizynthfinder.utils import image
from aizynthfinder.chem import TreeMolecule, RetroReaction
@pytest.fixture
def new_image():
img = Image.new(mode="RGB", size=(300, 300), color="white")
draw = ImageDraw.Draw(img)
draw.point([5, 150, 100, 250], fill="black")
return img
def test_crop(new_image):
cropped = image.crop_image(new_image)
assert cropped.width == 136
assert cropped.height == 141
assert cropped.getpixel((21, 21)) == (0, 0, 0)
assert cropped.getpixel((116, 121)) == (0, 0, 0)
def test_rounded_rectangle(new_image):
color = (255, 0, 0)
modified = image.draw_rounded_rectangle(new_image, color=color)
assert modified.getpixel((0, 150)) == color
assert modified.getpixel((150, 0)) == color
assert modified.getpixel((299, 150)) == color
assert modified.getpixel((150, 299)) == color
def test_save_molecule_images():
nfiles = len(os.listdir(image.IMAGE_FOLDER))
mols = [
TreeMolecule(smiles="CCCO", parent=None),
TreeMolecule(smiles="CCCO", parent=None),
TreeMolecule(smiles="CCCCO", parent=None),
]
image.save_molecule_images(mols, ["green", "green", "green"])
assert len(os.listdir(image.IMAGE_FOLDER)) == nfiles + 2
image.save_molecule_images(mols, ["green", "orange", "green"])
assert len(os.listdir(image.IMAGE_FOLDER)) == nfiles + 3
@pytest.fixture
def setup_graphviz_graph():
mol1 = TreeMolecule(smiles="CCCO", parent=None)
reaction = RetroReaction(mol=mol1, smarts="")
return [mol1], [reaction], [(mol1, reaction)], ["green"]
def test_graphviz_usage(mocker, tmpdir, setup_graphviz_graph):
mkstemp_patch = mocker.patch("aizynthfinder.utils.image.tempfile.mkstemp")
files = [
(None, str(tmpdir / "graph1.dot")),
(None, str(tmpdir / "img2.png")),
]
mkstemp_patch.side_effect = files
molecules, reactions, edges, frame_colors = setup_graphviz_graph
img = image.make_graphviz_image(molecules, reactions, edges, frame_colors)
assert img.height > 0
assert img.width > 0
for _, filename in files:
assert os.path.exists(filename)
def test_graphviz_usage_exception_dot(mocker, tmpdir, setup_graphviz_graph):
exists_patch = mocker.patch("aizynthfinder.utils.image.os.path.exists")
exists_patch.side_effect = [False, True]
molecules, reactions, edges, frame_colors = setup_graphviz_graph
img = image.make_graphviz_image(molecules, reactions, edges, frame_colors)
assert img.height > 0
assert img.width > 0
def test_graphviz_usage_exception_dot_both(mocker, tmpdir, setup_graphviz_graph):
exists_patch = mocker.patch("aizynthfinder.utils.image.os.path.exists")
exists_patch.return_value = False
molecules, reactions, edges, frame_colors = setup_graphviz_graph
with pytest.raises(FileNotFoundError, match=".*'dot'.*"):
image.make_graphviz_image(molecules, reactions, edges, frame_colors)
def test_visjs_page(mocker, tmpdir, setup_graphviz_graph):
mkdtemp_patch = mocker.patch("aizynthfinder.utils.image.tempfile.mkdtemp")
mkdtemp_patch.return_value = str(tmpdir / "tmp")
os.mkdir(tmpdir / "tmp")
molecules, reactions, edges, frame_colors = setup_graphviz_graph
filename = str(tmpdir / "arch.tar")
image.make_visjs_page(filename, molecules, reactions, edges, frame_colors)
assert os.path.exists(filename)
with TarFile(filename) as tarobj:
assert "./route.html" in tarobj.getnames()
assert len([name for name in tarobj.getnames() if name.endswith(".png")]) == 1
|
the-stack_106_24659 | import os
from jinja2 import Environment, PackageLoader
def create_google_problem(directory_path, problem_name):
"""
:param problem_name: shall be formatted as a python variable
(underscores between words).
:type problem_name: str
"""
problem_class_name = "".join(map(lambda s: s.capitalize(), problem_name.split("_")))
render_template("google/google_template.txt", directory_path, problem_name + ".py",
problem_class_name=problem_class_name)
def create_tuenti_problem(directory_path, problem_name):
python_problem_filename = problem_name + ".py"
problem_class_name = "".join(map(lambda s: s.capitalize(), problem_name.split("_")))
render_template("tuenti/fabfile.py", directory_path, "fabfile.py", python_problem_filename=python_problem_filename)
render_template("tuenti/problem_template.txt", directory_path, python_problem_filename, problem_class_name=problem_class_name)
render_template("tuenti/gitignore_template.txt", directory_path, ".gitignore")
render_template("tuenti/challenge.token", directory_path, "challenge.token")
render_template("tuenti/interpreter.txt", directory_path, "interpreter.txt")
render_template("tuenti/test_input.txt", directory_path, "test_input.txt")
render_template("tuenti/submit_challenge", directory_path, "submit_challenge")
render_template("tuenti/test_challenge", directory_path, "test_challenge")
def render_template(template_path, directory_path, file_name, **kwargs):
env = Environment(loader=PackageLoader("jam_utils", "templates"))
template = env.get_template(template_path)
render = template.render(**kwargs)
file_path = os.path.join(directory_path, file_name)
with open(file_path, "w") as fd:
fd.write(render) |
the-stack_106_24662 | def left(i):
return 2*i
def right(i):
return 2*i+1
def max_heapify(A,i):
l = left(i)
r = right(i)
if l<=len(A) and A[l-1]>A[i-1]:
largest = l
else:
largest = i
if r<=len(A) and A[r-1]>A[largest-1]:
largest = r
if largest != i:
A[i-1],A[largest-1] = A[largest-1],A[i-1]
max_heapify(A,largest)
A = [4,1,3,2,16,9,10,14,8,7]
B = []
# This just builds the max heap...
for i in range(int(len(A)/2),0,-1):
max_heapify(A,i)
# Pop out the maximum value
for i in range(len(A),1,-1):
A[0],A[len(A)-1] = A[len(A)-1],A[0]
B.append(A.pop())
max_heapify(A,1)
B.append(A.pop())
print(B)
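# Added check: B collects the popped maxima one by one, so it should equal the input
# sorted in descending order.
assert B == sorted([4, 1, 3, 2, 16, 9, 10, 14, 8, 7], reverse=True)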
|
the-stack_106_24663 | import io
import time
import traceback
import uuid
from http import HTTPStatus
from .imports import *
from .ipc import redis_ipc_new
from .templating import *
def etrace(ex):
return "".join(traceback.format_exception(ex)) # COMPAT: Python 3.10 only
class WebError():
@staticmethod
async def log(request, exc, error_id, curr_time):
try:
fl_info = f"Error ID: {error_id}\n\n" # Initial header
fl_info += etrace(exc)
except Exception:
fl_info = "No exception could be logged"
url = str(request.url).replace('https://', '').replace("http://localhost:1843", "fateslist.xyz")
msg = inspect.cleandoc(f"""500 (Internal Server Error) at {url}
**Error**: {exc}
**Error ID**: {error_id}
**Time When Error Happened**: {curr_time}""")
await redis_ipc_new(redis_db, "SENDMSG", msg = {"content": msg, "file_name": f"{error_id}.txt", "file_content": fl_info, "channel_id": str(site_errors_channel)})
# Reraise exception
try:
raise exc
except:
logger.exception("Site Error Occurred")
@staticmethod
async def error_handler(request, exc, log: bool = True):
error_id = str(uuid.uuid4())
curr_time = time.time()
try:
# All status codes other than 500 and 422
status_code = exc.status_code
except Exception:
# 500 and 422 do not have status codes and need special handling
if isinstance(exc, RequestValidationError):
status_code = 422
else:
status_code = 500
path = str(request.url.path)
code_str = HTTPStatus(status_code).phrase
api = path.startswith("/api/")
if status_code == 500:
# Log the error
asyncio.create_task(WebError.log(request, exc, error_id, curr_time))
if api:
return api_error(
"Internal Server Error",
error_id=error_id,
status_code=500,
traceback=etrace(exc),
headers={"FL-Error-ID": error_id}
)
tb_full = "".join(traceback.format_exception(exc))
errmsg = inspect.cleandoc(f"""
Fates List had a slight issue and our developers and looking into what happened<br/><br/>
Error ID: {error_id}<br/><br/>
Please check our support server at <a href='{support_url}'>{support_url}</a> for more information<br/><br/>
Please send the below traceback if asked:<br/><br/>
<pre>{tb_full}</pre>
Time When Error Happened: {curr_time}<br/>""")
return HTMLResponse(errmsg, status_code=status_code, headers={"FL-Error-ID": error_id})
if not api:
# Special error messages (some with custom-set status code)
if status_code == 404:
if path.startswith("/bot"):
code_str = "Bot Not Found"
elif path.startswith("/profile"):
code_str = "Profile Not Found"
elif status_code == 422:
if path.startswith("/bot"):
code_str = "Bot Not Found"
status_code = 404
elif path.startswith("/profile"):
code_str = "Profile Not Found"
status_code = 404
return await templates.e(request, code_str, status_code)
# API route handling
if status_code != 422:
# Normal handling
return ORJSONResponse({"done": False, "reason": exc.detail}, status_code=status_code)
else:
errors = exc.errors()
errors_fixed = []
for error in errors:
if error["type"] == "type_error.enum":
ev = [{"name": type(enum).__name__, "accepted": enum.value, "doc": enum.__doc__} for enum in error["ctx"]["enum_values"]]
error["ctx"]["enum"] = ev
del error["ctx"]["enum_values"]
errors_fixed.append(error)
return ORJSONResponse({"done": False, "reason": "Invalid fields present", "ctx": errors_fixed}, status_code=422)
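# Registration sketch (added for illustration): wiring the handler into a FastAPI app.
# The `app` object is assumed to be created elsewhere; only the (request, exc) handler
# signature used above is relied upon.
def register_error_handlers(app):
    from fastapi.exceptions import RequestValidationError
    from starlette.exceptions import HTTPException as StarletteHTTPException
    app.add_exception_handler(StarletteHTTPException, WebError.error_handler)
    app.add_exception_handler(RequestValidationError, WebError.error_handler)
    app.add_exception_handler(Exception, WebError.error_handler)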
|
the-stack_106_24664 | import math
import statistics
from collections import deque
from ParadoxTrading.Indicator.IndicatorAbstract import IndicatorAbstract
from ParadoxTrading.Utils import DataStruct
class SharpRate(IndicatorAbstract):
def __init__(
self, _period: int, _use_key: str = 'closeprice',
_idx_key: str = 'time', _ret_key: str = 'sharprate',
):
super().__init__()
self.use_key = _use_key
self.idx_key = _idx_key
self.ret_key = _ret_key
self.data = DataStruct(
[self.idx_key, self.ret_key],
self.idx_key
)
self.last_price = None
self.period = _period
self.buf = deque(maxlen=self.period)
def _addOne(self, _data_struct: DataStruct):
index_value = _data_struct.index()[0]
price_value = _data_struct[self.use_key][0]
if self.last_price is not None:
chg_rate = price_value / self.last_price - 1
self.buf.append(chg_rate)
buf_std = statistics.pstdev(self.buf)
if buf_std != 0:
self.data.addDict({
self.idx_key: index_value,
self.ret_key: statistics.mean(self.buf) / buf_std,
})
self.last_price = price_value
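# Usage sketch (added for illustration; the prices are made up). The public feed method of
# IndicatorAbstract is not shown in this file, so the internal _addOne is called directly
# with one-row DataStruct objects, mirroring how it is used above.
def _example_sharp_rate():
    indicator = SharpRate(_period=5)
    for i, price in enumerate([100.0, 101.0, 100.5, 102.0, 101.5, 103.0]):
        bar = DataStruct(['time', 'closeprice'], 'time')
        bar.addDict({'time': i, 'closeprice': price})
        indicator._addOne(bar)
    return indicator.data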
|
the-stack_106_24666 | # This file is modified version of benchmark.py.
# benchmark.py was released by RAMitchell (Copyright (c) 2018 Rory Mitchell) under MIT License
# and available at https://github.com/RAMitchell/GBM-Benchmarks/blob/master/benchmark.py
# License text is available at https://github.com/RAMitchell/GBM-Benchmarks/blob/master/LICENSE
import os
import sys
import time
from copy import deepcopy
from datetime import datetime
import catboost as cat
import lightgbm as lgb
import numpy as np
import xgboost as xgb
from sklearn.metrics import mean_squared_error, accuracy_score
RANDOM_SEED = 0
class TimeAnnotatedFile:
def __init__(self, file_descriptor):
self.file_descriptor = file_descriptor
def write(self, message):
if message == '\n':
self.file_descriptor.write('\n')
return
cur_time = datetime.now()
new_message = "Time: [%d.%06d]\t%s" % (cur_time.second, cur_time.microsecond, message)
self.file_descriptor.write(new_message)
def flush(self):
self.file_descriptor.flush()
def close(self):
self.file_descriptor.close()
class Logger:
def __init__(self, filename):
self.filename = filename
self.stdout = sys.stdout
def __enter__(self):
self.file = TimeAnnotatedFile(open(self.filename, 'w'))
sys.stdout = self.file
def __exit__(self, exception_type, exception_value, traceback):
if exception_type is not None:
print(str(exception_value) + '\n' + str(traceback))
sys.stdout = self.stdout
self.file.close()
def eval_metric(data, prediction):
if data.metric == "RMSE":
return np.sqrt(mean_squared_error(data.y_test, prediction))
elif data.metric == "Accuracy":
if data.task == "binclass":
prediction = prediction > 0.5
elif data.task == "multiclass":
if prediction.ndim > 1:
prediction = np.argmax(prediction, axis=1)
return accuracy_score(data.y_test, prediction)
else:
raise ValueError("Unknown metric: " + data.metric)
class Learner:
def __init__(self):
self.default_params = {}
def _fit(self, tunable_params):
params = deepcopy(self.default_params)
params.update(tunable_params)
        print('Parameters:\n{}'.format(params))
return params
def eval(self, data, num_iterations, step=10):
scores = []
        for n_tree in range(step, num_iterations + 1, step):
prediction = self.predict(n_tree)
score = eval_metric(data, prediction)
scores.append(score)
return scores
def predict(self, n_tree):
raise Exception('Not implemented')
def set_train_dir(self, params, path):
pass
def run(self, params, log_filename):
log_dir_name = os.path.dirname(log_filename)
if not os.path.exists(log_dir_name):
os.makedirs(log_dir_name)
self.set_train_dir(params, log_filename + 'dir')
with Logger(log_filename):
start = time.time()
self._fit(params)
elapsed = time.time() - start
print('Elapsed: ' + str(elapsed))
return elapsed
class XGBoostLearner(Learner):
def __init__(self, data, task, metric, use_gpu):
Learner.__init__(self)
params = {
'n_gpus': 1,
'silent': 0,
'seed': RANDOM_SEED
}
if use_gpu:
params['tree_method'] = 'gpu_hist'
else:
params['tree_method'] = 'hist'
if task == "regression":
params["objective"] = "reg:linear"
if use_gpu:
params["objective"] = "gpu:" + params["objective"]
elif task == "multiclass":
params["objective"] = "multi:softmax"
params["num_class"] = int(np.max(data.y_test)) + 1
elif task == "binclass":
params["objective"] = "binary:logistic"
if use_gpu:
params["objective"] = "gpu:" + params["objective"]
else:
raise ValueError("Unknown task: " + task)
if metric == 'Accuracy':
if task == 'binclass':
params['eval_metric'] = 'error'
elif task == 'multiclass':
params['eval_metric'] = 'merror'
self.train = xgb.DMatrix(data.X_train, data.y_train)
self.test = xgb.DMatrix(data.X_test, data.y_test)
self.default_params = params
@staticmethod
def name():
return 'xgboost'
def _fit(self, tunable_params):
params = Learner._fit(self, tunable_params)
self.learner = xgb.train(params, self.train, tunable_params['iterations'], evals=[(self.test, 'eval')])
def predict(self, n_tree):
return self.learner.predict(self.test, ntree_limit=n_tree)
class LightGBMLearner(Learner):
def __init__(self, data, task, metric, use_gpu):
Learner.__init__(self)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'verbose': 0,
'random_state': RANDOM_SEED,
'bagging_freq': 1
}
if use_gpu:
params["device"] = "gpu"
if task == "regression":
params["objective"] = "regression"
elif task == "multiclass":
params["objective"] = "multiclass"
params["num_class"] = int(np.max(data.y_test)) + 1
elif task == "binclass":
params["objective"] = "binary"
else:
raise ValueError("Unknown task: " + task)
if metric == 'Accuracy':
if task == 'binclass':
params['metric'] = 'binary_error'
elif task == 'multiclass':
params['metric'] = 'multi_error'
elif metric == 'RMSE':
params['metric'] = 'rmse'
self.train = lgb.Dataset(data.X_train, data.y_train)
self.test = lgb.Dataset(data.X_test, data.y_test, reference=self.train)
self.default_params = params
@staticmethod
def name():
return 'lightgbm'
def _fit(self, tunable_params):
params_copy = deepcopy(tunable_params)
if 'max_depth' in params_copy:
params_copy['num_leaves'] = 2 ** params_copy['max_depth']
del params_copy['max_depth']
num_iterations = params_copy['iterations']
del params_copy['iterations']
params = Learner._fit(self, params_copy)
self.learner = lgb.train(
params,
self.train,
num_boost_round=num_iterations,
valid_sets=self.test
)
def predict(self, n_tree):
return self.learner.predict(self.test, num_iteration=n_tree)
class CatBoostLearner(Learner):
def __init__(self, data, task, metric, use_gpu):
Learner.__init__(self)
params = {
'devices': [0],
'logging_level': 'Verbose',
'use_best_model': False,
'bootstrap_type': 'Bernoulli',
'random_seed': RANDOM_SEED
}
if use_gpu:
params['task_type'] = 'GPU'
if task == 'regression':
params['loss_function'] = 'RMSE'
elif task == 'binclass':
params['loss_function'] = 'Logloss'
elif task == 'multiclass':
params['loss_function'] = 'MultiClass'
if metric == 'Accuracy':
params['custom_metric'] = 'Accuracy'
self.train = cat.Pool(data.X_train, data.y_train)
self.test = cat.Pool(data.X_test, data.y_test)
self.default_params = params
@staticmethod
def name():
return 'catboost'
def _fit(self, tunable_params):
params = Learner._fit(self, tunable_params)
self.model = cat.CatBoost(params)
self.model.fit(self.train, eval_set=self.test, verbose_eval=True)
def set_train_dir(self, params, path):
if not os.path.exists(path):
os.makedirs(path)
params["train_dir"] = path
def predict(self, n_tree):
if self.default_params['loss_function'] == "MultiClass":
prediction = self.model.predict_proba(self.test, ntree_end=n_tree)
else:
prediction = self.model.predict(self.test, ntree_end=n_tree)
return prediction
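# Usage sketch (added for illustration): running one CatBoost benchmark configuration.
# The `data` object is hypothetical; it only needs the attributes the learners use
# (X_train, y_train, X_test, y_test, task, metric), and the log path is arbitrary.
def _example_benchmark(data):
    learner = CatBoostLearner(data, task=data.task, metric=data.metric, use_gpu=False)
    elapsed = learner.run({'iterations': 100, 'max_depth': 6, 'learning_rate': 0.1},
                          'logs/catboost_example.log')
    scores = learner.eval(data, num_iterations=100, step=10)
    return elapsed, scores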
|
the-stack_106_24668 | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from opencensus.common import utils
from opencensus.trace import attributes
_SpanData = collections.namedtuple(
'_SpanData',
(
'name',
'context',
'span_id',
'parent_span_id',
'attributes',
'start_time',
'end_time',
'child_span_count',
'stack_trace',
'annotations',
'message_events',
'links',
'status',
'same_process_as_parent_span',
'span_kind',
),
)
class SpanData(_SpanData):
"""Immutable representation of all data collected by a
:class: `~opencensus.trace.span.Span`.
:type name: str
:param name: The name of the span.
:type: context: :class: `~opencensus.trace.span_context.SpanContext`
:param context: The SpanContext of the Span
:type span_id: int
:param span_id: Identifier for the span, unique within a trace.
:type parent_span_id: int
:param parent_span_id: (Optional) Parent span id.
:type attributes: dict
:param attributes: Collection of attributes associated with the span.
:type start_time: str
:param start_time: (Optional) Start of the time interval (inclusive)
during which the trace data was collected from the
application.
:type end_time: str
:param end_time: (Optional) End of the time interval (inclusive) during
which the trace data was collected from the application.
:type child_span_count: int
:param child_span_count: the number of child spans that were
generated while the span was active.
:type stack_trace: :class: `~opencensus.trace.stack_trace.StackTrace`
:param stack_trace: (Optional) A call stack appearing in a trace
:type annotations: list(:class:`opencensus.trace.time_event.Annotation`)
:param annotations: (Optional) The list of span annotations.
:type message_events:
list(:class:`opencensus.trace.time_event.MessageEvent`)
:param message_events: (Optional) The list of span message events.
:type links: list
:param links: (Optional) Links associated with the span. You can have up
to 128 links per Span.
:type status: :class: `~opencensus.trace.status.Status`
:param status: (Optional) An optional final status for this span.
:type same_process_as_parent_span: bool
:param same_process_as_parent_span: (Optional) A highly recommended but not
required flag that identifies when a
trace crosses a process boundary.
True when the parent_span belongs to
the same process as the current span.
:type span_kind: int
:param span_kind: (Optional) Highly recommended flag that denotes the type
of span (valid values defined by :class:
`opencensus.trace.span.SpanKind`)
"""
__slots__ = ()
def _format_legacy_span_json(span_data):
"""
:param SpanData span_data: SpanData object to convert
:rtype: dict
:return: Dictionary representing the Span
"""
span_json = {
'displayName': utils.get_truncatable_str(span_data.name),
'spanId': span_data.span_id,
'startTime': span_data.start_time,
'endTime': span_data.end_time,
'childSpanCount': span_data.child_span_count,
'kind': span_data.span_kind
}
if span_data.parent_span_id is not None:
span_json['parentSpanId'] = span_data.parent_span_id
if span_data.attributes:
span_json['attributes'] = attributes.Attributes(
span_data.attributes).format_attributes_json()
if span_data.stack_trace is not None:
span_json['stackTrace'] = \
span_data.stack_trace.format_stack_trace_json()
formatted_time_events = []
if span_data.annotations:
formatted_time_events.extend(
{'time': aa.timestamp,
'annotation': aa.format_annotation_json()}
for aa in span_data.annotations)
if span_data.message_events:
formatted_time_events.extend(
{'time': aa.timestamp,
'message_event': aa.format_message_event_json()}
for aa in span_data.message_events)
if formatted_time_events:
span_json['timeEvents'] = {
'timeEvent': formatted_time_events
}
if span_data.links:
span_json['links'] = {
'link': [
link.format_link_json() for link in span_data.links]
}
if span_data.status is not None:
span_json['status'] = span_data.status.format_status_json()
if span_data.same_process_as_parent_span is not None:
span_json['sameProcessAsParentSpan'] = \
span_data.same_process_as_parent_span
return span_json
def format_legacy_trace_json(span_datas):
"""Formats a list of SpanData tuples into the legacy 'trace' dictionary
format for backwards compatibility
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit
:rtype: dict
:return: Legacy 'trace' dictionary representing given SpanData tuples
"""
if not span_datas:
return {}
top_span = span_datas[0]
assert isinstance(top_span, SpanData)
trace_id = top_span.context.trace_id if top_span.context is not None \
else None
assert trace_id is not None
return {
'traceId': trace_id,
'spans': [_format_legacy_span_json(sd) for sd in span_datas],
}
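# Illustrative example (added; identifiers and timestamps are made up): building a single
# SpanData record and rendering it with the legacy trace formatter. A SpanContext instance
# is assumed to be supplied by the caller.
def _example_legacy_trace(span_context):
    span = SpanData(
        name='example-span',
        context=span_context,
        span_id='6e0c63257de34c92',
        parent_span_id=None,
        attributes={'component': 'example'},
        start_time='2021-01-01T00:00:00.000000Z',
        end_time='2021-01-01T00:00:01.000000Z',
        child_span_count=0,
        stack_trace=None,
        annotations=None,
        message_events=None,
        links=None,
        status=None,
        same_process_as_parent_span=True,
        span_kind=0,
    )
    return format_legacy_trace_json([span])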
|
the-stack_106_24672 | #!/usr/bin/env python3
"""
Writes out the source and include files needed for AutoTools.
This script will update the collected_files.md file.
"""
import os
from typing import Iterable, Sequence, Tuple
import re
BANNER = "# This file was automatically generated by scripts/update_sources.py"
VENDOR_SOURCES = (
"src/vendor/glad.c",
"src/vendor/lodepng.c",
"src/vendor/stb.c",
"src/vendor/utf8proc/utf8proc.c",
)
def get_sources(
sources: bool = False, includes: bool = False
) -> Iterable[Tuple[str, Sequence[str]]]:
"""Iterate over sources and headers with sub-folders grouped together."""
re_inclusion = []
if sources:
re_inclusion.append("c|cpp")
if includes:
re_inclusion.append("h|hpp")
re_valid = re.compile(r".*\.(%s)$" % ("|".join(re_inclusion),))
for curpath, dirs, files in os.walk("src/libtcod"):
# Ignore hidden directories.
dirs[:] = [dir for dir in dirs if not dir.startswith(".")]
files = [
os.path.join(curpath, f).replace("\\", "/")
for f in files
if re_valid.match(f)
]
group = os.path.relpath(curpath, "src").replace("\\", "/")
yield group, files
if sources:
yield "vendor", VENDOR_SOURCES
def all_sources(includes: bool = False) -> Iterable[str]:
"""Iterate over all sources needed to compile libtcod."""
for _, sources in get_sources(sources=True, includes=includes):
yield from sources
def generate_am() -> str:
"""Returns an AutoMake script.
This might be run on Windows, so it must return Unix file separators.
"""
out = f"{BANNER}\n"
for group, files in get_sources(sources=False, includes=True):
include_name = group.replace("/", "_")
files = ["../../" + f for f in files]
out += f"\n{include_name}_includedir = $(includedir)/{group}"
out += f"\n{include_name}_include_HEADERS = \\"
out += "\n\t" + " \\\n\t".join(files)
out += "\n"
out += "\nlibtcod_la_SOURCES = \\"
out += "\n\t" + " \\\n\t".join("../../" + f for f in all_sources())
out += "\n"
return out
def generate_cmake() -> str:
"""Returns a CMake script with libtcod's sources."""
out = f"{BANNER}"
out += "\ntarget_sources(TCOD PRIVATE\n "
out += "\n ".join(os.path.relpath(f, "src") for f in all_sources(includes=True))
out += "\n)"
for group, files in get_sources(sources=True, includes=True):
group = group.replace("/", r"\\")
out += f"\nsource_group({group} FILES\n "
out += "\n ".join(os.path.relpath(f, "src") for f in files)
out += "\n)"
out += "\n"
return out
def main() -> None:
# Change to project root directory, using this file as a reference.
os.chdir(os.path.join(os.path.split(__file__)[0], ".."))
with open("buildsys/autotools/sources.am", "w") as file:
file.write(generate_am())
with open("src/sources.cmake", "w") as file:
file.write(generate_cmake())
if __name__ == "__main__":
main()
|
the-stack_106_24673 | #@+leo-ver=5-thin
#@+node:ekr.20031218072017.3439: * @file leoPlugins.py
"""Classes relating to Leo's plugin architecture."""
import sys
from typing import List
from leo.core import leoGlobals as g
# Define modules that may be enabled by default
# but that might not load because imports may fail.
optional_modules = [
'leo.plugins.livecode',
'leo.plugins.cursesGui2',
]
#@+others
#@+node:ekr.20100908125007.6041: ** Top-level functions (leoPlugins.py)
def init():
"""Init g.app.pluginsController."""
g.app.pluginsController = LeoPluginsController()
def registerHandler(tags, fn):
"""A wrapper so plugins can still call leoPlugins.registerHandler."""
return g.app.pluginsController.registerHandler(tags, fn)
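# Illustrative sketch of how a plugin module typically registers a hook; the handler
# body is a placeholder, and only the 'after-create-leo-frame' hook name is taken
# from the docstrings below.
#
#   def onCreate(tag, keywords):
#       c = keywords.get('c')
#       # ... react to the newly created frame ...
#
#   registerHandler('after-create-leo-frame', onCreate)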
#@+node:ville.20090222141717.2: ** TryNext (Exception)
class TryNext(Exception):
"""Try next hook exception.
Raise this in your hook function to indicate that the next hook handler
should be used to handle the operation. If you pass arguments to the
constructor those arguments will be used by the next hook instead of the
original ones.
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.args = args
self.kwargs = kwargs
#@+node:ekr.20100908125007.6033: ** class CommandChainDispatcher
class CommandChainDispatcher:
""" Dispatch calls to a chain of commands until some func can handle it
Usage: instantiate, execute "add" to add commands (with optional
priority), execute normally via f() calling mechanism.
"""
def __init__(self, commands=None):
if commands is None:
self.chain = []
else:
self.chain = commands
def __call__(self, *args, **kw):
""" Command chain is called just like normal func.
This will call all funcs in chain with the same args as were given to this
function, and return the result of first func that didn't raise
TryNext """
for prio, cmd in self.chain:
#print "prio",prio,"cmd",cmd #dbg
try:
ret = cmd(*args, **kw)
return ret
except TryNext as exc:
if exc.args or exc.kwargs:
args = exc.args
kw = exc.kwargs
# if no function will accept it, raise TryNext up to the caller
raise TryNext
def __str__(self):
return str(self.chain)
def add(self, func, priority=0):
""" Add a func to the cmd chain with given priority """
self.chain.append((priority, func),)
self.chain.sort(key=lambda z: z[0])
def __iter__(self):
""" Return all objects in chain.
Handy if the objects are not callable.
"""
return iter(self.chain)
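# Usage sketch (illustrative, not taken from the Leo codebase): a handler that cannot
# handle a call raises TryNext so the next handler in priority order is tried.
#
#   def first(x):
#       raise TryNext       # decline; fall through to the next handler
#
#   def second(x):
#       return x * 2
#
#   chain = CommandChainDispatcher()
#   chain.add(first, priority=0)
#   chain.add(second, priority=1)
#   chain(21)               # -> 42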
#@+node:ekr.20100908125007.6009: ** class BaseLeoPlugin
class BaseLeoPlugin:
#@+<<docstring>>
#@+node:ekr.20100908125007.6010: *3* <<docstring>>
"""A Convenience class to simplify plugin authoring
.. contents::
Usage
=====
Initialization
--------------
- import the base class::
        from leo.core.leoPlugins import BaseLeoPlugin
    - create a class which inherits from BaseLeoPlugin::
        class myPlugin(BaseLeoPlugin):
- in the __init__ method of the class, call the parent constructor::
def __init__(self, tag, keywords):
super().__init__(tag, keywords)
- put the actual plugin code into a method; for this example, the work
is done by myPlugin.handler()
- put the class in a file which lives in the <LeoDir>/plugins directory
for this example it is named myPlugin.py
- add code to register the plugin::
leoPlugins.registerHandler("after-create-leo-frame", Hello)
Configuration
-------------
BaseLeoPlugins has 3 *methods* for setting commands
- setCommand::
def setCommand(self, commandName, handler,
shortcut = None, pane = 'all', verbose = True):
- setMenuItem::
def setMenuItem(self, menu, commandName = None, handler = None):
- setButton::
def setButton(self, buttonText = None, commandName = None, color = None):
*variables*
:commandName: the string typed into minibuffer to execute the ``handler``
:handler: the method in the class which actually does the work
:shortcut: the key combination to activate the command
    :menu: a string designating one of the menus ('File', 'Edit', 'Outline', ...)
:buttonText: the text to put on the button if one is being created.
Example
=======
Contents of file ``<LeoDir>/plugins/hello.py``::
class Hello(BaseLeoPlugin):
def __init__(self, tag, keywords):
# call parent __init__
super().__init__(tag, keywords)
# if the plugin object defines only one command,
# just give it a name. You can then create a button and menu entry
self.setCommand('Hello', self.hello)
self.setButton()
self.setMenuItem('Cmds')
# create a command with a shortcut
self.setCommand('Hola', self.hola, 'Alt-Ctrl-H')
# create a button using different text than commandName
self.setButton('Hello in Spanish')
# create a menu item with default text
self.setMenuItem('Cmds')
# define a command using setMenuItem
self.setMenuItem('Cmds', 'Ciao baby', self.ciao)
def hello(self, event):
g.pr("hello from node %s" % self.c.p.h)
def hola(self, event):
g.pr("hola from node %s" % self.c.p.h)
def ciao(self, event):
g.pr("ciao baby (%s)" % self.c.p.h)
leoPlugins.registerHandler("after-create-leo-frame", Hello)
"""
#@-<<docstring>>
#@+others
#@+node:ekr.20100908125007.6012: *3* __init__ (BaseLeoPlugin)
def __init__(self, tag, keywords):
"""Set self.c to be the ``commander`` of the active node
"""
self.c = keywords['c']
self.commandNames = []
#@+node:ekr.20100908125007.6013: *3* setCommand
def setCommand(self, commandName, handler,
shortcut='', pane='all', verbose=True):
"""Associate a command name with handler code,
optionally defining a keystroke shortcut
"""
self.commandNames.append(commandName)
self.commandName = commandName
self.shortcut = shortcut
self.handler = handler
self.c.k.registerCommand(commandName, handler,
pane=pane, shortcut=shortcut, verbose=verbose)
#@+node:ekr.20100908125007.6014: *3* setMenuItem
def setMenuItem(self, menu, commandName=None, handler=None):
"""Create a menu item in 'menu' using text 'commandName' calling handler 'handler'
if commandName and handler are none, use the most recently defined values
"""
# setMenuItem can create a command, or use a previously defined one.
if commandName is None:
commandName = self.commandName
# make sure commandName is in the list of commandNames
else:
if commandName not in self.commandNames:
self.commandNames.append(commandName)
if handler is None:
handler = self.handler
table = ((commandName, None, handler),)
self.c.frame.menu.createMenuItemsFromTable(menu, table)
#@+node:ekr.20100908125007.6015: *3* setButton
def setButton(self, buttonText=None, commandName=None, color=None):
"""Associate an existing command with a 'button'
"""
if buttonText is None:
buttonText = self.commandName
if commandName is None:
commandName = self.commandName
else:
if commandName not in self.commandNames:
raise NameError(f"setButton error, {commandName} is not a commandName")
if color is None:
color = 'grey'
script = f"c.k.simulateCommand('{self.commandName}')"
g.app.gui.makeScriptButton(
self.c,
args=None,
script=script,
buttonText=buttonText, bg=color)
#@-others
#@+node:ekr.20100908125007.6007: ** class LeoPluginsController
class LeoPluginsController:
"""The global plugins controller, g.app.pluginsController"""
#@+others
#@+node:ekr.20100909065501.5954: *3* plugins.Birth
#@+node:ekr.20100908125007.6034: *4* plugins.ctor & reloadSettings
def __init__(self):
self.handlers = {}
self.loadedModulesFilesDict = {}
# Keys are regularized module names, values are the names of .leo files
# containing @enabled-plugins nodes that caused the plugin to be loaded
self.loadedModules = {}
# Keys are regularized module names, values are modules.
self.loadingModuleNameStack = []
# The stack of module names.
# The top is the module being loaded.
self.signonModule = None # A hack for plugin_signon.
# Settings. Set these here in case finishCreate is never called.
self.warn_on_failure = True
assert g
g.act_on_node = CommandChainDispatcher()
g.visit_tree_item = CommandChainDispatcher()
g.tree_popup_handlers = []
#@+node:ekr.20100909065501.5974: *4* plugins.finishCreate & reloadSettings
def finishCreate(self):
self.reloadSettings()
def reloadSettings(self):
self.warn_on_failure = g.app.config.getBool(
'warn_when_plugins_fail_to_load', default=True)
#@+node:ekr.20100909065501.5952: *3* plugins.Event handlers
#@+node:ekr.20161029060545.1: *4* plugins.on_idle
def on_idle(self):
"""Call all idle-time hooks."""
if g.app.idle_time_hooks_enabled:
for frame in g.app.windowList:
c = frame.c
# Do NOT compute c.currentPosition.
# This would be a MAJOR leak of positions.
g.doHook("idle", c=c)
#@+node:ekr.20100908125007.6017: *4* plugins.doHandlersForTag & helper
def doHandlersForTag(self, tag, keywords):
"""
Execute all handlers for a given tag, in alphabetical order.
The caller, doHook, catches all exceptions.
"""
if g.app.killed:
return None
#
# Execute hooks in some random order.
# Return if one of them returns a non-None result.
for bunch in self.handlers.get(tag, []):
val = self.callTagHandler(bunch, tag, keywords)
if val is not None:
return val
if 'all' in self.handlers:
bunches = self.handlers.get('all')
for bunch in bunches:
self.callTagHandler(bunch, tag, keywords)
return None
#@+node:ekr.20100908125007.6016: *5* plugins.callTagHandler
def callTagHandler(self, bunch, tag, keywords):
"""Call the event handler."""
handler, moduleName = bunch.fn, bunch.moduleName
# Make sure the new commander exists.
for key in ('c', 'new_c'):
c = keywords.get(key)
if c:
# Make sure c exists and has a frame.
if not c.exists or not hasattr(c, 'frame'):
# g.pr('skipping tag %s: c does not exist or does not have a frame.' % tag)
return None
# Calls to registerHandler from inside the handler belong to moduleName.
self.loadingModuleNameStack.append(moduleName)
try:
result = handler(tag, keywords)
except Exception:
g.es(f"hook failed: {tag}, {handler}, {moduleName}")
g.es_exception()
result = None
self.loadingModuleNameStack.pop()
return result
#@+node:ekr.20100908125007.6018: *4* plugins.doPlugins (g.app.hookFunction)
def doPlugins(self, tag, keywords):
"""The default g.app.hookFunction."""
if g.app.killed:
return None
if tag in ('start1', 'open0'):
self.loadHandlers(tag, keywords)
return self.doHandlersForTag(tag, keywords)
#@+node:ekr.20100909065501.5950: *3* plugins.Information
#@+node:ekr.20100908125007.6019: *4* plugins.getHandlersForTag
def getHandlersForTag(self, tags):
if isinstance(tags, (list, tuple)):
result = []
for tag in tags:
aList = self.getHandlersForOneTag(tag)
result.extend(aList)
return result
return self.getHandlersForOneTag(tags)
def getHandlersForOneTag(self, tag):
aList = self.handlers.get(tag, [])
return aList
#@+node:ekr.20100910075900.10204: *4* plugins.getLoadedPlugins
def getLoadedPlugins(self):
return list(self.loadedModules.keys())
#@+node:ekr.20100908125007.6020: *4* plugins.getPluginModule
def getPluginModule(self, moduleName):
return self.loadedModules.get(moduleName)
#@+node:ekr.20100908125007.6021: *4* plugins.isLoaded
def isLoaded(self, fn):
return self.regularizeName(fn) in self.loadedModules
#@+node:ekr.20100908125007.6025: *4* plugins.printHandlers
def printHandlers(self, c, moduleName=None):
"""Print the handlers for each plugin."""
tabName = 'Plugins'
c.frame.log.selectTab(tabName)
if moduleName:
            s = f'handlers for {moduleName}...\n'
else:
s = 'all plugin handlers...\n'
g.es(s + '\n', tabName=tabName)
data = []
        modules: Dict[str, List[str]] = {}
for tag in self.handlers:
bunches = self.handlers.get(tag)
for bunch in bunches:
name = bunch.moduleName
tags = modules.get(name, [])
tags.append(tag)
modules[name] = tags
n = 4
for key in sorted(modules):
tags = modules.get(key)
if moduleName in (None, key):
for tag in tags:
n = max(n, len(tag))
data.append((tag, key),)
lines = ["%*s %s\n" % (-n, s1, s2) for (s1, s2) in data]
g.es('', ''.join(lines), tabName=tabName)
#@+node:ekr.20100908125007.6026: *4* plugins.printPlugins
def printPlugins(self, c):
"""Print all enabled plugins."""
tabName = 'Plugins'
c.frame.log.selectTab(tabName)
data = []
data.append('enabled plugins...\n')
for z in sorted(self.loadedModules):
data.append(z)
lines = [f"{z}\n" for z in data]
g.es('', ''.join(lines), tabName=tabName)
#@+node:ekr.20100908125007.6027: *4* plugins.printPluginsInfo
def printPluginsInfo(self, c):
"""
Print the file name responsible for loading a plugin.
This is the first .leo file containing an @enabled-plugins node
that enables the plugin.
"""
d = self.loadedModulesFilesDict
tabName = 'Plugins'
c.frame.log.selectTab(tabName)
data = []; n = 4
for moduleName in d:
fileName = d.get(moduleName)
n = max(n, len(moduleName))
data.append((moduleName, fileName),)
lines = ["%*s %s\n" % (-n, s1, s2) for (s1, s2) in data]
g.es('', ''.join(lines), tabName=tabName)
#@+node:ekr.20100909065501.5949: *4* plugins.regularizeName
def regularizeName(self, moduleOrFileName):
"""
Return the module name used as a key to this modules dictionaries.
We *must* allow .py suffixes, for compatibility with @enabled-plugins nodes.
"""
if not moduleOrFileName.endswith('.py'):
# A module name. Return it unchanged.
return moduleOrFileName
#
        # 1880: The legacy code implicitly assumed that os.path.dirname(fn) was empty!
# The new code explicitly ignores any directories in the path.
fn = g.os_path_basename(moduleOrFileName)
return "leo.plugins." + fn[:-3]
#@+node:ekr.20100909065501.5953: *3* plugins.Load & unload
#@+node:ekr.20100908125007.6022: *4* plugins.loadHandlers
def loadHandlers(self, tag, keys):
"""
Load all enabled plugins.
Using a module name (without the trailing .py) allows a plugin to
be loaded from outside the leo/plugins directory.
"""
def pr(*args, **keys):
if not g.app.unitTesting:
g.es_print(*args, **keys)
s = g.app.config.getEnabledPlugins()
if not s: return
if tag == 'open0' and not g.app.silentMode and not g.app.batchMode:
if 0:
s2 = f"@enabled-plugins found in {g.app.config.enabledPluginsFileName}"
g.blue(s2)
for plugin in s.splitlines():
if plugin.strip() and not plugin.lstrip().startswith('#'):
self.loadOnePlugin(plugin.strip(), tag=tag)
#@+node:ekr.20100908125007.6024: *4* plugins.loadOnePlugin & helper functions
def loadOnePlugin(self, moduleOrFileName, tag='open0', verbose=False):
"""
Load one plugin from a file name or module.
Use extensive tracing if --trace-plugins is in effect.
Using a module name allows plugins to be loaded from outside the leo/plugins directory.
"""
global optional_modules
trace = 'plugins' in g.app.debug
def report(message):
if trace and not g.unitTesting:
g.es_print(f"loadOnePlugin: {message}")
# Define local helper functions.
#@+others
#@+node:ekr.20180528160855.1: *5* function:callInitFunction
def callInitFunction(result):
"""True to call the top-level init function."""
try:
# Indicate success only if init_result is True.
init_result = result.init()
# Careful: this may throw an exception.
if init_result not in (True, False):
report(f"{moduleName}.init() did not return a bool")
if init_result:
self.loadedModules[moduleName] = result
self.loadedModulesFilesDict[moduleName] = (
g.app.config.enabledPluginsFileName
)
else:
report(f"{moduleName}.init() returned False")
result = None
except Exception:
report(f"exception loading plugin: {moduleName}")
g.es_exception()
result = None
return result
#@+node:ekr.20180528162604.1: *5* function:finishImport
def finishImport(result):
"""Handle last-minute checks."""
if tag == 'unit-test-load':
return result # Keep the result, but do no more.
if hasattr(result, 'init'):
return callInitFunction(result)
#
# No top-level init function.
if g.app.unitTesting:
# Do *not* load the module.
self.loadedModules[moduleName] = None
return None
# Guess that the module was loaded correctly.
report(f"fyi: no top-level init() function in {moduleName}")
self.loadedModules[moduleName] = result
return result
#@+node:ekr.20180528160744.1: *5* function:loadOnePluginHelper
def loadOnePluginHelper(moduleName):
result = None
try:
__import__(moduleName)
# Look up through sys.modules, __import__ returns toplevel package
result = sys.modules[moduleName]
except g.UiTypeException:
report(f"plugin {moduleName} does not support {g.app.gui.guiName()} gui")
except ImportError:
report(f"error importing plugin: {moduleName}")
# except ModuleNotFoundError:
# report('module not found: %s' % moduleName)
except SyntaxError:
report(f"syntax error importing plugin: {moduleName}")
except Exception:
report(f"exception importing plugin: {moduleName}")
g.es_exception()
return result
#@+node:ekr.20180528162300.1: *5* function:reportFailedImport
def reportFailedImport():
"""Report a failed import."""
if g.app.batchMode or g.app.inBridge or g.unitTesting:
return
if (
self.warn_on_failure and
tag == 'open0' and
not g.app.gui.guiName().startswith('curses') and
moduleName not in optional_modules
):
report(f"can not load enabled plugin: {moduleName}")
#@-others
if not g.app.enablePlugins:
report(f"plugins disabled: {moduleOrFileName}")
return None
if moduleOrFileName.startswith('@'):
report(f"ignoring Leo directive: {moduleOrFileName}")
return None
# Return None, not False, to keep pylint happy.
# Allow Leo directives in @enabled-plugins nodes.
moduleName = self.regularizeName(moduleOrFileName)
if self.isLoaded(moduleName):
module = self.loadedModules.get(moduleName)
return module
assert g.app.loadDir
moduleName = g.toUnicode(moduleName)
#
# Try to load the plugin.
try:
self.loadingModuleNameStack.append(moduleName)
result = loadOnePluginHelper(moduleName)
finally:
self.loadingModuleNameStack.pop()
if not result:
if trace:
reportFailedImport()
return None
#
# Last-minute checks.
try:
self.loadingModuleNameStack.append(moduleName)
result = finishImport(result)
finally:
self.loadingModuleNameStack.pop()
if result:
# #1688: Plugins can update globalDirectiveList.
# Recalculate g.directives_pat.
g.update_directives_pat()
report(f"loaded: {moduleName}")
self.signonModule = result # for self.plugin_signon.
return result
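    # Minimal sketch (the module name is an example; any importable plugin module works):
    #
    #   pc = g.app.pluginsController
    #   module = pc.loadOnePlugin('leo.plugins.mod_scripting', verbose=True)
    #   if module is None:
    #       g.es('plugin failed to load')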
#@+node:ekr.20031218072017.1318: *4* plugins.plugin_signon
def plugin_signon(self, module_name, verbose=False):
"""Print the plugin signon."""
        # This is called as the result of the imports
# in self.loadOnePlugin
m = self.signonModule
if verbose:
g.es(f"...{m.__name__}.py v{m.__version__}: {g.plugin_date(m)}")
g.pr(m.__name__, m.__version__)
self.signonModule = None # Prevent double signons.
#@+node:ekr.20100908125007.6030: *4* plugins.unloadOnePlugin
def unloadOnePlugin(self, moduleOrFileName, verbose=False):
moduleName = self.regularizeName(moduleOrFileName)
if self.isLoaded(moduleName):
if verbose:
g.pr('unloading', moduleName)
del self.loadedModules[moduleName]
for tag in self.handlers:
bunches = self.handlers.get(tag)
bunches = [bunch for bunch in bunches if bunch.moduleName != moduleName]
self.handlers[tag] = bunches
#@+node:ekr.20100909065501.5951: *3* plugins.Registration
#@+node:ekr.20100908125007.6028: *4* plugins.registerExclusiveHandler
def registerExclusiveHandler(self, tags, fn):
""" Register one or more exclusive handlers"""
if isinstance(tags, (list, tuple)):
for tag in tags:
self.registerOneExclusiveHandler(tag, fn)
else:
self.registerOneExclusiveHandler(tags, fn)
def registerOneExclusiveHandler(self, tag, fn):
"""Register one exclusive handler"""
try:
moduleName = self.loadingModuleNameStack[-1]
except IndexError:
moduleName = '<no module>'
if 0:
if g.app.unitTesting: g.pr('')
g.pr(f"{g.app.unitTesting:6} {moduleName:15} {tag:25} {fn.__name__}")
if g.app.unitTesting: return
if tag in self.handlers:
g.es(f"*** Two exclusive handlers for '{tag}'")
else:
bunch = g.Bunch(fn=fn, moduleName=moduleName, tag='handler')
self.handlers[tag] = [bunch] # Vitalije
#@+node:ekr.20100908125007.6029: *4* plugins.registerHandler & registerOneHandler
def registerHandler(self, tags, fn):
""" Register one or more handlers"""
if isinstance(tags, (list, tuple)):
for tag in tags:
self.registerOneHandler(tag, fn)
else:
self.registerOneHandler(tags, fn)
def registerOneHandler(self, tag, fn):
"""Register one handler"""
try:
moduleName = self.loadingModuleNameStack[-1]
except IndexError:
moduleName = '<no module>'
if 0:
if g.app.unitTesting: g.pr('')
g.pr(f"{g.app.unitTesting:6} {moduleName:15} {tag:25} {fn.__name__}")
items = self.handlers.get(tag, [])
functions = [z.fn for z in items]
if fn not in functions: # Vitalije
bunch = g.Bunch(fn=fn, moduleName=moduleName, tag='handler')
items.append(bunch)
self.handlers[tag] = items
#@+node:ekr.20100908125007.6031: *4* plugins.unregisterHandler
def unregisterHandler(self, tags, fn):
if isinstance(tags, (list, tuple)):
for tag in tags:
self.unregisterOneHandler(tag, fn)
else:
self.unregisterOneHandler(tags, fn)
def unregisterOneHandler(self, tag, fn):
bunches = self.handlers.get(tag)
bunches = [bunch for bunch in bunches if bunch and bunch.fn != fn]
self.handlers[tag] = bunches
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@@pagewidth 70
#@-leo
|
the-stack_106_24674 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import argparse
import cv2 as cv
import numpy as np
import mediapipe as mp
from utils import CvFpsCalc
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--width", help='cap width', type=int, default=960)
parser.add_argument("--height", help='cap height', type=int, default=540)
# parser.add_argument('--upper_body_only', action='store_true') # 0.8.3 or less
parser.add_argument("--model_complexity",
help='model_complexity(0,1(default),2)',
type=int,
default=1)
parser.add_argument("--min_detection_confidence",
help='min_detection_confidence',
type=float,
default=0.5)
parser.add_argument("--min_tracking_confidence",
help='min_tracking_confidence',
                        type=float,
default=0.5)
parser.add_argument('--use_brect', action='store_true')
parser.add_argument('--plot_world_landmark', action='store_true')
args = parser.parse_args()
return args
def main():
    # Parse arguments #########################################################
args = get_args()
cap_device = args.device
cap_width = args.width
cap_height = args.height
# upper_body_only = args.upper_body_only
model_complexity = args.model_complexity
min_detection_confidence = args.min_detection_confidence
min_tracking_confidence = args.min_tracking_confidence
use_brect = args.use_brect
plot_world_landmark = args.plot_world_landmark
    # Prepare the camera ######################################################
cap = cv.VideoCapture(cap_device)
cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
    # Load the model ##########################################################
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(
# upper_body_only=upper_body_only,
model_complexity=model_complexity,
min_detection_confidence=min_detection_confidence,
min_tracking_confidence=min_tracking_confidence,
)
    # FPS measurement module ##################################################
cvFpsCalc = CvFpsCalc(buffer_len=10)
    # World-landmark plot setup ###############################################
if plot_world_landmark:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
fig.subplots_adjust(left=0.0, right=1, bottom=0, top=1)
while True:
display_fps = cvFpsCalc.get()
        # Camera capture ######################################################
ret, image = cap.read()
if not ret:
break
        image = cv.flip(image, 1)  # Mirror display
debug_image = copy.deepcopy(image)
        # Run detection #######################################################
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
results = pose.process(image)
        # Drawing #############################################################
        if results.pose_landmarks is not None:
            # Compute the bounding rectangle
            brect = calc_bounding_rect(debug_image, results.pose_landmarks)
            # Draw
debug_image = draw_landmarks(
debug_image,
results.pose_landmarks,
# upper_body_only,
)
debug_image = draw_bounding_rect(use_brect, debug_image, brect)
        # Plot world landmarks ################################################
if plot_world_landmark:
if results.pose_world_landmarks is not None:
plot_world_landmarks(
plt,
ax,
results.pose_world_landmarks,
)
cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30),
cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)
        # Key handling (ESC: quit) ############################################
key = cv.waitKey(1)
if key == 27: # ESC
break
        # Show the frame ######################################################
cv.imshow('MediaPipe Pose Demo', debug_image)
cap.release()
cv.destroyAllWindows()
def calc_bounding_rect(image, landmarks):
image_width, image_height = image.shape[1], image.shape[0]
landmark_array = np.empty((0, 2), int)
for _, landmark in enumerate(landmarks.landmark):
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
landmark_point = [np.array((landmark_x, landmark_y))]
landmark_array = np.append(landmark_array, landmark_point, axis=0)
x, y, w, h = cv.boundingRect(landmark_array)
return [x, y, x + w, y + h]
def draw_landmarks(
image,
landmarks,
# upper_body_only,
visibility_th=0.5,
):
image_width, image_height = image.shape[1], image.shape[0]
landmark_point = []
for index, landmark in enumerate(landmarks.landmark):
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
landmark_z = landmark.z
landmark_point.append([landmark.visibility, (landmark_x, landmark_y)])
if landmark.visibility < visibility_th:
continue
        if index == 0:  # Nose
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 1:  # Right eye: inner corner
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 2:  # Right eye: pupil
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 3:  # Right eye: outer corner
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 4:  # Left eye: inner corner
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 5:  # Left eye: pupil
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 6:  # Left eye: outer corner
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 7:  # Right ear
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 8:  # Left ear
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 9:  # Mouth: left edge
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 10:  # Mouth: right edge
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 11:  # Right shoulder
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 12:  # Left shoulder
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 13:  # Right elbow
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 14:  # Left elbow
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 15:  # Right wrist
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 16:  # Left wrist
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 17:  # Right hand 1 (outer edge)
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 18:  # Left hand 1 (outer edge)
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 19:  # Right hand 2 (tip)
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 20:  # Left hand 2 (tip)
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 21:  # Right hand 3 (inner edge)
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 22:  # Left hand 3 (inner edge)
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 23:  # Waist (right side)
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 24:  # Waist (left side)
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 25:  # Right knee
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 26:  # Left knee
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 27:  # Right ankle
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 28:  # Left ankle
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 29:  # Right heel
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 30:  # Left heel
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 31:  # Right toe
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index == 32:  # Left toe
            cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
# if not upper_body_only:
if True:
cv.putText(image, "z:" + str(round(landmark_z, 3)),
(landmark_x - 10, landmark_y - 10),
cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1,
cv.LINE_AA)
if len(landmark_point) > 0:
        # Right eye
if landmark_point[1][0] > visibility_th and landmark_point[2][
0] > visibility_th:
cv.line(image, landmark_point[1][1], landmark_point[2][1],
(0, 255, 0), 2)
if landmark_point[2][0] > visibility_th and landmark_point[3][
0] > visibility_th:
cv.line(image, landmark_point[2][1], landmark_point[3][1],
(0, 255, 0), 2)
        # Left eye
if landmark_point[4][0] > visibility_th and landmark_point[5][
0] > visibility_th:
cv.line(image, landmark_point[4][1], landmark_point[5][1],
(0, 255, 0), 2)
if landmark_point[5][0] > visibility_th and landmark_point[6][
0] > visibility_th:
cv.line(image, landmark_point[5][1], landmark_point[6][1],
(0, 255, 0), 2)
        # Mouth
if landmark_point[9][0] > visibility_th and landmark_point[10][
0] > visibility_th:
cv.line(image, landmark_point[9][1], landmark_point[10][1],
(0, 255, 0), 2)
        # Shoulders
if landmark_point[11][0] > visibility_th and landmark_point[12][
0] > visibility_th:
cv.line(image, landmark_point[11][1], landmark_point[12][1],
(0, 255, 0), 2)
        # Right arm
if landmark_point[11][0] > visibility_th and landmark_point[13][
0] > visibility_th:
cv.line(image, landmark_point[11][1], landmark_point[13][1],
(0, 255, 0), 2)
if landmark_point[13][0] > visibility_th and landmark_point[15][
0] > visibility_th:
cv.line(image, landmark_point[13][1], landmark_point[15][1],
(0, 255, 0), 2)
        # Left arm
if landmark_point[12][0] > visibility_th and landmark_point[14][
0] > visibility_th:
cv.line(image, landmark_point[12][1], landmark_point[14][1],
(0, 255, 0), 2)
if landmark_point[14][0] > visibility_th and landmark_point[16][
0] > visibility_th:
cv.line(image, landmark_point[14][1], landmark_point[16][1],
(0, 255, 0), 2)
        # Right hand
if landmark_point[15][0] > visibility_th and landmark_point[17][
0] > visibility_th:
cv.line(image, landmark_point[15][1], landmark_point[17][1],
(0, 255, 0), 2)
if landmark_point[17][0] > visibility_th and landmark_point[19][
0] > visibility_th:
cv.line(image, landmark_point[17][1], landmark_point[19][1],
(0, 255, 0), 2)
if landmark_point[19][0] > visibility_th and landmark_point[21][
0] > visibility_th:
cv.line(image, landmark_point[19][1], landmark_point[21][1],
(0, 255, 0), 2)
if landmark_point[21][0] > visibility_th and landmark_point[15][
0] > visibility_th:
cv.line(image, landmark_point[21][1], landmark_point[15][1],
(0, 255, 0), 2)
        # Left hand
if landmark_point[16][0] > visibility_th and landmark_point[18][
0] > visibility_th:
cv.line(image, landmark_point[16][1], landmark_point[18][1],
(0, 255, 0), 2)
if landmark_point[18][0] > visibility_th and landmark_point[20][
0] > visibility_th:
cv.line(image, landmark_point[18][1], landmark_point[20][1],
(0, 255, 0), 2)
if landmark_point[20][0] > visibility_th and landmark_point[22][
0] > visibility_th:
cv.line(image, landmark_point[20][1], landmark_point[22][1],
(0, 255, 0), 2)
if landmark_point[22][0] > visibility_th and landmark_point[16][
0] > visibility_th:
cv.line(image, landmark_point[22][1], landmark_point[16][1],
(0, 255, 0), 2)
        # Torso
if landmark_point[11][0] > visibility_th and landmark_point[23][
0] > visibility_th:
cv.line(image, landmark_point[11][1], landmark_point[23][1],
(0, 255, 0), 2)
if landmark_point[12][0] > visibility_th and landmark_point[24][
0] > visibility_th:
cv.line(image, landmark_point[12][1], landmark_point[24][1],
(0, 255, 0), 2)
if landmark_point[23][0] > visibility_th and landmark_point[24][
0] > visibility_th:
cv.line(image, landmark_point[23][1], landmark_point[24][1],
(0, 255, 0), 2)
if len(landmark_point) > 25:
            # Right leg
if landmark_point[23][0] > visibility_th and landmark_point[25][
0] > visibility_th:
cv.line(image, landmark_point[23][1], landmark_point[25][1],
(0, 255, 0), 2)
if landmark_point[25][0] > visibility_th and landmark_point[27][
0] > visibility_th:
cv.line(image, landmark_point[25][1], landmark_point[27][1],
(0, 255, 0), 2)
if landmark_point[27][0] > visibility_th and landmark_point[29][
0] > visibility_th:
cv.line(image, landmark_point[27][1], landmark_point[29][1],
(0, 255, 0), 2)
if landmark_point[29][0] > visibility_th and landmark_point[31][
0] > visibility_th:
cv.line(image, landmark_point[29][1], landmark_point[31][1],
(0, 255, 0), 2)
            # Left leg
if landmark_point[24][0] > visibility_th and landmark_point[26][
0] > visibility_th:
cv.line(image, landmark_point[24][1], landmark_point[26][1],
(0, 255, 0), 2)
if landmark_point[26][0] > visibility_th and landmark_point[28][
0] > visibility_th:
cv.line(image, landmark_point[26][1], landmark_point[28][1],
(0, 255, 0), 2)
if landmark_point[28][0] > visibility_th and landmark_point[30][
0] > visibility_th:
cv.line(image, landmark_point[28][1], landmark_point[30][1],
(0, 255, 0), 2)
if landmark_point[30][0] > visibility_th and landmark_point[32][
0] > visibility_th:
cv.line(image, landmark_point[30][1], landmark_point[32][1],
(0, 255, 0), 2)
return image
def plot_world_landmarks(
plt,
ax,
landmarks,
visibility_th=0.5,
):
landmark_point = []
for index, landmark in enumerate(landmarks.landmark):
landmark_point.append([landmark.visibility, (landmark.x, landmark.y, landmark.z)])
face_index_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
right_arm_index_list = [11, 13, 15, 17, 19, 21]
left_arm_index_list = [12, 14, 16, 18, 20, 22]
right_body_side_index_list = [11, 23, 25, 27, 29, 31]
left_body_side_index_list = [12, 24, 26, 28, 30, 32]
shoulder_index_list = [11, 12]
waist_index_list = [23, 24]
    # Face
face_x, face_y, face_z = [], [], []
for index in face_index_list:
point = landmark_point[index][1]
face_x.append(point[0])
face_y.append(point[2])
face_z.append(point[1] * (-1))
    # Right arm
right_arm_x, right_arm_y, right_arm_z = [], [], []
for index in right_arm_index_list:
point = landmark_point[index][1]
right_arm_x.append(point[0])
right_arm_y.append(point[2])
right_arm_z.append(point[1] * (-1))
    # Left arm
left_arm_x, left_arm_y, left_arm_z = [], [], []
for index in left_arm_index_list:
point = landmark_point[index][1]
left_arm_x.append(point[0])
left_arm_y.append(point[2])
left_arm_z.append(point[1] * (-1))
    # Right side of the body
right_body_side_x, right_body_side_y, right_body_side_z = [], [], []
for index in right_body_side_index_list:
point = landmark_point[index][1]
right_body_side_x.append(point[0])
right_body_side_y.append(point[2])
right_body_side_z.append(point[1] * (-1))
    # Left side of the body
left_body_side_x, left_body_side_y, left_body_side_z = [], [], []
for index in left_body_side_index_list:
point = landmark_point[index][1]
left_body_side_x.append(point[0])
left_body_side_y.append(point[2])
left_body_side_z.append(point[1] * (-1))
    # Shoulders
shoulder_x, shoulder_y, shoulder_z = [], [], []
for index in shoulder_index_list:
point = landmark_point[index][1]
shoulder_x.append(point[0])
shoulder_y.append(point[2])
shoulder_z.append(point[1] * (-1))
    # Waist
waist_x, waist_y, waist_z = [], [], []
for index in waist_index_list:
point = landmark_point[index][1]
waist_x.append(point[0])
waist_y.append(point[2])
waist_z.append(point[1] * (-1))
ax.cla()
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
ax.scatter(face_x, face_y, face_z)
ax.plot(right_arm_x, right_arm_y, right_arm_z)
ax.plot(left_arm_x, left_arm_y, left_arm_z)
ax.plot(right_body_side_x, right_body_side_y, right_body_side_z)
ax.plot(left_body_side_x, left_body_side_y, left_body_side_z)
ax.plot(shoulder_x, shoulder_y, shoulder_z)
ax.plot(waist_x, waist_y, waist_z)
plt.pause(.001)
return
def draw_bounding_rect(use_brect, image, brect):
if use_brect:
        # Bounding rectangle
cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
(0, 255, 0), 2)
return image
if __name__ == '__main__':
main()
|
the-stack_106_24675 | # Chopsticks
from dataclasses import dataclass
from typing import Tuple, Generator
from rl_games.core.game import Game, PlayerIndex
MAX_ROUNDS = 100
FingerCount = int
HandIndex = int
PlayerState = Tuple[FingerCount, ...]
@dataclass(frozen=True)
class ChopsticksState:
finger_counts: Tuple[PlayerState, ...] = ()
next_player_index: PlayerIndex = PlayerIndex(0)
num_turns: int = 0
def __str__(self) -> str:
return '\n'.join([f'P{i + 1}: {fingers}' for i, fingers in enumerate(self.finger_counts)])
@dataclass(frozen=True, repr=False)
class ChopsticksAction:
from_hand: HandIndex
to_player: PlayerIndex
to_hand: HandIndex
fingers: FingerCount
def __repr__(self) -> str:
return f'H{self.from_hand + 1} {self.fingers} -> H{self.to_hand + 1} P{self.to_player + 1}'
@dataclass
class Chopsticks(Game[ChopsticksState, ChopsticksAction]):
num_players: int = 2
num_hands: int = 2
fingers_per_hand: int = 4
def get_init_state(self) -> ChopsticksState:
"""
>>> game = Chopsticks()
>>> game.get_init_state()
ChopsticksState(finger_counts=((1, 1), (1, 1)), next_player_index=0, num_turns=0)
"""
return ChopsticksState(finger_counts=((1,) * self.num_hands,) * self.num_players)
def get_actions(self, state: ChopsticksState) -> Generator[ChopsticksAction, None, None]:
"""
>>> game = Chopsticks()
>>> state = game.get_init_state()
>>> len(list(game.get_actions(state)))
6
>>> state = ChopsticksState(((0, 1), (2, 0)), 0)
>>> list(game.get_actions(state))
[H2 1 -> H1 P2]
>>> state = ChopsticksState(((1, 1), (2, 0)), 0)
>>> list(game.get_actions(state))
[H1 1 -> H2 P1, H2 1 -> H1 P1, H1 1 -> H1 P2, H2 1 -> H1 P2]
>>> state = ChopsticksState(((1, 1), (2, 0)), 1)
>>> list(game.get_actions(state))
[H1 2 -> H1 P1, H1 2 -> H2 P1, H1 1 -> H2 P2]
"""
# pylint: disable=too-many-nested-blocks
this_player = state.next_player_index
for to_player in range(self.num_players):
for from_hand in range(self.num_hands):
num_fingers = state.finger_counts[this_player][from_hand]
min_fingers = 1 if to_player == this_player else max(1, num_fingers)
for fingers in range(min_fingers, 1 + num_fingers):
for to_hand in range(self.num_hands):
# Special rules when giving to yourself:
# 1. You can't transfer to the same hand (of course).
# 2. You can't leave the set of finger counts unchanged.
if to_player == this_player:
if from_hand == to_hand:
continue
if set(state.finger_counts[this_player]) == {
x if i not in [from_hand, to_hand] else (x - fingers if i == from_hand else x + fingers)
for i, x in enumerate(state.finger_counts[this_player])
}:
continue
# Special rules when hitting another player:
# 1. You can't hit a 0-finger hand.
else:
if state.finger_counts[to_player][to_hand] == 0:
continue
yield ChopsticksAction(from_hand, PlayerIndex(to_player), to_hand, fingers)
def updated(self, state: ChopsticksState, action: ChopsticksAction) -> ChopsticksState:
"""
>>> game = Chopsticks()
>>> state = game.get_init_state()
>>> actions = list(game.get_actions(state))
>>> actions[0], actions[2]
(H1 1 -> H2 P1, H1 1 -> H1 P2)
>>> game.updated(state, actions[0])
ChopsticksState(finger_counts=((0, 2), (1, 1)), next_player_index=1, num_turns=1)
>>> game.updated(state, actions[2])
ChopsticksState(finger_counts=((1, 1), (2, 1)), next_player_index=1, num_turns=1)
>>> state = ChopsticksState(finger_counts=((4, 1), (1, 1)), next_player_index=1)
>>> game.updated(state, ChopsticksAction(from_hand=0, to_player=0, to_hand=0, fingers=1))
ChopsticksState(finger_counts=((0, 1), (1, 1)), next_player_index=0, num_turns=1)
"""
this_player = state.next_player_index
updated_counts = [list(player_state) for player_state in state.finger_counts]
updated_counts[action.to_player][action.to_hand] += action.fingers
if updated_counts[action.to_player][action.to_hand] > self.fingers_per_hand:
updated_counts[action.to_player][action.to_hand] = 0
if this_player == action.to_player:
updated_counts[this_player][action.from_hand] -= action.fingers
return ChopsticksState(
finger_counts=tuple(tuple(player_state) for player_state in updated_counts),
next_player_index=PlayerIndex((this_player + 1) % self.num_players),
num_turns=state.num_turns + 1
)
def get_score_and_game_over(self, state: ChopsticksState) -> Tuple[float, bool]:
"""
In this game, you only win if you knock everyone else down to all zero fingers.
Assume the turns happen in player order.
        We also cap the game at MAX_ROUNDS rounds per player (MAX_ROUNDS * num_players turns).
>>> game = Chopsticks()
>>> state = ChopsticksState(finger_counts=((4, 1), (1, 1)), next_player_index=1, num_turns=0)
>>> game.get_score_and_game_over(state)
(0, False)
>>> state = ChopsticksState(finger_counts=((0, 0), (1, 1)), next_player_index=0, num_turns=0)
>>> game.get_score_and_game_over(state)
(1, True)
>>> state = ChopsticksState(finger_counts=((0, 0), (1, 1)), next_player_index=1, num_turns=0)
>>> game.get_score_and_game_over(state)
(-1, True)
"""
has_lost = [all(fingers == 0 for fingers in player_state)
for player_state in state.finger_counts]
if has_lost[(state.next_player_index - 1) % self.num_players]:
return -1, True
if sum(1 if l else 0 for l in has_lost) == 1:
return 1, True
return 0, state.num_turns > MAX_ROUNDS * self.num_players
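# Minimal self-play sketch (not part of the original module): plays random legal
# moves until the game ends, just to exercise the API defined above.
if __name__ == '__main__':
    import random

    game = Chopsticks()
    state = game.get_init_state()
    score, game_over = game.get_score_and_game_over(state)
    while not game_over:
        # Pick any legal action for the player to move and advance the state.
        action = random.choice(list(game.get_actions(state)))
        state = game.updated(state, action)
        score, game_over = game.get_score_and_game_over(state)
    print(state)
    print(f'final score {score} after {state.num_turns} turns')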
|
the-stack_106_24677 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for utils."""
import functools
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from protein_lm import domains
from protein_lm import models
from protein_lm import utils
lm_cfg = dict(
batch_size=1, num_layers=2, num_heads=2, emb_dim=32, mlp_dim=32, qkv_dim=32)
lm_cls = functools.partial(models.FlaxLM, **lm_cfg)
class UtilsTest(tf.test.TestCase, parameterized.TestCase):
def test_count_params(self):
domain = domains.FixedLengthDiscreteDomain(length=4, vocab_size=2)
lm = lm_cls(domain=domain)
count = utils.param_count(lm)
self.assertEqual(13059, count)
# Check these methods run.
utils.param_pprint(lm)
sizes = utils.param_reduce(lm, log=True)
self.assertIsInstance(sizes, dict)
@parameterized.parameters((5, 5), (5, 1), (5, 2), (5, 6), (5, 12))
def test_batch_apply(self, batch_size, num_inputs):
def fn(inputs):
return np.power(inputs + 1, 2)
def batch_fn(batched_inputs):
if len(batched_inputs) != batch_size:
raise ValueError('fn() called with a batch that is '
'the wrong size (%d vs. %d).' % (len(batched_inputs),
batch_size))
return fn(batched_inputs)
inputs = np.stack([np.arange(num_inputs), -np.arange(num_inputs)], axis=1)
unbatched_output = fn(inputs)
batched_output = utils.batch_apply(batch_fn, inputs, batch_size)
np.testing.assert_array_equal(unbatched_output, batched_output)
def test_get_normalized_matrix(self):
"""Tests that the normalized matrix is computed correctly."""
domain = domains.FixedLengthDiscreteDomain(
vocab=domains.Vocabulary(tokens=['A', 'B', 'C']),
length=2)
freq_dict = {'A': {'A': 5, 'B': 3, 'C': 1},
'B': {'A': 3, 'B': 5, 'C': 1},
'C': {'A': 1, 'B': 1, 'C': 1}}
matrix = utils.get_normalized_matrix(domain, freq_dict)
expected_matrix = [[1, 0.5, 0], [0.5, 1, 0,], [0, 0, 0]]
self.assertAllEqual(matrix, expected_matrix)
def test_soft_accuracy(self):
"""Tests that soft accuracy is computed correctly."""
domain = domains.FixedLengthDiscreteDomain(
vocab=domains.Vocabulary(tokens=['A', 'B', 'C']),
length=2)
targets = np.array([[0, 1]])
logits = np.log([[[0.9, 0.1], [0.6, 0.4]]])
freq_dict = {'A': {'A': 5, 'B': 3, 'C': 1},
'B': {'A': 3, 'B': 5, 'C': 1},
'C': {'A': 1, 'B': 1, 'C': 1}}
accuracy, denominator = utils.compute_weighted_soft_accuracy(
logits, targets,
weights=None,
matrix=utils.get_normalized_matrix(domain, freq_dict))
self.assertEqual(accuracy / denominator, 0.75)
if __name__ == '__main__':
tf.test.main()
|
the-stack_106_24678 | from opencmiss.zinc.field import Field
from opencmiss.zinc.graphics import Graphics
from opencmiss.zinc.glyph import Glyph
from opencmiss.zinc.material import Material
from opencmiss.zinc.node import Node
from opencmiss.zinc.streamregion import StreaminformationRegion
from opencmiss.utils.zinc import create_finite_element_field
from scaffoldmaker.scaffolds import Scaffolds
from scaffoldmaker.scaffoldpackage import ScaffoldPackage
from ..utils import maths
class ScaffoldModel(object):
def __init__(self, context, region, generator_model, parameters, material_module, scaffold_package,
scaffold_package_class):
self._context = context
self._region = region
# self._region = self._context.createRegion()
# self._region.setName('custom_scaffold')
# self._region.readFile('D:\\sparc\\tmp\\pig_scaffold.exf')
self._generator_model = generator_model
self._material_module = material_module
self._parameters = parameters.keys()
self._coordinate_field = None
_scaffold_package = scaffold_package
_scaffold_package_class = scaffold_package_class
scaffolds = Scaffolds()
self._all_scaffold_types = scaffolds.getScaffoldTypes()
scaffold_type = None
for scaffold in self._all_scaffold_types:
if scaffold == _scaffold_package[-1].getScaffoldType():
scaffold_type = scaffold
if scaffold_type is None:
raise TypeError('Scaffold Type was not found.')
scaffold_package = ScaffoldPackage(scaffold_type)
self._parameterSetName = scaffold_type.getParameterSetNames()[0]
self._scaffold_package = scaffold_package
self._scaffold = None
self._scaffold_options = None
self._temp_region = None
self._annotation_groups = None
self._scene = None
self._scaffold_is_time_aware = None
self._scaffold_fit_parameters = None
self._initialise_surface_material()
# self._timekeeper = self._scene.getTimekeepermodule().getDefaultTimekeeper()
# self._current_time = None
# self._maximum_time = None
# self._time_sequence = None
def get_region(self):
return self._region
# def set_time(self, time):
# self._current_time = time
# self._timekeeper.setTime(time)
def _create_surface_graphics(self):
self._scene.beginChange()
surface = self._scene.createGraphicsSurfaces()
surface.setCoordinateField(self._coordinate_field)
surface.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_SHADED)
surface_material = self._material_module.findMaterialByName('trans_blue')
surface.setMaterial(surface_material)
surface.setName('display_surfaces')
self._scene.endChange()
return surface
def _create_node_graphics(self):
self._scene.beginChange()
self._node_derivative_labels = ['D1', 'D2', 'D3', 'D12', 'D13', 'D23', 'D123']
fm = self._region.getFieldmodule()
fm.beginChange()
cmiss_number = fm.findFieldByName('cmiss_number')
node_points = self._scene.createGraphicsPoints()
node_points.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
node_points.setCoordinateField(self._coordinate_field)
point_attr = node_points.getGraphicspointattributes()
point_attr.setBaseSize([500, 500, 500])
point_attr.setGlyphShapeType(Glyph.SHAPE_TYPE_SPHERE)
node_points.setMaterial(self._material_module.findMaterialByName('white'))
node_points.setName('display_node_points')
node_numbers = self._scene.createGraphicsPoints()
node_numbers.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
node_numbers.setCoordinateField(self._coordinate_field)
point_attr = node_numbers.getGraphicspointattributes()
point_attr.setLabelField(cmiss_number)
point_attr.setGlyphShapeType(Glyph.SHAPE_TYPE_NONE)
node_numbers.setMaterial(self._material_module.findMaterialByName('green'))
node_numbers.setName('display_node_numbers')
node_derivative_fields = [
fm.createFieldNodeValue(self._coordinate_field, Node.VALUE_LABEL_D_DS1, 1),
fm.createFieldNodeValue(self._coordinate_field, Node.VALUE_LABEL_D_DS2, 1),
fm.createFieldNodeValue(self._coordinate_field, Node.VALUE_LABEL_D_DS3, 1),
fm.createFieldNodeValue(self._coordinate_field, Node.VALUE_LABEL_D2_DS1DS2, 1),
fm.createFieldNodeValue(self._coordinate_field, Node.VALUE_LABEL_D2_DS1DS3, 1),
fm.createFieldNodeValue(self._coordinate_field, Node.VALUE_LABEL_D2_DS2DS3, 1),
fm.createFieldNodeValue(self._coordinate_field, Node.VALUE_LABEL_D3_DS1DS2DS3, 1)
]
node_derivative_material_names = ['gold', 'silver', 'green', 'cyan', 'magenta', 'yellow', 'blue']
derivative_scales = [1.0, 1.0, 1.0, 0.5, 0.5, 0.5, 0.25]
for i in range(len(self._node_derivative_labels)):
node_derivative_label = self._node_derivative_labels[i]
node_derivatives = self._scene.createGraphicsPoints()
node_derivatives.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
node_derivatives.setCoordinateField(self._coordinate_field)
point_attr = node_derivatives.getGraphicspointattributes()
point_attr.setGlyphShapeType(Glyph.SHAPE_TYPE_ARROW_SOLID)
point_attr.setOrientationScaleField(node_derivative_fields[i])
point_attr.setBaseSize([0.0, 50, 50])
point_attr.setScaleFactors([derivative_scales[i], 0.0, 0.0])
material = self._material_module.findMaterialByName(node_derivative_material_names[i])
node_derivatives.setMaterial(material)
node_derivatives.setSelectedMaterial(material)
node_derivatives.setName('display_node_derivatives' + node_derivative_label)
fm.endChange()
self._scene.endChange()
return
def _create_line_graphics(self):
self._scene.beginChange()
lines = self._scene.createGraphicsLines()
        material_module = self._context.getMaterialmodule()
        lines.setCoordinateField(self._coordinate_field)
        lines.setName('display_lines')
        line_material = material_module.findMaterialByName('white')
        lines.setMaterial(line_material)
self._scene.endChange()
return lines
def create_scaffold_graphics(self):
# self._create_node_graphics()
self._create_line_graphics()
self._create_surface_graphics()
def _get_mesh(self):
parent_region = self._region
fm = parent_region.getFieldmodule()
for dimension in range(3, 0, -1):
mesh = fm.findMeshByDimension(dimension)
if mesh.getSize() > 0:
return mesh
raise ValueError('Model contains no mesh')
def get_model_coordinate_field(self):
mesh = self._get_mesh()
element = mesh.createElementiterator().next()
if not element.isValid():
raise ValueError('Model contains no elements')
fm = self._region.getFieldmodule()
cache = fm.createFieldcache()
cache.setElement(element)
field_iter = fm.createFielditerator()
field = field_iter.next()
while field.isValid():
if field.isTypeCoordinate() and (field.getNumberOfComponents() <= 3):
if field.isDefinedAtLocation(cache):
return field
field = field_iter.next()
raise ValueError('Could not determine model coordinate field')
def _get_node_coordinates_range(self, time=0):
fm = self._coordinate_field.getFieldmodule()
fm.beginChange()
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
min_coordinates = fm.createFieldNodesetMinimum(self._coordinate_field, nodes)
max_coordinates = fm.createFieldNodesetMaximum(self._coordinate_field, nodes)
components_count = self._coordinate_field.getNumberOfComponents()
cache = fm.createFieldcache()
cache.setTime(time)
result, min_x = min_coordinates.evaluateReal(cache, components_count)
result, max_x = max_coordinates.evaluateReal(cache, components_count)
fm.endChange()
return min_x, max_x
def get_range(self, time=0):
return self._get_node_coordinates_range(time=time)
def get_scale(self):
minimums, maximums = self._get_node_coordinates_range()
return maths.sub(minimums, maximums)
def get_coordinate_field(self):
return self._coordinate_field
def get_scaffold_options(self):
return self._scaffold_options
def initialise_scaffold(self):
# self._coordinate_field = self.get_model_coordinate_field()
self._coordinate_field = self._region.getFieldmodule().findFieldByName('coordinates')
print('Coordinate = ', self._coordinate_field.isValid())
def _update(self):
self._scene.beginChange()
for name in ['display_lines', 'display_surfaces']:
graphics = self._scene.findGraphicsByName(name)
graphics.setCoordinateField(self._coordinate_field)
self._scene.endChange()
def get_scaffold_package(self):
return self._scaffold_package
def _get_scaffold_package_settings(self):
return self._scaffold_package.getScaffoldSettings()
def _get_scaffold_package_type(self):
return self._scaffold_package.getScaffoldType()
def get_edit_scaffold_settings(self):
return self._scaffold_package.getScaffoldSettings()
def get_edit_scaffold_option(self, key):
# print(self.get_edit_scaffold_settings()[key])
return self.get_edit_scaffold_settings()[key]
def generate_mesh_for_fitting(self):
scaffold_package = self._scaffold_package
# if self._region:
# self._region.removeChild(self._region)
# self._region = self._region.createChild('fitting_region')
scaffold_package.getScaffoldType().generateMesh(self._region, self.get_edit_scaffold_settings())
self._update()
def _initialise_surface_material(self):
self._material_module = self._context.getMaterialmodule()
self._material_module.beginChange()
solid_blue = self._material_module.createMaterial()
solid_blue.setName('solid_blue')
solid_blue.setManaged(True)
solid_blue.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [0.0, 0.2, 0.6])
solid_blue.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [0.0, 0.7, 1.0])
solid_blue.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [0.0, 0.0, 0.0])
solid_blue.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [0.1, 0.1, 0.1])
solid_blue.setAttributeReal(Material.ATTRIBUTE_SHININESS, 0.2)
trans_blue = self._material_module.createMaterial()
trans_blue.setName('trans_blue')
trans_blue.setManaged(True)
trans_blue.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [0.0, 0.2, 0.6])
trans_blue.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [0.0, 0.7, 1.0])
trans_blue.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [0.0, 0.0, 0.0])
trans_blue.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [0.1, 0.1, 0.1])
trans_blue.setAttributeReal(Material.ATTRIBUTE_ALPHA, 0.3)
trans_blue.setAttributeReal(Material.ATTRIBUTE_SHININESS, 0.2)
glyph_module = self._context.getGlyphmodule()
glyph_module.defineStandardGlyphs()
self._material_module.defineStandardMaterials()
solid_tissue = self._material_module.createMaterial()
solid_tissue.setName('heart_tissue')
solid_tissue.setManaged(True)
solid_tissue.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [0.913, 0.541, 0.33])
solid_tissue.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [0.0, 0.0, 0.0])
solid_tissue.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [0.2, 0.2, 0.3])
solid_tissue.setAttributeReal(Material.ATTRIBUTE_ALPHA, 1.0)
solid_tissue.setAttributeReal(Material.ATTRIBUTE_SHININESS, 0.6)
self._material_module.endChange()
def set_coordinate_field(self, field):
if self._coordinate_field is not None:
self._coordinate_field = None
self._coordinate_field = field
def set_scaffold_graphics_post_rotate(self, field):
self._scene.beginChange()
for name in ['display_lines', 'display_surfaces']:
graphics = self._scene.findGraphicsByName(name)
graphics.setCoordinateField(field)
self._scene.endChange()
self.set_coordinate_field(field)
def transfer_temp_into_main(self, time):
node_descriptions = _extract_node_descriptions(self._temp_region)
if not self._scaffold_is_time_aware:
self._undefine_scaffold_nodes()
self._scaffold_is_time_aware = True
_read_node_descriptions(self._region, node_descriptions, time)
def generate_temp_mesh(self, fit_options_array=None):
fit_options = {}
if fit_options_array is not None:
for index in range(len(self._parameters)):
fit_options[self._parameters[index]] = fit_options_array[index]
temp_options = self.get_scaffold_options().copy()
temp_options.update(fit_options)
self._temp_region = self._region.createRegion()
self._scaffold.generateMesh(self._temp_region, temp_options)
def set_scaffold_options(self, options):
self._scaffold_options = options
parameters = []
for option in self._parameters:
parameters.append(self._scaffold_options[option])
self._scaffold_fit_parameters = parameters
def initialise_scene(self):
self._scene = self._region.getScene()
def set_scaffold(self, scaffold):
self._scaffold = scaffold
def _undefine_scaffold_nodes(self):
field_module = self._region.getFieldmodule()
field_module.beginChange()
node_set = field_module.findNodesetByName('nodes')
node_template = node_set.createNodetemplate()
node_template.undefineField(self._coordinate_field)
node_iterator = node_set.createNodeiterator()
node = node_iterator.next()
while node.isValid():
node.merge(node_template)
node = node_iterator.next()
field_module.endChange()
def write_model(self, filename):
self._region.writeFile(filename)
def _extract_node_descriptions(region):
stream_information = region.createStreaminformationRegion()
memory_resource = stream_information.createStreamresourceMemory()
stream_information.setResourceDomainTypes(memory_resource, Field.DOMAIN_TYPE_NODES)
region.write(stream_information)
_, buffer_contents = memory_resource.getBuffer()
return buffer_contents
def _read_node_descriptions(region, buffer, time):
stream_information = region.createStreaminformationRegion()
memory_resource = stream_information.createStreamresourceMemoryBuffer(buffer)
stream_information.setResourceDomainTypes(memory_resource, Field.DOMAIN_TYPE_NODES)
stream_information.setResourceAttributeReal(memory_resource, StreaminformationRegion.ATTRIBUTE_TIME, time)
region.read(stream_information)
def _read_aligner_description(scaffold_region, scaffold_description):
scaffold_stream_information = scaffold_region.createStreaminformationRegion()
memory_resource = scaffold_stream_information.createStreamresourceMemoryBuffer(scaffold_description['elements3D'])
scaffold_stream_information.setResourceDomainTypes(memory_resource, Field.DOMAIN_TYPE_MESH3D)
memory_resource = scaffold_stream_information.createStreamresourceMemoryBuffer(scaffold_description['elements2D'])
scaffold_stream_information.setResourceDomainTypes(memory_resource, Field.DOMAIN_TYPE_MESH2D)
memory_resource = scaffold_stream_information.createStreamresourceMemoryBuffer(scaffold_description['elements1D'])
scaffold_stream_information.setResourceDomainTypes(memory_resource, Field.DOMAIN_TYPE_MESH1D)
memory_resource = scaffold_stream_information.createStreamresourceMemoryBuffer(scaffold_description['nodes'])
scaffold_stream_information.setResourceDomainTypes(memory_resource, Field.DOMAIN_TYPE_NODES)
return scaffold_stream_information
|
the-stack_106_24681 | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('docs/HISTORY.rst') as history_file:
history = history_file.read()
with open('requirements.txt') as req:
requirements = req.read()
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
author="USDA-ARS-NWRC",
author_email='[email protected]',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Python package for comparing dataset changes in a repos that have output files they check",
install_requires=requirements,
license="CC0 1.0",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='goldmeister',
name='goldmeister',
packages=find_packages(include=['goldmeister', 'goldmeister.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/USDA-ARS-NWRC/goldmeister',
version='0.2.0',
zip_safe=False,
)
|
the-stack_106_24682 | """
This part of the flask app responds to api requests
"""
from flask import Blueprint, jsonify, request
from remote_camera.camera import CameraReader
from io import BytesIO
import base64
bp = Blueprint("api", __name__, url_prefix="/api/v1.0/")
@bp.route("/get_image", defaults={'width': None, 'height': None})
@bp.route("/get_image/<int:width>,<int:height>")
def get_image(width, height):
"""
Get an image from the camera in a json response in base64 encoding. If width and height are provided, return a scaled image.
"""
response = {"success": True}
cam = CameraReader()
image_size = None
if width is not None and height is not None:
image_size = (width, height)
try:
# Resolution setting not currently working, don't turn on for now.
#current_resolution = cam.resolution
# if current_resolution[0] != image_size[0] or current_resolution[1] != image_size[1]:
# cam.set_resolution(image_size[0], image_size[1])
# time.sleep(0.1) # wait for buffer to clear
image_array = cam.capture()
image = cam.array_to_image(image_array)
response["image"] = 'data:image/png;base64,' + \
get_base_64_image(image, image_size).decode()
except:
response["success"] = False
response["message"] = "Failed to capture an image."
return jsonify(response)
@bp.route("/exposure", defaults={'exposure': None})
@bp.route("/exposure/<int:exposure>")
def exposure(exposure):
"""
Setter or getter for the camera exposure value.
"""
response = {"success": True}
cam = CameraReader()
if not exposure:
try:
response["exposure"] = round(cam.exposure)
except:
response["success"] = False
response["message"] = "Failed to read exposure from camera."
else:
try:
cam.exposure = exposure
response["exposure"] = round(cam.exposure)
except:
response["success"] = False
response["message"] = "Failed to set exposure from camera."
return jsonify(response)
@bp.route("/start_capture")
def start():
"""
Start camera capture.
"""
cam = CameraReader()
response = {"success": True}
try:
        cam.start_capture()  # was stop_capture(); assumed copy-paste slip, mirroring stop() below
except:
response["success"] = False
response["message"] = "Failed to start camera capture."
return jsonify(response)
@bp.route("/stop_capture")
def stop():
"""
Stop camera capture.
"""
cam = CameraReader()
response = {"success": True}
try:
cam.stop_capture()
except:
response["success"] = False
response["message"] = "Failed to stop camera capture."
return jsonify(response)
def get_base_64_image(image, size=None):
"""
Return the image resized with base64 encoding, ready to be sent with json.
"""
if size:
new_image = image.resize(size)
else:
new_image = image
output_stream = BytesIO()
new_image.save(output_stream, format='jpeg')
output_stream.seek(0)
return base64.b64encode(output_stream.getvalue())
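# A minimal client-side sketch for the endpoints above (added for illustration; the
# host/port are placeholders for wherever the Flask app serving this blueprint runs).
if __name__ == '__main__':
    import requests

    base_url = "http://localhost:5000/api/v1.0"
    payload = requests.get(base_url + "/get_image/320,240").json()
    if payload["success"]:
        # Strip the "data:image/jpeg;base64," prefix, then decode the raw bytes.
        _, encoded = payload["image"].split(",", 1)
        with open("frame.jpg", "wb") as handle:
            handle.write(base64.b64decode(encoded))
    print(requests.get(base_url + "/exposure/2000").json())  # set exposure to 2000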
|
the-stack_106_24683 | from django.conf import settings
from django.forms.renderers import TemplatesSetting
from django.contrib.gis.forms import widgets
class LeafletPointWidget(widgets.BaseGeometryWidget):
template_name = 'leaflet/point_widget.html'
def render(self, name, value, attrs=None, renderer=None):
# add point
if value:
attrs.update({
'point': {'x': value.x,
'y': value.y,
'z': value.z,
'srid': value.srid}
})
# add map_provider
attrs.update({
'map_provider': {'provider': settings.MAP_PROVIDER, 'access_token': settings.MAP_PROVIDER_TOKEN}
})
return super().render(name, value, attrs, TemplatesSetting())
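# A minimal GeoDjango form wiring for the widget above (added for illustration; the form
# and field names are placeholders, and settings.MAP_PROVIDER / MAP_PROVIDER_TOKEN must be
# configured before rendering).
from django.contrib.gis import forms as gis_forms

class LocationForm(gis_forms.Form):
    # Rendered as a Leaflet map for picking a single point.
    position = gis_forms.PointField(widget=LeafletPointWidget())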
|
the-stack_106_24684 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import ray
from ray.rllib.dqn import models
from ray.rllib.dqn.common.wrappers import wrap_dqn
from ray.rllib.dqn.common.schedules import LinearSchedule
from ray.rllib.optimizers import SampleBatch, TFMultiGPUSupport
class DQNEvaluator(TFMultiGPUSupport):
"""The base DQN Evaluator that does not include the replay buffer.
TODO(rliaw): Support observation/reward filters?"""
def __init__(self, registry, env_creator, config, logdir):
env = env_creator(config["env_config"])
env = wrap_dqn(registry, env, config["model"])
self.env = env
self.config = config
tf_config = tf.ConfigProto(**config["tf_session_args"])
self.sess = tf.Session(config=tf_config)
self.dqn_graph = models.DQNGraph(registry, env, config, logdir)
# Create the schedule for exploration starting from 1.
self.exploration = LinearSchedule(
schedule_timesteps=int(
config["exploration_fraction"] *
config["schedule_max_timesteps"]),
initial_p=1.0,
final_p=config["exploration_final_eps"])
# Initialize the parameters and copy them to the target network.
self.sess.run(tf.global_variables_initializer())
self.dqn_graph.update_target(self.sess)
self.global_timestep = 0
self.local_timestep = 0
# Note that this encompasses both the Q and target network
self.variables = ray.experimental.TensorFlowVariables(
tf.group(self.dqn_graph.q_t, self.dqn_graph.q_tp1), self.sess)
self.episode_rewards = [0.0]
self.episode_lengths = [0.0]
self.saved_mean_reward = None
self.obs = self.env.reset()
def set_global_timestep(self, global_timestep):
self.global_timestep = global_timestep
def update_target(self):
self.dqn_graph.update_target(self.sess)
def sample(self):
obs, actions, rewards, new_obs, dones = [], [], [], [], []
for _ in range(self.config["sample_batch_size"]):
ob, act, rew, ob1, done = self._step(self.global_timestep)
obs.append(ob)
actions.append(act)
rewards.append(rew)
new_obs.append(ob1)
dones.append(done)
return SampleBatch({
"obs": obs, "actions": actions, "rewards": rewards,
"new_obs": new_obs, "dones": dones,
"weights": np.ones_like(rewards)})
def compute_gradients(self, samples):
_, grad = self.dqn_graph.compute_gradients(
self.sess, samples["obs"], samples["actions"], samples["rewards"],
samples["new_obs"], samples["dones"], samples["weights"])
return grad
def apply_gradients(self, grads):
self.dqn_graph.apply_gradients(self.sess, grads)
def get_weights(self):
return self.variables.get_weights()
def set_weights(self, weights):
self.variables.set_weights(weights)
def tf_loss_inputs(self):
return self.dqn_graph.loss_inputs
def build_tf_loss(self, input_placeholders):
return self.dqn_graph.build_loss(*input_placeholders)
def _step(self, global_timestep):
"""Takes a single step, and returns the result of the step."""
action = self.dqn_graph.act(
self.sess, np.array(self.obs)[None],
self.exploration.value(global_timestep))[0]
new_obs, rew, done, _ = self.env.step(action)
ret = (self.obs, action, rew, new_obs, float(done))
self.obs = new_obs
self.episode_rewards[-1] += rew
self.episode_lengths[-1] += 1
if done:
self.obs = self.env.reset()
self.episode_rewards.append(0.0)
self.episode_lengths.append(0.0)
self.local_timestep += 1
return ret
def stats(self):
mean_100ep_reward = round(np.mean(self.episode_rewards[-101:-1]), 5)
mean_100ep_length = round(np.mean(self.episode_lengths[-101:-1]), 5)
exploration = self.exploration.value(self.global_timestep)
return {
"mean_100ep_reward": mean_100ep_reward,
"mean_100ep_length": mean_100ep_length,
"num_episodes": len(self.episode_rewards),
"exploration": exploration,
"local_timestep": self.local_timestep,
}
def save(self):
return [
self.exploration,
self.episode_rewards,
self.episode_lengths,
self.saved_mean_reward,
self.obs]
def restore(self, data):
self.exploration = data[0]
self.episode_rewards = data[1]
self.episode_lengths = data[2]
self.saved_mean_reward = data[3]
self.obs = data[4]
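# An illustrative single-process loop over the evaluator's public API (assumed usage,
# shown as comments because `registry`, `env_creator`, `config` and `logdir` are
# placeholders normally supplied by the DQN agent; "target_network_update_freq" is an
# assumed config key, not one referenced in this file).
#
#   evaluator = DQNEvaluator(registry, env_creator, config, logdir)
#   for timestep in range(0, config["schedule_max_timesteps"],
#                         config["sample_batch_size"]):
#       evaluator.set_global_timestep(timestep)
#       batch = evaluator.sample()                      # roll out sample_batch_size steps
#       evaluator.apply_gradients(evaluator.compute_gradients(batch))
#       if timestep % config["target_network_update_freq"] == 0:
#           evaluator.update_target()                   # sync the target network
#   print(evaluator.stats())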
|
the-stack_106_24686 | from django.urls import path
from .views import PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView
from . import views
urlpatterns = [
path('', PostListView.as_view(), name='blog-home'),
path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
path('post/new/', PostCreateView.as_view(), name='post-create'),
path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
path('about/', views.about, name='blog-about'),
] |
the-stack_106_24692 | # Benchmark function 12
# Generalized Penalized Function No.01
# HW dimension: 30
# Min = 0
# Range [-50,50]
# Reference: https://al-roomi.org/benchmarks/unconstrained/n-dimensions/172-generalized-penalized-function-no-1
import math
import numpy as np
name = "F12"
l_bound = -50
u_bound = 50
dim = 30
opt = 0
def u(u_X, a, k, m):
X = u_X.copy()
for index in range(X.size):
element = X[index]
if element > a:
X[index] = k * (element - a)**m
elif element < -1 * a:
X[index] = k * (-1 * element - a)**m
else:
X[index] = 0
return X
def func(X):
Y = 1 + (X+1)/4
f_term = 10 * math.sin(math.pi*Y[0])**2
m_1 = (Y[0:-1] - 1)**2
m_2 = 1 + 10 * np.sin(math.pi*Y[1:])**2
m_term = np.sum(np.multiply(m_1, m_2))
l_term = (Y[-1]-1)**2
front = math.pi * (f_term + m_term + l_term) / X.size
end = np.sum(u(X, 10, 100, 4))
result = front + end
return result
if __name__ == '__main__':
X = np.arange(10)
X = X * 0.3
print(X)
result = func(X)
print(result) |
the-stack_106_24693 |
import io
import setuptools
with io.open('README.rst', 'r') as readme:
try:
long_description = readme.read()
except IOError:
long_description = ''
setup_params = dict(
author='Alex Malykh',
author_email='[email protected]',
name='cmsplugin-css-background',
use_scm_version=dict(root='.', relative_to=__file__),
description='A django CMS plugin for managing CSS background styles',
long_description=long_description,
license='MIT License',
url='https://github.com/alexmalykh/cmsplugin-css-background',
packages=setuptools.find_packages(),
install_requires=['django-cms>=3.3,<4.0', 'django-sekizai>=0.7'],
setup_requires=['setuptools_scm'],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
if __name__ == '__main__':
setuptools.setup(**setup_params)
|
the-stack_106_24694 | import os
from src.loaders.depth_image.CameraConfig import CameraConfig
from src.loaders.depth_image.CameraIntrinsics import CameraIntrinsics
from src.loaders.depth_image.ImageLoader import ImageLoader
class TumLoader(ImageLoader):
def __init__(self, path):
super().__init__(path)
def _provide_config(self) -> CameraConfig:
return self.TumCameraConfig()
def _provide_rgb_and_depth_path(self, path):
depth_path = os.path.join(path, "depth")
rgb_path = os.path.join(path, "rgb")
return rgb_path, depth_path
@staticmethod
def __filenames_sorted_mapper(filename):
return int(filename.split(".")[0])
def _provide_filenames(self, rgb_path, depth_path) -> (list, list):
rgb_filenames = os.listdir(rgb_path)
depth_filenames = os.listdir(depth_path)
rgb_filenames = sorted(rgb_filenames, key=TumLoader.__filenames_sorted_mapper)
depth_filenames = sorted(depth_filenames, key=TumLoader.__filenames_sorted_mapper)
return rgb_filenames, depth_filenames
def _match_rgb_with_depth(self, rgb_filenames, depth_filenames) -> list:
depth_to_rgb_index = []
rgb_index = 0
depth_index = 0
prev_delta = float('inf')
while depth_index < len(depth_filenames) and rgb_index < len(rgb_filenames):
rgb_timestamp = float(rgb_filenames[rgb_index][:-4])
depth_timestamp = float(depth_filenames[depth_index][:-4])
delta = abs(depth_timestamp - rgb_timestamp)
if rgb_timestamp < depth_timestamp:
prev_delta = delta
rgb_index += 1
continue
if prev_delta < delta:
depth_to_rgb_index.append(rgb_index - 1)
else:
depth_to_rgb_index.append(rgb_index)
depth_index += 1
# Fix case when the last timestamp was for depth img
while depth_index < len(depth_filenames):
depth_to_rgb_index.append(rgb_index - 1)
depth_index += 1
return depth_to_rgb_index
class TumCameraConfig(CameraConfig):
def get_cam_intrinsic(self, image_shape=(480, 640)) -> CameraIntrinsics:
return CameraIntrinsics(
width=image_shape[1],
height=image_shape[0],
fx=591.1, # X-axis focal length
fy=590.1, # Y-axis focal length
cx=331.0, # X-axis principle point
cy=234.0, # Y-axis principle point
factor=5000 # for the 16-bit PNG files
)
def get_initial_pcd_transform(self):
return [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
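# A small self-contained check of the timestamp matching above (added for illustration):
# TUM filenames are "<timestamp>.png", and each depth frame is paired with the index of
# the closest-in-time RGB frame. _match_rgb_with_depth never touches instance state, so
# it can be exercised here without constructing a full loader.
if __name__ == '__main__':
    rgb_names = ['1.000000.png', '2.000000.png', '3.000000.png']
    depth_names = ['1.100000.png', '2.900000.png']
    print(TumLoader._match_rgb_with_depth(None, rgb_names, depth_names))  # -> [0, 2]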
|
the-stack_106_24695 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Jinja2 template rendering context helper."""
import contextlib
import copy
import functools
import warnings
from typing import (
AbstractSet,
Any,
Container,
Dict,
ItemsView,
Iterator,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
ValuesView,
)
import lazy_object_proxy
from airflow.utils.types import NOTSET
# NOTE: Please keep this in sync with Context in airflow/utils/context.pyi.
KNOWN_CONTEXT_KEYS = {
"conf",
"conn",
"dag",
"dag_run",
"data_interval_end",
"data_interval_start",
"ds",
"ds_nodash",
"execution_date",
"exception",
"inlets",
"logical_date",
"macros",
"next_ds",
"next_ds_nodash",
"next_execution_date",
"outlets",
"params",
"prev_data_interval_start_success",
"prev_data_interval_end_success",
"prev_ds",
"prev_ds_nodash",
"prev_execution_date",
"prev_execution_date_success",
"prev_start_date_success",
"run_id",
"task",
"task_instance",
"task_instance_key_str",
"test_mode",
"templates_dict",
"ti",
"tomorrow_ds",
"tomorrow_ds_nodash",
"ts",
"ts_nodash",
"ts_nodash_with_tz",
"try_number",
"var",
"yesterday_ds",
"yesterday_ds_nodash",
}
class VariableAccessor:
"""Wrapper to access Variable values in template."""
def __init__(self, *, deserialize_json: bool) -> None:
self._deserialize_json = deserialize_json
self.var: Any = None
def __getattr__(self, key: str) -> Any:
from airflow.models.variable import Variable
self.var = Variable.get(key, deserialize_json=self._deserialize_json)
return self.var
def __repr__(self) -> str:
return str(self.var)
def get(self, key, default: Any = NOTSET) -> Any:
from airflow.models.variable import Variable
if default is NOTSET:
return Variable.get(key, deserialize_json=self._deserialize_json)
return Variable.get(key, default, deserialize_json=self._deserialize_json)
class ConnectionAccessor:
"""Wrapper to access Connection entries in template."""
def __init__(self) -> None:
self.var: Any = None
def __getattr__(self, key: str) -> Any:
from airflow.models.connection import Connection
self.var = Connection.get_connection_from_secrets(key)
return self.var
def __repr__(self) -> str:
return str(self.var)
def get(self, key: str, default_conn: Any = None) -> Any:
from airflow.exceptions import AirflowNotFoundException
from airflow.models.connection import Connection
try:
return Connection.get_connection_from_secrets(key)
except AirflowNotFoundException:
return default_conn
class AirflowContextDeprecationWarning(DeprecationWarning):
"""Warn for usage of deprecated context variables in a task."""
def _create_deprecation_warning(key: str, replacements: List[str]) -> DeprecationWarning:
message = f"Accessing {key!r} from the template is deprecated and will be removed in a future version."
if not replacements:
return AirflowContextDeprecationWarning(message)
display_except_last = ", ".join(repr(r) for r in replacements[:-1])
if display_except_last:
message += f" Please use {display_except_last} or {replacements[-1]!r} instead."
else:
message += f" Please use {replacements[-1]!r} instead."
return AirflowContextDeprecationWarning(message)
class Context(MutableMapping[str, Any]):
"""Jinja2 template context for task rendering.
This is a mapping (dict-like) class that can lazily emit warnings when
(and only when) deprecated context keys are accessed.
"""
_DEPRECATION_REPLACEMENTS: Dict[str, List[str]] = {
"execution_date": ["data_interval_start", "logical_date"],
"next_ds": ["{{ data_interval_end | ds }}"],
"next_ds_nodash": ["{{ data_interval_end | ds_nodash }}"],
"next_execution_date": ["data_interval_end"],
"prev_ds": [],
"prev_ds_nodash": [],
"prev_execution_date": [],
"prev_execution_date_success": ["prev_data_interval_start_success"],
"tomorrow_ds": [],
"tomorrow_ds_nodash": [],
"yesterday_ds": [],
"yesterday_ds_nodash": [],
}
def __init__(self, context: Optional[MutableMapping[str, Any]] = None, **kwargs: Any) -> None:
self._context = context or {}
if kwargs:
self._context.update(kwargs)
self._deprecation_replacements = self._DEPRECATION_REPLACEMENTS.copy()
def __repr__(self) -> str:
return repr(self._context)
def __reduce_ex__(self, protocol: int) -> Tuple[Any, ...]:
"""Pickle the context as a dict.
We are intentionally going through ``__getitem__`` in this function,
instead of using ``items()``, to trigger deprecation warnings.
"""
items = [(key, self[key]) for key in self._context]
return dict, (items,)
def __copy__(self) -> "Context":
new = type(self)(copy.copy(self._context))
new._deprecation_replacements = self._deprecation_replacements.copy()
return new
def __getitem__(self, key: str) -> Any:
with contextlib.suppress(KeyError):
warnings.warn(_create_deprecation_warning(key, self._deprecation_replacements[key]))
with contextlib.suppress(KeyError):
return self._context[key]
raise KeyError(key)
def __setitem__(self, key: str, value: Any) -> None:
self._deprecation_replacements.pop(key, None)
self._context[key] = value
def __delitem__(self, key: str) -> None:
self._deprecation_replacements.pop(key, None)
del self._context[key]
def __contains__(self, key: object) -> bool:
return key in self._context
def __iter__(self) -> Iterator[str]:
return iter(self._context)
def __len__(self) -> int:
return len(self._context)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Context):
return NotImplemented
return self._context == other._context
def __ne__(self, other: Any) -> bool:
if not isinstance(other, Context):
return NotImplemented
return self._context != other._context
def keys(self) -> AbstractSet[str]:
return self._context.keys()
def items(self):
return ItemsView(self._context)
def values(self):
return ValuesView(self._context)
def context_merge(context: "Context", *args: Any, **kwargs: Any) -> None:
"""Merge parameters into an existing context.
Like ``dict.update()`` , this take the same parameters, and updates
``context`` in-place.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
context.update(*args, **kwargs)
def context_copy_partial(source: "Context", keys: Container[str]) -> "Context":
"""Create a context by copying items under selected keys in ``source``.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
new = Context({k: v for k, v in source._context.items() if k in keys})
new._deprecation_replacements = source._deprecation_replacements.copy()
return new
def lazy_mapping_from_context(source: Context) -> Mapping[str, Any]:
"""Create a mapping that wraps deprecated entries in a lazy object proxy.
This further delays deprecation warning to until when the entry is actually
used, instead of when it's accessed in the context. The result is useful for
passing into a callable with ``**kwargs``, which would unpack the mapping
too eagerly otherwise.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
def _deprecated_proxy_factory(k: str, v: Any) -> Any:
replacements = source._deprecation_replacements[k]
warnings.warn(_create_deprecation_warning(k, replacements))
return v
def _create_value(k: str, v: Any) -> Any:
if k not in source._deprecation_replacements:
return v
factory = functools.partial(_deprecated_proxy_factory, k, v)
return lazy_object_proxy.Proxy(factory)
return {k: _create_value(k, v) for k, v in source._context.items()}
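# A minimal sketch of the deprecation behaviour (added for illustration, not part of the
# original module): reading a deprecated key such as "execution_date" emits an
# AirflowContextDeprecationWarning pointing at its replacements, while a non-deprecated
# key such as "ds" does not.
if __name__ == "__main__":
    from datetime import datetime

    ctx = Context({"execution_date": datetime(2021, 1, 1), "ds": "2021-01-01"})
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _ = ctx["execution_date"]  # deprecated -> warns
        _ = ctx["ds"]              # not deprecated -> no warning
    assert any(issubclass(w.category, AirflowContextDeprecationWarning) for w in caught)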
|
the-stack_106_24696 | import functools
from typing import Callable
__all__ = ('Achievement',)
class Achievement:
"""A class to represent a single osu! achievement."""
__slots__ = ('id', 'file', 'name', 'desc', 'cond')
def __init__(self, id: int, file: str, name: str,
desc: str, cond: Callable) -> None:
self.id = id
self.file = file
self.name = name
self.desc = desc
self.cond = cond
@functools.cache
def __repr__(self) -> str:
return f'{self.file}+{self.name}+{self.desc}'
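# A tiny illustration of constructing an achievement (added for illustration; the
# condition signature below is a placeholder, not the project's real predicate contract).
if __name__ == '__main__':
    rising_star = Achievement(
        id=1, file='osu-skill-pass-1', name='Rising Star',
        desc="Can't go forward without the first steps.",
        cond=lambda score: score.pp >= 20.0,  # hypothetical predicate
    )
    print(rising_star)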
|
the-stack_106_24698 | """Support for Iperf3 network measurement tool."""
from __future__ import annotations
from datetime import timedelta
import logging
import iperf3
import voluptuous as vol
from homeassistant.components.sensor import (
DOMAIN as SENSOR_DOMAIN,
SensorEntityDescription,
)
from homeassistant.const import (
CONF_HOST,
CONF_HOSTS,
CONF_MONITORED_CONDITIONS,
CONF_PORT,
CONF_PROTOCOL,
CONF_SCAN_INTERVAL,
DATA_RATE_MEGABITS_PER_SECOND,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
DOMAIN = "iperf3"
DATA_UPDATED = f"{DOMAIN}_data_updated"
_LOGGER = logging.getLogger(__name__)
CONF_DURATION = "duration"
CONF_PARALLEL = "parallel"
CONF_MANUAL = "manual"
DEFAULT_DURATION = 10
DEFAULT_PORT = 5201
DEFAULT_PARALLEL = 1
DEFAULT_PROTOCOL = "tcp"
DEFAULT_INTERVAL = timedelta(minutes=60)
ATTR_DOWNLOAD = "download"
ATTR_UPLOAD = "upload"
ATTR_VERSION = "Version"
ATTR_HOST = "host"
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key=ATTR_DOWNLOAD,
name=ATTR_DOWNLOAD.capitalize(),
native_unit_of_measurement=DATA_RATE_MEGABITS_PER_SECOND,
),
SensorEntityDescription(
key=ATTR_UPLOAD,
name=ATTR_UPLOAD.capitalize(),
native_unit_of_measurement=DATA_RATE_MEGABITS_PER_SECOND,
),
)
SENSOR_KEYS: list[str] = [desc.key for desc in SENSOR_TYPES]
PROTOCOLS = ["tcp", "udp"]
HOST_CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DURATION, default=DEFAULT_DURATION): vol.Range(5, 10),
vol.Optional(CONF_PARALLEL, default=DEFAULT_PARALLEL): vol.Range(1, 20),
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.In(PROTOCOLS),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOSTS): vol.All(cv.ensure_list, [HOST_CONFIG_SCHEMA]),
vol.Optional(CONF_MONITORED_CONDITIONS, default=SENSOR_KEYS): vol.All(
cv.ensure_list, [vol.In(SENSOR_KEYS)]
),
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_MANUAL, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST, default=None): cv.string})
async def async_setup(hass, config):
"""Set up the iperf3 component."""
hass.data[DOMAIN] = {}
conf = config[DOMAIN]
for host in conf[CONF_HOSTS]:
data = hass.data[DOMAIN][host[CONF_HOST]] = Iperf3Data(hass, host)
if not conf[CONF_MANUAL]:
async_track_time_interval(hass, data.update, conf[CONF_SCAN_INTERVAL])
def update(call):
"""Service call to manually update the data."""
called_host = call.data[ATTR_HOST]
if called_host in hass.data[DOMAIN]:
hass.data[DOMAIN][called_host].update()
else:
for iperf3_host in hass.data[DOMAIN].values():
iperf3_host.update()
hass.services.async_register(DOMAIN, "speedtest", update, schema=SERVICE_SCHEMA)
hass.async_create_task(
async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, conf[CONF_MONITORED_CONDITIONS], config
)
)
return True
class Iperf3Data:
"""Get the latest data from iperf3."""
def __init__(self, hass, host):
"""Initialize the data object."""
self._hass = hass
self._host = host
self.data = {ATTR_DOWNLOAD: None, ATTR_UPLOAD: None, ATTR_VERSION: None}
def create_client(self):
"""Create a new iperf3 client to use for measurement."""
client = iperf3.Client()
client.duration = self._host[CONF_DURATION]
client.server_hostname = self._host[CONF_HOST]
client.port = self._host[CONF_PORT]
client.num_streams = self._host[CONF_PARALLEL]
client.protocol = self._host[CONF_PROTOCOL]
client.verbose = False
return client
@property
def protocol(self):
"""Return the protocol used for this connection."""
return self._host[CONF_PROTOCOL]
@property
def host(self):
"""Return the host connected to."""
return self._host[CONF_HOST]
@property
def port(self):
"""Return the port on the host connected to."""
return self._host[CONF_PORT]
def update(self, now=None):
"""Get the latest data from iperf3."""
if self.protocol == "udp":
            # UDP only has a one-way throughput attribute
result = self._run_test(ATTR_DOWNLOAD)
self.data[ATTR_DOWNLOAD] = self.data[ATTR_UPLOAD] = getattr(
result, "Mbps", None
)
self.data[ATTR_VERSION] = getattr(result, "version", None)
else:
result = self._run_test(ATTR_DOWNLOAD)
self.data[ATTR_DOWNLOAD] = getattr(result, "received_Mbps", None)
self.data[ATTR_VERSION] = getattr(result, "version", None)
self.data[ATTR_UPLOAD] = getattr(
self._run_test(ATTR_UPLOAD), "sent_Mbps", None
)
dispatcher_send(self._hass, DATA_UPDATED, self.host)
def _run_test(self, test_type):
"""Run and return the iperf3 data."""
client = self.create_client()
client.reverse = test_type == ATTR_DOWNLOAD
try:
result = client.run()
except (AttributeError, OSError, ValueError) as error:
_LOGGER.error("Iperf3 error: %s", error)
return None
if result is not None and hasattr(result, "error") and result.error is not None:
_LOGGER.error("Iperf3 error: %s", result.error)
return None
return result
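# A minimal configuration sketch expressed as the Python dict Home Assistant would pass to
# this component, validated against CONFIG_SCHEMA (added for illustration; the host below
# is a placeholder -- defaults such as port 5201 are filled in by the schema).
if __name__ == "__main__":
    conf = CONFIG_SCHEMA({DOMAIN: {CONF_HOSTS: [{CONF_HOST: "192.168.1.10"}]}})
    print(conf[DOMAIN][CONF_HOSTS][0][CONF_PORT])  # -> 5201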
|
the-stack_106_24700 | import pandas as pd
TITLE_NAME = "Wrong data"
SOURCE_NAME = "wrong_data"
LABELS = ["Scout",
"Team",
"Match",
"Alliance",
"Double outtakes",
"Wrong auto line",
"Wrong climb"]
def get_rows(manager):
tracked_data_types = ['Tele intake',
'Tele scale',
'Tele exchange',
'Tele opponent switch',
'Tele alliance switch']
outtakes = ['Tele scale',
'Tele exchange',
'Tele opponent switch',
'Tele alliance switch']
if manager.tba_available:
matches = manager.tba.event_matches(manager.tba_event)
else:
matches = None
for entry in manager.entries:
if not entry.board.alliance() == "N":
time_series = [None for _ in range(150)]
for data_type in tracked_data_types:
for occurrence_time in entry.look(data_type):
time_series[occurrence_time - 1] = data_type
has_cube = False
first_outtake_ignored = False
double_outtakes = 0
for event in time_series:
if not first_outtake_ignored:
if event in outtakes:
first_outtake_ignored = True
else:
if event in outtakes:
if not has_cube:
double_outtakes += 1
has_cube = False
if event == "Tele intake":
has_cube = True
if matches is not None:
match_key = str(manager.tba_event) + "_qm" + str(entry.match)
if entry.board.alliance().lower() == "r":
alliance = "red"
elif entry.board.alliance().lower() == "b":
alliance = "blue"
else:
alliance = "unknown"
                tba_match = None
                for match in matches:
                    if match['key'] == match_key:
                        tba_match = match
                        break
                if tba_match is None:
                    # No TBA record for this match yet; skip the cross-check for this entry.
                    continue
                alliance_teams = tba_match['alliances'][alliance]["team_keys"]
if "frc" + str(entry.team) in alliance_teams:
tba_robot_number = alliance_teams.index("frc" + str(entry.team)) + 1
else:
continue
tba_climbed = tba_match['score_breakdown'][alliance][
"endgameRobot" + str(tba_robot_number)] == "Climbing"
tba_auto_line = tba_match['score_breakdown'][alliance][
"autoRobot" + str(tba_robot_number)] == "AutoRun"
yield {"Scout": entry.name,
"Team": entry.team,
"Match": entry.match,
"Alliance": entry.board.alliance(),
"Double outtakes": double_outtakes,
"Wrong auto line": not (entry.final_value("Auto line", default=0) == 1) == tba_auto_line,
"Wrong climb": not (entry.final_value("Climb", default=0) == 2) == tba_climbed}
else:
yield {"Scout": entry.name,
"Team": entry.team,
"Match": entry.match,
"Alliance": entry.board.alliance(),
"Double outtakes": double_outtakes,
"Wrong auto line": "",
"Wrong climb": ""}
def compute_table(manager):
table = pd.DataFrame(get_rows(manager))[LABELS]
return table
|
the-stack_106_24701 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming OAuth 2.0 RFC6749.
"""
import time
import warnings
from oauthlib.common import generate_token
from oauthlib.oauth2.rfc6749 import tokens
from oauthlib.oauth2.rfc6749.errors import (InsecureTransportError,
TokenExpiredError)
from oauthlib.oauth2.rfc6749.parameters import (parse_token_response,
prepare_token_request,
prepare_token_revocation_request)
from oauthlib.oauth2.rfc6749.utils import is_secure_transport
AUTH_HEADER = 'auth_header'
URI_QUERY = 'query'
BODY = 'body'
FORM_ENC_HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded'
}
class Client:
"""Base OAuth2 client responsible for access token management.
This class also acts as a generic interface providing methods common to all
client types such as ``prepare_authorization_request`` and
``prepare_token_revocation_request``. The ``prepare_x_request`` methods are
the recommended way of interacting with clients (as opposed to the abstract
prepare uri/body/etc methods). They are recommended over the older set
because they are easier to use (more consistent) and add a few additional
security checks, such as HTTPS and state checking.
Some of these methods require further implementation only provided by the
specific purpose clients such as
:py:class:`oauthlib.oauth2.MobileApplicationClient` and thus you should always
seek to use the client class matching the OAuth workflow you need. For
Python, this is usually :py:class:`oauthlib.oauth2.WebApplicationClient`.
"""
refresh_token_key = 'refresh_token'
def __init__(self, client_id,
default_token_placement=AUTH_HEADER,
token_type='Bearer',
access_token=None,
refresh_token=None,
mac_key=None,
mac_algorithm=None,
token=None,
scope=None,
state=None,
redirect_url=None,
state_generator=generate_token,
**kwargs):
"""Initialize a client with commonly used attributes.
:param client_id: Client identifier given by the OAuth provider upon
registration.
:param default_token_placement: Tokens can be supplied in the Authorization
header (default), the URL query component (``query``) or the request
body (``body``).
:param token_type: OAuth 2 token type. Defaults to Bearer. Change this
if you specify the ``access_token`` parameter and know it is of a
different token type, such as a MAC, JWT or SAML token. Can
also be supplied as ``token_type`` inside the ``token`` dict parameter.
:param access_token: An access token (string) used to authenticate
requests to protected resources. Can also be supplied inside the
``token`` dict parameter.
:param refresh_token: A refresh token (string) used to refresh expired
tokens. Can also be supplied inside the ``token`` dict parameter.
:param mac_key: Encryption key used with MAC tokens.
:param mac_algorithm: Hashing algorithm for MAC tokens.
:param token: A dict of token attributes such as ``access_token``,
``token_type`` and ``expires_at``.
:param scope: A list of default scopes to request authorization for.
:param state: A CSRF protection string used during authorization.
:param redirect_url: The redirection endpoint on the client side to which
the user returns after authorization.
:param state_generator: A no argument state generation callable. Defaults
to :py:meth:`oauthlib.common.generate_token`.
"""
self.client_id = client_id
self.default_token_placement = default_token_placement
self.token_type = token_type
self.access_token = access_token
self.refresh_token = refresh_token
self.mac_key = mac_key
self.mac_algorithm = mac_algorithm
self.token = token or {}
self.scope = scope
self.state_generator = state_generator
self.state = state
self.redirect_url = redirect_url
self.code = None
self.expires_in = None
self._expires_at = None
self.populate_token_attributes(self.token)
@property
def token_types(self):
"""Supported token types and their respective methods
Additional tokens can be supported by extending this dictionary.
The Bearer token spec is stable and safe to use.
The MAC token spec is not yet stable and support for MAC tokens
is experimental and currently matching version 00 of the spec.
"""
return {
'Bearer': self._add_bearer_token,
'MAC': self._add_mac_token
}
def prepare_request_uri(self, *args, **kwargs):
"""Abstract method used to create request URIs."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def prepare_request_body(self, *args, **kwargs):
"""Abstract method used to create request bodies."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def parse_request_uri_response(self, *args, **kwargs):
"""Abstract method used to parse redirection responses."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def add_token(self, uri, http_method='GET', body=None, headers=None,
token_placement=None, **kwargs):
"""Add token to the request uri, body or authorization header.
The access token type provides the client with the information
required to successfully utilize the access token to make a protected
resource request (along with type-specific attributes). The client
MUST NOT use an access token if it does not understand the token
type.
For example, the "bearer" token type defined in
[`I-D.ietf-oauth-v2-bearer`_] is utilized by simply including the access
token string in the request:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: Bearer mF_9.B5f-4.1JqM
while the "mac" token type defined in [`I-D.ietf-oauth-v2-http-mac`_] is
utilized by issuing a MAC key together with the access token which is
used to sign certain components of the HTTP requests:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: MAC id="h480djs93hd8",
nonce="274312:dj83hs9s",
mac="kDZvddkndxvhGRXZhvuDjEWhGeE="
.. _`I-D.ietf-oauth-v2-bearer`: https://tools.ietf.org/html/rfc6749#section-12.2
.. _`I-D.ietf-oauth-v2-http-mac`: https://tools.ietf.org/html/rfc6749#section-12.2
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
token_placement = token_placement or self.default_token_placement
case_insensitive_token_types = {
k.lower(): v for k, v in self.token_types.items()}
if not self.token_type.lower() in case_insensitive_token_types:
raise ValueError("Unsupported token type: %s" % self.token_type)
if not (self.access_token or self.token.get('access_token')):
raise ValueError("Missing access token.")
if self._expires_at and self._expires_at < time.time():
raise TokenExpiredError()
return case_insensitive_token_types[self.token_type.lower()](uri, http_method, body,
headers, token_placement, **kwargs)
def prepare_authorization_request(self, authorization_url, state=None,
redirect_url=None, scope=None, **kwargs):
"""Prepare the authorization request.
This is the first step in many OAuth flows in which the user is
redirected to a certain authorization URL. This method adds
required parameters to the authorization URL.
:param authorization_url: Provider authorization endpoint URL.
:param state: CSRF protection string. Will be automatically created if
not provided. The generated state is available via the ``state``
attribute. Clients should verify that the state is unchanged and
present in the authorization response. This verification is done
automatically if using the ``authorization_response`` parameter
with ``prepare_token_request``.
:param redirect_url: Redirect URL to which the user will be returned
after authorization. Must be provided unless previously setup with
the provider. If provided then it must also be provided in the
token request.
:param scope:
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(authorization_url):
raise InsecureTransportError()
self.state = state or self.state_generator()
self.redirect_url = redirect_url or self.redirect_url
self.scope = scope or self.scope
auth_url = self.prepare_request_uri(
authorization_url, redirect_uri=self.redirect_url,
scope=self.scope, state=self.state, **kwargs)
return auth_url, FORM_ENC_HEADERS, ''
def prepare_token_request(self, token_url, authorization_response=None,
redirect_url=None, state=None, body='', **kwargs):
"""Prepare a token creation request.
Note that these requests usually require client authentication, either
by including client_id or a set of provider specific authentication
credentials.
:param token_url: Provider token creation endpoint URL.
:param authorization_response: The full redirection URL string, i.e.
            the location to which the user was redirected after successful
authorization. Used to mine credentials needed to obtain a token
in this step, such as authorization code.
:param redirect_url: The redirect_url supplied with the authorization
request (if there was one).
:param state:
:param body: Existing request body (URL encoded string) to embed parameters
            into. This may contain extra parameters. Default ''.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
state = state or self.state
if authorization_response:
self.parse_request_uri_response(
authorization_response, state=state)
self.redirect_url = redirect_url or self.redirect_url
body = self.prepare_request_body(body=body,
redirect_uri=self.redirect_url, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_refresh_token_request(self, token_url, refresh_token=None,
body='', scope=None, **kwargs):
"""Prepare an access token refresh request.
Expired access tokens can be replaced by new access tokens without
going through the OAuth dance if the client obtained a refresh token.
This refresh token and authentication credentials can be used to
obtain a new access token, and possibly a new refresh token.
:param token_url: Provider token refresh endpoint URL.
:param refresh_token: Refresh token string.
:param body: Existing request body (URL encoded string) to embed parameters
            into. This may contain extra parameters. Default ''.
:param scope: List of scopes to request. Must be equal to
or a subset of the scopes granted when obtaining the refresh
token.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
self.scope = scope or self.scope
body = self.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=self.scope, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_token_revocation_request(self, revocation_url, token,
token_type_hint="access_token", body='', callback=None, **kwargs):
"""Prepare a token revocation request.
:param revocation_url: Provider token revocation endpoint URL.
:param token: The access or refresh token to be revoked (string).
:param token_type_hint: ``"access_token"`` (default) or
``"refresh_token"``. This is optional and if you wish to not pass it you
must provide ``token_type_hint=None``.
:param body:
:param callback: A jsonp callback such as ``package.callback`` to be invoked
            upon receiving the response. Note that it should not include a () suffix.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
        Note that a JSONP request may use GET as the parameters will
be added to the request URL query as opposed to the request body.
An example of a revocation request
.. code-block: http
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
An example of a jsonp revocation request
.. code-block: http
GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
and an error response
.. code-block: http
package.myCallback({"error":"unsupported_token_type"});
Note that these requests usually require client credentials, client_id in
the case for public clients and provider specific authentication
credentials for confidential clients.
"""
if not is_secure_transport(revocation_url):
raise InsecureTransportError()
return prepare_token_revocation_request(revocation_url, token,
token_type_hint=token_type_hint, body=body, callback=callback,
**kwargs)
def parse_request_body_response(self, body, scope=None, **kwargs):
"""Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
:param body: The response body from the token request.
:param scope: Scopes originally requested.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. OAuth2Error if response is invalid.
These response are json encoded and could easily be parsed without
the assistance of OAuthLib. However, there are a few subtle issues
to be aware of regarding the response which are helpfully addressed
through the raising of various errors.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
While it is not mandated it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are required to only
if it has changed since the authorization request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
self.token = parse_token_response(body, scope=scope)
self.populate_token_attributes(self.token)
return self.token
def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
"""Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner.
"""
refresh_token = refresh_token or self.refresh_token
return prepare_token_request(self.refresh_token_key, body=body, scope=scope,
refresh_token=refresh_token, **kwargs)
def _add_bearer_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=None):
"""Add a bearer token to the request uri, body or authorization header."""
if token_placement == AUTH_HEADER:
headers = tokens.prepare_bearer_headers(self.access_token, headers)
elif token_placement == URI_QUERY:
uri = tokens.prepare_bearer_uri(self.access_token, uri)
elif token_placement == BODY:
body = tokens.prepare_bearer_body(self.access_token, body)
else:
raise ValueError("Invalid token placement.")
return uri, headers, body
def _add_mac_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
"""Add a MAC token to the request authorization header.
Warning: MAC token support is experimental as the spec is not yet stable.
"""
if token_placement != AUTH_HEADER:
raise ValueError("Invalid token placement.")
headers = tokens.prepare_mac_header(self.access_token, uri,
self.mac_key, http_method, headers=headers, body=body, ext=ext,
hash_algorithm=self.mac_algorithm, **kwargs)
return uri, headers, body
def _populate_attributes(self, response):
warnings.warn("Please switch to the public method "
"populate_token_attributes.", DeprecationWarning)
return self.populate_token_attributes(response)
def populate_code_attributes(self, response):
"""Add attributes from an auth code response to self."""
if 'code' in response:
self.code = response.get('code')
def populate_token_attributes(self, response):
"""Add attributes from a token exchange response to self."""
if 'access_token' in response:
self.access_token = response.get('access_token')
if 'refresh_token' in response:
self.refresh_token = response.get('refresh_token')
if 'token_type' in response:
self.token_type = response.get('token_type')
if 'expires_in' in response:
self.expires_in = response.get('expires_in')
self._expires_at = time.time() + int(self.expires_in)
if 'expires_at' in response:
self._expires_at = int(response.get('expires_at'))
if 'mac_key' in response:
self.mac_key = response.get('mac_key')
if 'mac_algorithm' in response:
self.mac_algorithm = response.get('mac_algorithm')
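# A condensed authorization-code flow using the concrete WebApplicationClient subclass
# (added for illustration; the provider URLs, client id, code and token response below
# are placeholders).
if __name__ == '__main__':
    from oauthlib.oauth2 import WebApplicationClient

    client = WebApplicationClient('example-client-id')
    # 1. Build the URL the resource owner is redirected to for authorization.
    auth_url, _, _ = client.prepare_authorization_request(
        'https://provider.example.com/oauth/authorize',
        redirect_url='https://app.example.com/callback', scope=['profile'])
    # 2. Exchange the authorization code returned on the redirect for a token.
    token_url, headers, body = client.prepare_token_request(
        'https://provider.example.com/oauth/token',
        authorization_response='https://app.example.com/callback?code=abc123&state='
        + client.state)
    # 3. Parse the provider's JSON response, then sign a protected-resource request.
    client.parse_request_body_response(
        '{"access_token": "2YotnFZFEjr1zCsicMWpAA", "token_type": "Bearer", "expires_in": 3600}')
    uri, headers, body = client.add_token('https://provider.example.com/api/me')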
|
the-stack_106_24702 | # TG-UserBot - A modular Telegram UserBot script for Python.
# Copyright (C) 2019 Kandarp <https://github.com/kandnub>
#
# TG-UserBot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TG-UserBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TG-UserBot. If not, see <https://www.gnu.org/licenses/>.
# This was purely based on https://github.com/ezdev128/telethon-session-redis/
# since it hasn't been updated for a while now and missed a few things.
import logging
import redis
from telethon.crypto import AuthKey
from telethon.sessions import MemorySession
"""
from telethon import utils
from telethon.sessions.memory import _SentFileType
from telethon.tl import types
"""
LOGGER = logging.getLogger(__name__)
class RedisSession(MemorySession):
"""Session to store the authentication information in Redis.
The entities and files are cached in memory instead of Redis.
"""
def __init__(self, session_name=None, redis_connection=None):
if not isinstance(session_name, (str, bytes)):
raise TypeError("Session name must be a string or bytes.")
if (not redis_connection
or not isinstance(redis_connection, redis.Redis)):
raise TypeError(
'The given redis_connection must be a Redis instance.')
super().__init__()
self._dc_id: int = 0
self._server_address: str = None
self._port: int = None
self._auth_key: AuthKey = None
self._takeout_id = None
self.session_name = (session_name if isinstance(session_name, str) else
session_name.decode())
self.redis_connection = redis_connection
self.sess_prefix = "telethon:session:{}".format(self.session_name)
self.feed_session()
self._files = {}
self._entities = set()
self._update_states = {}
def feed_session(self):
try:
s = self._get_sessions()
if len(s) == 0:
return
s = self.redis_connection.hgetall(s[-1])
if not s:
return
self._dc_id = int(s.get(b'dc_id').decode())
self._server_address = s.get(b'server_address').decode()
self._port = int(s.get(b'port').decode())
self._takeout_id = (s.get(b'takeout_id').decode() if s.get(
b'takeout_id', False) else None)
if s.get(b'auth_key', False):
self._auth_key = AuthKey(s.get(b'auth_key'))
except Exception as ex:
LOGGER.exception(ex.args)
def _get_sessions(self, strip_prefix=False):
key_pattern = "{}:auth".format(self.sess_prefix)
try:
sessions = self.redis_connection.keys(key_pattern + '*')
return [
s.decode().replace(key_pattern, '')
if strip_prefix else s.decode() for s in sessions
]
except Exception as ex:
LOGGER.exception(ex.args)
return []
def _update_sessions(self):
if not self._dc_id:
return
auth_key = self._auth_key.key if self._auth_key else bytes()
s = {
'dc_id': self._dc_id,
'server_address': self._server_address,
'port': self._port,
'auth_key': auth_key,
'takeout_id': self.takeout_id or b''
}
key = "{}:auth".format(self.sess_prefix)
try:
self.redis_connection.hmset(key, s)
except Exception as ex:
LOGGER.exception(ex.args)
def set_dc(self, dc_id, server_address, port):
super().set_dc(dc_id, server_address, port)
self._update_sessions()
auth_key = bytes()
if not self._dc_id:
self._auth_key = AuthKey(data=auth_key)
return
key_pattern = "{}:auth".format(self.sess_prefix)
s = self.redis_connection.hgetall(key_pattern)
if s:
auth_key = s.get(b'auth_key') or auth_key
            self._auth_key = AuthKey(data=auth_key)
@property
def auth_key(self):
return self._auth_key
@auth_key.setter
def auth_key(self, value):
self._auth_key = value
self._update_sessions()
@property
def takeout_id(self):
return self._takeout_id
@takeout_id.setter
def takeout_id(self, value):
self._takeout_id = value
self._update_sessions()
def delete(self):
keys = self.redis_connection.keys(f"{self.sess_prefix}*")
self.redis_connection.delete(*keys)
def get_update_state(self, entity_id):
key_pattern = "{}:update_states:{}".format(self.sess_prefix, entity_id)
return self.redis_connection.get(key_pattern)
def set_update_state(self, entity_id, state):
key_pattern = "{}:update_states:{}".format(self.sess_prefix, entity_id)
self.redis_connection.set(key_pattern, state)
def _get_entities(self, strip_prefix=False):
key_pattern = "{}:entities:".format(self.sess_prefix)
try:
entities = self.redis_connection.keys(key_pattern + "*")
if not strip_prefix:
return entities
return [s.decode().replace(key_pattern, "") for s in entities]
except Exception as ex:
LOGGER.exception(ex.args)
return []
def process_entities(self, tlo):
rows = self._entities_to_rows(tlo)
if not rows or len(rows) == 0 or len(rows[0]) == 0:
return
try:
for row in rows:
key = "{}:entities:{}".format(self.sess_prefix, row[0])
s = {
"id": row[0],
"hash": row[1],
"username": row[2] or b'',
"phone": row[3] or b'',
"name": row[4] or b'',
}
self.redis_connection.hmset(key, s)
except Exception as ex:
LOGGER.exception(ex.args)
def get_entity_rows_by_phone(self, phone):
try:
for key in self._get_entities():
entity = self.redis_connection.hgetall(key)
p = (
entity.get(b'phone').decode()
if entity.get(b'phone') else
None
)
if p and p == phone:
return (
                        int(entity.get(b'id').decode()),
int(entity.get(b'hash').decode())
)
except Exception as ex:
LOGGER.exception(ex.args)
return None
def get_entity_rows_by_username(self, username):
try:
for key in self._get_entities():
entity = self.redis_connection.hgetall(key)
u = (
entity.get(b'username').decode()
if entity.get(b'username') else
None
)
if u and u == username:
return (
                        int(entity.get(b'id').decode()),
int(entity.get(b'hash').decode())
)
except Exception as ex:
LOGGER.exception(ex.args)
return None
def get_entity_rows_by_name(self, name):
try:
for key in self._get_entities():
entity = self.redis_connection.hgetall(key)
n = (
entity.get(b'name').decode()
if entity.get(b'name') else
None
)
if n and n == name:
return (
                        int(entity.get(b'id').decode()),
int(entity.get(b'hash').decode())
)
except Exception as ex:
LOGGER.exception(ex.args)
return None
def get_entity_rows_by_id(self, id, exact=True):
if exact:
key = "{}:entities:{}".format(self.sess_prefix, id)
s = self.redis_connection.hgetall(key)
if not s:
return None
try:
return id, int(s.get(b'hash').decode())
except Exception as ex:
LOGGER.exception(ex.args)
return None
else:
ids = (
utils.get_peer_id(types.PeerUser(id)),
utils.get_peer_id(types.PeerChat(id)),
utils.get_peer_id(types.PeerChannel(id))
)
try:
for key in self._get_entities():
entity = self.redis_connection.hgetall(key)
                    # ids holds integer peer ids, so compare as int rather than str.
                    entity_id = int(entity.get(b'id').decode())
                    if entity_id in ids:
                        return entity_id, int(entity.get(b'hash').decode())
except Exception as ex:
LOGGER.exception(ex.args)
def cache_file(self, md5_digest, file_size, instance):
if not isinstance(instance, (types.InputDocument, types.InputPhoto)):
raise TypeError('Cannot cache %s instance' % type(instance))
key = "{}:sent_files:{}".format(self.sess_prefix, md5_digest)
s = {
'md5_digest': md5_digest,
'file_size': file_size,
'type': _SentFileType.from_type(type(instance)),
'id': instance.id,
'access_hash': instance.access_hash,
}
try:
self.redis_connection.hmset(key, s)
except Exception as ex:
LOGGER.exception(ex.args)
def get_file(self, md5_digest, file_size, cls):
key = "{}:sent_files:{}".format(self.sess_prefix, md5_digest)
s = self.redis_connection.hgetall(key)
if s:
try:
if (
s.get(b'md5_digest').decode() == md5_digest and
s.get(b'file_size').decode() == file_size and
s.get(b'type').decode() == _SentFileType.from_type(cls)
):
return (cls(
s.get(b'id').decode(),
s.get(b'access_hash').decode()
))
except Exception as ex:
LOGGER.exception(ex.args)
return None
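# A minimal wiring sketch (added for illustration, not part of the original module): the
# session is built from a redis.Redis connection and handed to a Telethon client in place
# of the default SQLite session. Host, api_id and api_hash below are placeholders.
if __name__ == '__main__':
    from telethon import TelegramClient

    redis_connection = redis.Redis(host='localhost', port=6379, db=0)
    session = RedisSession('userbot', redis_connection)
    client = TelegramClient(session, api_id=12345, api_hash='0123456789abcdef0123456789abcdef')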
|
the-stack_106_24703 | def insertionSort(lista):
    # For each element, walk it back through the sorted prefix until it sits between
    # a smaller (or equal) element on the left and a larger one on the right.
    for i in range(1, len(lista)):
        lugar = i
        valor = lista[i]
        while lugar > 0 and lista[lugar - 1] > valor:
            lista[lugar] = lista[lugar - 1]
            lugar = lugar - 1
        lista[lugar] = valor
    # Print the sorted list.
    print(lista)
    # Return the list so other functions can reuse it.
    return lista
# Read numbers from the user until -1 is entered, then sort them with insertionSort.
def insertionSorts():
    lista = []
    x = 1
    while x != -1:
        x = int(input("Enter a number: "))
        if x == -1:
            break
        else:
            lista.append(x)
    return insertionSort(lista)
insertionSorts()
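# A direct, non-interactive example (added for illustration):
insertionSort([5, 2, 4, 6, 1, 3])  # prints and returns [1, 2, 3, 4, 5, 6]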
|
the-stack_106_24704 | import unittest
import itertools
import numpy
from six import moves
import chainer
from chainer.backends import cuda
from chainer import initializers
from chainer import links
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'initial_cost': ['random', None],
'transpose': [True, False],
}))
class TestCRF1d(unittest.TestCase):
def _calc_score(self, batch, ys):
cost = self.link.cost.array
return sum(x[batch, y] for x, y in zip(self.xs, ys)) + \
sum(cost[y1, y2] for y1, y2 in zip(ys[:-1], ys[1:]))
def _crf1d(self, cost_data, xs_data, ys_data):
z = numpy.zeros((self.batches[0],), numpy.float32)
for b, length in enumerate(self.lengths):
for ys in itertools.product(range(self.n_label), repeat=length):
z[b] += numpy.exp(chainer.cuda.to_cpu(self._calc_score(b, ys)))
score = numpy.zeros((self.batches[0],), numpy.float32)
for b, length in enumerate(self.lengths):
ys = [self.ys[i][b] for i in range(length)]
score[b] = self._calc_score(b, ys)
loss = -(score - numpy.log(z))
return numpy.sum(loss) / self.batches[0]
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
self.n_label = 3
self.lengths = [3, 3]
self.batches = [2, 2, 2]
self.xs = [numpy.random.uniform(-1, 1, (b, 3)).astype(self.dtype)
for b in self.batches]
self.ys = [numpy.random.randint(
0, self.n_label, (b,)).astype(numpy.int32)
for b in self.batches]
self.link = links.CRF1d(n_label=self.n_label)
self.cost_shape = (self.n_label, self.n_label)
if self.dtype == numpy.float16:
self.check_forward_options = {'rtol': 5e-3, 'atol': 1e-2}
else:
self.check_forward_options = {'atol': 1e-4}
def tearDown(self):
self._config_user.__exit__(None, None, None)
def check_forward(self, x_data, t_data):
if self.transpose:
# Make transposed arrays manually
xs = [self.link.xp.empty((l, 3), dtype=self.dtype)
for l in self.lengths]
ts = [self.link.xp.empty((l,), dtype=numpy.int32)
for l in self.lengths]
for i, batch in enumerate(self.batches):
for j in moves.range(batch):
xs[j][i] = x_data[i][j]
ts[j][i] = t_data[i][j]
else:
xs = x_data
ts = t_data
x = self.link(xs, ts, transpose=self.transpose)
t = self._crf1d(self.link.cost.array, x_data, t_data)
testing.assert_allclose(x.array, t,
**self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.xs, self.ys)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.xs), cuda.to_gpu(self.ys))
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'initializer': ['random', None]
}))
class TestInitialization(unittest.TestCase):
def setUp(self):
self.n_label = 3
self.initial_cost = numpy.empty((self.n_label, self.n_label),
dtype=self.dtype)
if self.initializer is None:
initializer = initializers.constant.Zero()
elif self.initializer == 'random':
initializer = initializers.GlorotUniform()
initializer(self.initial_cost)
with chainer.using_config('dtype', self.dtype):
self.link = links.CRF1d(self.n_label,
initial_cost=self.initial_cost)
def check_param(self):
link = self.link
dtype = self.dtype
assert link.cost.dtype == dtype
testing.assert_allclose(link.cost.array,
self.initial_cost,
atol=0, rtol=0)
def test_param_cpu(self):
self.check_param()
@attr.gpu
def test_param_gpu(self):
self.link.to_gpu()
self.check_param()
testing.run_module(__name__, __file__)
|
the-stack_106_24706 | import logging
import requests
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.cache import patch_response_headers
from django.views.generic import View
from giscube.tilecache.caches import GiscubeServiceCache
from giscube.tilecache.image import tile_cache_image
from giscube.tilecache.proj import GoogleProjection
from giscube.utils import get_service_wms_bbox
from giscube.views_mixins import WMSProxyViewMixin
from giscube.views_utils import web_map_view
from .models import Service
logger = logging.getLogger(__name__)
class ImageServerWMSView(WMSProxyViewMixin):
def get(self, request, service_name):
service = get_object_or_404(Service, name=service_name, active=True)
self.service = service
if service.visibility == 'private' and not request.user.is_authenticated:
return HttpResponseForbidden()
response = super().get(request)
headers = getattr(response, '_headers', {})
if 'content-type' in headers:
content_type = None
try:
content_type = headers.get('content-type')[1]
except Exception:
pass
if content_type == 'application/vnd.ogc.wms_xml; charset=UTF-8':
response['Content-Type'] = 'text/xml; charset=UTF-8'
return response
def do_post(self, request, service_name):
service = get_object_or_404(Service, name=service_name, active=True)
if service.visibility == 'private' and not request.user.is_authenticated:
return HttpResponseForbidden()
url = self.build_url(request)
return requests.post(url, data=request.body)
def build_url(self, request):
meta = request.META.get('QUERY_STRING', '')
url = "%s&%s" % (self.service.service_internal_url, meta)
return url
class ImageServerTileCacheView(View):
def get(self, request, service_name):
service = get_object_or_404(Service, name=service_name, active=True)
if service.visibility == 'private' and not request.user.is_authenticated:
return HttpResponseForbidden()
data = {}
if service.tilecache_enabled:
data.update(
{
'bbox': service.tilecache_bbox,
'min_zoom': service.tilecache_minzoom_level,
'max_zoom': service.tilecache_maxzoom_level
}
)
return JsonResponse(data)
class ImageServerTileCacheTilesView(View):
def build_url(self, service):
return service.service_internal_url
def get(self, request, service_name, z, x, y, image_format='png'):
service = get_object_or_404(Service, name=service_name, active=True)
if service.visibility == 'private' and not request.user.is_authenticated:
return HttpResponseForbidden()
if not service.tilecache_enabled:
raise Http404
if z < service.tilecache_minzoom_level or z > service.tilecache_maxzoom_level:
return HttpResponseBadRequest()
bbox = self.tile2bbox(z, x, y)
tile_options = {
'url': self.build_url(service),
'layers': ','.join(service.servicelayer_set.all().values_list('layer__name', flat=True)),
'xyz': [z, x, y],
'bbox': bbox,
'srs': 'EPSG:3857'
}
buffer = [0, 0]
cache = GiscubeServiceCache(service)
image = tile_cache_image(tile_options, buffer, cache)
response = HttpResponse(image, content_type='image/%s' % image_format)
patch_response_headers(response, cache_timeout=60 * 60 * 24 * 7)
response.status_code = 200
return response
def tile2bbox(self, z, x, y):
proj = GoogleProjection(256, [z])
bbox = proj.tile_bbox((z, x, y))
return proj.project(bbox[:2]) + proj.project(bbox[2:])
class ImageServerMapViewerView(View):
def get(self, request, service_name):
service = get_object_or_404(Service, name=service_name, active=True)
if service.visibility == 'private' and not request.user.is_authenticated:
return HttpResponseForbidden()
layers = []
layers.append(
{
'name': '%s (WMS)' % (service.title or service.name),
'type': 'wms',
'layers': service.default_layer,
'url': reverse('imageserver', args=(service.name, '',))
}
)
if service.tilecache_enabled:
layers.append(
{
'name': '%s (Tile Cache)' % (service.title or service.name),
'type': 'tile',
'url': '%s{z}/{x}/{y}.png' % reverse('imageserver-tilecache', args=(service.name,))
}
)
extra_context = {
'title': service.title or service.name,
'layers': layers
}
bbox = get_service_wms_bbox(service.service_internal_url)
if bbox:
extra_context['bbox'] = list(bbox)
return web_map_view(request, extra_context)
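# URL wiring sketch (illustrative only; the concrete patterns live in the
# project's urls.py, and the route names below are inferred from the reverse()
# calls above — paths and converters are assumptions):
#
#   path('services/<str:service_name>/wms/<path:path>', ImageServerWMSView.as_view(), name='imageserver')
#   path('services/<str:service_name>/tilecache/', ImageServerTileCacheView.as_view(), name='imageserver-tilecache')
#   path('services/<str:service_name>/tilecache/<int:z>/<int:x>/<int:y>.png',
#        ImageServerTileCacheTilesView.as_view(), name='imageserver-tilecache-tiles')
#   path('services/<str:service_name>/map/', ImageServerMapViewerView.as_view(), name='imageserver-map')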
|
the-stack_106_24707 | import dragonfly as df
from srabuilder import rules
import title_menu, menu_utils, server, df_utils, game, container_menu, objective, server, constants
CARPENTER_MENU = 'carpenterMenu'
async def get_carpenter_menu():
return await menu_utils.get_active_menu(CARPENTER_MENU)
async def click_button(name):
menu = await get_carpenter_menu()
await menu_utils.click_component(menu[name])
async def move_cursor_tile(direction, amount):
await game.move_mouse_in_direction(direction, amount * 64)
mapping = {
"pan <direction_keys>": objective.objective_action(objective.HoldKeyObjective, "direction_keys"),
"previous": df_utils.async_action(click_button, "backButton"),
"cancel": df_utils.async_action(click_button, "cancelButton"),
"demolish [buildings]": df_utils.async_action(click_button, "demolishButton"),
"next": df_utils.async_action(click_button, "forwardButton"),
"move [buildings]": df_utils.async_action(click_button, "moveButton"),
"build": df_utils.async_action(click_button, "okButton"),
"paint": df_utils.async_action(click_button, "paintButton"),
"upgrade": df_utils.async_action(click_button, "upgradeIcon"),
}
@menu_utils.valid_menu_test
def is_active():
game.get_context_menu(CARPENTER_MENU)
def load_grammar():
grammar = df.Grammar("carpenter_menu")
main_rule = df.MappingRule(
name="carpenter_menu_rule",
mapping=mapping,
extras=[
df.Choice("direction_keys", game.direction_keys),
df.Choice("direction_nums", game.direction_nums),
rules.num,
df_utils.positive_index,
df_utils.positive_num
],
context=is_active,
defaults={'positive_num': 1},
)
grammar.add_rule(main_rule)
grammar.load()
|
the-stack_106_24711 | """
This file is part of the Semantic Quality Benchmark for Word Embeddings Tool in Python (SeaQuBe).
Copyright (c) 2021 by Benjamin Manns
:author: Benjamin Manns
"""
import time
from os.path import join, basename, dirname
import unittest
from seaqube.benchmark.corpus4ir import WordCentroidSimilarityBenchmark
from seaqube.benchmark.semantic_wordnet import SemanticWordnetBenchmark
from seaqube.benchmark.wordanalogy import WordAnalogyBenchmark
from seaqube.benchmark.wordoutliers import WordOutliersBenchmark
from seaqube.benchmark.wordsimilarity import WordSimilarityBenchmark
from seaqube.nlp.seaqube_model import SeaQuBeNLPLoader, SeaQuBeCompressLoader
from seaqube.nlp.tools import tokenize_corpus
from seaqube.tools.io import load_json
from gensim.models import FastText
from seaqube.nlp.seaqube_model import BaseModelWrapper
from seaqube.nlp.tools import gensim_we_model_to_custom_we_model
from gensim.models.callbacks import CallbackAny2Vec
from progressbar import ProgressBar
class GensimEpochLogger(CallbackAny2Vec):
'''Callback to log information about training'''
def __init__(self, epochs):
self.epoch = 0
self.bar = ProgressBar(max_value=epochs)
def on_train_begin(self, model):
pass
def on_epoch_begin(self, model):
pass
def on_epoch_end(self, model):
self.epoch += 1
self.bar.update(self.epoch)
class BaseFTGensimModel(BaseModelWrapper):
def get_config(self):
return dict(sg=self.model.sg, cbow_mean=self.model.cbow_mean, size=self.model.vector_size,
alpha=self.model.alpha, min_alpha=self.model.min_alpha, min_n=self.model.wv.min_n,
max_n=self.model.wv.max_n, window=self.model.window, min_count=self.model.vocabulary.min_count,
sample=self.model.vocabulary.sample, negative=self.model.negative, workers=self.model.workers,
class_name=str(self))
def _wrap_nlp_model(self, model):
return gensim_we_model_to_custom_we_model(model)
class SmallFTModel(BaseFTGensimModel):
def define_epochs(self):
return 1000
def define_model(self):
return FastText(sg=1, cbow_mean=1, size=200, alpha=0.025, min_alpha=0.0001, min_n=1, max_n=6,
window=5, min_count=1, sample=0.001, negative=5, workers=self.cpus - 1,
callbacks=[GensimEpochLogger(self.epochs)])
def define_training(self):
self.model.build_vocab(sentences=self.data, update=False)
self.model.train(sentences=self.data, total_examples=len(self.data), epochs=self.epochs)
## preparation, needs to be done not so often:
# SeaQuBeCompressLoader.save_model_compressed(model.get(), 'small_copressed_model.dill')
def load_corpus():
return tokenize_corpus(load_json(join(dirname(__file__), 'test_data', 'small_corpus_01.json')))
def model_path():
return join(dirname(__file__), 'test_data', 'small_copressed_model.dill')
class TestWordSimilarityBenchmark(unittest.TestCase):
def test_simple_benchmark(self):
# need to load a simple model, i.e. small dataset
#model = SmallModel()
#model.process(load_corpus())
#nlp = SeaQuBeNLPLoader.load_model_from_tin_can(model.get(), 'small_model')
nlp = SeaQuBeCompressLoader.load_compressed_model(model_path(), 'test_model')
test_sets = ['semeval17', 'yp-130', 'mturk-771', 'verb-143', 'rg-65', 'simlex999', 'rw', 'simverb-3500', 'wordsim353-rel', 'men', 'mturk-287', 'mc-30', 'wordsim353-sim']
scores = [0.0, 0.0, 0.0, 0.0, 0.0, 0.4959, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
print("vocab", nlp.model.vocabs())
for i, test_set in enumerate(test_sets):
simi_bench = WordSimilarityBenchmark(test_set)
res = simi_bench(nlp.model)
print(test_set, "result = ", res)
self.assertAlmostEqual(res.score, scores[i], delta=0.1)
class TestWordAnalogyBenchmark(unittest.TestCase):
def test_multiproc(self):
nlp = SeaQuBeCompressLoader.load_compressed_model(model_path(), 'test_model')
simi_bench = WordAnalogyBenchmark('google-analogies', max_cpus=10, multiprocessing=True)
res = simi_bench(nlp.model)
print("res = ", res)
self.assertAlmostEqual(res.score, 0.0, delta=0.01)
def test_nearest_neighbors(self):
nlp = SeaQuBeCompressLoader.load_compressed_model(model_path(), 'test_model')
test_set = 'google-analogies'
mm = "NearestNeighbors"
start = time.time()
simi_bench = WordAnalogyBenchmark(test_set, method=mm, multiprocessing=False)
res = simi_bench(nlp.model)
end = time.time()
print(test_set, "result = ", res, "time = ", end-start)
self.assertAlmostEqual(res.score, 0.5, delta=0.01)
def test_simple_benchmark(self):
# need to load a simple model, i.e. small dataset
#model = SmallModel()
#model.process(load_corpus())
#nlp = SeaQuBeNLPLoader.load_model_from_tin_can(model.get(), 'small_model')
nlp = SeaQuBeCompressLoader.load_compressed_model(model_path(), 'test_model')
scores = {
"semeval":{
'3CosAdd': 0.0,
'VectorCalc': 0.0,
'PairDir': 0.0,
'SpaceEvolution': 0.0,
'NearestNeighbors': 0.0
},
"google-analogies": {
'3CosAdd': 0.0,
'VectorCalc': 0.0,
'PairDir': 0.0,
'SpaceEvolution': 0.0,
'NearestNeighbors': 0.5
},
"sat": {
'3CosAdd': 0.0,
'VectorCalc': 0.0,
'PairDir': 0.0,
'SpaceEvolution': 0.0,
'NearestNeighbors': 0.0
},
"msr": {
'3CosAdd': 0.0,
'VectorCalc': 0.0,
'PairDir': 0.0,
'SpaceEvolution': 0.0,
'NearestNeighbors': 0.0
},
"jair": {
'3CosAdd': 0.0,
'VectorCalc': 0.0,
'PairDir': 0.0,
'SpaceEvolution': 0.0,
'NearestNeighbors': 0.0
},
}
for test_set in ['semeval', 'google-analogies', 'sat', 'msr', 'jair']:
for i, mm in enumerate(['3CosAdd', 'VectorCalc', 'PairDir', 'SpaceEvolution', 'NearestNeighbors']):
start = time.time()
simi_bench = WordAnalogyBenchmark(test_set, method=mm, multiprocessing=True)
res = simi_bench(nlp.model)
end = time.time()
print(test_set, "result = ", res.score, scores[test_set][mm], mm, "time=", end-start)
self.assertAlmostEqual(res.score, scores[test_set][mm], delta=0.01)
class TestWordOutlierBenchmark(unittest.TestCase):
def test_simple_benchmark(self):
nlp = SeaQuBeCompressLoader.load_compressed_model(model_path(), 'test_model')
for test_set in ['8-8-8', 'wikisem500']:
simi_bench = WordOutliersBenchmark(test_set)
res = simi_bench(nlp.model)
print(test_set, "result = ", res)
self.assertAlmostEqual(res.score, 0.0, delta=0.01)
class TestSemanticWordnet(unittest.TestCase):
def test_with_wordnet(self):
nlp = SeaQuBeCompressLoader.load_compressed_model(model_path(), 'test_model')
vocabs = nlp.vocab()
word_pairs, length = SemanticWordnetBenchmark.word_pairs_from_vocab_list(vocabs)
print("Pairs Length:", length)
self.assertEqual(length, 1378) # this is predictable, except model/corpus changes
swb = SemanticWordnetBenchmark(word_pairs, False, 2)
self.assertEqual(length, 1378)
results = swb(model=nlp.model).to_dict()
self.assertEqual(type(results), dict)
        self.assertAlmostEqual(results["score"], 0.182, delta=0.2) # this is not predictable, ...
class TestWordCentroidSimilarityBenchmark(unittest.TestCase):
def test_simple_benchmark(self):
# need to load a simple model, i.e. small dataset
#model = SmallModel()
#model.process(load_corpus())
#nlp = SeaQuBeNLPLoader.load_model_from_tin_can(model.get(), 'small_model')
nlp = SeaQuBeCompressLoader.load_compressed_model(model_path(), 'test_model')
bench_corpus = WordCentroidSimilarityBenchmark(load_corpus())
res = bench_corpus(nlp.model)
self.assertAlmostEqual(res.score, 0.295066, delta=0.01)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_24712 |
from flask import request, make_response, jsonify
from cerberus import Validator
from core.transactions import Transaction
from lib.request import is_json
from lib.db import session
from .. import blueprint
@blueprint.route('/', methods=['GET'])
def get_transactions():
delivered = request.args.get('delivered', 'true') == 'true'
query = session.query(Transaction)
if delivered is not None:
query = query.filter(Transaction.delivered == delivered)
transactions = query.all()
payload = [transaction.to_json() for transaction in transactions]
return make_response(jsonify(payload), 200)
@blueprint.route('/<uuid:code>/', methods=['GET'])
def get_sale(code):
transaction = Transaction.get(code)
if transaction is None:
return make_response(jsonify({'errors': ['No transaction found']}), 404)
payload = transaction.to_json()
return make_response(jsonify(payload), 200)
@blueprint.route('/', methods=['POST'])
@is_json
def save_transaction():
validator = Validator(Transaction.schema)
payload = request.get_json()
if not validator.validate(payload):
return make_response(jsonify(validator.errors), 400)
transaction = Transaction.from_json(payload)
session.add(transaction)
session.commit()
session.flush()
return make_response(jsonify(transaction.to_json()), 201)
@blueprint.route('/<uuid:code>/', methods=['PUT'])
@is_json
def update_transaction(code):
validator = Validator(Transaction.schema)
payload = request.get_json()
if not validator.validate(payload):
return make_response(jsonify(validator.errors), 400)
transaction = Transaction.get(code)
transaction = Transaction.from_json(payload, transaction)
session.add(transaction)
session.commit()
session.flush()
return make_response(jsonify(transaction.to_json()), 200)
@blueprint.route('/<uuid:code>/deliver/', methods=['PUT'])
def deliver_transaction(code):
transaction = Transaction.get(code)
transaction.delivered = True
session.add(transaction)
session.commit()
session.flush()
return make_response(jsonify(transaction.to_json()), 200)
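# Client usage sketch (illustrative; the base URL, blueprint prefix and payload
# fields are assumptions — the Transaction schema defines the real fields):
#
#   import requests
#   base = 'http://localhost:5000/transactions/'
#   created = requests.post(base, json={...}).json()              # save_transaction -> 201
#   requests.put('{}{}/deliver/'.format(base, created['code']))   # deliver_transaction
#   requests.get(base, params={'delivered': 'true'})              # get_transactions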
|
the-stack_106_24713 | # 3p
import rediscluster
import wrapt
# project
from ...pin import Pin
from ...ext import AppTypes, redis as redisx
from ...utils.wrappers import unwrap
from ..redis.patch import traced_execute_command, traced_pipeline
from ..redis.util import format_command_args
def patch():
"""Patch the instrumented methods
"""
if getattr(rediscluster, '_datadog_patch', False):
return
setattr(rediscluster, '_datadog_patch', True)
_w = wrapt.wrap_function_wrapper
_w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)
_w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)
_w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)
Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP, app_type=AppTypes.db).onto(rediscluster.StrictRedisCluster)
def unpatch():
if getattr(rediscluster, '_datadog_patch', False):
setattr(rediscluster, '_datadog_patch', False)
unwrap(rediscluster.StrictRedisCluster, 'execute_command')
unwrap(rediscluster.StrictRedisCluster, 'pipeline')
unwrap(rediscluster.StrictClusterPipeline, 'execute')
#
# tracing functions
#
def traced_execute_pipeline(func, instance, args, kwargs):
pin = Pin.get_from(instance)
if not pin or not pin.enabled():
return func(*args, **kwargs)
cmds = [format_command_args(c.args) for c in instance.command_stack]
resource = '\n'.join(cmds)
tracer = pin.tracer
with tracer.trace(redisx.CMD, resource=resource, service=pin.service) as s:
s.span_type = redisx.TYPE
s.set_tag(redisx.RAWCMD, resource)
s.set_metric(redisx.PIPELINE_LEN, len(instance.command_stack))
return func(*args, **kwargs)
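# Usage sketch (illustrative; the cluster nodes and service name are assumptions):
#
#   import rediscluster
#   from ddtrace import Pin, patch
#
#   patch(rediscluster=True)  # applies the wrappers defined in this module
#   client = rediscluster.StrictRedisCluster(
#       startup_nodes=[{'host': '127.0.0.1', 'port': 7000}])
#   Pin.override(client, service='my-redis-cluster')
#   client.get('key')  # traced via traced_execute_command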
|
the-stack_106_24715 | import os
import math
from affine import Affine
import pytest
import numpy as np
from distancerasters import DistanceRaster
from distancerasters.utils import calc_haversine_distance
@pytest.fixture
def example_raster_array():
arr = [[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
return np.array(arr)
@pytest.fixture
def example_affine():
return Affine(1.5, 1.0, 0.0, 1.0, 1.5, 1.0)
@pytest.fixture
def example_path():
return "tests/testdata/build_distance_array_raster"
def test_build_distance_array(example_raster_array, example_affine, example_path):
built_array = DistanceRaster(example_raster_array).dist_array
# [1][1] was 1 in the passed array
# The distance should be 0
assert built_array[1][1] == 0
# [1][2] was a single unit away from two 1 elements
# This distance should be 1
assert built_array[1][2] == 1
assert built_array[1][3] == math.sqrt(2)
assert built_array[0][3] == math.sqrt(5)
def test_build_distance_array_output(example_raster_array, example_affine, example_path):
# Delete any previous test export, if it exists
if os.path.isfile(example_path):
os.remove(example_path)
built_array = DistanceRaster(
example_raster_array, affine=example_affine, output_path=example_path
)
# export_raster should have been called, and wrote output to example_path
# perhaps we should more rigorously check if the exported raster is correct?
assert os.path.exists(example_path)
def test_build_distance_array_with_affine(example_raster_array, example_affine):
built_array = DistanceRaster(example_raster_array, affine=example_affine).dist_array
# Testing the same array elements as the last function did
assert built_array[1][1] == 0
assert built_array[0][1] == 1.5 * 111.321 * 1000
assert (
built_array[1][3]
== calc_haversine_distance((5.25, -1.25), (3.75, -2.75)) * 1000
)
assert (
built_array[0][3] == calc_haversine_distance((5.25, 0.25), (2.25, -1.25)) * 1000
)
def test_bad_build_distance_array(example_raster_array):
with pytest.raises(TypeError):
# Pass DistanceRaster a 2D list
DistanceRaster([[0, 1], [1, 0]])
with pytest.raises(TypeError):
# Pass DistanceRaster a bad affine
DistanceRaster(example_raster_array, affine="not_an_affine")
# perhaps this should be a more specific type of exception?
with pytest.raises(Exception):
# Pass DistanceRaster an output without an affine
DistanceRaster(example_raster_array, output_path="just_output")
# perhaps this should be a more specific type of exception?
with pytest.raises(Exception):
# Pass DistanceRaster an uncallable conditional
DistanceRaster(example_raster_array, conditional="not_a_function")
|
the-stack_106_24717 | from __future__ import division
from pylab import *
from mandelbulb import mandelbulb, pow3d, biaxial_julia, pow_quaternion_inplace, buddhabulb
from shapes import tetrahedron, cube, merkaba
from util import generate_mesh_slices, threaded_anti_alias
from density import illuminate_and_absorb
import numpy as np
from threading import Thread, Lock
def make_picture_frame(rgb, dither=1.0/256.0):
if dither:
rgb = [channel + random(channel.shape)*dither for channel in rgb]
frame = stack(rgb, axis=-1)
frame = clip(frame, 0.0, 1.0)
return frame
if __name__ == '__main__':
scale = 10
depth = 1<<8
theta = 0.5
phi = 0.2
gamma = 0.1
zoom = 0
anti_aliasing = 1
width = 108*scale
height = 108*scale
num_samples = 1 << 32
du = 1.0 / depth
sample_scale = (scale*anti_aliasing)**2 * (num_samples * 2**-20)**-1
def cgen(cx, cy, cz, w):
def gen(n):
x = randn(n)*w + cx
y = randn(n)*w + cy
z = randn(n)*w + cz
return x, y, z
return gen
w = 0.2
exposures = [(9, 10, cgen(0.5, 0.92, 0.15, w)), (10, 11, cgen(0.5, 0.95, 0.2, w)), (11, 12, cgen(0.5, 0.9, 0.1, w))]
def color_map(exposures):
e = exposures[0][0]*0.0
result = array([e, e, e])
result[0] = 0.04
result[1] = 0.05
result[2] = 0.045
for k in range(depth):
a = exposures[0][k] * sample_scale
b = exposures[1][k] * sample_scale
c = exposures[2][k] * sample_scale
illumination = array([0.9*a*a, 0.5*a**1.5, 0.4*a])*0.7
illumination += array([0.2*b, 0.6*b, 0.2*sqrt(b)])
illumination += array([0.3*c, 0.7*sqrt(c), 0.8*c**0.75])
result += 3*illumination*du
absorption = array([a*0.8, a*0.3, a*0.4])
absorption += array([b*0.7, b*0.8, b*0.2])
absorption += array([c*0.5, c*0.4, c*0.9])
result *= exp(-2*absorption*du)
return result
image = buddhabulb(width, height, depth, 0.5, 0.7, 0, zoom, theta, phi, gamma, 8, 8, 8, num_samples, exposures, color_map, anti_aliasing=anti_aliasing)
imsave("/tmp/buddhabulb.png", make_picture_frame(image))
|
the-stack_106_24719 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 23 16:07:41 2019
@author: idswx
"""
import numpy as np
class attackerConstraint:
"""
Create a class of attacker with constraint
"""
def __init__(self, i, j, d, m, n):
"""
        Arguments: (i,j) - attacker is at location (i, j)
d - allowed distance to move. Attacker can move within range (i-d,j-d) to (i+d,j+d)
(m,n) 2-D space A.shape = (m,n)
"""
self.i = i
self.j = j
self.d = d
self.m = m
self.n = n
def allowed_moves(self):
i2D,j2D = np.unravel_index(self.j, (26, 34))[0], np.unravel_index(self.j, (26, 34))[1]
r = [[x, y] for x in range(i2D-self.d, i2D+self.d+1) \
for y in range(j2D-self.d, j2D+self.d+1) \
if ((x >= 0) & (x < self.m) & (y >= 0) & (y < self.n))]
# print(r)
x, y = np.array(r).transpose()
# print("self ijdmn: ", self.i, self.j, self.d, self.m, self.n)
# print("x range: ", self.i-self.d, self.i+self.d+1)
# print("y range: ", self.j-self.d, self.j+self.d+1)
# print("x: ", x)
# print("y: ", y)
# print("r is : ", r)
x, y = np.array(r).transpose()
return x, y
def strategyGUASconstraint(self, B):
"""
Run GUAS with constraint
Arguments: B-a matrix(if 2 dim) of pmf
Return: Attacker's belief by GUAS in the grid of allowed moves
"""
x, y = self.allowed_moves()
ravel_xy = np.ravel_multi_index(np.array([x, y]), (self.m,self.n))
#print("x: ", x)
#print("y: ", y)
#return y[np.argmax(B[x,y])]
#return np.ravel_multi_index([x, y], (self.m,self.n))
return ravel_xy[np.argmax(B[0,ravel_xy])] |
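# Usage sketch (added illustration): the 26x34 grid matches the hard-coded
# unravel shape in allowed_moves, and B is a 1 x (m*n) belief row vector;
# the concrete numbers below are arbitrary example values.
if __name__ == "__main__":
    m, n = 26, 34
    attacker = attackerConstraint(i=0, j=100, d=2, m=m, n=n)
    xs, ys = attacker.allowed_moves()
    B = np.random.rand(1, m * n)
    B /= B.sum()
    print("candidate cells:", list(zip(xs, ys)))
    print("GUAS pick (flat index):", attacker.strategyGUASconstraint(B))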
the-stack_106_24720 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.util import parse_url
from urllib3.util import Timeout as TimeoutSauce
from urllib3.util.retry import Retry
from urllib3.exceptions import ClosedPoolError
from urllib3.exceptions import ConnectTimeoutError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import NewConnectionError
from urllib3.exceptions import ProxyError as _ProxyError
from urllib3.exceptions import ProtocolError
from urllib3.exceptions import ReadTimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import ResponseError
from urllib3.exceptions import LocationValueError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
get_encoding_from_headers, prepend_scheme_if_needed,
get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema, InvalidProxyURL,
InvalidURL)
from .auth import _basic_auth_str
try:
from urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
Response = Response
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
if not cert_loc or not os.path.exists(cert_loc):
raise IOError("Could not find a suitable TLS CA certificate bundle, "
"invalid path: {}".format(cert_loc))
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
raise IOError("Could not find the TLS certificate file, "
"invalid path: {}".format(conn.cert_file))
if conn.key_file and not os.path.exists(conn.key_file):
raise IOError("Could not find the TLS key file, "
"invalid path: {}".format(conn.key_file))
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = self.Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL("Please check proxy URL. It is malformed"
" and could be missing the host.")
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
try:
conn = self.get_connection(request.url, proxies)
except LocationValueError as e:
raise InvalidURL(e, request=request)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
elif isinstance(timeout, TimeoutSauce):
pass
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout,
chunked=chunked
)
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
if isinstance(e.reason, _SSLError):
# This branch is for urllib3 v1.22 and later.
raise SSLError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
# This branch is for urllib3 versions earlier than v1.22
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
|
the-stack_106_24723 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The CenterNet meta architecture as described in the "Objects as Points" paper [1].
[1]: https://arxiv.org/abs/1904.07850
"""
import abc
import collections
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as cn_assigner
from object_detection.utils import shape_utils
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import tf_version
# Number of channels needed to predict size and offsets.
NUM_OFFSET_CHANNELS = 2
NUM_SIZE_CHANNELS = 2
# Error range for detecting peaks.
PEAK_EPSILON = 1e-6
class CenterNetFeatureExtractor(tf.keras.Model):
"""Base class for feature extractors for the CenterNet meta architecture.
Child classes are expected to override the _output_model property which will
return 1 or more tensors predicted by the feature extractor.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name=None, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes a CenterNet feature extractor.
Args:
name: str, the name used for the underlying keras model.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it. If None or empty, we use 0s.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
If None or empty, we use 1s.
bgr_ordering: bool, if set will change the channel ordering to be in the
[blue, red, green] order.
"""
super(CenterNetFeatureExtractor, self).__init__(name=name)
if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test
channel_means = [0., 0., 0.]
if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test
channel_stds = [1., 1., 1.]
self._channel_means = channel_means
self._channel_stds = channel_stds
self._bgr_ordering = bgr_ordering
def preprocess(self, inputs):
"""Converts a batch of unscaled images to a scale suitable for the model.
This method normalizes the image using the given `channel_means` and
`channels_stds` values at initialization time while optionally flipping
the channel order if `bgr_ordering` is set.
Args:
inputs: a [batch, height, width, channels] float32 tensor
Returns:
outputs: a [batch, height, width, channels] float32 tensor
"""
if self._bgr_ordering:
red, green, blue = tf.unstack(inputs, axis=3)
inputs = tf.stack([blue, green, red], axis=3)
channel_means = tf.reshape(tf.constant(self._channel_means),
[1, 1, 1, -1])
channel_stds = tf.reshape(tf.constant(self._channel_stds),
[1, 1, 1, -1])
return (inputs - channel_means)/channel_stds
@property
@abc.abstractmethod
def out_stride(self):
"""The stride in the output image of the network."""
pass
@property
@abc.abstractmethod
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
pass
@property
@abc.abstractmethod
def supported_sub_model_types(self):
"""Valid sub model types supported by the get_sub_model function."""
pass
@abc.abstractmethod
def get_sub_model(self, sub_model_type):
"""Returns the underlying keras model for the given sub_model_type.
This function is useful when we only want to get a subset of weights to
be restored from a checkpoint.
Args:
sub_model_type: string, the type of sub model. Currently, CenterNet
feature extractors support 'detection' and 'classification'.
"""
pass
def make_prediction_net(num_out_channels, kernel_size=3, num_filters=256,
bias_fill=None, use_depthwise=False, name=None):
"""Creates a network to predict the given number of output channels.
This function is intended to make the prediction heads for the CenterNet
meta architecture.
Args:
num_out_channels: Number of output channels.
kernel_size: The size of the conv kernel in the intermediate layer
num_filters: The number of filters in the intermediate conv layer.
bias_fill: If not None, is used to initialize the bias in the final conv
layer.
use_depthwise: If true, use SeparableConv2D to construct the Sequential
layers instead of Conv2D.
name: Optional name for the prediction net.
Returns:
net: A keras module which when called on an input tensor of size
[batch_size, height, width, num_in_channels] returns an output
of size [batch_size, height, width, num_out_channels]
"""
if use_depthwise:
conv_fn = tf.keras.layers.SeparableConv2D
else:
conv_fn = tf.keras.layers.Conv2D
# We name the convolution operations explicitly because Keras, by default,
# uses different names during training and evaluation. By setting the names
# here, we avoid unexpected pipeline breakage in TF1.
out_conv = tf.keras.layers.Conv2D(
num_out_channels,
kernel_size=1,
name='conv1' if tf_version.is_tf1() else None)
if bias_fill is not None:
out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)
net = tf.keras.Sequential([
conv_fn(
num_filters,
kernel_size=kernel_size,
padding='same',
name='conv2' if tf_version.is_tf1() else None),
tf.keras.layers.ReLU(), out_conv
],
name=name)
return net
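# Shape sketch (illustrative values, e.g. a 90-class center heatmap head):
#
#   head = make_prediction_net(num_out_channels=90, bias_fill=-2.19)
#   logits = head(tf.zeros([2, 128, 128, 64]))  # -> [2, 128, 128, 90]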
def _to_float32(x):
return tf.cast(x, tf.float32)
def _get_shape(tensor, num_dims):
assert len(tensor.shape.as_list()) == num_dims
return shape_utils.combined_static_and_dynamic_shape(tensor)
def _flatten_spatial_dimensions(batch_images):
batch_size, height, width, channels = _get_shape(batch_images, 4)
return tf.reshape(batch_images, [batch_size, height * width,
channels])
def _multi_range(limit,
value_repetitions=1,
range_repetitions=1,
dtype=tf.int32):
"""Creates a sequence with optional value duplication and range repetition.
As an example (see the Args section for more details),
_multi_range(limit=2, value_repetitions=3, range_repetitions=4) returns:
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]
Args:
limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive.
value_repetitions: Integer. The number of times a value in the sequence is
repeated. With value_repetitions=3, the result is [0, 0, 0, 1, 1, 1, ..].
range_repetitions: Integer. The number of times the range is repeated. With
range_repetitions=3, the result is [0, 1, 2, .., 0, 1, 2, ..].
dtype: The type of the elements of the resulting tensor.
Returns:
A 1-D tensor of type `dtype` and size
[`limit` * `value_repetitions` * `range_repetitions`] that contains the
specified range with given repetitions.
"""
return tf.reshape(
tf.tile(
tf.expand_dims(tf.range(limit, dtype=dtype), axis=-1),
multiples=[range_repetitions, value_repetitions]), [-1])
def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100,
per_channel=False):
"""Returns the top k scores and their locations in a feature map.
Given a feature map, the top k values (based on activation) are returned. If
`per_channel` is True, the top k values **per channel** are returned. Note
that when k equals to 1, ths function uses reduce_max and argmax instead of
top_k to make the logics more efficient.
The `max_pool_kernel_size` argument allows for selecting local peaks in a
region. This filtering is done per channel, so nothing prevents two values at
the same location to be returned.
Args:
feature_map: [batch, height, width, channels] float32 feature map.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood (independently for each channel).
For example, to make sure no two neighboring values (in the same channel)
are returned, set max_pool_kernel_size=3. If None or 1, will not apply max
pooling.
k: The number of highest scoring locations to return.
per_channel: If True, will return the top k scores and locations per
feature map channel. If False, the top k across the entire feature map
(height x width x channels) are returned.
Returns:
Tuple of
scores: A [batch, N] float32 tensor with scores from the feature map in
descending order. If per_channel is False, N = k. Otherwise,
N = k * channels, and the first k elements correspond to channel 0, the
second k correspond to channel 1, etc.
y_indices: A [batch, N] int tensor with y indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
x_indices: A [batch, N] int tensor with x indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
channel_indices: A [batch, N] int tensor with channel indices of the top k
feature map locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
"""
if not max_pool_kernel_size or max_pool_kernel_size == 1:
feature_map_peaks = feature_map
else:
feature_map_max_pool = tf.nn.max_pool(
feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME')
feature_map_peak_mask = tf.math.abs(
feature_map - feature_map_max_pool) < PEAK_EPSILON
# Zero out everything that is not a peak.
feature_map_peaks = (
feature_map * _to_float32(feature_map_peak_mask))
batch_size, _, width, num_channels = _get_shape(feature_map, 4)
if per_channel:
if k == 1:
feature_map_flattened = tf.reshape(
feature_map_peaks, [batch_size, -1, num_channels])
scores = tf.math.reduce_max(feature_map_flattened, axis=1)
peak_flat_indices = tf.math.argmax(
feature_map_flattened, axis=1, output_type=tf.dtypes.int32)
peak_flat_indices = tf.expand_dims(peak_flat_indices, axis=-1)
else:
# Perform top k over batch and channels.
feature_map_peaks_transposed = tf.transpose(feature_map_peaks,
perm=[0, 3, 1, 2])
feature_map_peaks_transposed = tf.reshape(
feature_map_peaks_transposed, [batch_size, num_channels, -1])
scores, peak_flat_indices = tf.math.top_k(
feature_map_peaks_transposed, k=k)
# Convert the indices such that they represent the location in the full
# (flattened) feature map of size [batch, height * width * channels].
channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis]
peak_flat_indices = num_channels * peak_flat_indices + channel_idx
scores = tf.reshape(scores, [batch_size, -1])
peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1])
else:
if k == 1:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])
scores = tf.math.reduce_max(feature_map_peaks_flat, axis=1, keepdims=True)
peak_flat_indices = tf.expand_dims(tf.math.argmax(
feature_map_peaks_flat, axis=1, output_type=tf.dtypes.int32), axis=-1)
else:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat, k=k)
# Get x, y and channel indices corresponding to the top indices in the flat
# array.
y_indices, x_indices, channel_indices = (
row_col_channel_indices_from_flattened_indices(
peak_flat_indices, width, num_channels))
return scores, y_indices, x_indices, channel_indices
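# Shape sketch (illustrative; a 4-channel heatmap with k=100):
#
#   scores, ys, xs, channels = top_k_feature_map_locations(
#       tf.random.uniform([2, 128, 128, 4]), k=100)
#   # each output is [2, 100]; with per_channel=True they become [2, 400]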
def prediction_tensors_to_boxes(detection_scores, y_indices, x_indices,
channel_indices, height_width_predictions,
offset_predictions):
"""Converts CenterNet class-center, offset and size predictions to boxes.
Args:
detection_scores: A [batch, num_boxes] float32 tensor with detection
scores in range [0, 1].
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
channel_indices: A [batch, num_boxes] int32 tensor with channel indices
corresponding to object classes.
height_width_predictions: A float tensor of shape [batch_size, height,
width, 2] representing the height and width of a box centered at each
pixel.
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box centered at each pixel. This
helps reduce the error from downsampling.
Returns:
detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the
the raw bounding box coordinates of boxes.
detection_classes: An integer tensor of shape [batch_size, num_boxes]
indicating the predicted class for each box.
detection_scores: A float tensor of shape [batch_size, num_boxes] indicating
the score for each box.
num_detections: An integer tensor of shape [batch_size,] indicating the
number of boxes detected for each sample in the batch.
"""
batch_size, num_boxes = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_boxes),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
new_height_width = tf.gather_nd(height_width_predictions, combined_indices)
new_height_width = tf.reshape(new_height_width, [batch_size, num_boxes, 2])
new_offsets = tf.gather_nd(offset_predictions, combined_indices)
offsets = tf.reshape(new_offsets, [batch_size, num_boxes, 2])
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
height_width = tf.maximum(new_height_width, 0)
heights, widths = tf.unstack(height_width, axis=2)
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
detection_classes = channel_indices
num_detections = tf.reduce_sum(tf.to_int32(detection_scores > 0), axis=1)
boxes = tf.stack([y_indices + y_offsets - heights / 2.0,
x_indices + x_offsets - widths / 2.0,
y_indices + y_offsets + heights / 2.0,
x_indices + x_offsets + widths / 2.0], axis=2)
return boxes, detection_classes, detection_scores, num_detections
def prediction_tensors_to_temporal_offsets(
y_indices, x_indices, offset_predictions):
"""Converts CenterNet temporal offset map predictions to batched format.
This function is similar to the box offset conversion function, as both
temporal offsets and box offsets are size-2 vectors.
Args:
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box's center across adjacent frames.
Returns:
offsets: A tensor of shape [batch_size, num_boxes, 2] holding the
the object temporal offsets of (y, x) dimensions.
"""
batch_size, num_boxes = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_boxes),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
new_offsets = tf.gather_nd(offset_predictions, combined_indices)
offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1])
return offsets
def prediction_tensors_to_keypoint_candidates(keypoint_heatmap_predictions,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.1,
max_pool_kernel_size=1,
max_candidates=20,
keypoint_depths=None):
"""Convert keypoint heatmap predictions and offsets to keypoint candidates.
Args:
keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,
width, num_keypoints] representing the per-keypoint heatmaps.
keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,
width, 2] (or [batch_size, height, width, 2 * num_keypoints] if
'per_keypoint_offset' is set True) representing the per-keypoint offsets.
keypoint_score_threshold: float, the threshold for considering a keypoint a
candidate.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood. For example, to make sure no two
neighboring values for the same keypoint are returned, set
max_pool_kernel_size=3. If None or 1, will not apply any local filtering.
max_candidates: integer, maximum number of keypoint candidates per keypoint
type.
keypoint_depths: (optional) A float tensor of shape [batch_size, height,
width, 1] (or [batch_size, height, width, num_keypoints] if
'per_keypoint_depth' is set True) representing the per-keypoint depths.
Returns:
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the
location of keypoint candidates in [y, x] format (expressed in absolute
coordinates in the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] with the scores for each
keypoint candidate. The scores come directly from the heatmap predictions.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] with the number of candidates for each
keypoint type, as it's possible to filter some candidates due to the score
threshold.
depth_candidates: A tensor of shape [batch_size, max_candidates,
num_keypoints] representing the estimated depth of each keypoint
candidate. Returns None if the input keypoint_depths is None.
"""
batch_size, _, _, num_keypoints = _get_shape(keypoint_heatmap_predictions, 4)
# Get x, y and channel indices corresponding to the top indices in the
# keypoint heatmap predictions.
# Note that the top k candidates are produced for **each keypoint type**.
# Might be worth eventually trying top k in the feature map, independent of
# the keypoint type.
keypoint_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(keypoint_heatmap_predictions,
max_pool_kernel_size=max_pool_kernel_size,
k=max_candidates,
per_channel=True))
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
_, num_indices = _get_shape(y_indices, 2)
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets,
combined_indices)
selected_offsets = tf.reshape(selected_offsets_flat,
[batch_size, num_indices, -1])
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
_, _, num_channels = _get_shape(selected_offsets, 3)
if num_channels > 2:
# Offsets are per keypoint and the last dimension of selected_offsets
# contains all those offsets, so reshape the offsets to make sure that the
# last dimension contains (y_offset, x_offset) for a single keypoint.
reshaped_offsets = tf.reshape(selected_offsets,
[batch_size, num_indices, -1, 2])
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that. In this
# case, channel_indices indicates which keypoint to use the offset from.
channel_combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
_multi_range(num_indices, range_repetitions=batch_size),
tf.reshape(channel_indices, [-1])
], axis=1)
offsets = tf.gather_nd(reshaped_offsets, channel_combined_indices)
offsets = tf.reshape(offsets, [batch_size, num_indices, -1])
else:
offsets = selected_offsets
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
keypoint_candidates = tf.stack([y_indices + y_offsets,
x_indices + x_offsets], axis=2)
keypoint_candidates = tf.reshape(
keypoint_candidates,
[batch_size, num_keypoints, max_candidates, 2])
keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3])
keypoint_scores = tf.reshape(
keypoint_scores,
[batch_size, num_keypoints, max_candidates])
keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1])
num_candidates = tf.reduce_sum(
tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1)
depth_candidates = None
if keypoint_depths is not None:
selected_depth_flat = tf.gather_nd(keypoint_depths, combined_indices)
selected_depth = tf.reshape(selected_depth_flat,
[batch_size, num_indices, -1])
_, _, num_depth_channels = _get_shape(selected_depth, 3)
if num_depth_channels > 1:
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
_multi_range(num_indices, range_repetitions=batch_size),
tf.reshape(channel_indices, [-1])
], axis=1)
depth = tf.gather_nd(selected_depth, combined_indices)
depth = tf.reshape(depth, [batch_size, num_indices, -1])
else:
depth = selected_depth
depth_candidates = tf.reshape(depth,
[batch_size, num_keypoints, max_candidates])
depth_candidates = tf.transpose(depth_candidates, [0, 2, 1])
return keypoint_candidates, keypoint_scores, num_candidates, depth_candidates
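# Shape walk-through (illustrative): with batch_size=2, num_keypoints=17 and
# max_candidates=20, the per-channel top-k above yields indices of shape
# [2, 340]; after adding the gathered offsets, the candidates are reshaped to
# [2, 17, 20, 2] and transposed to the returned [2, 20, 17, 2].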
def prediction_to_single_instance_keypoints(object_heatmap,
keypoint_heatmap,
keypoint_offset,
keypoint_regression,
stride,
object_center_std_dev,
keypoint_std_dev,
kp_params,
keypoint_depths=None):
"""Postprocess function to predict single instance keypoints.
This is a simplified postprocessing function based on the assumption that
there is only one instance in the image. If there are multiple instances in
the image, the model prefers to predict the one that is closest to the image
center. Here is a high-level description of what this function does:
1) Object heatmap re-weighted by image center Gaussian is used to determine
the instance center.
2) Regressed keypoint locations are retrieved from the instance center. The
Gaussian kernel is applied to the regressed keypoint locations to
re-weight the keypoint heatmap. This is to select the keypoints that are
associated with the center instance without using top_k op.
3) The keypoint locations are computed by the re-weighted keypoint heatmap
and the keypoint offset.
Args:
object_heatmap: A float tensor of shape [1, height, width, 1] representing
the heatmap of the class.
keypoint_heatmap: A float tensor of shape [1, height, width, num_keypoints]
representing the per-keypoint heatmaps.
keypoint_offset: A float tensor of shape [1, height, width, 2] (or [1,
height, width, 2 * num_keypoints] if 'per_keypoint_offset' is set True)
representing the per-keypoint offsets.
keypoint_regression: A float tensor of shape [1, height, width, 2 *
num_keypoints] representing the joint regression prediction.
stride: The stride in the output space.
object_center_std_dev: The standard deviation of the Gaussian mask which is
applied to the object_heatmap. The goal is to upweight the instance that
is closer to the image center. Expressed in units of input image pixels.
keypoint_std_dev: The standard deviation of the Gaussian masks which are
applied to the keypoint_heatmap based on the regressed joint location. It
is used to upweight the keypoint joints that belong to the targeted
instance. If keypoint_std_dev contains 1 element, all keypoint joints will
share the same value. Otherwise, it must contain num_keypoints elements,
representing the standard deviation corresponding to each joint.
kp_params: A `KeypointEstimationParams` object with parameters for a single
keypoint class.
keypoint_depths: (optional) A float tensor of shape [batch_size, height,
width, 1] (or [batch_size, height, width, num_keypoints] if
'per_keypoint_depth' is set True) representing the per-keypoint depths.
Returns:
A tuple of three tensors:
keypoint_candidates: A float tensor with shape [1, 1, num_keypoints, 2]
representing the yx-coordinates of the keypoints in the output feature
map space.
keypoint_scores: A float tensor with shape [1, 1, num_keypoints]
representing the keypoint prediction scores.
keypoint_depths: A float tensor with shape [1, 1, num_keypoints]
representing the estimated keypoint depths. Returns None if the input
keypoint_depths is None.
Raises:
ValueError: if the input keypoint_std_dev doesn't have a valid number of
elements (1 or num_keypoints).
"""
num_keypoints = len(kp_params.keypoint_std_dev)
batch_size, height, width, _ = _get_shape(keypoint_heatmap, 4)
# Apply the Gaussian mask to the image center.
image_center_y = tf.convert_to_tensor([0.5 * height], dtype=tf.float32)
image_center_x = tf.convert_to_tensor([0.5 * width], dtype=tf.float32)
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height, width)
# Mask shape: [1, height, width, 1]
object_mask = tf.expand_dims(
ta_utils.coordinates_to_heatmap(y_grid, x_grid, image_center_y,
image_center_x,
object_center_std_dev / stride,
tf.one_hot(tf.range(1), depth=1)), axis=0)
object_heatmap = tf.math.multiply(object_heatmap, object_mask)
# Pick the highest score and location of the weighted object heatmap.
_, y_indices, x_indices, _ = (
top_k_feature_map_locations(
object_heatmap, max_pool_kernel_size=1, k=1, per_channel=True))
_, num_indices = _get_shape(y_indices, 2)
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
# Select the regression vectors from the object center.
selected_regression_flat = tf.gather_nd(keypoint_regression, combined_indices)
# shape: [num_keypoints, 2]
regression_offsets = tf.reshape(selected_regression_flat, [num_keypoints, -1])
(y_reg, x_reg) = tf.unstack(regression_offsets, axis=1)
y_regressed = tf.cast(y_indices, dtype=tf.float32) + y_reg
x_regressed = tf.cast(x_indices, dtype=tf.float32) + x_reg
# Prepare and apply the keypoint heatmap masks.
keypoint_std_dev = [x / stride for x in keypoint_std_dev]
if len(keypoint_std_dev) == 1:
std_dev = tf.convert_to_tensor(
keypoint_std_dev * num_keypoints, dtype=tf.float32)
elif len(keypoint_std_dev) == num_keypoints:
std_dev = tf.convert_to_tensor(
keypoint_std_dev, dtype=tf.float32)
else:
raise ValueError('keypoint_std_dev needs to have length either '
'equal to 1 or num_keypoints.')
channel_onehot = tf.one_hot(tf.range(num_keypoints), depth=num_keypoints)
keypoint_mask = tf.expand_dims(
ta_utils.coordinates_to_heatmap(y_grid, x_grid, y_regressed, x_regressed,
std_dev, channel_onehot), axis=0)
keypoint_predictions = tf.math.multiply(keypoint_heatmap, keypoint_mask)
# Get the keypoint locations/scores:
# keypoint_candidates: [1, 1, num_keypoints, 2]
# keypoint_scores: [1, 1, num_keypoints]
# depth_candidates: [1, 1, num_keypoints]
(keypoint_candidates, keypoint_scores, _,
depth_candidates) = prediction_tensors_to_keypoint_candidates(
keypoint_predictions,
keypoint_offset,
keypoint_score_threshold=kp_params.keypoint_candidate_score_threshold,
max_pool_kernel_size=kp_params.peak_max_pool_kernel_size,
max_candidates=1,
keypoint_depths=keypoint_depths)
return keypoint_candidates, keypoint_scores, depth_candidates
def regressed_keypoints_at_object_centers(regressed_keypoint_predictions,
y_indices, x_indices):
"""Returns the regressed keypoints at specified object centers.
The original keypoint predictions are regressed relative to each feature map
location. The returned keypoints are expressed in absolute coordinates in the
output frame (i.e. the center offsets are added to each individual regressed
set of keypoints).
Args:
regressed_keypoint_predictions: A float tensor of shape
[batch_size, height, width, 2 * num_keypoints] holding regressed
keypoints. The last dimension has keypoint coordinates ordered as follows:
[y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where
regressed keypoints are gathered at the provided locations, and converted
to absolute coordinates in the output coordinate frame.
"""
batch_size, num_instances = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_instances),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
relative_regressed_keypoints = tf.gather_nd(regressed_keypoint_predictions,
combined_indices)
relative_regressed_keypoints = tf.reshape(
relative_regressed_keypoints,
[batch_size, num_instances, -1, 2])
relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack(
relative_regressed_keypoints, axis=3)
y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1))
x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1))
absolute_regressed_keypoints = tf.stack(
[y_indices + relative_regressed_keypoints_y,
x_indices + relative_regressed_keypoints_x],
axis=3)
return tf.reshape(absolute_regressed_keypoints,
[batch_size, num_instances, -1])
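# Worked example (illustrative only): if an object center falls at output cell
# (y=10, x=20) and the regressed relative keypoint offsets stored at that cell
# are (+2.0, -3.0) and (-1.5, +4.0), the returned absolute keypoints are
# (12.0, 17.0) and (8.5, 24.0), flattened per instance as
# [y0, x0, y1, x1] = [12.0, 17.0, 8.5, 24.0].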
def refine_keypoints(regressed_keypoints,
keypoint_candidates,
keypoint_scores,
num_keypoint_candidates,
bboxes=None,
unmatched_keypoint_score=0.1,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance',
score_distance_offset=1e-6,
keypoint_depth_candidates=None):
"""Refines regressed keypoints by snapping to the nearest candidate keypoints.
The initial regressed keypoints represent a full set of keypoints regressed
from the centers of the objects. The keypoint candidates are estimated
independently from heatmaps, and are not associated with any object instances.
This function refines the regressed keypoints by "snapping" to the
nearest/highest score/highest score-distance ratio (depending on the
candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose").
If no candidates are nearby, the regressed keypoint remains unchanged.
In order to snap a regressed keypoint to a candidate keypoint, the following
must be satisfied:
- the candidate keypoint must be of the same type as the regressed keypoint
- the candidate keypoint must not lie outside the predicted boxes (or the
boxes which enclose the regressed keypoints for the instance if `bboxes` is
not provided). Note that the box is scaled by `box_scale` in height and
width, to provide some margin around the keypoints.
- the distance to the closest candidate keypoint cannot exceed
candidate_search_scale * max(height, width), where height and width refer to
the bounding box for the instance.
Note that the same candidate keypoint is allowed to snap to regressed
keypoints in different instances.
Args:
regressed_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the initial regressed
keypoints.
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the location of
keypoint candidates in [y, x] format (expressed in absolute coordinates in
the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the scores for
keypoint candidates.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] indicating the number of valid candidates for
each keypoint type, as there may be padding (dim 1) of
`keypoint_candidates` and `keypoint_scores`.
bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted
bounding boxes for each instance, expressed in the output coordinate
frame. If not provided, boxes will be computed from regressed keypoints.
unmatched_keypoint_score: float, the default score to use for regressed
keypoints that are not successfully snapped to a nearby candidate.
box_scale: float, the multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints) for
an instance. This scale is typically larger than 1.0 when not providing
`bboxes`.
candidate_search_scale: float, the scale parameter that multiplies the
largest dimension of a bounding box. The resulting distance becomes a
search radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: A string as one of ['min_distance',
'score_distance_ratio'] indicating how to select the candidate. If an
invalid value is provided, a ValueError will be raised.
score_distance_offset: The distance offset to apply in the denominator when
candidate_ranking_mode is 'score_distance_ratio'. The metric to maximize
in this scenario is score / (distance + score_distance_offset). Larger
values of score_distance_offset make the keypoint score gain more relative
importance.
keypoint_depth_candidates: (optional) A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the depths for
keypoint candidates.
Returns:
A tuple with:
refined_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the final, refined
keypoints.
refined_scores: A float tensor of shape
[batch_size, num_instances, num_keypoints] with scores associated with all
instances and keypoints in `refined_keypoints`.
refined_depths: A float tensor of shape
[batch_size, num_instances, num_keypoints] with the refined keypoint
depths, or None if `keypoint_depth_candidates` is None.
Raises:
ValueError: if provided candidate_ranking_mode is not one of
['min_distance', 'score_distance_ratio']
"""
batch_size, num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(regressed_keypoints))
max_candidates = keypoint_candidates.shape[1]
# Replace all invalid (i.e. padded) keypoint candidates with NaN.
# This will prevent them from being considered.
range_tiled = tf.tile(
tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]),
[batch_size, 1, num_keypoints])
num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1),
[1, max_candidates, 1])
invalid_candidates = range_tiled >= num_candidates_tiled
nan_mask = tf.where(
invalid_candidates,
np.nan * tf.ones_like(invalid_candidates, dtype=tf.float32),
tf.ones_like(invalid_candidates, dtype=tf.float32))
keypoint_candidates_with_nans = tf.math.multiply(
keypoint_candidates, tf.expand_dims(nan_mask, -1))
# Pairwise squared distances between regressed keypoints and candidate
# keypoints (for a single keypoint type).
# Shape [batch_size, num_instances, 1, num_keypoints, 2].
regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints,
axis=2)
# Shape [batch_size, 1, max_candidates, num_keypoints, 2].
keypoint_candidates_expanded = tf.expand_dims(
keypoint_candidates_with_nans, axis=1)
# Use explicit tensor shape broadcasting (since the tensor dimensions are
# expanded to 5D) to make it tf.lite compatible.
regressed_keypoint_expanded = tf.tile(
regressed_keypoint_expanded, multiples=[1, 1, max_candidates, 1, 1])
keypoint_candidates_expanded = tf.tile(
keypoint_candidates_expanded, multiples=[1, num_instances, 1, 1, 1])
# Replace tf.math.squared_difference by "-" operator and tf.multiply ops since
# tf.lite convert doesn't support squared_difference with undetermined
# dimension.
diff = regressed_keypoint_expanded - keypoint_candidates_expanded
sqrd_distances = tf.math.reduce_sum(tf.multiply(diff, diff), axis=-1)
distances = tf.math.sqrt(sqrd_distances)
# Replace the NaNs with Infs to make sure the following reduce_min/argmin
# behaves properly.
distances = tf.where(
tf.math.is_nan(distances), np.inf * tf.ones_like(distances), distances)
# Determine the candidates that have the minimum distance to the regressed
# keypoints. Shape [batch_size, num_instances, num_keypoints].
min_distances = tf.math.reduce_min(distances, axis=2)
if candidate_ranking_mode == 'min_distance':
nearby_candidate_inds = tf.math.argmin(distances, axis=2)
elif candidate_ranking_mode == 'score_distance_ratio':
# tiled_keypoint_scores:
# Shape [batch_size, num_instances, max_candidates, num_keypoints].
tiled_keypoint_scores = tf.tile(
tf.expand_dims(keypoint_scores, axis=1),
multiples=[1, num_instances, 1, 1])
ranking_scores = tiled_keypoint_scores / (distances + score_distance_offset)
nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)
else:
raise ValueError('Not recognized candidate_ranking_mode: %s' %
candidate_ranking_mode)
# Gather the coordinates and scores corresponding to the closest candidates.
# Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and
# [batch_size, num_instances, num_keypoints], respectively.
(nearby_candidate_coords, nearby_candidate_scores,
nearby_candidate_depths) = (
_gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
nearby_candidate_inds,
keypoint_depth_candidates))
if bboxes is None:
# Create bboxes from regressed keypoints.
# Shape [batch_size * num_instances, 4].
regressed_keypoints_flattened = tf.reshape(
regressed_keypoints, [-1, num_keypoints, 2])
bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
regressed_keypoints_flattened)
else:
bboxes_flattened = tf.reshape(bboxes, [-1, 4])
# Scale the bounding boxes.
# Shape [batch_size, num_instances, 4].
boxlist = box_list.BoxList(bboxes_flattened)
boxlist_scaled = box_list_ops.scale_height_width(
boxlist, box_scale, box_scale)
bboxes_scaled = boxlist_scaled.get()
bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4])
# Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint.
# Shape [batch_size, num_instances, num_keypoints].
bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1])
ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3)
# Produce a mask that indicates whether the original regressed keypoint
# should be used instead of a candidate keypoint.
# Shape [batch_size, num_instances, num_keypoints].
search_radius = (
tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale)
mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) +
# Filter out the chosen candidate with score lower than unmatched
# keypoint score.
tf.cast(nearby_candidate_scores <
unmatched_keypoint_score, tf.int32) +
tf.cast(min_distances > search_radius, tf.int32))
mask = mask > 0
# Create refined keypoints where candidate keypoints replace original
# regressed keypoints if they are in the vicinity of the regressed keypoints.
# Shape [batch_size, num_instances, num_keypoints, 2].
refined_keypoints = tf.where(
tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]),
regressed_keypoints,
nearby_candidate_coords)
# Update keypoints scores. In the case where we use the original regressed
# keypoints, we use a default score of `unmatched_keypoint_score`.
# Shape [batch_size, num_instances, num_keypoints].
refined_scores = tf.where(
mask,
unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores),
nearby_candidate_scores)
refined_depths = None
if nearby_candidate_depths is not None:
refined_depths = tf.where(mask, tf.zeros_like(nearby_candidate_depths),
nearby_candidate_depths)
return refined_keypoints, refined_scores, refined_depths
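# Worked example (illustrative only): with the default box_scale=1.2 and
# candidate_search_scale=0.3, a regressed keypoint whose (scaled) instance box
# spans 40x60 output pixels only snaps to a candidate that (a) lies inside that
# box, (b) scores at least unmatched_keypoint_score, and (c) is within
# 0.3 * 60 = 18 output pixels; otherwise the regressed location is kept and its
# score is set to unmatched_keypoint_score.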
def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds,
num_total_keypoints):
"""Scatter keypoint elements into tensors with full keypoints dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
keypoint_inds: a list of integers that indicate the keypoint indices for
this specific keypoint class. These indices are used to scatter into
tensors that have a `num_total_keypoints` dimension.
num_total_keypoints: The total number of keypoints that this model predicts.
Returns:
A tuple with
keypoint_coords_padded: a
[batch_size, num_instances, num_total_keypoints,2] float32 tensor.
keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints]
float32 tensor.
"""
batch_size, num_instances, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])
kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_coords_transposed,
shape=[num_total_keypoints, batch_size, num_instances, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_scores_transposed,
shape=[num_total_keypoints, batch_size, num_instances])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])
return keypoint_coords_padded, keypoint_scores_padded
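# Worked example (illustrative only): if this keypoint class covers
# keypoint_inds=[0, 2] out of num_total_keypoints=4, the two input keypoint
# channels are scattered into channels 0 and 2 of the padded outputs, while
# channels 1 and 3 remain zero.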
def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds,
max_instances):
"""Scatter keypoint elements into tensors with full instance dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
instance_inds: a list of integers that indicate the instance indices for
these keypoints. These indices are used to scatter into tensors
that have a `max_instances` dimension.
max_instances: The maximum number of instances detected by the model.
Returns:
A tuple with
keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2]
float32 tensor.
keypoint_scores_padded: a [batch_size, max_instances, num_keypoints]
float32 tensor.
"""
batch_size, _, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])
instance_inds = tf.expand_dims(instance_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_coords_transposed,
shape=[max_instances, batch_size, num_keypoints, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_scores_transposed,
shape=[max_instances, batch_size, num_keypoints])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])
return keypoint_coords_padded, keypoint_scores_padded
def _gather_candidates_at_indices(keypoint_candidates,
keypoint_scores,
indices,
keypoint_depth_candidates=None):
"""Gathers keypoint candidate coordinates and scores at indices.
Args:
keypoint_candidates: a float tensor of shape [batch_size, max_candidates,
num_keypoints, 2] with candidate coordinates.
keypoint_scores: a float tensor of shape [batch_size, max_candidates,
num_keypoints] with keypoint scores.
indices: an integer tensor of shape [batch_size, num_indices, num_keypoints]
with indices.
keypoint_depth_candidates: (optional) a float tensor of shape [batch_size,
max_candidates, num_keypoints] with keypoint depths.
Returns:
A tuple with
gathered_keypoint_candidates: a float tensor of shape [batch_size,
num_indices, num_keypoints, 2] with gathered coordinates.
gathered_keypoint_scores: a float tensor of shape [batch_size,
num_indices, num_keypoints].
gathered_keypoint_depths: a float tensor of shape [batch_size,
num_indices, num_keypoints]. Returns None if the input
keypoint_depth_candidates is None.
"""
batch_size, num_indices, num_keypoints = _get_shape(indices, 3)
# Transpose tensors so that all batch dimensions are up front.
keypoint_candidates_transposed = tf.transpose(keypoint_candidates,
[0, 2, 1, 3])
keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1])
nearby_candidate_inds_transposed = tf.transpose(indices, [0, 2, 1])
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(
batch_size,
value_repetitions=num_keypoints * num_indices,
dtype=tf.int64),
_multi_range(
num_keypoints,
value_repetitions=num_indices,
range_repetitions=batch_size,
dtype=tf.int64),
tf.reshape(nearby_candidate_inds_transposed, [-1])
], axis=1)
nearby_candidate_coords_transposed = tf.gather_nd(
keypoint_candidates_transposed, combined_indices)
nearby_candidate_coords_transposed = tf.reshape(
nearby_candidate_coords_transposed,
[batch_size, num_keypoints, num_indices, -1])
nearby_candidate_scores_transposed = tf.gather_nd(keypoint_scores_transposed,
combined_indices)
nearby_candidate_scores_transposed = tf.reshape(
nearby_candidate_scores_transposed,
[batch_size, num_keypoints, num_indices])
gathered_keypoint_candidates = tf.transpose(
nearby_candidate_coords_transposed, [0, 2, 1, 3])
gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed,
[0, 2, 1])
gathered_keypoint_depths = None
if keypoint_depth_candidates is not None:
keypoint_depths_transposed = tf.transpose(keypoint_depth_candidates,
[0, 2, 1])
nearby_candidate_depths_transposed = tf.gather_nd(
keypoint_depths_transposed, combined_indices)
nearby_candidate_depths_transposed = tf.reshape(
nearby_candidate_depths_transposed,
[batch_size, num_keypoints, num_indices])
gathered_keypoint_depths = tf.transpose(nearby_candidate_depths_transposed,
[0, 2, 1])
return (gathered_keypoint_candidates, gathered_keypoint_scores,
gathered_keypoint_depths)
def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):
"""Get the index in a flattened array given row and column indices."""
return (row_indices * num_cols) + col_indices
def row_col_channel_indices_from_flattened_indices(indices, num_cols,
num_channels):
"""Computes row, column and channel indices from flattened indices.
Args:
indices: An integer tensor of any shape holding the indices in the flattened
space.
num_cols: Number of columns in the image (width).
num_channels: Number of channels in the image.
Returns:
row_indices: The row indices corresponding to each of the input indices.
Same shape as indices.
col_indices: The column indices corresponding to each of the input indices.
Same shape as indices.
channel_indices: The channel indices corresponding to each of the input
indices. Same shape as indices.
"""
# Avoid using mod operator to make the ops more easy to be compatible with
# different environments, e.g. WASM.
row_indices = (indices // num_channels) // num_cols
col_indices = (indices // num_channels) - row_indices * num_cols
channel_indices_temp = indices // num_channels
channel_indices = indices - channel_indices_temp * num_channels
return row_indices, col_indices, channel_indices
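def _example_flattened_index_round_trip():
  # Illustrative sketch only (not used elsewhere in this module): decodes a few
  # flattened indices from a hypothetical 4-column, 3-channel feature map and
  # re-encodes the spatial component with the helper above.
  indices = tf.constant([0, 7, 37], dtype=tf.int32)
  rows, cols, channels = row_col_channel_indices_from_flattened_indices(
      indices, num_cols=4, num_channels=3)
  # For index 37: channel = 1, spatial index = 12, row = 3, col = 0.
  spatial_indices = flattened_indices_from_row_col_indices(rows, cols, 4)
  return rows, cols, channels, spatial_indices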
def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height,
width):
"""Computes valid anchor weights for an image assuming pixels will be flattened.
This function is useful when we only want to penalize valid areas in the
image in the case when padding is used. The function assumes that the loss
function will be applied after flattening the spatial dimensions and returns
anchor weights accordingly.
Args:
true_image_shapes: An integer tensor of shape [batch_size, 3] representing
the true image shape (without padding) for each sample in the batch.
height: height of the prediction from the network.
width: width of the prediction from the network.
Returns:
valid_anchor_weights: a float tensor of shape [batch_size, height * width]
with 1s in locations where the spatial coordinates fall within the height
and width in true_image_shapes.
"""
indices = tf.reshape(tf.range(height * width), [1, -1])
batch_size = tf.shape(true_image_shapes)[0]
batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices
y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices(
batch_indices, width, 1)
max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1]
max_x = _to_float32(tf.expand_dims(max_x, 1))
max_y = _to_float32(tf.expand_dims(max_y, 1))
x_coords = _to_float32(x_coords)
y_coords = _to_float32(y_coords)
valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y)
return _to_float32(valid_mask)
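# Worked example (illustrative only): with height=4, width=6 and a row of
# true_image_shapes equal to (3, 5, _) (interpreted in the same coordinate
# frame as the prediction), the returned [batch, 24] weights are 1.0 wherever
# y < 3 and x < 5, and 0.0 in the last row and last column.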
def convert_strided_predictions_to_normalized_boxes(boxes, stride,
true_image_shapes):
"""Converts predictions in the output space to normalized boxes.
Boxes falling outside the valid image boundary are clipped to be on the
boundary.
Args:
boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw
coordinates of boxes in the model's output space.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
Returns:
boxes: A tensor of shape [batch_size, num_boxes, 4] representing the
coordinates of the normalized boxes.
"""
# Note: We use tf ops instead of functions in box_list_ops to make this
# function compatible with dynamic batch size.
boxes = boxes * stride
true_image_shapes = tf.tile(true_image_shapes[:, tf.newaxis, :2], [1, 1, 2])
boxes = boxes / tf.cast(true_image_shapes, tf.float32)
boxes = tf.clip_by_value(boxes, 0.0, 1.0)
return boxes
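def _example_normalize_strided_box():
  # Illustrative sketch only (not used elsewhere in this module): a single box
  # predicted in a stride-4 output space is scaled back to input pixels,
  # normalized by the true (unpadded) image shape, and clipped to [0, 1].
  boxes = tf.constant([[[10.0, 20.0, 30.0, 40.0]]])  # [batch=1, num_boxes=1, 4]
  true_image_shapes = tf.constant([[128, 160, 3]], dtype=tf.int32)
  # Expected result: [[[0.3125, 0.5, 0.9375, 1.0]]].
  return convert_strided_predictions_to_normalized_boxes(
      boxes, stride=4, true_image_shapes=true_image_shapes)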
def convert_strided_predictions_to_normalized_keypoints(
keypoint_coords, keypoint_scores, stride, true_image_shapes,
clip_out_of_frame_keypoints=False):
"""Converts predictions in the output space to normalized keypoints.
If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside
the valid image boundary are normalized but not clipped; if
clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the
valid image boundary are clipped to the closest image boundary and the scores
will be set to 0.0.
Args:
keypoint_coords: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] holding the raw coordinates
of keypoints in the model's output space.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] holding the keypoint scores.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside
the image boundary should be clipped. If True, keypoint coords will be
clipped to image boundary. If False, keypoints are normalized but not
filtered based on their location.
Returns:
keypoint_coords_normalized: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] representing the coordinates
of the normalized keypoints.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] representing the updated
keypoint scores.
"""
# Flatten keypoints and scores.
batch_size, _, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
# Scale and normalize keypoints.
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
yscale = float(stride) / tf.cast(true_heights, tf.float32)
xscale = float(stride) / tf.cast(true_widths, tf.float32)
yx_scale = tf.stack([yscale, xscale], axis=1)
keypoint_coords_normalized = keypoint_coords * tf.reshape(
yx_scale, [batch_size, 1, 1, 2])
if clip_out_of_frame_keypoints:
# Determine the keypoints that are in the true image regions.
valid_indices = tf.logical_and(
tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0,
keypoint_coords_normalized[:, :, :, 0] <= 1.0),
tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0,
keypoint_coords_normalized[:, :, :, 1] <= 1.0))
batch_window = tf.tile(
tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32),
multiples=[batch_size, 1])
def clip_to_window(inputs):
keypoints, window = inputs
return keypoint_ops.clip_to_window(keypoints, window)
# Specify the TensorSpec explicitly in the tf.map_fn to make it tf.lite
# compatible.
kpts_dims = _get_shape(keypoint_coords_normalized, 4)
output_spec = tf.TensorSpec(
shape=[kpts_dims[1], kpts_dims[2], kpts_dims[3]], dtype=tf.float32)
keypoint_coords_normalized = tf.map_fn(
clip_to_window, (keypoint_coords_normalized, batch_window),
dtype=tf.float32, back_prop=False,
fn_output_signature=output_spec)
keypoint_scores = tf.where(valid_indices, keypoint_scores,
tf.zeros_like(keypoint_scores))
return keypoint_coords_normalized, keypoint_scores
def convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap=None, densepose_surface_coords=None, stride=4,
mask_height=256, mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Converts predicted full-image masks into instance masks.
For each predicted detection box:
* Crop and resize the predicted mask (and optionally DensePose coordinates)
based on the detected bounding box coordinates and class prediction. Uses
bilinear resampling.
* Binarize the mask using the provided score threshold.
Args:
boxes: A tensor of shape [batch, max_detections, 4] holding the predicted
boxes, in normalized coordinates (relative to the true image dimensions).
classes: An integer tensor of shape [batch, max_detections] containing the
detected class for each box (0-indexed).
masks: A [batch, output_height, output_width, num_classes] float32
tensor with class probabilities.
true_image_shapes: A tensor of shape [batch, 3] representing the true
shape of the inputs not considering padding.
densepose_part_heatmap: (Optional) A [batch, output_height, output_width,
num_parts] float32 tensor with part scores (i.e. logits).
densepose_surface_coords: (Optional) A [batch, output_height, output_width,
2 * num_parts] float32 tensor with predicted part coordinates (in
vu-format).
stride: The stride in the output space.
mask_height: The desired resized height for instance masks.
mask_width: The desired resized width for instance masks.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: The class index (0-indexed) corresponding to the
class which has DensePose labels (e.g. person class).
Returns:
A tuple of masks and surface_coords.
instance_masks: A [batch_size, max_detections, mask_height, mask_width]
uint8 tensor with predicted foreground mask for each
instance. If DensePose tensors are provided, then each pixel value in the
mask encodes the 1-indexed part.
surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates. Note that v, u coordinates are
only defined on instance masks, and the coordinates at each location of
the foreground mask correspond to coordinates on a local part coordinate
system (the specific part can be inferred from the `instance_masks`
output). If DensePose feature maps are not passed to this function, this
output will be None.
Raises:
ValueError: If one but not both of `densepose_part_heatmap` and
`densepose_surface_coords` is provided.
"""
batch_size, output_height, output_width, _ = (
shape_utils.combined_static_and_dynamic_shape(masks))
input_height = stride * output_height
input_width = stride * output_width
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
# If necessary, create dummy DensePose tensors to simplify the map function.
densepose_present = True
if ((densepose_part_heatmap is not None) ^
(densepose_surface_coords is not None)):
raise ValueError('To use DensePose, both `densepose_part_heatmap` and '
'`densepose_surface_coords` must be provided')
if densepose_part_heatmap is None and densepose_surface_coords is None:
densepose_present = False
densepose_part_heatmap = tf.zeros(
(batch_size, output_height, output_width, 1), dtype=tf.float32)
densepose_surface_coords = tf.zeros(
(batch_size, output_height, output_width, 2), dtype=tf.float32)
crop_and_threshold_fn = functools.partial(
crop_and_threshold_masks, input_height=input_height,
input_width=input_width, mask_height=mask_height, mask_width=mask_width,
score_threshold=score_threshold,
densepose_class_index=densepose_class_index)
instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn(
crop_and_threshold_fn,
elems=[boxes, classes, masks, densepose_part_heatmap,
densepose_surface_coords, true_heights, true_widths],
dtype=[tf.uint8, tf.float32],
back_prop=False)
surface_coords = surface_coords if densepose_present else None
return instance_masks, surface_coords
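# Pipeline note (illustrative): with stride=4 and a [batch, 128, 128,
# num_classes] mask head output, each detection selects its class channel,
# crops it with bilinear crop-and-resize to [mask_height, mask_width] using the
# detection box, and thresholds the result at score_threshold to produce the
# uint8 instance masks returned above.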
def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256,
mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Crops and thresholds masks based on detection boxes.
Args:
elems: A tuple of
boxes - float32 tensor of shape [max_detections, 4]
classes - int32 tensor of shape [max_detections] (0-indexed)
masks - float32 tensor of shape [output_height, output_width, num_classes]
part_heatmap - float32 tensor of shape [output_height, output_width,
num_parts]
surf_coords - float32 tensor of shape [output_height, output_width,
2 * num_parts]
true_height - scalar int tensor
true_width - scalar int tensor
input_height: Input height to network.
input_width: Input width to network.
mask_height: Height for resizing mask crops.
mask_width: Width for resizing mask crops.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: scalar int tensor with the class index (0-indexed)
for DensePose.
Returns:
A tuple of
all_instances: A [max_detections, mask_height, mask_width] uint8 tensor
with a predicted foreground mask for each instance. Background is encoded
as 0, and foreground is encoded as a positive integer. Specific part
indices are encoded as 1-indexed parts (for classes that have part
information).
surface_coords: A [max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates for each part.
"""
(boxes, classes, masks, part_heatmap, surf_coords, true_height,
true_width) = elems
# Boxes are in normalized coordinates relative to true image shapes. Convert
# coordinates to be normalized relative to input image shapes (since masks
# may still have padding).
boxlist = box_list.BoxList(boxes)
y_scale = true_height / input_height
x_scale = true_width / input_width
boxlist = box_list_ops.scale(boxlist, y_scale, x_scale)
boxes = boxlist.get()
# Convert masks from [output_height, output_width, num_classes] to
# [num_classes, output_height, output_width, 1].
num_classes = tf.shape(masks)[-1]
masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis]
# Tile part and surface coordinate masks for all classes.
part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d],
axis=-1)
# The following tensor has shape
# [max_detections, mask_height, mask_width, 1 + 3 * num_parts].
cropped_masks = tf2.image.crop_and_resize(
feature_maps_concat,
boxes=boxes,
box_indices=classes,
crop_size=[mask_height, mask_width],
method='bilinear')
# Split the cropped masks back into instance masks, part masks, and surface
# coordinates.
num_parts = tf.shape(part_heatmap)[-1]
instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split(
cropped_masks, [1, num_parts, 2 * num_parts], axis=-1)
# Threshold the instance masks. Resulting tensor has shape
# [max_detections, mask_height, mask_width, 1].
instance_masks_int = tf.cast(
tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32)
# Produce a binary mask that is 1.0 only:
# - in the foreground region for an instance
# - in detections corresponding to the DensePose class
det_with_parts = tf.equal(classes, densepose_class_index)
det_with_parts = tf.cast(
tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32)
instance_masks_with_parts = tf.math.multiply(instance_masks_int,
det_with_parts)
# Similarly, produce a binary mask that holds the foreground masks only for
# instances without parts (i.e. non-DensePose classes).
det_without_parts = 1 - det_with_parts
instance_masks_without_parts = tf.math.multiply(instance_masks_int,
det_without_parts)
# Assemble a tensor that has standard instance segmentation masks for
# non-DensePose classes (with values in [0, 1]), and part segmentation masks
# for DensePose classes (with values in [0, 1, ..., num_parts]).
part_mask_int_zero_indexed = tf.math.argmax(
part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis]
part_mask_int_one_indexed = part_mask_int_zero_indexed + 1
all_instances = (instance_masks_without_parts +
instance_masks_with_parts * part_mask_int_one_indexed)
# Gather the surface coordinates for the parts.
surface_coords_cropped = tf.reshape(
surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2])
surface_coords = gather_surface_coords_for_parts(surface_coords_cropped,
part_mask_int_zero_indexed)
surface_coords = (
surface_coords * tf.cast(instance_masks_with_parts, tf.float32))
return [tf.squeeze(all_instances, axis=3), surface_coords]
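# Encoding note (illustrative): in `all_instances` above, background pixels are
# 0, foreground pixels of non-DensePose detections are 1, and foreground pixels
# of DensePose detections carry the 1-indexed part id (1 .. num_parts).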
def gather_surface_coords_for_parts(surface_coords_cropped,
highest_scoring_part):
"""Gathers the (v, u) coordinates for the highest scoring DensePose parts.
Args:
surface_coords_cropped: A [max_detections, height, width, num_parts, 2]
float32 tensor with (v, u) surface coordinates.
highest_scoring_part: A [max_detections, height, width] integer tensor with
the highest scoring part (0-indexed) indices for each location.
Returns:
A [max_detections, height, width, 2] float32 tensor with the (v, u)
coordinates selected from the highest scoring parts.
"""
max_detections, height, width, num_parts, _ = (
shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped))
flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2])
flattened_part_ids = tf.reshape(highest_scoring_part, [-1])
# Produce lookup indices that represent the locations of the highest scoring
# parts in the `flattened_surface_coords` tensor.
flattened_lookup_indices = (
num_parts * tf.range(max_detections * height * width) +
flattened_part_ids)
vu_coords_flattened = tf.gather(flattened_surface_coords,
flattened_lookup_indices, axis=0)
return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])
def predicted_embeddings_at_object_centers(embedding_predictions,
y_indices, x_indices):
"""Returns the predicted embeddings at specified object centers.
Args:
embedding_predictions: A float tensor of shape [batch_size, height, width,
reid_embed_size] holding predicted embeddings.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, reid_embed_size] where
predicted embeddings are gathered at the provided locations.
"""
batch_size, _, width, _ = _get_shape(embedding_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
_, num_instances = _get_shape(flattened_indices, 2)
embeddings_flat = _flatten_spatial_dimensions(embedding_predictions)
embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1)
embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1])
return embeddings
class ObjectDetectionParams(
collections.namedtuple('ObjectDetectionParams', [
'localization_loss', 'scale_loss_weight', 'offset_loss_weight',
'task_loss_weight'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the object detection task. The class is
immutable after construction. Please see the __new__ function for detailed
information on each field.
"""
__slots__ = ()
def __new__(cls,
localization_loss,
scale_loss_weight,
offset_loss_weight,
task_loss_weight=1.0):
"""Constructor with default values for ObjectDetectionParams.
Args:
localization_loss: a object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
scale_loss_weight: float, The weight for localizing box size. Note that
the scale loss is dependent on the input image size, since we penalize
the raw height and width. This constant may need to be adjusted
depending on the input size.
offset_loss_weight: float, The weight for localizing center offsets.
task_loss_weight: float, the weight of the object detection loss.
Returns:
An initialized ObjectDetectionParams namedtuple.
"""
return super(ObjectDetectionParams,
cls).__new__(cls, localization_loss, scale_loss_weight,
offset_loss_weight, task_loss_weight)
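# Illustrative construction (a sketch; assumes object_detection.core.losses is
# available as `losses`, and the weights shown are arbitrary):
#   od_params = ObjectDetectionParams(
#       localization_loss=losses.L1LocalizationLoss(),
#       scale_loss_weight=0.1,
#       offset_loss_weight=1.0)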
class KeypointEstimationParams(
collections.namedtuple('KeypointEstimationParams', [
'task_name', 'class_id', 'keypoint_indices', 'classification_loss',
'localization_loss', 'keypoint_labels', 'keypoint_std_dev',
'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight',
'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold',
'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight',
'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale',
'candidate_search_scale', 'candidate_ranking_mode',
'offset_peak_radius', 'per_keypoint_offset', 'predict_depth',
'per_keypoint_depth', 'keypoint_depth_loss_weight',
'score_distance_offset', 'clip_out_of_frame_keypoints',
'rescore_instances'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the keypoint estimation task. The class is
immutable after constructed. Please see the __new__ function for detailed
information for each fields.
"""
__slots__ = ()
def __new__(cls,
task_name,
class_id,
keypoint_indices,
classification_loss,
localization_loss,
keypoint_labels=None,
keypoint_std_dev=None,
keypoint_heatmap_loss_weight=1.0,
keypoint_offset_loss_weight=1.0,
keypoint_regression_loss_weight=1.0,
keypoint_candidate_score_threshold=0.1,
heatmap_bias_init=-2.19,
num_candidates_per_keypoint=100,
task_loss_weight=1.0,
peak_max_pool_kernel_size=3,
unmatched_keypoint_score=0.1,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance',
offset_peak_radius=0,
per_keypoint_offset=False,
predict_depth=False,
per_keypoint_depth=False,
keypoint_depth_loss_weight=1.0,
score_distance_offset=1e-6,
clip_out_of_frame_keypoints=False,
rescore_instances=False):
"""Constructor with default values for KeypointEstimationParams.
Args:
task_name: string, the name of the task this namedtuple corresponds to.
Note that it should be an unique identifier of the task.
class_id: int, the ID of the class that contains the target keypoints to
be considered in this task. For example, if the task is human pose
estimation, the class id should correspond to the "human" class. Note
that the ID is 0-based, meaning that class 0 corresponds to the first
non-background object class.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
keypoint_labels: A list of strings representing the label text of each
keypoint, e.g. "nose", "left_shoulder". Note that the length of this
list should be equal to the length of keypoint_indices.
keypoint_std_dev: A list of floats representing the standard deviation of the
Gaussian kernel used to generate the keypoint heatmap. It is to provide
the flexibility of using different sizes of Gaussian kernel for each
keypoint class.
keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.
keypoint_offset_loss_weight: float, The weight for the keypoint offsets
loss.
keypoint_regression_loss_weight: float, The weight for keypoint regression
loss. Note that the loss is dependent on the input image size, since we
penalize the raw height and width. This constant may need to be adjusted
depending on the input size.
keypoint_candidate_score_threshold: float, The heatmap score threshold for
a keypoint to become a valid candidate.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the class prediction head. If set to None, the bias is
initialized with zeros.
num_candidates_per_keypoint: The maximum number of candidates to retrieve
for each keypoint.
task_loss_weight: float, the weight of the keypoint estimation loss.
peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak
score locations in a neighborhood (independently for each keypoint
types).
unmatched_keypoint_score: The default score to use for regressed keypoints
that are not successfully snapped to a nearby candidate.
box_scale: The multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints).
candidate_search_scale: The scale parameter that multiplies the largest
dimension of a bounding box. The resulting distance becomes a search
radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio']
indicating how to select the keypoint candidate.
offset_peak_radius: The radius (in the unit of output pixel) around
groundtruth heatmap peak to assign the offset targets. If set 0, then
the offset target will only be assigned to the heatmap peak (same
behavior as the original paper).
per_keypoint_offset: A bool indicating whether to assign offsets for each
keypoint channel separately. If set False, the output offset target has
the shape [batch_size, out_height, out_width, 2] (same behavior as the
original paper). If set True, the output offset target has the shape
[batch_size, out_height, out_width, 2 * num_keypoints] (recommended when
the offset_peak_radius is not zero).
predict_depth: A bool indicating whether to predict the depth of each
keypoint.
per_keypoint_depth: A bool indicating whether the model predicts the depth
of each keypoint in independent channels. Similar to
per_keypoint_offset but for the keypoint depth.
keypoint_depth_loss_weight: The weight of the keypoint depth loss.
score_distance_offset: The distance offset to apply in the denominator
when candidate_ranking_mode is 'score_distance_ratio'. The metric to
maximize in this scenario is score / (distance + score_distance_offset).
Larger values of score_distance_offset make the keypoint score gain more
relative importance.
clip_out_of_frame_keypoints: Whether keypoints outside the image frame
should be clipped back to the image boundary. If True, the keypoints
that are clipped have scores set to 0.0.
rescore_instances: Whether to rescore instances based on a combination of
detection score and keypoint scores.
Returns:
An initialized KeypointEstimationParams namedtuple.
"""
return super(KeypointEstimationParams, cls).__new__(
cls, task_name, class_id, keypoint_indices, classification_loss,
localization_loss, keypoint_labels, keypoint_std_dev,
keypoint_heatmap_loss_weight, keypoint_offset_loss_weight,
keypoint_regression_loss_weight, keypoint_candidate_score_threshold,
heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight,
peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale,
candidate_search_scale, candidate_ranking_mode, offset_peak_radius,
per_keypoint_offset, predict_depth, per_keypoint_depth,
keypoint_depth_loss_weight, score_distance_offset,
clip_out_of_frame_keypoints, rescore_instances)
class ObjectCenterParams(
collections.namedtuple('ObjectCenterParams', [
'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init',
'min_box_overlap_iou', 'max_box_predictions', 'use_only_known_classes'
])):
"""Namedtuple to store object center prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
object_center_loss_weight,
heatmap_bias_init=-2.19,
min_box_overlap_iou=0.7,
max_box_predictions=100,
use_labeled_classes=False):
"""Constructor with default values for ObjectCenterParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
object_center_loss_weight: float, The weight for the object center loss.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the object center prediction head. If set to None, the bias is
initialized with zeros.
      min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes to not be penalized. This is used
        for computing the class specific center heatmaps.
      max_box_predictions: int, the maximum number of boxes to predict.
      use_labeled_classes: boolean, whether to compute the loss only for
        labeled classes.
Returns:
An initialized ObjectCenterParams namedtuple.
"""
return super(ObjectCenterParams,
cls).__new__(cls, classification_loss,
object_center_loss_weight, heatmap_bias_init,
min_box_overlap_iou, max_box_predictions,
use_labeled_classes)
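# A minimal construction sketch for ObjectCenterParams (illustrative only;
# `losses` refers to object_detection.core.losses, and in practice these
# objects are created by the model builder from the pipeline config):
#
#   center_params = ObjectCenterParams(
#       classification_loss=losses.PenaltyReducedLogisticFocalLoss(),
#       object_center_loss_weight=1.0,
#       max_box_predictions=100)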
class MaskParams(
collections.namedtuple('MaskParams', [
'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width',
'score_threshold', 'heatmap_bias_init'
])):
"""Namedtuple to store mask prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
task_loss_weight=1.0,
mask_height=256,
mask_width=256,
score_threshold=0.5,
heatmap_bias_init=-2.19):
"""Constructor with default values for MaskParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the semantic segmentation predictions in CenterNet.
task_loss_weight: float, The loss weight for the segmentation task.
mask_height: The height of the resized instance segmentation mask.
mask_width: The width of the resized instance segmentation mask.
score_threshold: The threshold at which to convert predicted mask
probabilities (after passing through sigmoid) into foreground pixels.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the semantic segmentation prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized MaskParams namedtuple.
"""
return super(MaskParams,
cls).__new__(cls, classification_loss,
task_loss_weight, mask_height, mask_width,
score_threshold, heatmap_bias_init)
class DensePoseParams(
collections.namedtuple('DensePoseParams', [
'class_id', 'classification_loss', 'localization_loss',
'part_loss_weight', 'coordinate_loss_weight', 'num_parts',
'task_loss_weight', 'upsample_to_input_res', 'upsample_method',
'heatmap_bias_init'
])):
"""Namedtuple to store DensePose prediction related parameters."""
__slots__ = ()
def __new__(cls,
class_id,
classification_loss,
localization_loss,
part_loss_weight=1.0,
coordinate_loss_weight=1.0,
num_parts=24,
task_loss_weight=1.0,
upsample_to_input_res=True,
upsample_method='bilinear',
heatmap_bias_init=-2.19):
"""Constructor with default values for DensePoseParams.
Args:
class_id: the ID of the class that contains the DensePose groundtruth.
This should typically correspond to the "person" class. Note that the ID
is 0-based, meaning that class 0 corresponds to the first non-background
object class.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the body part predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the surface coordinate regression in CenterNet.
part_loss_weight: The loss weight to apply to part prediction.
coordinate_loss_weight: The loss weight to apply to surface coordinate
prediction.
num_parts: The number of DensePose parts to predict.
task_loss_weight: float, the loss weight for the DensePose task.
upsample_to_input_res: Whether to upsample the DensePose feature maps to
the input resolution before applying loss. Note that the prediction
outputs are still at the standard CenterNet output stride.
      upsample_method: Method for upsampling DensePose feature maps. Options
        are either 'bilinear' or 'nearest'. This has no effect when
        `upsample_to_input_res` is False.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the part prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized DensePoseParams namedtuple.
"""
return super(DensePoseParams,
cls).__new__(cls, class_id, classification_loss,
localization_loss, part_loss_weight,
coordinate_loss_weight, num_parts,
task_loss_weight, upsample_to_input_res,
upsample_method, heatmap_bias_init)
class TrackParams(
collections.namedtuple('TrackParams', [
'num_track_ids', 'reid_embed_size', 'num_fc_layers',
'classification_loss', 'task_loss_weight'
])):
"""Namedtuple to store tracking prediction related parameters."""
__slots__ = ()
def __new__(cls,
num_track_ids,
reid_embed_size,
num_fc_layers,
classification_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
num_track_ids: int. The maximum track ID in the dataset. Used for ReID
embedding classification task.
reid_embed_size: int. The embedding size for ReID task.
num_fc_layers: int. The number of (fully-connected, batch-norm, relu)
layers for track ID classification head.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the ReID embedding in CenterNet.
task_loss_weight: float, the loss weight for the tracking task.
Returns:
An initialized TrackParams namedtuple.
"""
return super(TrackParams,
cls).__new__(cls, num_track_ids, reid_embed_size,
num_fc_layers, classification_loss,
task_loss_weight)
class TemporalOffsetParams(
collections.namedtuple('TemporalOffsetParams', [
'localization_loss', 'task_loss_weight'
])):
"""Namedtuple to store temporal offset related parameters."""
__slots__ = ()
def __new__(cls,
localization_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
localization_loss: an object_detection.core.losses.Loss object to
compute the loss for the temporal offset in CenterNet.
task_loss_weight: float, the loss weight for the temporal offset
task.
Returns:
An initialized TemporalOffsetParams namedtuple.
"""
return super(TemporalOffsetParams,
cls).__new__(cls, localization_loss, task_loss_weight)
# The following constants are used to generate the keys of the
# (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch
# class.
DETECTION_TASK = 'detection_task'
OBJECT_CENTER = 'object_center'
BOX_SCALE = 'box/scale'
BOX_OFFSET = 'box/offset'
KEYPOINT_REGRESSION = 'keypoint/regression'
KEYPOINT_HEATMAP = 'keypoint/heatmap'
KEYPOINT_OFFSET = 'keypoint/offset'
KEYPOINT_DEPTH = 'keypoint/depth'
SEGMENTATION_TASK = 'segmentation_task'
SEGMENTATION_HEATMAP = 'segmentation/heatmap'
DENSEPOSE_TASK = 'densepose_task'
DENSEPOSE_HEATMAP = 'densepose/heatmap'
DENSEPOSE_REGRESSION = 'densepose/regression'
LOSS_KEY_PREFIX = 'Loss'
TRACK_TASK = 'track_task'
TRACK_REID = 'track/reid'
TEMPORALOFFSET_TASK = 'temporal_offset_task'
TEMPORAL_OFFSET = 'track/offset'
def get_keypoint_name(task_name, head_name):
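  # For instance, get_keypoint_name('human_pose', KEYPOINT_HEATMAP) returns
  # 'human_pose/keypoint/heatmap', the key under which that task's heatmap
  # head appears in the prediction and loss dictionaries.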
return '%s/%s' % (task_name, head_name)
def get_num_instances_from_weights(groundtruth_weights_list):
"""Computes the number of instances/boxes from the weights in a batch.
Args:
groundtruth_weights_list: A list of float tensors with shape
[max_num_instances] representing whether there is an actual instance in
the image (with non-zero value) or is padded to match the
max_num_instances (with value 0.0). The list represents the batch
dimension.
Returns:
    A scalar integer tensor indicating how many instances/boxes are in the
images in the batch. Note that this function is usually used to normalize
the loss so the minimum return value is 1 to avoid weird behavior.
"""
num_instances = tf.reduce_sum(
[tf.math.count_nonzero(w) for w in groundtruth_weights_list])
num_instances = tf.maximum(num_instances, 1)
return num_instances
class CenterNetMetaArch(model.DetectionModel):
"""The CenterNet meta architecture [1].
[1]: https://arxiv.org/abs/1904.07850
"""
def __init__(self,
is_training,
add_summaries,
num_classes,
feature_extractor,
image_resizer_fn,
object_center_params,
object_detection_params=None,
keypoint_params_dict=None,
mask_params=None,
densepose_params=None,
track_params=None,
temporal_offset_params=None,
use_depthwise=False,
compute_heatmap_sparse=False,
non_max_suppression_fn=None):
"""Initializes a CenterNet model.
Args:
is_training: Set to True if this model is being built for training.
add_summaries: Whether to add tf summaries in the model.
num_classes: int, The number of classes that the model should predict.
feature_extractor: A CenterNetFeatureExtractor to use to extract features
from an image.
image_resizer_fn: a callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
returns a rank-3 image tensor, possibly with new spatial dimensions and
a 1-D tensor of shape [3] indicating shape of true image within the
resized image tensor as the resized image tensor could be padded. See
builders/image_resizer_builder.py.
object_center_params: An ObjectCenterParams namedtuple. This object holds
the hyper-parameters for object center prediction. This is required by
either object detection or keypoint estimation tasks.
object_detection_params: An ObjectDetectionParams namedtuple. This object
holds the hyper-parameters necessary for object detection. Please see
the class definition for more details.
keypoint_params_dict: A dictionary that maps from task name to the
corresponding KeypointEstimationParams namedtuple. This object holds the
hyper-parameters necessary for multiple keypoint estimations. Please
see the class definition for more details.
mask_params: A MaskParams namedtuple. This object
holds the hyper-parameters for segmentation. Please see the class
definition for more details.
densepose_params: A DensePoseParams namedtuple. This object holds the
hyper-parameters for DensePose prediction. Please see the class
definition for more details. Note that if this is provided, it is
expected that `mask_params` is also provided.
track_params: A TrackParams namedtuple. This object
holds the hyper-parameters for tracking. Please see the class
definition for more details.
temporal_offset_params: A TemporalOffsetParams namedtuple. This object
holds the hyper-parameters for offset prediction based tracking.
use_depthwise: If true, all task heads will be constructed using
        separable_conv. Otherwise, standard convolutions will be used.
compute_heatmap_sparse: bool, whether or not to use the sparse version of
the Op that computes the center heatmaps. The sparse version scales
better with number of channels in the heatmap, but in some cases is
known to cause an OOM error. See b/170989061.
non_max_suppression_fn: Optional Non Max Suppression function to apply.
"""
assert object_detection_params or keypoint_params_dict
# Shorten the name for convenience and better formatting.
self._is_training = is_training
# The Objects as Points paper attaches loss functions to multiple
    # (`num_feature_outputs`) feature maps in the backbone. E.g.
# for the hourglass backbone, `num_feature_outputs` is 2.
self._num_classes = num_classes
self._feature_extractor = feature_extractor
self._num_feature_outputs = feature_extractor.num_feature_outputs
self._stride = self._feature_extractor.out_stride
self._image_resizer_fn = image_resizer_fn
self._center_params = object_center_params
self._od_params = object_detection_params
self._kp_params_dict = keypoint_params_dict
self._mask_params = mask_params
if densepose_params is not None and mask_params is None:
raise ValueError('To run DensePose prediction, `mask_params` must also '
'be supplied.')
self._densepose_params = densepose_params
self._track_params = track_params
self._temporal_offset_params = temporal_offset_params
self._use_depthwise = use_depthwise
self._compute_heatmap_sparse = compute_heatmap_sparse
# Construct the prediction head nets.
self._prediction_head_dict = self._construct_prediction_heads(
num_classes,
self._num_feature_outputs,
class_prediction_bias_init=self._center_params.heatmap_bias_init)
# Initialize the target assigners.
self._target_assigner_dict = self._initialize_target_assigners(
stride=self._stride,
min_box_overlap_iou=self._center_params.min_box_overlap_iou)
# Will be used in VOD single_frame_meta_arch for tensor reshape.
self._batched_prediction_tensor_names = []
self._non_max_suppression_fn = non_max_suppression_fn
super(CenterNetMetaArch, self).__init__(num_classes)
@property
def batched_prediction_tensor_names(self):
if not self._batched_prediction_tensor_names:
raise RuntimeError('Must call predict() method to get batched prediction '
'tensor names.')
return self._batched_prediction_tensor_names
def _make_prediction_net_list(self, num_feature_outputs, num_out_channels,
kernel_size=3, num_filters=256, bias_fill=None,
name=None):
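    """Creates one prediction head per backbone feature output.
    Each head is built by make_prediction_net with the given output channels,
    kernel size, filter count and bias initializer; when `name` is provided,
    each head's name is suffixed with its feature output index.
    """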
prediction_net_list = []
for i in range(num_feature_outputs):
prediction_net_list.append(
make_prediction_net(
num_out_channels,
kernel_size=kernel_size,
num_filters=num_filters,
bias_fill=bias_fill,
use_depthwise=self._use_depthwise,
name='{}_{}'.format(name, i) if name else name))
return prediction_net_list
def _construct_prediction_heads(self, num_classes, num_feature_outputs,
class_prediction_bias_init):
"""Constructs the prediction heads based on the specific parameters.
Args:
num_classes: An integer indicating how many classes in total to predict.
num_feature_outputs: An integer indicating how many feature outputs to use
for calculating the loss. The Objects as Points paper attaches loss
        functions to multiple (`num_feature_outputs`) feature maps in the
backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.
class_prediction_bias_init: float, the initial value of bias in the
convolutional kernel of the class prediction head. If set to None, the
bias is initialized with zeros.
Returns:
      A dictionary of lists of keras modules generated by calling the
      make_prediction_net function. When the tracking task is enabled, this
      method also creates and sets a private classification network member on
      the class.
"""
prediction_heads = {}
prediction_heads[OBJECT_CENTER] = self._make_prediction_net_list(
num_feature_outputs, num_classes, bias_fill=class_prediction_bias_init,
name='center')
if self._od_params is not None:
prediction_heads[BOX_SCALE] = self._make_prediction_net_list(
num_feature_outputs, NUM_SIZE_CHANNELS, name='box_scale')
prediction_heads[BOX_OFFSET] = self._make_prediction_net_list(
num_feature_outputs, NUM_OFFSET_CHANNELS, name='box_offset')
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
num_keypoints = len(kp_params.keypoint_indices)
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_HEATMAP)] = self._make_prediction_net_list(
num_feature_outputs,
num_keypoints,
bias_fill=kp_params.heatmap_bias_init,
name='kpt_heatmap')
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_REGRESSION)] = self._make_prediction_net_list(
num_feature_outputs,
NUM_OFFSET_CHANNELS * num_keypoints,
name='kpt_regress')
if kp_params.per_keypoint_offset:
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(
num_feature_outputs,
NUM_OFFSET_CHANNELS * num_keypoints,
name='kpt_offset')
else:
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(
num_feature_outputs, NUM_OFFSET_CHANNELS, name='kpt_offset')
if kp_params.predict_depth:
num_depth_channel = (
num_keypoints if kp_params.per_keypoint_depth else 1)
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_DEPTH)] = self._make_prediction_net_list(
num_feature_outputs, num_depth_channel, name='kpt_depth')
if self._mask_params is not None:
prediction_heads[SEGMENTATION_HEATMAP] = self._make_prediction_net_list(
num_feature_outputs,
num_classes,
bias_fill=self._mask_params.heatmap_bias_init,
name='seg_heatmap')
if self._densepose_params is not None:
prediction_heads[DENSEPOSE_HEATMAP] = self._make_prediction_net_list(
num_feature_outputs,
self._densepose_params.num_parts,
bias_fill=self._densepose_params.heatmap_bias_init,
name='dense_pose_heatmap')
prediction_heads[DENSEPOSE_REGRESSION] = self._make_prediction_net_list(
num_feature_outputs,
2 * self._densepose_params.num_parts,
name='dense_pose_regress')
if self._track_params is not None:
prediction_heads[TRACK_REID] = self._make_prediction_net_list(
num_feature_outputs,
self._track_params.reid_embed_size,
name='track_reid')
# Creates a classification network to train object embeddings by learning
# a projection from embedding space to object track ID space.
self.track_reid_classification_net = tf.keras.Sequential()
for _ in range(self._track_params.num_fc_layers - 1):
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.reid_embed_size,
input_shape=(
self._track_params.reid_embed_size,)))
self.track_reid_classification_net.add(
tf.keras.layers.BatchNormalization())
self.track_reid_classification_net.add(tf.keras.layers.ReLU())
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.num_track_ids,
input_shape=(
self._track_params.reid_embed_size,)))
if self._temporal_offset_params is not None:
prediction_heads[TEMPORAL_OFFSET] = self._make_prediction_net_list(
num_feature_outputs, NUM_OFFSET_CHANNELS, name='temporal_offset')
return prediction_heads
def _initialize_target_assigners(self, stride, min_box_overlap_iou):
"""Initializes the target assigners and puts them in a dictionary.
Args:
stride: An integer indicating the stride of the image.
      min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes to not be penalized. This is used
        for computing the class specific center heatmaps.
Returns:
A dictionary of initialized target assigners for each task.
"""
target_assigners = {}
target_assigners[OBJECT_CENTER] = (
cn_assigner.CenterNetCenterHeatmapTargetAssigner(
stride, min_box_overlap_iou, self._compute_heatmap_sparse))
if self._od_params is not None:
target_assigners[DETECTION_TASK] = (
cn_assigner.CenterNetBoxTargetAssigner(stride))
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
target_assigners[task_name] = (
cn_assigner.CenterNetKeypointTargetAssigner(
stride=stride,
class_id=kp_params.class_id,
keypoint_indices=kp_params.keypoint_indices,
keypoint_std_dev=kp_params.keypoint_std_dev,
peak_radius=kp_params.offset_peak_radius,
per_keypoint_offset=kp_params.per_keypoint_offset,
compute_heatmap_sparse=self._compute_heatmap_sparse))
if self._mask_params is not None:
target_assigners[SEGMENTATION_TASK] = (
cn_assigner.CenterNetMaskTargetAssigner(stride))
if self._densepose_params is not None:
dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride
target_assigners[DENSEPOSE_TASK] = (
cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride))
if self._track_params is not None:
target_assigners[TRACK_TASK] = (
cn_assigner.CenterNetTrackTargetAssigner(
stride, self._track_params.num_track_ids))
if self._temporal_offset_params is not None:
target_assigners[TEMPORALOFFSET_TASK] = (
cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride))
return target_assigners
def _compute_object_center_loss(self, input_height, input_width,
object_center_predictions, per_pixel_weights):
"""Computes the object center loss.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_center_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_classes] representing the object center
feature maps.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the object center loss per instance.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
if self._center_params.use_only_known_classes:
gt_labeled_classes_list = self.groundtruth_lists(
fields.InputDataFields.groundtruth_labeled_classes)
batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)
batch_labeled_classes_shape = tf.shape(batch_labeled_classes)
batch_labeled_classes = tf.reshape(
batch_labeled_classes,
[batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]])
per_pixel_weights = per_pixel_weights * batch_labeled_classes
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[OBJECT_CENTER]
heatmap_targets = assigner.assign_center_targets_from_boxes(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
loss = 0.0
object_center_loss = self._center_params.classification_loss
# Loop through each feature output head.
for pred in object_center_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += object_center_loss(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_center_predictions)) * num_boxes)
return loss_per_instance
def _compute_object_detection_losses(self, input_height, input_width,
prediction_dict, per_pixel_weights):
"""Computes the weighted object detection losses.
This wrapper function calls the function which computes the losses for
object detection task and applies corresponding weights to the losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by
"predict" function. See "predict" function for more detailed
description.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
object detection task:
BOX_SCALE: the weighted scale (height/width) loss.
BOX_OFFSET: the weighted object offset loss.
"""
od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss(
scale_predictions=prediction_dict[BOX_SCALE],
offset_predictions=prediction_dict[BOX_OFFSET],
input_height=input_height,
input_width=input_width)
loss_dict = {}
loss_dict[BOX_SCALE] = (
self._od_params.scale_loss_weight * od_scale_loss)
loss_dict[BOX_OFFSET] = (
self._od_params.offset_loss_weight * od_offset_loss)
return loss_dict
def _compute_box_scale_and_offset_loss(self, input_height, input_width,
scale_predictions, offset_predictions):
"""Computes the scale loss of the object detection task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
scale_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object scale (i.e height and width).
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object offset.
Returns:
A tuple of two losses:
scale_loss: A float scalar tensor representing the object height/width
loss normalized by total number of boxes.
offset_loss: A float scalar tensor representing the object offset loss
          normalized by total number of boxes.
"""
# TODO(vighneshb) Explore a size invariant version of scale loss.
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
num_predictions = float(len(scale_predictions))
assigner = self._target_assigner_dict[DETECTION_TASK]
(batch_indices, batch_height_width_targets, batch_offset_targets,
batch_weights) = assigner.assign_size_and_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
scale_loss = 0
offset_loss = 0
localization_loss_fn = self._od_params.localization_loss
for scale_pred, offset_pred in zip(scale_predictions, offset_predictions):
# Compute the scale loss.
scale_pred = cn_assigner.get_batch_predictions_from_indices(
scale_pred, batch_indices)
scale_loss += localization_loss_fn(
scale_pred, batch_height_width_targets, weights=batch_weights)
# Compute the offset loss.
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += localization_loss_fn(
offset_pred, batch_offset_targets, weights=batch_weights)
scale_loss = tf.reduce_sum(scale_loss) / (
num_predictions * num_boxes)
offset_loss = tf.reduce_sum(offset_loss) / (
num_predictions * num_boxes)
return scale_loss, offset_loss
def _compute_keypoint_estimation_losses(self, task_name, input_height,
input_width, prediction_dict,
per_pixel_weights):
"""Computes the weighted keypoint losses."""
kp_params = self._kp_params_dict[task_name]
heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)
offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)
regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)
depth_key = get_keypoint_name(task_name, KEYPOINT_DEPTH)
heatmap_loss = self._compute_kp_heatmap_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
heatmap_predictions=prediction_dict[heatmap_key],
classification_loss_fn=kp_params.classification_loss,
per_pixel_weights=per_pixel_weights)
offset_loss = self._compute_kp_offset_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
offset_predictions=prediction_dict[offset_key],
localization_loss_fn=kp_params.localization_loss)
reg_loss = self._compute_kp_regression_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
regression_predictions=prediction_dict[regression_key],
localization_loss_fn=kp_params.localization_loss)
loss_dict = {}
loss_dict[heatmap_key] = (
kp_params.keypoint_heatmap_loss_weight * heatmap_loss)
loss_dict[offset_key] = (
kp_params.keypoint_offset_loss_weight * offset_loss)
loss_dict[regression_key] = (
kp_params.keypoint_regression_loss_weight * reg_loss)
if kp_params.predict_depth:
depth_loss = self._compute_kp_depth_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
depth_predictions=prediction_dict[depth_key],
localization_loss_fn=kp_params.localization_loss)
loss_dict[depth_key] = kp_params.keypoint_depth_loss_weight * depth_loss
return loss_dict
def _compute_kp_heatmap_loss(self, input_height, input_width, task_name,
heatmap_predictions, classification_loss_fn,
per_pixel_weights):
"""Computes the heatmap loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
heatmap_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_keypoints] representing the prediction heads
of the model for keypoint heatmap.
classification_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
loss: A float scalar tensor representing the object keypoint heatmap loss
normalized by number of instances.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
assigner = self._target_assigner_dict[task_name]
(keypoint_heatmap, num_instances_per_kp_type,
valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
flattened_valid_mask = _flatten_spatial_dimensions(
tf.expand_dims(valid_mask_batch, axis=-1))
    flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)
    # Sum over the number of instances per keypoint type to get the total
    # number of keypoints. Note that this is used to normalize the loss and we
    # keep the minimum value at 1 to avoid generating weird loss values when
    # no keypoints are in the image batch.
num_instances = tf.maximum(
tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32),
1.0)
loss = 0.0
# Loop through each feature output head.
for pred in heatmap_predictions:
pred = _flatten_spatial_dimensions(pred)
unweighted_loss = classification_loss_fn(
pred,
          flattened_heatmap_targets,
weights=tf.ones_like(per_pixel_weights))
# Apply the weights after the loss function to have full control over it.
loss += unweighted_loss * per_pixel_weights * flattened_valid_mask
loss = tf.reduce_sum(loss) / (
float(len(heatmap_predictions)) * num_instances)
return loss
def _compute_kp_offset_loss(self, input_height, input_width, task_name,
offset_predictions, localization_loss_fn):
"""Computes the offset loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for keypoint offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint offset predictions in CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint offset loss
normalized by number of total keypoints.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_offsets,
batch_weights) = assigner.assign_keypoints_offset_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list)
# Keypoint offset loss.
loss = 0.0
for prediction in offset_predictions:
batch_size, out_height, out_width, channels = _get_shape(prediction, 4)
if channels > 2:
prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
prediction = cn_assigner.get_batch_predictions_from_indices(
prediction, batch_indices)
# The dimensions passed are not as per the doc string but the loss
# still computes the correct value.
unweighted_loss = localization_loss_fn(
prediction,
batch_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(offset_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_kp_regression_loss(self, input_height, input_width, task_name,
regression_predictions, localization_loss_fn):
"""Computes the keypoint regression loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
regression_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_keypoints] representing the prediction
heads of the model for keypoint regression offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint regression offset predictions in
CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint regression offset
loss normalized by number of total keypoints.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
# keypoint regression offset loss.
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_regression_offsets,
batch_weights) = assigner.assign_joint_regression_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list,
gt_boxes_list=gt_boxes_list)
loss = 0.0
for prediction in regression_predictions:
batch_size, out_height, out_width, _ = _get_shape(prediction, 4)
reshaped_prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
reg_prediction = cn_assigner.get_batch_predictions_from_indices(
reshaped_prediction, batch_indices)
unweighted_loss = localization_loss_fn(
reg_prediction,
batch_regression_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(regression_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_kp_depth_loss(self, input_height, input_width, task_name,
depth_predictions, localization_loss_fn):
"""Computes the loss of the keypoint depth estimation.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
depth_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 1 (or num_keypoints)] representing the prediction
heads of the model for keypoint depth.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint offset predictions in CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint depth loss
normalized by number of total keypoints.
"""
kp_params = self._kp_params_dict[task_name]
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_keypoint_depths_list = self.groundtruth_lists(
fields.BoxListFields.keypoint_depths)
gt_keypoint_depth_weights_list = self.groundtruth_lists(
fields.BoxListFields.keypoint_depth_weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_depths,
batch_weights) = assigner.assign_keypoints_depth_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list,
gt_keypoint_depths_list=gt_keypoint_depths_list,
gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list)
if kp_params.per_keypoint_offset and not kp_params.per_keypoint_depth:
batch_indices = batch_indices[:, 0:3]
    # Keypoint depth loss.
loss = 0.0
for prediction in depth_predictions:
selected_depths = cn_assigner.get_batch_predictions_from_indices(
prediction, batch_indices)
if kp_params.per_keypoint_offset and kp_params.per_keypoint_depth:
selected_depths = tf.expand_dims(selected_depths, axis=-1)
# The dimensions passed are not as per the doc string but the loss
# still computes the correct value.
unweighted_loss = localization_loss_fn(
selected_depths,
batch_depths,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.squeeze(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(depth_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights):
"""Computes all the losses associated with segmentation.
Args:
prediction_dict: The dictionary returned from the predict() method.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary with segmentation losses.
"""
segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP]
mask_loss = self._compute_mask_loss(
segmentation_heatmap, per_pixel_weights)
losses = {
SEGMENTATION_HEATMAP: mask_loss
}
return losses
def _compute_mask_loss(self, segmentation_predictions,
per_pixel_weights):
"""Computes the mask loss.
Args:
segmentation_predictions: A list of float32 tensors of shape [batch_size,
out_height, out_width, num_classes].
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the mask loss.
"""
gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[SEGMENTATION_TASK]
heatmap_targets = assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
loss = 0.0
mask_loss_fn = self._mask_params.classification_loss
total_pixels_in_loss = tf.reduce_sum(per_pixel_weights)
# Loop through each feature output head.
for pred in segmentation_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += mask_loss_fn(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
# TODO(ronnyvotel): Consider other ways to normalize loss.
total_loss = tf.reduce_sum(loss) / (
float(len(segmentation_predictions)) * total_pixels_in_loss)
return total_loss
def _compute_densepose_losses(self, input_height, input_width,
prediction_dict):
"""Computes the weighted DensePose losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by the
"predict" function. See the "predict" function for more detailed
description.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
the DensePose task:
DENSEPOSE_HEATMAP: the weighted part segmentation loss.
DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.
"""
dp_heatmap_loss, dp_regression_loss = (
self._compute_densepose_part_and_coordinate_losses(
input_height=input_height,
input_width=input_width,
part_predictions=prediction_dict[DENSEPOSE_HEATMAP],
surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION]))
loss_dict = {}
loss_dict[DENSEPOSE_HEATMAP] = (
self._densepose_params.part_loss_weight * dp_heatmap_loss)
loss_dict[DENSEPOSE_REGRESSION] = (
self._densepose_params.coordinate_loss_weight * dp_regression_loss)
return loss_dict
def _compute_densepose_part_and_coordinate_losses(
self, input_height, input_width, part_predictions,
surface_coord_predictions):
"""Computes the individual losses for the DensePose task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
part_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_parts].
surface_coord_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_parts].
Returns:
A tuple with two scalar loss tensors: part_prediction_loss and
surface_coord_loss.
"""
gt_dp_num_points_list = self.groundtruth_lists(
fields.BoxListFields.densepose_num_points)
gt_dp_part_ids_list = self.groundtruth_lists(
fields.BoxListFields.densepose_part_ids)
gt_dp_surface_coords_list = self.groundtruth_lists(
fields.BoxListFields.densepose_surface_coords)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[DENSEPOSE_TASK]
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
assigner.assign_part_and_coordinate_targets(
height=input_height,
width=input_width,
gt_dp_num_points_list=gt_dp_num_points_list,
gt_dp_part_ids_list=gt_dp_part_ids_list,
gt_dp_surface_coords_list=gt_dp_surface_coords_list,
gt_weights_list=gt_weights_list))
part_prediction_loss = 0
surface_coord_loss = 0
classification_loss_fn = self._densepose_params.classification_loss
localization_loss_fn = self._densepose_params.localization_loss
num_predictions = float(len(part_predictions))
num_valid_points = tf.math.count_nonzero(batch_weights)
num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)
for part_pred, surface_coord_pred in zip(part_predictions,
surface_coord_predictions):
# Potentially upsample the feature maps, so that better quality (i.e.
# higher res) groundtruth can be applied.
if self._densepose_params.upsample_to_input_res:
part_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
part_pred)
surface_coord_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
surface_coord_pred)
# Compute the part prediction loss.
part_pred = cn_assigner.get_batch_predictions_from_indices(
part_pred, batch_indices[:, 0:3])
part_prediction_loss += classification_loss_fn(
part_pred[:, tf.newaxis, :],
batch_part_ids[:, tf.newaxis, :],
weights=batch_weights[:, tf.newaxis, tf.newaxis])
# Compute the surface coordinate loss.
batch_size, out_height, out_width, _ = _get_shape(
surface_coord_pred, 4)
surface_coord_pred = tf.reshape(
surface_coord_pred, [batch_size, out_height, out_width, -1, 2])
surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(
surface_coord_pred, batch_indices)
surface_coord_loss += localization_loss_fn(
surface_coord_pred,
batch_surface_coords,
weights=batch_weights[:, tf.newaxis])
part_prediction_loss = tf.reduce_sum(part_prediction_loss) / (
num_predictions * num_valid_points)
surface_coord_loss = tf.reduce_sum(surface_coord_loss) / (
num_predictions * num_valid_points)
return part_prediction_loss, surface_coord_loss
def _compute_track_losses(self, input_height, input_width, prediction_dict):
"""Computes all the losses associated with tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with tracking losses.
"""
object_reid_predictions = prediction_dict[TRACK_REID]
embedding_loss = self._compute_track_embedding_loss(
input_height=input_height,
input_width=input_width,
object_reid_predictions=object_reid_predictions)
losses = {
TRACK_REID: embedding_loss
}
return losses
def _compute_track_embedding_loss(self, input_height, input_width,
object_reid_predictions):
"""Computes the object ReID loss.
The embedding is trained as a classification task where the target is the
ID of each track among all tracks in the whole dataset.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_reid_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, reid_embed_size] representing the object
embedding feature maps.
Returns:
A float scalar tensor representing the object ReID loss per instance.
"""
gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[TRACK_TASK]
batch_indices, batch_weights, track_targets = assigner.assign_track_targets(
height=input_height,
width=input_width,
gt_track_ids_list=gt_track_ids_list,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
loss = 0.0
object_reid_loss = self._track_params.classification_loss
# Loop through each feature output head.
for pred in object_reid_predictions:
embedding_pred = cn_assigner.get_batch_predictions_from_indices(
pred, batch_indices)
reid_classification = self.track_reid_classification_net(embedding_pred)
loss += object_reid_loss(
reid_classification, track_targets, weights=batch_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_reid_predictions)) * num_boxes)
return loss_per_instance
def _compute_temporal_offset_loss(self, input_height,
input_width, prediction_dict):
"""Computes the temporal offset loss for tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with track/temporal_offset losses.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_offsets_list = self.groundtruth_lists(
fields.BoxListFields.temporal_offsets)
gt_match_list = self.groundtruth_lists(
fields.BoxListFields.track_match_flags)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = tf.cast(
get_num_instances_from_weights(gt_weights_list), tf.float32)
offset_predictions = prediction_dict[TEMPORAL_OFFSET]
num_predictions = float(len(offset_predictions))
assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK]
(batch_indices, batch_offset_targets,
batch_weights) = assigner.assign_temporal_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_offsets_list=gt_offsets_list,
gt_match_list=gt_match_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
offset_loss_fn = self._temporal_offset_params.localization_loss
loss_dict = {}
offset_loss = 0
for offset_pred in offset_predictions:
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += offset_loss_fn(offset_pred[:, None],
batch_offset_targets[:, None],
weights=batch_weights)
offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes)
loss_dict[TEMPORAL_OFFSET] = offset_loss
return loss_dict
def _should_clip_keypoints(self):
"""Returns a boolean indicating whether keypoint clipping should occur.
If there is only one keypoint task, clipping is controlled by the field
`clip_out_of_frame_keypoints`. If there are multiple keypoint tasks,
clipping logic is defined based on unanimous agreement of keypoint
parameters. If there is any ambiguity, clip_out_of_frame_keypoints is set
to False (default).
"""
kp_params_iterator = iter(self._kp_params_dict.values())
if len(self._kp_params_dict) == 1:
kp_params = next(kp_params_iterator)
return kp_params.clip_out_of_frame_keypoints
# Multi-task setting.
kp_params = next(kp_params_iterator)
should_clip = kp_params.clip_out_of_frame_keypoints
for kp_params in kp_params_iterator:
if kp_params.clip_out_of_frame_keypoints != should_clip:
return False
return should_clip
def _rescore_instances(self, classes, scores, keypoint_scores):
"""Rescores instances based on detection and keypoint scores.
Args:
classes: A [batch, max_detections] int32 tensor with detection classes.
scores: A [batch, max_detections] float32 tensor with detection scores.
keypoint_scores: A [batch, max_detections, total_num_keypoints] float32
tensor with keypoint scores.
Returns:
A [batch, max_detections] float32 tensor with possibly altered detection
scores.
"""
batch, max_detections, total_num_keypoints = (
shape_utils.combined_static_and_dynamic_shape(keypoint_scores))
classes_tiled = tf.tile(classes[:, :, tf.newaxis],
multiples=[1, 1, total_num_keypoints])
# TODO(yuhuic): Investigate whether this function will create subgraphs in
# tflite that will cause the model to run slower at inference.
for kp_params in self._kp_params_dict.values():
if not kp_params.rescore_instances:
continue
class_id = kp_params.class_id
keypoint_indices = kp_params.keypoint_indices
kpt_mask = tf.reduce_sum(
tf.one_hot(keypoint_indices, depth=total_num_keypoints), axis=0)
kpt_mask_tiled = tf.tile(kpt_mask[tf.newaxis, tf.newaxis, :],
multiples=[batch, max_detections, 1])
class_and_keypoint_mask = tf.math.logical_and(
classes_tiled == class_id,
kpt_mask_tiled == 1.0)
class_and_keypoint_mask_float = tf.cast(class_and_keypoint_mask,
dtype=tf.float32)
visible_keypoints = tf.math.greater(keypoint_scores, 0.0)
num_visible_keypoints = tf.reduce_sum(
class_and_keypoint_mask_float *
tf.cast(visible_keypoints, tf.float32), axis=-1)
num_visible_keypoints = tf.math.maximum(num_visible_keypoints, 1.0)
scores_for_class = (1./num_visible_keypoints) * (
tf.reduce_sum(class_and_keypoint_mask_float *
scores[:, :, tf.newaxis] *
keypoint_scores, axis=-1))
scores = tf.where(classes == class_id,
scores_for_class,
scores)
return scores
def preprocess(self, inputs):
outputs = shape_utils.resize_images_and_return_shapes(
inputs, self._image_resizer_fn)
resized_inputs, true_image_shapes = outputs
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def predict(self, preprocessed_inputs, _):
"""Predicts CenterNet prediction tensors given an input batch.
Feature extractors are free to produce predictions from multiple feature
maps and therefore we return a dictionary mapping strings to lists.
E.g. the hourglass backbone produces two feature maps.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding predicted tensors with
'preprocessed_inputs' - The input image after being resized and
preprocessed by the feature extractor.
'object_center' - A list of size num_feature_outputs containing
float tensors of size [batch_size, output_height, output_width,
num_classes] representing the predicted object center heatmap logits.
'box/scale' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted box height and width at each output
location. This field exists only when object detection task is
specified.
'box/offset' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted y and x offsets at each output location.
'$TASK_NAME/keypoint_heatmap' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, num_keypoints] representing the predicted
keypoint heatmap logits.
'$TASK_NAME/keypoint_offset' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2] representing the predicted keypoint
offsets at each output location.
'$TASK_NAME/keypoint_regression' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2 * num_keypoints] representing the
predicted keypoint regression at each output location.
'segmentation/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_classes] representing the mask logits.
'densepose/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_parts] representing the mask logits for each part.
'densepose/regression' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, 2 * num_parts] representing the DensePose surface
coordinate predictions.
        Note the $TASK_NAME is provided by the KeypointEstimationParams
        namedtuple used to differentiate between different keypoint tasks.
"""
features_list = self._feature_extractor(preprocessed_inputs)
predictions = {}
for head_name, heads in self._prediction_head_dict.items():
predictions[head_name] = [
head(feature) for (feature, head) in zip(features_list, heads)
]
predictions['preprocessed_inputs'] = preprocessed_inputs
self._batched_prediction_tensor_names = predictions.keys()
return predictions
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Computes scalar loss tensors with respect to provided groundtruth.
This function implements the various CenterNet losses.
Args:
prediction_dict: a dictionary holding predicted tensors returned by
"predict" function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
scope: Optional scope name.
Returns:
A dictionary mapping the keys [
'Loss/object_center',
'Loss/box/scale', (optional)
'Loss/box/offset', (optional)
'Loss/$TASK_NAME/keypoint/heatmap', (optional)
'Loss/$TASK_NAME/keypoint/offset', (optional)
'Loss/$TASK_NAME/keypoint/regression', (optional)
'Loss/segmentation/heatmap', (optional)
'Loss/densepose/heatmap', (optional)
'Loss/densepose/regression', (optional)
        'Loss/track/reid', (optional)
        'Loss/track/offset'] (optional)
      scalar tensors corresponding to the losses for different tasks. Note the
      $TASK_NAME is provided by the KeypointEstimationParams namedtuple used
      to differentiate between different keypoint tasks.
"""
_, input_height, input_width, _ = _get_shape(
prediction_dict['preprocessed_inputs'], 4)
output_height, output_width = (input_height // self._stride,
input_width // self._stride)
# TODO(vighneshb) Explore whether using floor here is safe.
output_true_image_shapes = tf.ceil(
tf.to_float(true_image_shapes) / self._stride)
valid_anchor_weights = get_valid_anchor_weights_in_flattened_image(
output_true_image_shapes, output_height, output_width)
valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2)
object_center_loss = self._compute_object_center_loss(
object_center_predictions=prediction_dict[OBJECT_CENTER],
input_height=input_height,
input_width=input_width,
per_pixel_weights=valid_anchor_weights)
losses = {
OBJECT_CENTER:
self._center_params.object_center_loss_weight * object_center_loss
}
if self._od_params is not None:
od_losses = self._compute_object_detection_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in od_losses:
od_losses[key] = od_losses[key] * self._od_params.task_loss_weight
losses.update(od_losses)
if self._kp_params_dict is not None:
for task_name, params in self._kp_params_dict.items():
kp_losses = self._compute_keypoint_estimation_losses(
task_name=task_name,
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in kp_losses:
kp_losses[key] = kp_losses[key] * params.task_loss_weight
losses.update(kp_losses)
if self._mask_params is not None:
seg_losses = self._compute_segmentation_losses(
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in seg_losses:
seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight
losses.update(seg_losses)
if self._densepose_params is not None:
densepose_losses = self._compute_densepose_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in densepose_losses:
densepose_losses[key] = (
densepose_losses[key] * self._densepose_params.task_loss_weight)
losses.update(densepose_losses)
if self._track_params is not None:
track_losses = self._compute_track_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in track_losses:
track_losses[key] = (
track_losses[key] * self._track_params.task_loss_weight)
losses.update(track_losses)
if self._temporal_offset_params is not None:
offset_losses = self._compute_temporal_offset_loss(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in offset_losses:
offset_losses[key] = (
offset_losses[key] * self._temporal_offset_params.task_loss_weight)
losses.update(offset_losses)
# Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the
# losses will be grouped together in Tensorboard.
return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val)
for key, val in losses.items()])
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Produces boxes given a prediction dict returned by predict().
Although predict returns a list of tensors, only the last tensor in
each list is used for making box predictions.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
**params: Currently ignored.
Returns:
detections: a dictionary containing the following fields
detection_boxes - A tensor of shape [batch, max_detections, 4]
holding the predicted boxes.
detection_boxes_strided: A tensor of shape [batch_size, num_detections,
4] holding the predicted boxes in absolute coordinates of the
feature extractor's final layer output.
detection_scores: A tensor of shape [batch, max_detections] holding
the predicted score for each box.
detection_multiclass_scores: A tensor of shape [batch, max_detection,
num_classes] holding multiclass score for each box.
detection_classes: An integer tensor of shape [batch, max_detections]
containing the detected class for each box.
num_detections: An integer tensor of shape [batch] containing the
number of detected boxes for each sample in the batch.
detection_keypoints: (Optional) A float tensor of shape [batch,
max_detections, num_keypoints, 2] with normalized keypoints. Any
invalid keypoints have their coordinates and scores set to 0.0.
detection_keypoint_scores: (Optional) A float tensor of shape [batch,
max_detection, num_keypoints] with scores for each keypoint.
detection_masks: (Optional) A uint8 tensor of shape [batch,
max_detections, mask_height, mask_width] with masks for each
detection. Background is specified with 0, and foreground is specified
with positive integers (1 for standard instance segmentation mask, and
1-indexed parts for DensePose task).
detection_surface_coords: (Optional) A float32 tensor of shape [batch,
max_detection, mask_height, mask_width, 2] with DensePose surface
coordinates, in (v, u) format.
detection_embeddings: (Optional) A float tensor of shape [batch,
max_detections, reid_embed_size] containing object embeddings.
"""
object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])
# Get x, y and channel indices corresponding to the top indices in the class
# center predictions.
detection_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(
object_center_prob, max_pool_kernel_size=3,
k=self._center_params.max_box_predictions))
multiclass_scores = tf.gather_nd(
object_center_prob, tf.stack([y_indices, x_indices], -1), batch_dims=1)
boxes_strided, classes, scores, num_detections = (
prediction_tensors_to_boxes(
detection_scores, y_indices, x_indices, channel_indices,
prediction_dict[BOX_SCALE][-1], prediction_dict[BOX_OFFSET][-1]))
boxes = convert_strided_predictions_to_normalized_boxes(
boxes_strided, self._stride, true_image_shapes)
postprocess_dict = {
fields.DetectionResultFields.detection_boxes: boxes,
fields.DetectionResultFields.detection_scores: scores,
fields.DetectionResultFields.detection_multiclass_scores:
multiclass_scores,
fields.DetectionResultFields.detection_classes: classes,
fields.DetectionResultFields.num_detections: num_detections,
'detection_boxes_strided': boxes_strided
}
if self._kp_params_dict:
# If the model is trained to predict only one class of object and its
# keypoint, we fall back to a simpler postprocessing function which uses
# the ops that are supported by tf.lite on GPU.
clip_keypoints = self._should_clip_keypoints()
if len(self._kp_params_dict) == 1 and self._num_classes == 1:
(keypoints, keypoint_scores,
keypoint_depths) = self._postprocess_keypoints_single_class(
prediction_dict, classes, y_indices, x_indices, boxes_strided,
num_detections)
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints, keypoint_scores, self._stride, true_image_shapes,
clip_out_of_frame_keypoints=clip_keypoints))
if keypoint_depths is not None:
postprocess_dict.update({
fields.DetectionResultFields.detection_keypoint_depths:
keypoint_depths
})
else:
# Multi-class keypoint estimation task does not support depth
# estimation.
assert all([
not kp_dict.predict_depth
for kp_dict in self._kp_params_dict.values()
])
keypoints, keypoint_scores = self._postprocess_keypoints_multi_class(
prediction_dict, classes, y_indices, x_indices,
boxes_strided, num_detections)
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints, keypoint_scores, self._stride, true_image_shapes,
clip_out_of_frame_keypoints=clip_keypoints))
# Update instance scores based on keypoints.
scores = self._rescore_instances(classes, scores, keypoint_scores)
postprocess_dict.update({
fields.DetectionResultFields.detection_scores: scores,
fields.DetectionResultFields.detection_keypoints: keypoints,
fields.DetectionResultFields.detection_keypoint_scores:
keypoint_scores
})
if self._mask_params:
masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1])
densepose_part_heatmap, densepose_surface_coords = None, None
densepose_class_index = 0
if self._densepose_params:
densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1]
densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1]
densepose_class_index = self._densepose_params.class_id
instance_masks, surface_coords = (
convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap, densepose_surface_coords,
stride=self._stride, mask_height=self._mask_params.mask_height,
mask_width=self._mask_params.mask_width,
score_threshold=self._mask_params.score_threshold,
densepose_class_index=densepose_class_index))
postprocess_dict[
fields.DetectionResultFields.detection_masks] = instance_masks
if self._densepose_params:
postprocess_dict[
fields.DetectionResultFields.detection_surface_coords] = (
surface_coords)
if self._track_params:
embeddings = self._postprocess_embeddings(prediction_dict,
y_indices, x_indices)
postprocess_dict.update({
fields.DetectionResultFields.detection_embeddings: embeddings
})
if self._temporal_offset_params:
offsets = prediction_tensors_to_temporal_offsets(
y_indices, x_indices,
prediction_dict[TEMPORAL_OFFSET][-1])
postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets
if self._non_max_suppression_fn:
boxes = tf.expand_dims(
postprocess_dict.pop(fields.DetectionResultFields.detection_boxes),
axis=-2)
multiclass_scores = postprocess_dict[
fields.DetectionResultFields.detection_multiclass_scores]
num_valid_boxes = postprocess_dict.pop(
fields.DetectionResultFields.num_detections)
      # Remove scores and classes as NMS will compute these from multiclass
# scores.
postprocess_dict.pop(fields.DetectionResultFields.detection_scores)
postprocess_dict.pop(fields.DetectionResultFields.detection_classes)
(nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,
num_detections) = self._non_max_suppression_fn(
boxes,
multiclass_scores,
additional_fields=postprocess_dict,
num_valid_boxes=num_valid_boxes)
postprocess_dict = nmsed_additional_fields
postprocess_dict[
fields.DetectionResultFields.detection_boxes] = nmsed_boxes
postprocess_dict[
fields.DetectionResultFields.detection_scores] = nmsed_scores
postprocess_dict[
fields.DetectionResultFields.detection_classes] = nmsed_classes
postprocess_dict[
fields.DetectionResultFields.num_detections] = num_detections
postprocess_dict.update(nmsed_additional_fields)
return postprocess_dict
def postprocess_single_instance_keypoints(self, prediction_dict,
true_image_shapes,
object_center_std_dev,
keypoint_std_dev):
"""Postprocess for predicting single instance keypoints.
This postprocess function is a special case of predicting the keypoint of
a single instance in the image (original CenterNet postprocess supports
multi-instance prediction). Due to the simplification assumption, this
postprocessing function achieves much faster inference time.
Here is a short list of the modifications made in this function:
1) Assume the model predicts only single class keypoint.
2) Assume there is only one instance in the image. If multiple instances
appear in the image, the model tends to predict the one that is closer
to the image center (the other ones are considered as background and
are rejected by the model).
      3) Avoid using top_k ops in the postprocessing logic since they are
         slower than using argmax.
4) The predictions other than the keypoints are ignored, e.g. boxes.
5) The input batch size is assumed to be 1.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
object_center_std_dev: The standard deviation of the Gaussian mask which
is applied to the object_heatmap. The goal is to upweight the instance
that is closer to the image center. Expressed in units of input image
pixels.
keypoint_std_dev: The standard deviation of the Gaussian masks which are
applied to the keypoint_heatmap based on the regressed joint location.
        It is used to upweight the keypoint joints that belong to the targeted
instance. If keypoint_std_dev contains one value, then we assume the
same value is applied to all keypoint joints. If keypoint_std_dev is a
list, it must contain num_keypoints elements, representing the standard
        deviation corresponding to each joint.
Returns:
detections: a dictionary containing the following fields
detection_keypoints: A float tensor of shape
[1, 1, num_keypoints, 2] with normalized keypoints. Any invalid
keypoints have their coordinates and scores set to 0.0.
detection_keypoint_scores: A float tensor of shape
[1, 1, num_keypoints] with scores for each keypoint.
"""
    # The number of keypoint tasks is expected to be 1.
assert len(self._kp_params_dict) == 1
task_name, kp_params = next(iter(self._kp_params_dict.items()))
keypoint_heatmap = tf.nn.sigmoid(prediction_dict[get_keypoint_name(
task_name, KEYPOINT_HEATMAP)][-1])
keypoint_offset = prediction_dict[get_keypoint_name(task_name,
KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[get_keypoint_name(
task_name, KEYPOINT_REGRESSION)][-1]
object_heatmap = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])
keypoint_depths = None
if kp_params.predict_depth:
keypoint_depths = prediction_dict[get_keypoint_name(
task_name, KEYPOINT_DEPTH)][-1]
keypoints, keypoint_scores, keypoint_depths = (
prediction_to_single_instance_keypoints(
object_heatmap=object_heatmap,
keypoint_heatmap=keypoint_heatmap,
keypoint_offset=keypoint_offset,
keypoint_regression=keypoint_regression,
stride=self._stride,
object_center_std_dev=object_center_std_dev,
keypoint_std_dev=keypoint_std_dev,
kp_params=kp_params,
keypoint_depths=keypoint_depths))
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints,
keypoint_scores,
self._stride,
true_image_shapes,
clip_out_of_frame_keypoints=False))
postprocess_dict = {
fields.DetectionResultFields.detection_keypoints: keypoints,
fields.DetectionResultFields.detection_keypoint_scores: keypoint_scores
}
if kp_params.predict_depth:
postprocess_dict.update({
fields.DetectionResultFields.detection_keypoint_depths:
keypoint_depths
})
return postprocess_dict
def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):
"""Performs postprocessing on embedding predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain embedding prediction
feature maps for tracking task.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
Returns:
embeddings: A [batch_size, max_detection, reid_embed_size] float32
tensor with L2 normalized embeddings extracted from detection box
centers.
"""
embedding_predictions = prediction_dict[TRACK_REID][-1]
embeddings = predicted_embeddings_at_object_centers(
embedding_predictions, y_indices, x_indices)
embeddings, _ = tf.linalg.normalize(embeddings, axis=-1)
return embeddings
def _scatter_keypoints_to_batch(self, num_ind, kpt_coords_for_example,
kpt_scores_for_example,
instance_inds_for_example, max_detections,
total_num_keypoints):
"""Helper function to convert scattered keypoints into batch."""
def left_fn(kpt_coords_for_example, kpt_scores_for_example,
instance_inds_for_example):
# Scatter into tensor where instances align with original detection
# instances. New shape of keypoint coordinates and scores are
# [1, max_detections, num_total_keypoints, 2] and
# [1, max_detections, num_total_keypoints], respectively.
return _pad_to_full_instance_dim(
kpt_coords_for_example, kpt_scores_for_example,
instance_inds_for_example,
self._center_params.max_box_predictions)
def right_fn():
kpt_coords_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints, 2], dtype=tf.float32)
kpt_scores_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints], dtype=tf.float32)
return (kpt_coords_for_example_all_det,
kpt_scores_for_example_all_det)
left_fn = functools.partial(left_fn, kpt_coords_for_example,
kpt_scores_for_example,
instance_inds_for_example)
# Use dimension values instead of tf.size for tf.lite compatibility.
return tf.cond(num_ind[0] > 0, left_fn, right_fn)
def _postprocess_keypoints_multi_class(self, prediction_dict, classes,
y_indices, x_indices, boxes,
num_detections):
"""Performs postprocessing on keypoint predictions.
This is the most general keypoint postprocessing function which supports
multiple keypoint tasks (e.g. human and dog keypoints) and multiple object
    detection classes. Note that it is the most expensive postprocessing logic
and is currently not tf.lite/tf.js compatible. See
_postprocess_keypoints_single_class if you plan to export the model in more
portable format.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with bounding
boxes in (un-normalized) output space.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
Returns:
A tuple of
keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32
tensor with keypoints in the output (strided) coordinate frame.
keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
float32 tensor with keypoint scores.
"""
total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict
in self._kp_params_dict.values())
batch_size, max_detections, _ = _get_shape(boxes, 3)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
for ex_ind in range(batch_size):
kpt_coords_for_class_list = []
kpt_scores_for_class_list = []
instance_inds_for_class_list = []
for task_name, kp_params in self._kp_params_dict.items():
keypoint_heatmap = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]
keypoint_offsets = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]
instance_inds = self._get_instance_indices(
classes, num_detections, ex_ind, kp_params.class_id)
num_ind = _get_shape(instance_inds, 1)
def true_fn(keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds, ex_ind,
kp_params):
"""Logics to execute when instance_inds is not an empty set."""
# Gather the feature map locations corresponding to the object class.
y_indices_for_kpt_class = tf.gather(y_indices, instance_inds, axis=1)
x_indices_for_kpt_class = tf.gather(x_indices, instance_inds, axis=1)
boxes_for_kpt_class = tf.gather(boxes, instance_inds, axis=1)
# Postprocess keypoints and scores for class and single image. Shapes
# are [1, num_instances_i, num_keypoints_i, 2] and
# [1, num_instances_i, num_keypoints_i], respectively. Note that
          # num_instances_i and num_keypoints_i refer to the number of
# instances and keypoints for class i, respectively.
(kpt_coords_for_class, kpt_scores_for_class, _) = (
self._postprocess_keypoints_for_class_and_image(
keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices_for_kpt_class, x_indices_for_kpt_class,
boxes_for_kpt_class, ex_ind, kp_params))
# Expand keypoint dimension (with padding) so that coordinates and
# scores have shape [1, num_instances_i, num_total_keypoints, 2] and
# [1, num_instances_i, num_total_keypoints], respectively.
kpts_coords_for_class_padded, kpt_scores_for_class_padded = (
_pad_to_full_keypoint_dim(kpt_coords_for_class,
kpt_scores_for_class,
kp_params.keypoint_indices,
total_num_keypoints))
return kpts_coords_for_class_padded, kpt_scores_for_class_padded
def false_fn():
"""Logics to execute when the instance_inds is an empty set."""
return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32),
tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32))
true_fn = functools.partial(
true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds, ex_ind,
kp_params)
# Use dimension values instead of tf.size for tf.lite compatibility.
results = tf.cond(num_ind[0] > 0, true_fn, false_fn)
kpt_coords_for_class_list.append(results[0])
kpt_scores_for_class_list.append(results[1])
instance_inds_for_class_list.append(instance_inds)
# Concatenate all keypoints across all classes (single example).
kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1)
kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1)
instance_inds_for_example = tf.concat(instance_inds_for_class_list,
axis=0)
(kpt_coords_for_example_all_det,
kpt_scores_for_example_all_det) = self._scatter_keypoints_to_batch(
num_ind, kpt_coords_for_example, kpt_scores_for_example,
instance_inds_for_example, max_detections, total_num_keypoints)
kpt_coords_for_example_list.append(kpt_coords_for_example_all_det)
kpt_scores_for_example_list.append(kpt_scores_for_example_all_det)
# Concatenate all keypoints and scores from all examples in the batch.
# Shapes are [batch_size, max_detections, num_total_keypoints, 2] and
# [batch_size, max_detections, num_total_keypoints], respectively.
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
return keypoints, keypoint_scores
def _postprocess_keypoints_single_class(self, prediction_dict, classes,
y_indices, x_indices, boxes,
num_detections):
"""Performs postprocessing on keypoint predictions (single class only).
    This function handles the special case of the keypoint task in which the
    model predicts only one class of bounding box/keypoint (e.g. person). Under
    this assumption, the function uses only tf.lite supported ops and should
    run faster.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with bounding
boxes in (un-normalized) output space.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
Returns:
A tuple of
keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32
tensor with keypoints in the output (strided) coordinate frame.
keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
float32 tensor with keypoint scores.
"""
# This function only works when there is only one keypoint task and the
    # number of classes equals one. For more general use cases, please use
# _postprocess_keypoints instead.
assert len(self._kp_params_dict) == 1 and self._num_classes == 1
task_name, kp_params = next(iter(self._kp_params_dict.items()))
keypoint_heatmap = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]
keypoint_offsets = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]
keypoint_depth_predictions = None
if kp_params.predict_depth:
keypoint_depth_predictions = prediction_dict[get_keypoint_name(
task_name, KEYPOINT_DEPTH)][-1]
batch_size, _, _ = _get_shape(boxes, 3)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
kpt_depths_for_example_list = []
for ex_ind in range(batch_size):
# Postprocess keypoints and scores for class and single image. Shapes
# are [1, max_detections, num_keypoints, 2] and
# [1, max_detections, num_keypoints], respectively.
(kpt_coords_for_class, kpt_scores_for_class, kpt_depths_for_class) = (
self._postprocess_keypoints_for_class_and_image(
keypoint_heatmap,
keypoint_offsets,
keypoint_regression,
classes,
y_indices,
x_indices,
boxes,
ex_ind,
kp_params,
keypoint_depth_predictions=keypoint_depth_predictions))
kpt_coords_for_example_list.append(kpt_coords_for_class)
kpt_scores_for_example_list.append(kpt_scores_for_class)
kpt_depths_for_example_list.append(kpt_depths_for_class)
# Concatenate all keypoints and scores from all examples in the batch.
# Shapes are [batch_size, max_detections, num_keypoints, 2] and
# [batch_size, max_detections, num_keypoints], respectively.
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
keypoint_depths = None
if kp_params.predict_depth:
keypoint_depths = tf.concat(kpt_depths_for_example_list, axis=0)
return keypoints, keypoint_scores, keypoint_depths
def _get_instance_indices(self, classes, num_detections, batch_index,
class_id):
"""Gets the instance indices that match the target class ID.
Args:
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
batch_index: An integer specifying the index for an example in the batch.
class_id: Class id
Returns:
instance_inds: A [num_instances] int32 tensor where each element indicates
the instance location within the `classes` tensor. This is useful to
associate the refined keypoints with the original detections (i.e.
boxes)
"""
classes = classes[batch_index:batch_index+1, ...]
_, max_detections = shape_utils.combined_static_and_dynamic_shape(
classes)
# Get the detection indices corresponding to the target class.
# Call tf.math.equal with matched tensor shape to make it tf.lite
# compatible.
valid_detections_with_kpt_class = tf.math.logical_and(
tf.range(max_detections) < num_detections[batch_index],
tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id)))
instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]
# Cast the indices tensor to int32 for tf.lite compatibility.
return tf.cast(instance_inds, tf.int32)
def _postprocess_keypoints_for_class_and_image(
self,
keypoint_heatmap,
keypoint_offsets,
keypoint_regression,
classes,
y_indices,
x_indices,
boxes,
batch_index,
kp_params,
keypoint_depth_predictions=None):
"""Postprocess keypoints for a single image and class.
Args:
keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32
tensor with keypoint heatmaps.
keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with
local offsets to keypoint centers.
keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]
float32 tensor with regressed offsets to all keypoints.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with detected
boxes in the output (strided) frame.
batch_index: An integer specifying the index for an example in the batch.
kp_params: A `KeypointEstimationParams` object with parameters for a
single keypoint class.
keypoint_depth_predictions: (optional) A [batch_size, height, width, 1]
float32 tensor representing the keypoint depth prediction.
Returns:
A tuple of
refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor
with refined keypoints for a single class in a single image, expressed
in the output (strided) coordinate frame. Note that `num_instances` is a
dynamic dimension, and corresponds to the number of valid detections
for the specific class.
refined_scores: A [1, num_instances, num_keypoints] float32 tensor with
keypoint scores.
refined_depths: A [1, num_instances, num_keypoints] float32 tensor with
        keypoint depths, or None if the input keypoint_depth_predictions is
None.
"""
num_keypoints = len(kp_params.keypoint_indices)
keypoint_heatmap = tf.nn.sigmoid(
keypoint_heatmap[batch_index:batch_index+1, ...])
keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...]
keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...]
keypoint_depths = None
if keypoint_depth_predictions is not None:
keypoint_depths = keypoint_depth_predictions[batch_index:batch_index + 1,
...]
y_indices = y_indices[batch_index:batch_index+1, ...]
x_indices = x_indices[batch_index:batch_index+1, ...]
boxes_slice = boxes[batch_index:batch_index+1, ...]
# Gather the regressed keypoints. Final tensor has shape
# [1, num_instances, num_keypoints, 2].
regressed_keypoints_for_objects = regressed_keypoints_at_object_centers(
keypoint_regression, y_indices, x_indices)
regressed_keypoints_for_objects = tf.reshape(
regressed_keypoints_for_objects, [1, -1, num_keypoints, 2])
# Get the candidate keypoints and scores.
# The shape of keypoint_candidates and keypoint_scores is:
# [1, num_candidates_per_keypoint, num_keypoints, 2] and
# [1, num_candidates_per_keypoint, num_keypoints], respectively.
(keypoint_candidates, keypoint_scores, num_keypoint_candidates,
keypoint_depth_candidates) = (
prediction_tensors_to_keypoint_candidates(
keypoint_heatmap,
keypoint_offsets,
keypoint_score_threshold=(
kp_params.keypoint_candidate_score_threshold),
max_pool_kernel_size=kp_params.peak_max_pool_kernel_size,
max_candidates=kp_params.num_candidates_per_keypoint,
keypoint_depths=keypoint_depths))
# Get the refined keypoints and scores, of shape
# [1, num_instances, num_keypoints, 2] and
# [1, num_instances, num_keypoints], respectively.
(refined_keypoints, refined_scores, refined_depths) = refine_keypoints(
regressed_keypoints_for_objects,
keypoint_candidates,
keypoint_scores,
num_keypoint_candidates,
bboxes=boxes_slice,
unmatched_keypoint_score=kp_params.unmatched_keypoint_score,
box_scale=kp_params.box_scale,
candidate_search_scale=kp_params.candidate_search_scale,
candidate_ranking_mode=kp_params.candidate_ranking_mode,
score_distance_offset=kp_params.score_distance_offset,
keypoint_depth_candidates=keypoint_depth_candidates)
return refined_keypoints, refined_scores, refined_depths
def regularization_losses(self):
return []
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
raise RuntimeError('CenterNetMetaArch not supported under TF1.x.')
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of Trackable objects to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
    should have the same parameters as this detection model with the exception
    of the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (not implemented
in CenterNet) is intended to be used to restore Slim-based models when
running Tensorflow 1.x.
TODO(jonathanhuang): Make this function consistent with other
meta-architectures.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`, `fine_tune`.
Default 'detection'.
'detection': used when loading models pre-trained on other detection
tasks. With this checkpoint type the weights of the feature extractor
are expected under the attribute 'feature_extractor'.
'classification': used when loading models pre-trained on an image
classification task. Note that only the encoder section of the network
is loaded and not the upsampling layers. With this checkpoint type,
the weights of only the encoder section are expected under the
attribute 'feature_extractor'.
'fine_tune': used when loading the entire CenterNet feature extractor
pre-trained on other tasks. The checkpoints saved during CenterNet
model training can be directly loaded using this type. With this
checkpoint type, the weights of the feature extractor are expected
under the attribute 'model._feature_extractor'.
For more details, see the tensorflow section on Loading mechanics.
https://www.tensorflow.org/guide/checkpoint#loading_mechanics
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
"""
supported_types = self._feature_extractor.supported_sub_model_types
supported_types += ['fine_tune']
if fine_tune_checkpoint_type not in supported_types:
message = ('Checkpoint type "{}" not supported for {}. '
'Supported types are {}')
raise ValueError(
message.format(fine_tune_checkpoint_type,
self._feature_extractor.__class__.__name__,
supported_types))
elif fine_tune_checkpoint_type == 'fine_tune':
feature_extractor_model = tf.train.Checkpoint(
_feature_extractor=self._feature_extractor)
return {'model': feature_extractor_model}
else:
return {'feature_extractor': self._feature_extractor.get_sub_model(
fine_tune_checkpoint_type)}
def updates(self):
if tf_version.is_tf2():
raise RuntimeError('This model is intended to be used with model_lib_v2 '
'which does not support updates()')
else:
update_ops = []
slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# Copy the slim ops to avoid modifying the collection
if slim_update_ops:
update_ops.extend(slim_update_ops)
return update_ops
the-stack_106_24724
"""Fourier Series"""
from __future__ import print_function, division
from sympy import pi, oo, Wild, Basic
from sympy.core.expr import Expr
from sympy.core.add import Add
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import sin, cos, sinc
from sympy.series.series_class import SeriesBase
from sympy.series.sequences import SeqFormula
from sympy.sets.sets import Interval
from sympy.simplify.fu import TR8, TR2, TR1, TR10, sincos_to_sum
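# The two helpers below compute the standard Fourier coefficients of a function
# f over an interval of length L taken from `limits`:
#   a_0 = (1/L) * Integral(f, limits)
#   a_n = (2/L) * Integral(f*cos(2*pi*n*x/L), limits)
#   b_n = (2/L) * Integral(f*sin(2*pi*n*x/L), limits)
# Each SeqFormula element keeps its cos/sin factor attached, so it is already a
# complete series term rather than a bare coefficient.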
def fourier_cos_seq(func, limits, n):
"""Returns the cos sequence in a Fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
cos_term = cos(2*n*pi*x / L)
formula = 2 * cos_term * integrate(func * cos_term, limits) / L
a0 = formula.subs(n, S.Zero) / 2
return a0, SeqFormula(2 * cos_term * integrate(func * cos_term, limits)
/ L, (n, 1, oo))
def fourier_sin_seq(func, limits, n):
"""Returns the sin sequence in a Fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
sin_term = sin(2*n*pi*x / L)
return SeqFormula(2 * sin_term * integrate(func * sin_term, limits)
/ L, (n, 1, oo))
def _process_limits(func, limits):
"""
Limits should be of the form (x, start, stop).
x should be a symbol. Both start and stop should be bounded.
* If x is not given, x is determined from func.
    * If limits is None, a limit of the form (x, -pi, pi) is returned.
Examples
========
>>> from sympy import pi
>>> from sympy.series.fourier import _process_limits as pari
>>> from sympy.abc import x
>>> pari(x**2, (x, -2, 2))
(x, -2, 2)
>>> pari(x**2, (-2, 2))
(x, -2, 2)
>>> pari(x**2, None)
(x, -pi, pi)
"""
def _find_x(func):
free = func.free_symbols
if len(free) == 1:
return free.pop()
elif not free:
return Dummy('k')
else:
raise ValueError(
" specify dummy variables for %s. If the function contains"
" more than one free symbol, a dummy variable should be"
" supplied explicitly e.g. FourierSeries(m*n**2, (n, -pi, pi))"
% func)
x, start, stop = None, None, None
if limits is None:
x, start, stop = _find_x(func), -pi, pi
if is_sequence(limits, Tuple):
if len(limits) == 3:
x, start, stop = limits
elif len(limits) == 2:
x = _find_x(func)
start, stop = limits
if not isinstance(x, Symbol) or start is None or stop is None:
raise ValueError('Invalid limits given: %s' % str(limits))
unbounded = [S.NegativeInfinity, S.Infinity]
if start in unbounded or stop in unbounded:
raise ValueError("Both the start and end value should be bounded")
return sympify((x, start, stop))
def finite_check(f, x, L):
def check_fx(exprs, x):
return x not in exprs.free_symbols
def check_sincos(expr, x, L):
if type(expr) == sin or type(expr) == cos:
sincos_args = expr.args[0]
if sincos_args.match(a*(pi/L)*x + b) is not None:
return True
else:
return False
expr = sincos_to_sum(TR2(TR1(f)))
res_expr = S.Zero
add_coeff = expr.as_coeff_add()
res_expr += add_coeff[0]
a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k != S.Zero, ])
b = Wild('b', properties=[lambda k: x not in k.free_symbols or k == S.Zero, ])
for s in add_coeff[1]:
mul_coeffs = s.as_coeff_mul()[1]
for t in mul_coeffs:
if not (check_fx(t, x) or check_sincos(t, x, L)):
return False, f
res_expr += TR10(s)
return True, res_expr.collect([sin(a*(pi/L)*x), cos(a*(pi/L)*x)])
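# For example, with f = 2 + 3*cos(pi*x/L) every multiplicative factor is either
# x-free or a cos term whose argument matches a*(pi/L)*x + b, so the expression
# is reported as a finite Fourier combination; with f = x**2 the x**2 factor
# fails both checks and (False, x**2) is returned.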
class FourierSeries(SeriesBase):
r"""Represents Fourier sine/cosine series.
    This class only represents a Fourier series.
No computation is performed.
For how to compute Fourier series, see the :func:`fourier_series`
docstring.
See Also
========
sympy.series.fourier.fourier_series
"""
def __new__(cls, *args):
args = map(sympify, args)
return Expr.__new__(cls, *args)
@property
def function(self):
return self.args[0]
@property
def x(self):
return self.args[1][0]
@property
def period(self):
return (self.args[1][1], self.args[1][2])
@property
def a0(self):
return self.args[2][0]
@property
def an(self):
return self.args[2][1]
@property
def bn(self):
return self.args[2][2]
@property
def interval(self):
return Interval(0, oo)
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return oo
def _eval_subs(self, old, new):
x = self.x
if old.has(x):
return self
def truncate(self, n=3):
"""
Return the first n nonzero terms of the series.
If n is None return an iterator.
Parameters
==========
n : int or None
Amount of non-zero terms in approximation or None.
Returns
=======
Expr or iterator
Approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.truncate(4)
2*sin(x) - sin(2*x) + 2*sin(3*x)/3 - sin(4*x)/2
See Also
========
sympy.series.fourier.FourierSeries.sigma_approximation
"""
if n is None:
return iter(self)
terms = []
for t in self:
if len(terms) == n:
break
if t is not S.Zero:
terms.append(t)
return Add(*terms)
def sigma_approximation(self, n=3):
r"""
Return :math:`\sigma`-approximation of Fourier series with respect
to order n.
Sigma approximation adjusts a Fourier summation to eliminate the Gibbs
phenomenon which would otherwise occur at discontinuities.
A sigma-approximated summation for a Fourier series of a T-periodical
function can be written as
.. math::
s(\theta) = \frac{1}{2} a_0 + \sum _{k=1}^{m-1}
\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr) \cdot
\left[ a_k \cos \Bigl( \frac{2\pi k}{T} \theta \Bigr)
+ b_k \sin \Bigl( \frac{2\pi k}{T} \theta \Bigr) \right],
where :math:`a_0, a_k, b_k, k=1,\ldots,{m-1}` are standard Fourier
series coefficients and
:math:`\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr)` is a Lanczos
:math:`\sigma` factor (expressed in terms of normalized
:math:`\operatorname{sinc}` function).
Parameters
==========
n : int
Highest order of the terms taken into account in approximation.
Returns
=======
Expr
Sigma approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.sigma_approximation(4)
2*sin(x)*sinc(pi/4) - 2*sin(2*x)/pi + 2*sin(3*x)*sinc(3*pi/4)/3
See Also
========
sympy.series.fourier.FourierSeries.truncate
Notes
=====
The behaviour of
:meth:`~sympy.series.fourier.FourierSeries.sigma_approximation`
is different from :meth:`~sympy.series.fourier.FourierSeries.truncate`
- it takes all nonzero terms of degree smaller than n, rather than
first n nonzero ones.
References
==========
.. [1] https://en.wikipedia.org/wiki/Gibbs_phenomenon
.. [2] https://en.wikipedia.org/wiki/Sigma_approximation
"""
terms = [sinc(pi * i / n) * t for i, t in enumerate(self[:n])
if t is not S.Zero]
return Add(*terms)
def shift(self, s):
"""Shift the function by a term independent of x.
f(x) -> f(x) + s
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
a0 = self.a0 + s
sfunc = self.function + s
return self.func(sfunc, self.args[1], (a0, self.an, self.bn))
def shiftx(self, s):
"""Shift x by a term independent of x.
f(x) -> f(x + s)
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x + s)
bn = self.bn.subs(x, x + s)
sfunc = self.function.subs(x, x + s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def scale(self, s):
"""Scale the function by a term independent of x.
f(x) -> s * f(x)
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.coeff_mul(s)
bn = self.bn.coeff_mul(s)
a0 = self.a0 * s
sfunc = self.args[0] * s
return self.func(sfunc, self.args[1], (a0, an, bn))
def scalex(self, s):
"""Scale x by a term independent of x.
f(x) -> f(s*x)
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x * s)
bn = self.bn.subs(x, x * s)
sfunc = self.function.subs(x, x * s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def _eval_as_leading_term(self, x):
for t in self:
if t is not S.Zero:
return t
def _eval_term(self, pt):
if pt == 0:
return self.a0
return self.an.coeff(pt) + self.bn.coeff(pt)
def __neg__(self):
return self.scale(-1)
def __add__(self, other):
if isinstance(other, FourierSeries):
if self.period != other.period:
raise ValueError("Both the series should have same periods")
x, y = self.x, other.x
function = self.function + other.function.subs(y, x)
if self.x not in function.free_symbols:
return function
an = self.an + other.an
bn = self.bn + other.bn
a0 = self.a0 + other.a0
return self.func(function, self.args[1], (a0, an, bn))
return Add(self, other)
def __sub__(self, other):
return self.__add__(-other)
class FiniteFourierSeries(Basic):
def __new__(cls, *args):
obj = Basic.__new__(cls, *args)
return obj
def truncate(self, n=3):
return Add(*self._args)
def fourier_series(f, limits=None):
"""Computes Fourier sine/cosine series expansion.
Returns a :class:`FourierSeries` object.
Examples
========
>>> from sympy import fourier_series, pi, cos
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.truncate(n=3)
-4*cos(x) + cos(2*x) + pi**2/3
Shifting
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
Scaling
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
Notes
=====
Computing Fourier series can be slow
due to the integration required in computing
an, bn.
It is faster to compute Fourier series of a function
by using shifting and scaling on an already
computed Fourier series rather than computing
again.
e.g. If the Fourier series of ``x**2`` is known
the Fourier series of ``x**2 - 1`` can be found by shifting by ``-1``.
See Also
========
sympy.series.fourier.FourierSeries
References
==========
.. [1] mathworld.wolfram.com/FourierSeries.html
"""
f = sympify(f)
limits = _process_limits(f, limits)
x = limits[0]
if x not in f.free_symbols:
return f
L = abs(limits[2] - limits[1])/2
is_finite, res_f = finite_check(f, x, L)
if is_finite:
return FiniteFourierSeries(res_f)
n = Dummy('n')
neg_f = f.subs(x, -x)
if f == neg_f:
a0, an = fourier_cos_seq(f, limits, n)
bn = SeqFormula(0, (1, oo))
elif f == -neg_f:
a0 = S.Zero
an = SeqFormula(0, (1, oo))
bn = fourier_sin_seq(f, limits, n)
else:
a0, an = fourier_cos_seq(f, limits, n)
bn = fourier_sin_seq(f, limits, n)
return FourierSeries(f, limits, (a0, an, bn))
the-stack_106_24725
__author__ = 'sxjscience'
import numpy
import time
import theano
import logging
import theano.tensor as TT
from sparnn.utils import *
from sparnn.optimizers import Optimizer
logger = logging.getLogger(__name__)
class AdaGrad(Optimizer):
"""
Duchi, J., Hazan, E., & Singer, Y. "Adaptive subgradient methods for online learning and stochastic optimization"
Chris Dyer "Notes on AdaGrad." http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf.
"""
def __init__(self,
model,
train_data_iterator,
valid_data_iterator,
test_data_iterator,
hyper_param
):
super(AdaGrad, self).__init__(model, train_data_iterator, valid_data_iterator, test_data_iterator, hyper_param)
self.learning_rate = numpy_floatX(hyper_param["learning_rate"])
def set_name(self):
self.name = "AdaGrad-" + self.id
def get_update_func(self):
updates = []
lr = TT.scalar(self._s("learning_rate"), dtype=theano.config.floatX)
eps = numpy_floatX(1E-6)
self.g2_list = [theano.shared(p.get_value() * numpy_floatX(0.), name="%s.acc_g" % p.name) for p in self.model.param]
g2_new_list = [g2 + TT.square(g) for g, g2 in zip(self.grad, self.g2_list)]
updates += [(g2, g2_new) for g2, g2_new in zip(self.g2_list, g2_new_list)]
updates += [(p, p - lr*g/TT.sqrt(g2_new + eps)) for p, g, g2_new in zip(self.model.param, self.grad, g2_new_list)]
return self.model.get_update_func(updates, [lr])
def learning_param(self):
return [self.learning_rate]
def print_stat(self):
super(AdaGrad, self).print_stat()
logger.info(" Learning Parameters:")
logger.info(" Clipping Threshold: " + str(self.clip_threshold))
        logger.info("          Learning Rate: " + str(self.learning_rate))
the-stack_106_24727
"""
Created on Apr 10, 2017
@author: lubo
"""
import os
import matplotlib as mpl
import numpy as np
from dae.pheno.pheno_db import Measure
import matplotlib.pyplot as plt
from dae.pheno_browser.db import DbManager
from dae.pheno.common import Role, MeasureType
from dae.pheno_browser.graphs import draw_linregres
from dae.pheno_browser.graphs import draw_measure_violinplot
from dae.pheno_browser.graphs import draw_categorical_violin_distribution
from dae.pheno_browser.graphs import draw_ordinal_violin_distribution
from dae.utils.progress import progress, progress_nl
mpl.use("PS") # noqa
plt.ioff() # noqa
class PreparePhenoBrowserBase(object):
LARGE_DPI = 150
SMALL_DPI = 16
def __init__(
self,
pheno_name,
phenotype_data,
output_dir,
pheno_regressions=None,
images_dir=None,
):
assert os.path.exists(output_dir)
self.output_dir = output_dir
if images_dir is None:
images_dir = os.path.join(self.output_dir, "images")
if not os.path.exists(images_dir):
os.makedirs(images_dir)
assert os.path.exists(images_dir)
self.images_dir = images_dir
self.phenotype_data = phenotype_data
self.pheno_regressions = pheno_regressions
self.browser_db = os.path.join(
output_dir, "{}_browser.db".format(pheno_name)
)
def load_measure(self, measure):
df = self.phenotype_data.get_persons_values_df([measure.measure_id])
return df
def _augment_measure_values_df(self, augment, augment_name, measure):
assert augment is not None
assert isinstance(augment, Measure)
augment_instrument = augment.instrument_name
augment_measure = augment.measure_name
if augment_instrument is not None:
augment_id = "{}.{}".format(augment_instrument, augment_measure)
else:
augment_id = "{}.{}".format(
measure.instrument_name, augment_measure
)
if augment_id == measure.measure_id:
return None
if not self.phenotype_data.has_measure(augment_id):
return None
df = self.phenotype_data.get_persons_values_df(
[augment_id, measure.measure_id]
)
df.loc[df.role == Role.mom, "role"] = Role.parent
df.loc[df.role == Role.dad, "role"] = Role.parent
df.rename(columns={augment_id: augment_name}, inplace=True)
return df
@staticmethod
def _measure_to_dict(measure):
return {
"measure_id": measure.measure_id,
"instrument_name": measure.instrument_name,
"measure_name": measure.measure_name,
"measure_type": measure.measure_type,
"description": measure.description,
"values_domain": measure.values_domain,
}
def figure_filepath(self, measure, suffix):
filename = "{}.{}.png".format(measure.measure_id, suffix)
outdir = os.path.join(self.images_dir, measure.instrument_name)
if not os.path.exists(outdir):
os.mkdir(outdir)
filepath = os.path.join(outdir, filename)
return filepath
def figure_path(self, measure, suffix):
filename = "{}.{}.png".format(measure.measure_id, suffix)
filepath = os.path.join(measure.instrument_name, filename)
return filepath
def save_fig(self, measure, suffix):
if "/" in measure.measure_id:
return (None, None)
small_filepath = self.figure_filepath(
measure, "{}_small".format(suffix)
)
plt.savefig(small_filepath, dpi=self.SMALL_DPI)
filepath = self.figure_filepath(measure, suffix)
plt.savefig(filepath, dpi=self.LARGE_DPI)
plt.close()
return (
self.figure_path(measure, "{}_small".format(suffix)),
self.figure_path(measure, suffix),
)
def build_regression(self, dependent_measure, independent_measure, jitter):
MIN_VALUES = 5
MIN_UNIQUE_VALUES = 2
res = {}
if dependent_measure.measure_id == independent_measure.measure_id:
return res
aug_col_name = independent_measure.measure_name
aug_df = self._augment_measure_values_df(
independent_measure, aug_col_name, dependent_measure
)
if aug_df is None:
return res
aug_df = aug_df[aug_df.role == Role.prb]
aug_df.loc[:, aug_col_name] = aug_df[aug_col_name].astype(np.float32)
aug_df = aug_df[np.isfinite(aug_df[aug_col_name])]
if (
aug_df[dependent_measure.measure_id].nunique() < MIN_UNIQUE_VALUES
or len(aug_df) <= MIN_VALUES
):
return res
res_male, res_female = draw_linregres(
aug_df, aug_col_name, dependent_measure.measure_id, jitter
)
res["pvalue_regression_male"] = (
res_male.pvalues[aug_col_name] if res_male is not None else None
)
res["pvalue_regression_female"] = (
res_female.pvalues[aug_col_name]
if res_female is not None
else None
)
if res_male is not None or res_female is not None:
(
res["figure_regression_small"],
res["figure_regression"],
) = self.save_fig(
dependent_measure, "prb_regression_by_{}".format(aug_col_name)
)
return res
def build_values_violinplot(self, measure):
df = self.load_measure(measure)
drawn = draw_measure_violinplot(df, measure.measure_id)
res = {}
if drawn:
(
res["figure_distribution_small"],
res["figure_distribution"],
) = self.save_fig(measure, "violinplot")
return res
def build_values_categorical_distribution(self, measure):
df = self.load_measure(measure)
drawn = draw_categorical_violin_distribution(df, measure.measure_id)
res = {}
if drawn:
(
res["figure_distribution_small"],
res["figure_distribution"],
) = self.save_fig(measure, "distribution")
return res
def build_values_other_distribution(self, measure):
df = self.load_measure(measure)
drawn = draw_categorical_violin_distribution(df, measure.measure_id)
res = {}
if drawn:
(
res["figure_distribution_small"],
res["figure_distribution"],
) = self.save_fig(measure, "distribution")
return res
def build_values_ordinal_distribution(self, measure):
df = self.load_measure(measure)
drawn = draw_ordinal_violin_distribution(df, measure.measure_id)
res = {}
if drawn:
(
res["figure_distribution_small"],
res["figure_distribution"],
) = self.save_fig(measure, "distribution")
return res
def dump_browser_variable(self, var):
print("-------------------------------------------")
print(var["measure_id"])
print("-------------------------------------------")
print("instrument: {}".format(var["instrument_name"]))
print("measure: {}".format(var["measure_name"]))
print("type: {}".format(var["measure_type"]))
print("description: {}".format(var["description"]))
print("domain: {}".format(var["values_domain"]))
print("-------------------------------------------")
def _get_measure_by_name(self, measure_name, instrument_name):
if instrument_name:
measure_id = ".".join([instrument_name, measure_name])
if self.phenotype_data.has_measure(measure_id):
return self.phenotype_data.get_measure(measure_id)
return None
def handle_measure(self, measure):
res = PreparePhenoBrowserBase._measure_to_dict(measure)
if measure.measure_type == MeasureType.continuous:
res.update(self.build_values_violinplot(measure))
elif measure.measure_type == MeasureType.ordinal:
res.update(self.build_values_ordinal_distribution(measure))
elif measure.measure_type == MeasureType.categorical:
res.update(self.build_values_categorical_distribution(measure))
return res
def _has_regression_measure(self, measure_name, instrument_name):
for reg in self.pheno_regressions.regression.values():
if measure_name == reg.measure_name:
if (
instrument_name
and reg.instrument_name
and instrument_name != reg.instrument_name
):
continue
return True
return False
def handle_regressions(self, measure):
if measure.measure_type not in [
MeasureType.continuous,
MeasureType.ordinal,
]:
return
for reg_id, reg in self.pheno_regressions.regression.items():
res = {"measure_id": measure.measure_id}
reg_measure = self._get_measure_by_name(
reg.measure_name,
reg.instrument_name or measure.instrument_name,
)
if not reg_measure:
continue
if self._has_regression_measure(
measure.measure_name, measure.instrument_name
):
continue
res["regression_id"] = reg_id
res.update(self.build_regression(measure, reg_measure, reg.jitter))
if (
res.get("pvalue_regression_male") is not None
or res.get("pvalue_regression_female") is not None
):
yield res
def run(self):
db = DbManager(dbfile=self.browser_db)
db.build()
if self.pheno_regressions:
for reg_id, reg_data in self.pheno_regressions.regression.items():
db.save_regression(
{
"regression_id": reg_id,
"instrument_name": reg_data.instrument_name,
"measure_name": reg_data.measure_name,
"display_name": reg_data.display_name,
}
)
for instrument in list(self.phenotype_data.instruments.values()):
progress_nl()
for measure in list(instrument.measures.values()):
progress(text=str(measure) + "\n")
var = self.handle_measure(measure)
db.save(var)
if self.pheno_regressions:
for regression in self.handle_regressions(measure):
db.save_regression_values(regression)
the-stack_106_24731
from .job import CronJob
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import asyncio
import subprocess
import logging
logger = logging.getLogger(__package__)
class JobLoader:
def __init__(self,
name=None,
loop=None,
log_path=None,
log_level=None,
thread=True,
**kwargs):
self.name = name
if thread:
self.executor = ThreadPoolExecutor()
else:
self.executor = ProcessPoolExecutor()
self.loop = loop or asyncio.get_event_loop()
self.log_path = log_path.strip('/')
async def load(self):
raise NotImplementedError
@staticmethod
def sub_process_command(command, env, name, log_path):
out_path_name = log_path + '/' + f'{name}_out.txt'
err_path_name = log_path + '/' + f'{name}_err.txt'
out_log_file = open(out_path_name, 'a')
err_log_file = open(err_path_name, 'a')
subprocess.run(command.split(','),
env=env,
stdout=out_log_file,
stderr=err_log_file)
out_log_file.close()
err_log_file.close()
def create_executor_task(self,
command=None,
env=None,
name=None,
log_path=None):
logger.info('create_executor_task---start\n\n')
try:
result = self.loop.run_in_executor(self.executor,
self.sub_process_command,
*(command, env, name, log_path))
except Exception as tmp:
logger.info('create_executor_task_exception')
logger.exception(tmp)
result = None
return result
def parse_env(self, env_string=None):
result = {}
for i in env_string.split(','):
k, v = i.split('=')
result[k] = v
return result
def parse(self, line_data):
"""
crontab name job env total_times
example='*/2,*,*,*,* ceshi python,--name=12 aa=123,bb=345 10'
"""
try:
cron, name, command, *env_total = line_data.split(' ')
if len(env_total) == 2:
env, total_times = env_total
total_times = int(total_times)
else:
env = env_total[0]
total_times = None
job = self.parse_cron(cron=cron,
name=name,
total_times=total_times)
if job is not None:
env = self.parse_env(env_string=env)
job.go(self.create_executor_task,
command=command,
env=env,
name=name,
log_path=self.log_path)
return job
except Exception as tmp:
logger.info('cron file format error')
logger.exception(tmp)
return None
def parse_cron(self, cron=None, name=None, total_times=None):
minute, hour, day, month, week = [i.strip() for i in cron.split(',')]
month_every, month_at_time = self.parse_detail(month)
week_every, week_at_time = self.parse_detail(week)
day_every, day_at_time = self.parse_detail(day)
hour_every, hour_at_time = self.parse_detail(hour)
minute_every, minute_at_time = self.parse_detail(minute)
tmp_job = CronJob(name=name, run_total=total_times)
if (month_at_time and day_at_time and hour_at_time and minute_at_time):
tmp_job = tmp_job.at(f"{month_at_time}-{day_at_time}\
{hour_at_time}:{minute_at_time}")
return tmp_job
if month != '*':
if month_every is None:
month_every = 1
tmp_job = tmp_job.every(month_every)
if day_at_time is not None:
tmp_job = tmp_job.month_day(day_at_time)
if hour_at_time is not None and minute is not None:
tmp_job = tmp_job.at_time(f'{hour_at_time}:{minute_at_time}')
elif week != '*':
if week_every is None:
                week_every = 1
if week_at_time is not None:
tmp_job = tmp_job.week_day(week_at_time)
if hour_at_time is not None and minute is not None:
tmp_job = tmp_job.at_time(f'{hour_at_time}:{minute_at_time}')
elif day != '*':
if day_every is None:
day_every = 1
tmp_job = tmp_job.every(day_every).day
if hour_at_time is not None and minute is not None:
tmp_job = tmp_job.at_time(f'{hour_at_time}:{minute_at_time}')
elif hour != '*':
if hour_every is None:
hour_every = 1
tmp_job = tmp_job.every(hour_every).hour
if minute is not None:
tmp_job = tmp_job.at_time(f':{minute_at_time}')
elif minute != '*':
if minute_every is None:
minute_every = 1
tmp_job = tmp_job.every(minute_every).minute
if tmp_job.unit:
return tmp_job
else:
return None
def parse_detail(self, data=None):
data = data.split('/')
every = None
at_time = None
if len(data) > 1:
f_data, every = data
else:
at_time = data[0]
if every is not None:
if '*' not in every:
every = int(every)
if at_time is not None:
if '*' not in at_time:
at_time = int(at_time)
return every, at_time
def gen_job(self, data=None):
pass
async def run(self, schedule=None):
try:
jobs = await self.load()
for job in jobs:
if isinstance(job, CronJob):
self.add_job_schedule(job, schedule)
print(schedule.jobs)
except KeyboardInterrupt:
logger.info('keyboard exit')
self.executor.shutdown()
@staticmethod
def add_job_schedule(job, schedule):
schedule.add_job(job)
class FileJobLoader(JobLoader):
def __init__(self, file_path=None, **kwargs):
super(FileJobLoader, self).__init__(**kwargs)
self.file_path = file_path
async def load(self):
result = []
if self.file_path:
            # Read and close the cron file via a context manager.
            with open(self.file_path, 'r') as file:
                crons = [i.strip('\n').strip() for i in file.readlines()]
for i in crons:
tmp_cron = self.parse(i)
if tmp_cron is not None:
result.append(tmp_cron)
return result
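
# A minimal usage sketch (hedged: ``CronJob``, the schedule object expected by
# ``run()``, and the loop/executor/log_path wiring live outside this file, so
# the names below are assumptions about that surrounding code, not part of it):
#
#   loader = FileJobLoader(file_path='jobs.cron',
#                          loop=loop, executor=executor, log_path='logs')
#   loop.run_until_complete(loader.run(schedule=schedule))
#
# where each line of ``jobs.cron`` follows the documented format, e.g.
#   */2,*,*,*,* demo python,--version aa=123,bb=345 10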
|
the-stack_106_24733 | """
Machine arithmetics - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
from numpy.core._ufunc_config import errstate
from numpy.core.overrides import set_module
# Need to speed this up...especially for longfloat
@set_module('numpy')
class MachAr:
"""
Diagnosing machine parameters.
Attributes
----------
ibeta : int
Radix in which numbers are represented.
it : int
Number of base-`ibeta` digits in the floating point mantissa M.
machep : int
Exponent of the smallest (most negative) power of `ibeta` that,
added to 1.0, gives something different from 1.0
eps : float
Floating-point number ``beta**machep`` (floating point precision)
negep : int
Exponent of the smallest power of `ibeta` that, subtracted
from 1.0, gives something different from 1.0.
epsneg : float
Floating-point number ``beta**negep``.
iexp : int
Number of bits in the exponent (including its sign and bias).
minexp : int
Smallest (most negative) power of `ibeta` consistent with there
being no leading zeros in the mantissa.
xmin : float
Floating-point number ``beta**minexp`` (the smallest [in
magnitude] positive floating point number with full precision).
maxexp : int
Smallest (positive) power of `ibeta` that causes overflow.
xmax : float
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
usable floating value).
irnd : int
In ``range(6)``, information on what kind of rounding is done
in addition, and on how underflow is handled.
ngrd : int
Number of 'guard digits' used when truncating the product
of two mantissas to fit the representation.
epsilon : float
Same as `eps`.
tiny : float
An alias for `smallest_normal`, kept for backwards compatibility.
huge : float
Same as `xmax`.
precision : float
``- int(-log10(eps))``
resolution : float
``- 10**(-precision)``
smallest_normal : float
The smallest positive floating point number with 1 as leading bit in
the mantissa following IEEE-754. Same as `xmin`.
smallest_subnormal : float
The smallest positive floating point number with 0 as leading bit in
the mantissa following IEEE-754.
Parameters
----------
float_conv : function, optional
Function that converts an integer or integer array to a float
or float array. Default is `float`.
int_conv : function, optional
Function that converts a float or float array to an integer or
integer array. Default is `int`.
float_to_float : function, optional
Function that converts a float array to float. Default is `float`.
Note that this does not seem to do anything useful in the current
implementation.
float_to_str : function, optional
Function that converts a single float to a string. Default is
``lambda v:'%24.16e' %v``.
title : str, optional
Title that is printed in the string representation of `MachAr`.
See Also
--------
finfo : Machine limits for floating point types.
iinfo : Machine limits for integer types.
References
----------
.. [1] Press, Teukolsky, Vetterling and Flannery,
"Numerical Recipes in C++," 2nd ed,
Cambridge University Press, 2002, p. 31.
"""
def __init__(self, float_conv=float,int_conv=int,
float_to_float=float,
float_to_str=lambda v:'%24.16e' % v,
title='Python floating point number'):
"""
float_conv - convert integer to float (array)
int_conv - convert float (array) to integer
float_to_float - convert float array to float
float_to_str - convert array float to str
title - description of used floating point numbers
"""
# We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running arch.
with errstate(under='ignore'):
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
max_iterN = 10000
msg = "Did not converge after %d tries with %s"
one = float_conv(1)
two = one + one
zero = one - one
# Do we really need to do this? Aren't they 2 and 2.0?
# Determine ibeta and beta
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
b = one
for _ in range(max_iterN):
b = b + b
temp = a + b
itemp = int_conv(temp-a)
if any(itemp != 0):
break
else:
raise RuntimeError(msg % (_, one.dtype))
ibeta = itemp
beta = float_conv(ibeta)
# Determine it and irnd
it = -1
b = one
for _ in range(max_iterN):
it = it + 1
b = b * beta
temp = b + one
temp1 = temp - b
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
betah = beta / two
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
temp = a + betah
irnd = 0
if any(temp-a != zero):
irnd = 1
tempa = a + beta
temp = tempa + betah
if irnd == 0 and any(temp-tempa != zero):
irnd = 2
# Determine negep and epsneg
negep = it + 3
betain = one / beta
a = one
for i in range(negep):
a = a * betain
b = a
for _ in range(max_iterN):
temp = one - a
if any(temp-one != zero):
break
a = a * beta
negep = negep - 1
# Prevent infinite loop on PPC with gcc 4.0:
if negep < 0:
raise RuntimeError("could not determine machine tolerance "
"for 'negep', locals() -> %s" % (locals()))
else:
raise RuntimeError(msg % (_, one.dtype))
negep = -negep
epsneg = a
# Determine machep and eps
machep = - it - 3
a = b
for _ in range(max_iterN):
temp = one + a
if any(temp-one != zero):
break
a = a * beta
machep = machep + 1
else:
raise RuntimeError(msg % (_, one.dtype))
eps = a
# Determine ngrd
ngrd = 0
temp = one + eps
if irnd == 0 and any(temp*one - one != zero):
ngrd = 1
# Determine iexp
i = 0
k = 1
z = betain
t = one + eps
nxres = 0
for _ in range(max_iterN):
y = z
z = y*y
a = z*one # Check here for underflow
temp = z*t
if any(a+a == zero) or any(abs(z) >= y):
break
temp1 = temp * betain
if any(temp1*beta == z):
break
i = i + 1
k = k + k
else:
raise RuntimeError(msg % (_, one.dtype))
if ibeta != 10:
iexp = i + 1
mx = k + k
else:
iexp = 2
iz = ibeta
while k >= iz:
iz = iz * ibeta
iexp = iexp + 1
mx = iz + iz - 1
# Determine minexp and xmin
for _ in range(max_iterN):
xmin = y
y = y * betain
a = y * one
temp = y * t
if any((a + a) != zero) and any(abs(y) < xmin):
k = k + 1
temp1 = temp * betain
if any(temp1*beta == y) and any(temp != y):
nxres = 3
xmin = y
break
else:
break
else:
raise RuntimeError(msg % (_, one.dtype))
minexp = -k
# Determine maxexp, xmax
if mx <= k + k - 3 and ibeta != 10:
mx = mx + mx
iexp = iexp + 1
maxexp = mx + minexp
irnd = irnd + nxres
if irnd >= 2:
maxexp = maxexp - 2
i = maxexp + minexp
if ibeta == 2 and not i:
maxexp = maxexp - 1
if i > 20:
maxexp = maxexp - 1
if any(a != y):
maxexp = maxexp - 2
xmax = one - epsneg
if any(xmax*one != xmax):
xmax = one - beta*epsneg
xmax = xmax / (xmin*beta*beta*beta)
i = maxexp + minexp + 3
for j in range(i):
if ibeta == 2:
xmax = xmax + xmax
else:
xmax = xmax * beta
smallest_subnormal = abs(xmin / beta ** (it))
self.ibeta = ibeta
self.it = it
self.negep = negep
self.epsneg = float_to_float(epsneg)
self._str_epsneg = float_to_str(epsneg)
self.machep = machep
self.eps = float_to_float(eps)
self._str_eps = float_to_str(eps)
self.ngrd = ngrd
self.iexp = iexp
self.minexp = minexp
self.xmin = float_to_float(xmin)
self._str_xmin = float_to_str(xmin)
self.maxexp = maxexp
self.xmax = float_to_float(xmax)
self._str_xmax = float_to_str(xmax)
self.irnd = irnd
self.title = title
# Commonly used parameters
self.epsilon = self.eps
self.tiny = self.xmin
self.huge = self.xmax
self.smallest_normal = self.xmin
self.smallest_subnormal = float_to_float(smallest_subnormal)
import math
self.precision = int(-math.log10(float_to_float(self.eps)))
ten = two + two + two + two + two
resolution = ten ** (-self.precision)
self.resolution = float_to_float(resolution)
self._str_resolution = float_to_str(resolution)
def __str__(self):
fmt = (
'Machine parameters for %(title)s\n'
'---------------------------------------------------------------------\n'
'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
'smallest_normal=%(smallest_normal)s '
'smallest_subnormal=%(smallest_subnormal)s\n'
'---------------------------------------------------------------------\n'
)
return fmt % self.__dict__
if __name__ == '__main__':
print(MachAr())
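
    # A hedged sanity check (sketch only, assuming the parent numpy package is
    # importable here): the empirically probed eps/xmin/xmax should agree with
    # the precomputed limits reported by np.finfo(float).
    #
    #   import numpy as np
    #   fi = np.finfo(float)
    #   ma = MachAr()
    #   assert fi.eps == ma.eps and fi.tiny == ma.xmin and fi.max == ma.xmax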
|
the-stack_106_24734 | import feedparser
import datetime
from django_yaba.models import *
from django.conf import settings
from django import template
register = template.Library()
def parse_github():
    """Grab the latest commits from GitHub."""
    if settings.GITHUB_USERNAME:
d = feedparser.parse("http://github.com/%s.atom" % settings.GITHUB_USERNAME)
e = d.entries[:5]
commit = "<ul>"
for x in e:
            link = x['link']
            # str.lstrip() strips a *set of characters*, not a prefix, so it can
            # eat leading characters of the path; strip the prefix explicitly instead.
            if link.startswith("http://github.com/"):
                link = link[len("http://github.com/"):]
            link = "http://github.com/%s" % link
commit += "<li>"
commit += '<a href="%s">' % link
commit += x['title_detail']['value']
commit += "</a>\n@ %s" % x['updated']
commit += "</li>"
commit += "</ul>"
return commit
else:
commit = False
return commit
def theme():
theme = Configuration.objects.all()[0].theme.slug
theme = theme.rstrip()
return {'theme': theme}
def sitename():
sitename = settings.BLOG_NAME
return {'sitename': sitename}
def archives():
"""
    Create the archives navigation for the sidebar. We start by grabbing all
    the content, which needs to be made more efficient. Then we parse out
    the year datetimes, then the month datetimes.
"""
stories = Story.objects.all()
galleries = Gallery.objects.all()
articles = Article.objects.all()
created = datetime.datetime(2000, 1, 1)
year_range = []
for x in stories.dates('created', 'year'):
if x not in year_range:
year_range.append(x)
for x in galleries.dates('created', 'year'):
if x not in year_range:
year_range.append(x)
for x in articles.dates('created', 'year'):
if x not in year_range:
year_range.append(x)
year_range.sort()
month_range = []
for x in stories.dates('created', 'month'):
if x not in month_range:
month_range.append(x)
for x in galleries.dates('created', 'month'):
if x not in month_range:
month_range.append(x)
for x in articles.dates('created', 'month'):
if x not in month_range:
month_range.append(x)
month_range.sort()
return year_range, month_range
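
# Shape of what archives() returns (hypothetical data, for illustration only):
# two sorted lists of datetime objects, one entry per year / per month that
# actually has content, e.g.
#   year_range  -> [datetime(2009, 1, 1), datetime(2010, 1, 1)]
#   month_range -> [datetime(2009, 11, 1), datetime(2009, 12, 1), datetime(2010, 1, 1)]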
def sidebar():
categories = Category.objects.all()
link_list = Links.objects.all()
commit = parse_github()
sitename = settings.BLOG_NAME
year_range, month_range = archives()
if settings.TWITTER_USERNAME or settings.TWITTER_PASSWORD:
tweet = True
else:
tweet = False
return {'link_list': link_list, 'commit': commit, 'sitename': sitename,
'categories': categories, 'tweet_it': tweet,
'tweet_user': settings.TWITTER_USERNAME, 'year_range': year_range,
'month_range': month_range}
def main_nav():
articles = Article.objects.all()
return {'articles': articles}
register.inclusion_tag('sidebar.html')(sidebar)
register.inclusion_tag('main_nav.html')(main_nav)
register.inclusion_tag('sitename.html')(sitename)
register.inclusion_tag('theme.html')(theme)
|
the-stack_106_24736 | '''
@author: l4zyc0d3r
People who are happy makes other happy. I am gonna finish it slowly but definitely.cdt
'''
#O(V+E)
class Solution:
def canFinish(self, N: int, P: List[List[int]]) -> bool:
mp = collections.defaultdict(list)
mp_pre = collections.defaultdict(list)
for c, p in P:
mp[c].append(p)
mp_pre[p].append(c)
ans = []
st = []
vst = ['W']*N
for i in range(N):
if i not in mp_pre and i not in mp:ans.append(i)
elif i not in mp_pre:
st.append(i)
#print(st)
while st:
#print(st[-1], st, vst)
cur = st[-1]
if vst[cur] == 'W':
for x in mp[cur]:
if vst[x]=='W':
st.append(x)
                    elif vst[x]=='G': return False  # a gray (in-progress) node reached again -> cycle
vst[cur] = 'G'
elif vst[cur] == 'G':
vst[cur] = 'B'
ans.append(cur)
st.pop()
elif vst[cur] == 'B':st.pop()
return True if len(ans) ==N else False
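
# A minimal usage sketch (assumes the LeetCode-style globals, i.e.
# `from typing import List` and `import collections`, are available):
#   Solution().canFinish(2, [[1, 0]])          # -> True  (take course 0, then 1)
#   Solution().canFinish(2, [[1, 0], [0, 1]])  # -> False (cyclic prerequisites)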
|
the-stack_106_24737 | # coding: utf-8
# Copyright (c) Tingzheng Hou.
# Distributed under the terms of the MIT License.
"""
This module calculates species correlation lifetime (residence time).
"""
from typing import List, Dict, Union, Tuple
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import acovf
from scipy.optimize import curve_fit
from tqdm.notebook import tqdm
from MDAnalysis import Universe
from MDAnalysis.core.groups import Atom
__author__ = "Kara Fong, Tingzheng Hou"
__version__ = "1.0"
__maintainer__ = "Tingzheng Hou"
__email__ = "[email protected]"
__date__ = "Feb 9, 2021"
def neighbors_one_atom(
nvt_run: Universe,
center_atom: Atom,
species: str,
select_dict: Dict[str, str],
distance: float,
run_start: int,
run_end: int,
) -> Dict[str, np.ndarray]:
"""
Create adjacency matrix for one center atom.
Args:
nvt_run: An MDAnalysis ``Universe``.
center_atom: The center atom object.
species: The neighbor species in the select_dict.
select_dict: A dictionary of atom species selection, where each atom species name is a key
and the corresponding values are the selection language.
distance: The neighbor cutoff distance.
run_start: Start frame of analysis.
run_end: End frame of analysis.
Returns:
A neighbor dict with neighbor atom id as keys and arrays of adjacent boolean (0/1) as values.
"""
bool_values = {}
time_count = 0
for ts in nvt_run.trajectory[run_start:run_end:]:
if species in select_dict.keys():
selection = (
"("
+ select_dict[species]
+ ") and (around "
+ str(distance)
+ " index "
+ str(center_atom.id - 1)
+ ")"
)
shell = nvt_run.select_atoms(selection)
else:
raise ValueError("Invalid species selection")
for atom in shell.atoms:
if str(atom.id) not in bool_values:
bool_values[str(atom.id)] = np.zeros(int((run_end - run_start) / 1))
bool_values[str(atom.id)][time_count] = 1
time_count += 1
return bool_values
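
# Shape of the returned dict (illustrative, hypothetical atom ids): one 0/1
# time series per neighbor atom, e.g.
#   {'102': array([1., 1., 0., ...]), '587': array([0., 1., 1., ...])}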
def calc_acf(a_values: Dict[str, np.ndarray]) -> List[np.ndarray]:
"""
Calculate auto-correlation function (ACF)
Args:
a_values: A dict of adjacency matrix with neighbor atom id as keys and arrays
of adjacent boolean (0/1) as values.
Returns:
A list of auto-correlation functions for each neighbor species.
"""
acfs = []
for atom_id, neighbors in a_values.items():
# atom_id_numeric = int(re.search(r"\d+", atom_id).group())
acfs.append(acovf(neighbors, demean=False, adjusted=True, fft=True))
return acfs
def exponential_func(
x: Union[float, np.floating, np.ndarray],
a: Union[float, np.floating, np.ndarray],
b: Union[float, np.floating, np.ndarray],
c: Union[float, np.floating, np.ndarray],
) -> Union[np.floating, np.ndarray]:
"""
An exponential decay function
Args:
x: Independent variable.
a: Initial quantity.
b: Exponential decay constant.
c: Constant.
Returns:
The acf
"""
return a * np.exp(-b * x) + c
def calc_neigh_corr(
nvt_run: Universe,
distance_dict: Dict[str, float],
select_dict: Dict[str, str],
time_step: float,
run_start: int,
run_end: int,
center_atom: str = "cation",
) -> Tuple[np.ndarray, Dict[str, np.ndarray]]:
"""Calculates the neighbor auto-correlation function (ACF)
of selected species around center atom.
Args:
nvt_run: An MDAnalysis ``Universe``.
        distance_dict: A dict of coordination cutoff distances with neighbor species names as keys.
        select_dict: A dictionary of atom species selection, where each atom species name is a key
            and the corresponding values are the selection language.
        time_step: The time step between each frame, in ps.
run_start: Start frame of analysis.
run_end: End frame of analysis.
center_atom: The center atom to calculate the ACF for. Default to "cation".
Returns:
A tuple containing the time series, and a dict of acf of neighbor species.
"""
# Set up times array
times = []
step = 0
center_atoms = nvt_run.select_atoms(select_dict[center_atom])
for ts in nvt_run.trajectory[run_start:run_end]:
times.append(step * time_step)
step += 1
times = np.array(times)
acf_avg = {}
for kw in distance_dict.keys():
acf_all = []
for atom in center_atoms[::]:
distance = distance_dict.get(kw)
assert distance is not None
adjacency_matrix = neighbors_one_atom(
nvt_run,
atom,
kw,
select_dict,
distance,
run_start,
run_end,
)
acfs = calc_acf(adjacency_matrix)
for acf in acfs:
acf_all.append(acf)
acf_avg[kw] = np.mean(acf_all, axis=0)
return times, acf_avg
def fit_residence_time(
times: np.ndarray,
acf_avg_dict: Dict[str, np.ndarray],
cutoff_time: int,
time_step: float,
plot: bool = False,
) -> Dict[str, np.floating]:
"""
Use the ACF to fit the residence time (Exponential decay constant).
TODO: allow defining the residence time according to a threshold value of the decay
Args:
times: A time series.
acf_avg_dict: A dict containing the ACFs of the species.
cutoff_time: Fitting cutoff time.
        time_step: The time step between each frame, in ps.
        plot: Whether to plot the ACFs together with their exponential fits. Default to False.
Returns:
A dict containing residence time of each species
"""
acf_avg_norm = {}
popt = {}
pcov = {}
tau = {}
species_list = list(acf_avg_dict.keys())
# Exponential fit of solvent-Li ACF
for kw in species_list:
acf_avg_norm[kw] = acf_avg_dict[kw] / acf_avg_dict[kw][0]
popt[kw], pcov[kw] = curve_fit(
exponential_func,
times[:cutoff_time],
acf_avg_norm[kw][:cutoff_time],
p0=(1, 1e-4, 0),
maxfev=5000
)
tau[kw] = 1 / popt[kw][1] # ps
if plot:
# Plot ACFs
colors = ["b", "g", "r", "c", "m", "y"]
line_styles = ["-", "--", "-.", ":"]
for i, kw in enumerate(species_list):
plt.plot(times, acf_avg_norm[kw], label=kw, color=colors[i])
plt.plot(
times,
exponential_func(times, *popt[kw]),
line_styles[i],
color="k",
label=kw + " Fit",
)
plt.xlabel("Time (ps)")
plt.legend()
plt.ylabel("Neighbor Auto-correlation Function")
plt.ylim(0, 1)
plt.xlim(0, cutoff_time * time_step)
plt.show()
return tau
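
# A minimal end-to-end sketch (hedged: the trajectory files, selections and
# cutoffs below are hypothetical placeholders, not values shipped with this
# module):
#
#   u = Universe("topology.data", "trajectory.dcd")
#   select_dict = {"cation": "type 1", "anion": "type 2"}
#   distance_dict = {"anion": 3.0}
#   times, acf_avg = calc_neigh_corr(u, distance_dict, select_dict,
#                                    time_step=10, run_start=0, run_end=1000)
#   tau = fit_residence_time(times, acf_avg, cutoff_time=500, time_step=10)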
|
the-stack_106_24740 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# noinspection PyUnresolvedReferences,PyCompatibility
from builtins import *
import cProfile
import pprint
import bag
from bag.layout import RoutingGrid, TemplateDB
#from adc_sar.sampler import NPassGateWClk
from abs_templates_ec.adc_sar.sampler import NPassGateWClk
import yaml
#impl_lib = 'adc_sar_generated'
impl_lib = 'adc_sampler_ec'
if __name__ == '__main__':
prj = bag.BagProject()
lib_name = 'adc_ec_templates'
cell_name = 'sampler_nmos'
params = dict(
lch=16e-9,
wp=8,
wn=8,
fgn=12,
fg_inbuf_list=[(8, 8), (14, 14)],
fg_outbuf_list=[(4, 4), (24, 24)],
nduml=10,
ndumr=4,
nsep=2,
intent='ulvt',
)
load_from_file=True
yamlfile_spec="adc_sar_spec.yaml"
yamlfile_size="adc_sar_size.yaml"
if load_from_file==True:
with open(yamlfile_spec, 'r') as stream:
specdict = yaml.load(stream)
with open(yamlfile_size, 'r') as stream:
sizedict = yaml.load(stream)
params['lch']=sizedict['lch']
params['wp']=sizedict['sampler_nmos']['wp']
params['wn']=sizedict['sampler_nmos']['wn']
params['fgn']=sizedict['sampler_nmos']['fgn']
params['fg_inbuf_list']=sizedict['sampler_nmos']['fg_inbuf_list']
params['fg_outbuf_list']=sizedict['sampler_nmos']['fg_outbuf_list']
params['nduml']=sizedict['sampler_nmos']['nduml']
params['ndumr']=sizedict['sampler_nmos']['ndumr']
params['nsep']=sizedict['sampler_nmos']['nsep']
'''
params['wp']=sizedict['pw']*2
params['wn']=sizedict['nw']*2
params['fgn']=int(sizedict['sarsamp']['m_sw']*sizedict['sarsamp']['m_sw_arr']/2)
params['fg_inbuf_list']=[]
params['fg_outbuf_list']=[]
for m in sizedict['sarsamp']['m_inbuf_list']:
params['fg_inbuf_list']+=[(m, m)]
for m in sizedict['sarsamp']['m_outbuf_list']:
params['fg_outbuf_list']+=[(m, m)]
'''
# create design module and run design method.
print('designing module')
dsn = prj.create_design_module(lib_name, cell_name)
print('design parameters:\n%s' % pprint.pformat(params))
dsn.design_specs(**params)
# implement the design
print('implementing design with library %s' % impl_lib)
dsn.implement_design(impl_lib, top_cell_name=cell_name, erase=True)
|
the-stack_106_24742 | #!python3
# -*- coding: utf-8 -*-
'''
@name: life
@author: Memory&Xinxin
@date: 2018/11/19
@document: {"F11": 全屏,
"空格": 暂停游戏,
"点击": 复活或者杀死一个生命
}
'''
import pygame
from mxgames import game
from random import randint
ROWS = 50
SCREEN_SIZE = (500, 500)                # screen size
COLOR = [0xff0000, 0x00ff00, 0x0000ff, 0xffff00, 0xff00ff, 0x00ffff]  # cell colours
UPDATE_TIME = 1500                      # refresh interval (ms)
ALL_INDEX = [(i, j) for i in range(ROWS) for j in range(ROWS)]
class Life(game.Game):
def __init__(self, title, size, rows, fps=30):
super(Life, self).__init__(title, size, fps)
        self.rows = rows                    # number of cells per side
        self.side = self.screen.get_width() // rows        # side length of one cell in pixels
        self.lifes = [[False for i in range(rows)] for j in range(rows)]  # cells: True = alive, False = dead
self.init_lifes()
self.bind_key(pygame.K_SPACE, self.pause)
self.bind_click(1, self.reverse)
    def init_lifes(self):                   # seed the board with some random live cells
        t = self.rows // 3                  # restrict seeding to the central third
        for i in range(t, 2*t):
            for j in range(t, 2*t):
                if randint(1, 5) == 1:
                    self.lifes[i][j] = True # mark the cell as alive
    def reverse(self, x, y):                # toggle a cell: dead becomes alive, alive becomes dead
i = x // self.side
j = y // self.side
if i < 0 or j < 0 or i >= self.rows or j >= self.rows:
return
self.lifes[i][j] = not self.lifes[i][j]
rect = pygame.Rect(i*self.side, j*self.side, self.side, self.side)
if self.lifes[i][j]:
self.screen.fill(COLOR[randint(0, len(COLOR)-1)], rect)
else:
self.screen.fill((0, 0, 0), rect)
pygame.display.update(rect)
    def neigh_num(self, x, y):              # count the live neighbours of cell (x, y)
num = 0
for i in game.EIGHT_NEIGH:
tx = x + i[0]
ty = y + i[1]
if tx > 0 and ty > 0 and tx < self.rows and ty < self.rows:
if self.lifes[tx][ty]:
num += 1
return num
    def update(self, current_time):         # apply the life/death rules based on neighbour counts
        if current_time < self.last_time + UPDATE_TIME or self.is_pause:
            return
        self.last_time = current_time
        self.is_draw = True
        temp_lifes = [[False for i in range(self.rows)] for j in range(self.rows)]
        for i, j in ALL_INDEX:
            num = self.neigh_num(i, j)
            if num == 3:                    # exactly 3 neighbours: the cell becomes (or stays) alive
                temp_lifes[i][j] = True
            elif num == 2:                  # exactly 2 neighbours: the cell keeps its current state
                temp_lifes[i][j] = self.lifes[i][j]  # any other count: the cell dies (stays False)
        self.lifes = temp_lifes
    def draw(self, current_time):           # render the board
        if not self.is_draw or self.is_pause:
            return
        self.is_draw = False
        self.screen.fill((0, 0, 0))         # clear to the background colour
        for i, j in ALL_INDEX:
            if self.lifes[i][j]:            # draw only live cells
                rect = pygame.Rect(i*self.side, j*self.side, self.side, self.side)
                self.screen.fill(COLOR[randint(0, len(COLOR)-1)], rect)
        pygame.display.update()             # refresh the screen
if __name__ == '__main__':
print('''
Welcome to Life Game!
    press SPACE to pause the game.
    click LEFT MOUSE BUTTON to create a life, or kill a living one.
''')
life = Life("life", SCREEN_SIZE, ROWS)
life.run()
|
the-stack_106_24745 |
print("Qual base de conversão você quer escolher?")
n = int(input("Digite um número: "))
print("""Escolha uma das bases para conversão:
[1] converter em binário
[2] converter em octal
[3] converter em hexadécimal""")
escolha = int(input("Escolha sua opção: "))
if escolha == 1:
print("{} convertido para binário é {}.".format(n, bin(n)[2:]))
elif escolha == 2:
print("{} convertido para octal é {}".format(n, oct(n)[2:]))
elif escolha == 3:
print("{} convertido para hexadécimal é {}.".format(n, hex(n)[2:]))
else:
print("Opção inválida. Tente novamente.")
|
the-stack_106_24748 | """
@author: Maziar Raissi
"""
from Multistep_NN import Multistep_NN
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from plotting import newfig, savefig
import matplotlib.gridspec as gridspec
def colorline3d(ax, x, y, z, cmap):
N = len(x)
skip = int(0.01*N)
for i in range(0,N,skip):
ax.plot(x[i:i+skip+1], y[i:i+skip+1], z[i:i+skip+1], color=cmap(int(255*i/N)))
if __name__ == "__main__":
# function that returns dx/dt
def f(x,t): # x is 2 x 1
mu = x[0]
omega = 1
A = 1
f1 = 0
f2 = mu*x[1] - omega*x[2] - A*x[1]*(x[1]**2+x[2]**2)
f3 = omega*x[1] + mu*x[2] - A*x[2]*(x[1]**2+x[2]**2)
f = np.array([f1,f2,f3])
return f
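
    # The system above is the normal form of a Hopf bifurcation, with the
    # bifurcation parameter mu carried as an extra, frozen state (d mu/dt = 0):
    #   dx/dt = mu*x - omega*y - A*x*(x^2 + y^2)
    #   dy/dt = omega*x + mu*y - A*y*(x^2 + y^2)
    # Trajectories spiral toward the origin for mu < 0 and toward a limit
    # cycle of radius sqrt(mu/A) for mu > 0.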
# time points
t_star = np.arange(0,75,0.1)
dt = t_star[1] - t_star[0]
# initial condition
x0 = np.array([[-0.15,2,0],
[-0.05,2,0],
[.05,.01,0],
[.15,.01,0],
[.25,.01,0],
[.35,.01,0],
[.45,.01,0],
[.55,.01,0],
[.05,2,0],
[.15,2,0],
[.25,2,0],
[.35,2,0],
[.45,2,0],
[.55,2,0]])
S = x0.shape[0] # number of trajectories
N = t_star.shape[0] # number of time snapshots
D = x0.shape[1] # dimension
X_star = np.zeros((S, N, D))
# solve ODE
for k in range(0,S):
X_star[k,:,:] = odeint(f, x0[k,:], t_star)
noise = 0
X_train = X_star
X_train = X_train + noise*X_train.std(1,keepdims=True)*np.random.randn(X_train.shape[0], X_train.shape[1], X_train.shape[2])
layers = [3, 256, 3]
M = 1
scheme = 'AM'
model = Multistep_NN(dt, X_train, layers, M, scheme)
N_Iter = 50000
model.train(N_Iter)
def learned_f(x,t):
f = model.predict_f(x[None,:])
return f.flatten()
# initial condition
learned_x0 = np.array([[-0.15,2,0],
[-0.05,2,0],
[.05,.01,0],
[.15,.01,0],
[.25,.01,0],
[.35,.01,0],
[.45,.01,0],
[.55,.01,0],
[.05,2,0],
[.15,2,0],
[.25,2,0],
[.35,2,0],
[.45,2,0],
[.55,2,0],
[-0.2,2,0],
[-0.1,2,0],
[.1,.01,0],
[.2,.01,0],
[.3,.01,0],
[.4,.01,0],
[.5,.01,0],
[.6,.01,0],
[.1,2,0],
[.2,2,0],
[.3,2,0],
[.4,2,0],
[.5,2,0],
[.6,2,0],
[0,2,0],
[0,.01,0]])
learned_S = learned_x0.shape[0] # number of trajectories
learned_N = t_star.shape[0] # number of time snapshots
learned_D = learned_x0.shape[1] # dimension
learned_X_star = np.zeros((learned_S, learned_N, learned_D))
# solve ODE
for k in range(0,learned_S):
learned_X_star[k,:,:] = odeint(learned_f, learned_x0[k,:], t_star)
####### Plotting ##################
fig, ax = newfig(1.0, 0.8)
ax.axis('off')
gs0 = gridspec.GridSpec(1, 2)
gs0.update(top=0.95, bottom=0.1, left=0.0, right=0.90, wspace=0.15)
ax = plt.subplot(gs0[:, 0:1], projection='3d')
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
for k in range(0,S):
colorline3d(ax, X_star[k,:,0], X_star[k,:,1], X_star[k,:,2], cmap = plt.cm.seismic)
ax.grid(False)
ax.set_xlabel('$\mu$')
ax.set_ylabel('$x$')
ax.set_zlabel('$y$')
ax.set_title('Exact Dynamics', fontsize = 10)
ax = plt.subplot(gs0[:, 1:2], projection='3d')
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
for k in range(0,learned_S):
colorline3d(ax, learned_X_star[k,:,0], learned_X_star[k,:,1], learned_X_star[k,:,2], cmap = plt.cm.seismic)
ax.grid(False)
ax.grid(False)
ax.set_xlabel('$\mu$')
ax.set_ylabel('$x$')
ax.set_zlabel('$y$')
ax.set_title('Learned Dynamics', fontsize = 10)
# savefig('./figures/Hopf', crop = False)
|
the-stack_106_24750 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
app_name = "kanban"
app_title = "Kanban"
app_publisher = "Alec Ruiz-Ramon"
app_description = "Kanban views for ERPNext"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "[email protected]"
app_version = "0.0.1"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/kanban/css/kanban.css"
# app_include_js = "/assets/kanban/js/kanban.js"
# include js, css files in header of web template
# web_include_css = "/assets/kanban/css/kanban.css"
# web_include_js = "/assets/kanban/js/kanban.js"
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "kanban.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "kanban.install.before_install"
# after_install = "kanban.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "kanban.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "kanban.tasks.all"
# ],
# "daily": [
# "kanban.tasks.daily"
# ],
# "hourly": [
# "kanban.tasks.hourly"
# ],
# "weekly": [
# "kanban.tasks.weekly"
# ]
# "monthly": [
# "kanban.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "kanban.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "kanban.event.get_events"
# }
|
the-stack_106_24751 | import statistics
import time
from problog.engine import DefaultEngine
from refactor.back_end_picking import get_back_end_default, QueryBackEnd
from refactor.tilde_essentials.tree import DecisionTree
from refactor.tilde_essentials.tree_builder import TreeBuilder
from refactor.query_testing_back_end.django.clause_handling import destruct_tree_tests
from refactor.io.label_collector import LabelCollectorMapper
from refactor.io.parsing_background_knowledge import parse_background_knowledge_keys
from refactor.io.parsing_examples import KeysExampleBuilder
from refactor.io.parsing_settings.setting_parser import KeysSettingsParser
from refactor.representation.example import InternalExampleFormat
from tilde_config import kb_file, s_file, bg_file
# default_handler = get_back_end_default(QueryBackEnd.DJANGO)
file_name_labeled_examples = kb_file()
file_name_settings = s_file()
fname_background_knowledge = bg_file()
parsed_settings = KeysSettingsParser().parse(file_name_settings)
debug_printing_example_parsing = False
debug_printing_tree_building = False
debug_printing_tree_pruning = False
debug_printing_program_conversion = True
debug_printing_get_classifier = False
debug_printing_classification = False
# fname_background_knowledge = None
internal_ex_format = InternalExampleFormat.CLAUSEDB
engine = DefaultEngine()
engine.unknown = 1
language = parsed_settings.language # type: TypeModeLanguage
# TODO: unify this with models --> let models use a prediction goal predicate label()
prediction_goal_handler = parsed_settings.get_prediction_goal_handler() # type: KeysPredictionGoalHandler
prediction_goal = prediction_goal_handler.get_prediction_goal() # type: Term
print('=== START parsing background ===')
background_knowledge_wrapper \
= parse_background_knowledge_keys(fname_background_knowledge,
prediction_goal) # type: BackgroundKnowledgeWrapper
full_background_knowledge_sp \
= background_knowledge_wrapper.get_full_background_knowledge_simple_program() # type: Optional[SimpleProgram]
print('=== END parsing background ===\n')
# =================================================================================================================
print('=== START parsing examples ===')
# EXAMPLES
example_builder = KeysExampleBuilder(prediction_goal, debug_printing_example_parsing)
training_examples_collection = example_builder.parse(internal_ex_format, file_name_labeled_examples,
full_background_knowledge_sp) # type: ExampleCollection
# =================================================================================================================
print('=== START collecting labels ===')
# LABELS
index_of_label_var = prediction_goal_handler.get_predicate_goal_index_of_label_var() # type: int
label_collector = LabelCollectorMapper.get_label_collector(internal_ex_format, prediction_goal, index_of_label_var,
engine=engine)
label_collector.extract_labels(training_examples_collection)
possible_labels = label_collector.get_labels() # type: Set[Label]
possible_labels = list(possible_labels)
print('=== END collecting labels ===\n')
default_handlers = [
# ('django', get_back_end_default(QueryBackEnd.DJANGO)),
# ('problog-simple', get_back_end_default(QueryBackEnd.SIMPLE_PROGRAM)),
('subtle', get_back_end_default(QueryBackEnd.SUBTLE)),
# ('FLGG', get_back_end_default(QueryBackEnd.FLGG))
]
average_run_time_list = []
for name, default_handler in default_handlers:
# =================================================================================================================
examples = default_handler.get_transformed_example_list(training_examples_collection)
# =================================================================================================================
run_time_list = []
for i in range(0, 10):
print('=== START tree building ===')
# test_evaluator = SimpleProgramQueryEvaluator(engine=engine)
# splitter = ProblogSplitter(language=language,split_criterion_str='entropy', test_evaluator=test_evaluator,
# query_head_if_keys_format=prediction_goal)
tree_builder = default_handler.get_default_decision_tree_builder(language, prediction_goal) # type: TreeBuilder
decision_tree = DecisionTree()
start_time = time.time()
decision_tree.fit(examples=examples, tree_builder=tree_builder)
end_time = time.time()
run_time_sec = end_time - start_time
run_time_ms = 1000.0 * run_time_sec
run_time_list.append(run_time_ms)
print("run time (ms):", run_time_ms)
print('=== END tree building ===\n')
average_run_time_ms = statistics.mean(run_time_list)
average_run_time_list.append((name, average_run_time_ms))
print("average tree build time (ms):", average_run_time_ms)
print(decision_tree)
if name == 'django':
print("=== start destructing examples ===")
for instance in examples:
instance.data.destruct()
print("=== end destructing examples ===")
print("=== start destructing tree queries ===")
destruct_tree_tests(decision_tree.tree)
print("=== start destructing tree queries ===")
print ("\n=== average run times (ms) =======")
for name, average_run_time_ms in average_run_time_list:
print(name, ':', average_run_time_ms)
# === average run times (ms) =======
# django : 175.32496452331543
# subtle : 2955.9953451156616
# FLGG : 2411.164665222168
|
the-stack_106_24752 | import vtk
from array import *
import numpy
import os
# todo range the folder to get the file name
gridnum = 15
massR = 4
massOrigin = [6,0,6]
initVlue = 1.5
targetValue = 7.5
# check for the event of interest around iteration/timestep 41
# the region of interest covers the cell indices:
#   i: 6 7 8 9
#   j: 0 1 2 3
#   k: 6 7 8 9
rootDir = "./image"
# refer to https://xrunhprof.wordpress.com/2012/02/06/array-exchange-between-vtk-and-python/
def vtk_to_array(vtk_array):
at = vtk_array.GetDataType()
if at == 11:
#vtkDoubleArray
pt='d'
elif at == 12:
#vtkIdTypeArray
pt='l'
#this is slow. numpy.zeros would be faster.
r = array(pt, [0]*vtk_array.GetSize())
vtk_array.ExportToVoidPointer(r)
return r
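
# Note: newer VTK builds ship an equivalent helper (a hedged pointer, not used
# by this script): ``from vtk.util.numpy_support import vtk_to_numpy`` converts
# a vtkDataArray straight to a NumPy array without the manual type mapping above.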
# draw graph
def drawGraph(filePath):
#TODO call the paraview to draw graph
print ("draw graph for")
print (filePath)
for dirName, subdirList, fileList in os.walk(rootDir):
print('Found directory: %s' % dirName)
for fname in fileList:
filePath = rootDir+"/"+fname
#print filePath
# Read the file (to test that it was written correctly)
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName(filePath)
reader.Update()
obj = reader.GetOutput().GetCellData()
# use dir(obj) to get all the function of the class
# print dir(obj)
vtkarray = obj.GetArray("pressure")
parray = vtk_to_array(vtkarray)
#print parray
nparray = numpy.asarray(parray).reshape(gridnum, gridnum, gridnum, order='F')
#print nparray[1][1]
# detect if all the mass info is targeted one
ifTargetEventHappen = True
#if (fname == "image41.vti"):
# print nparray
for i in range (massOrigin[0],massOrigin[0]+massR):
for j in range (massOrigin[1],massOrigin[1]+massR):
for k in range (massOrigin[2],massOrigin[2]+massR):
#print "index i j k (%d %d %d)" % (i,j,k)
#print nparray[i][j][k]
#print "index i j k (%d %d %d)" % (i,j,k)
#print nparray[i][j][k]
if (nparray[i][j][k]!=targetValue):
ifTargetEventHappen = False
break
if (ifTargetEventHappen == True):
print (filePath)
drawGraph(filePath)
exit(0)
        # if the event of interest happens, report the timestep
        # and trigger rendering of the corresponding picture
'''
# Convert the image to a polydata
imageDataGeometryFilter = vtk.vtkImageDataGeometryFilter()
imageDataGeometryFilter.SetInputConnection(reader.GetOutputPort())
imageDataGeometryFilter.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(imageDataGeometryFilter.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetPointSize(3)
# Setup rendering
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.SetBackground(1, 1, 1)
renderer.ResetCamera()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.Initialize()
renderWindowInteractor.Start()
''' |
the-stack_106_24753 | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_tracker_apv_vfp30_2016_cff import tracker_apv_vfp30_2016 as _tracker_apv_vfp30_2016
import RecoTracker.IterativeTracking.iterativeTkConfig as _cfg
from Configuration.Eras.Modifier_fastSim_cff import fastSim
# NEW CLUSTERS (remove previously used clusters)
lowPtTripletStepClusters = _cfg.clusterRemoverForIter("LowPtTripletStep")
for _eraName, _postfix, _era in _cfg.nonDefaultEras():
_era.toReplaceWith(lowPtTripletStepClusters, _cfg.clusterRemoverForIter("LowPtTripletStep", _eraName, _postfix))
# SEEDING LAYERS
import RecoTracker.TkSeedingLayers.PixelLayerTriplets_cfi
lowPtTripletStepSeedLayers = RecoTracker.TkSeedingLayers.PixelLayerTriplets_cfi.PixelLayerTriplets.clone()
lowPtTripletStepSeedLayers.BPix.skipClusters = cms.InputTag('lowPtTripletStepClusters')
lowPtTripletStepSeedLayers.FPix.skipClusters = cms.InputTag('lowPtTripletStepClusters')
_layerListForPhase1 = [
'BPix1+BPix2+BPix3', 'BPix2+BPix3+BPix4',
'BPix1+BPix3+BPix4', 'BPix1+BPix2+BPix4',
'BPix2+BPix3+FPix1_pos', 'BPix2+BPix3+FPix1_neg',
'BPix1+BPix2+FPix1_pos', 'BPix1+BPix2+FPix1_neg',
'BPix1+BPix3+FPix1_pos', 'BPix1+BPix3+FPix1_neg',
'BPix2+FPix1_pos+FPix2_pos', 'BPix2+FPix1_neg+FPix2_neg',
'BPix1+FPix1_pos+FPix2_pos', 'BPix1+FPix1_neg+FPix2_neg',
'BPix1+BPix2+FPix2_pos', 'BPix1+BPix2+FPix2_neg',
'FPix1_pos+FPix2_pos+FPix3_pos', 'FPix1_neg+FPix2_neg+FPix3_neg',
'BPix1+FPix2_pos+FPix3_pos', 'BPix1+FPix2_neg+FPix3_neg',
'BPix1+FPix1_pos+FPix3_pos', 'BPix1+FPix1_neg+FPix3_neg'
]
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
trackingPhase1.toModify(lowPtTripletStepSeedLayers, layerList = _layerListForPhase1)
# combinations with a gap removed as the only source of fakes in the current geometry (kept for documentation)
_layerListForPhase2 = ['BPix1+BPix2+BPix3', 'BPix2+BPix3+BPix4',
# 'BPix1+BPix3+BPix4', 'BPix1+BPix2+BPix4',
'BPix1+BPix2+FPix1_pos', 'BPix1+BPix2+FPix1_neg',
'BPix1+FPix1_pos+FPix2_pos', 'BPix1+FPix1_neg+FPix2_neg',
# 'BPix1+BPix2+FPix2_pos', 'BPix1+BPix2+FPix2_neg',
'FPix1_pos+FPix2_pos+FPix3_pos', 'FPix1_neg+FPix2_neg+FPix3_neg',
# 'BPix1+FPix1_pos+FPix3_pos', 'BPix1+FPix1_neg+FPix3_neg',
'FPix2_pos+FPix3_pos+FPix4_pos', 'FPix2_neg+FPix3_neg+FPix4_neg',
'FPix3_pos+FPix4_pos+FPix5_pos', 'FPix3_neg+FPix4_neg+FPix5_neg',
'FPix4_pos+FPix5_pos+FPix6_pos', 'FPix4_neg+FPix5_neg+FPix6_neg',
                       # removed as redundant and covering effectively only eta>4 (here for documentation, to be optimized after TDR)
# 'FPix5_pos+FPix6_pos+FPix7_pos', 'FPix5_neg+FPix6_neg+FPix7_neg',
# 'FPix6_pos+FPix7_pos+FPix8_pos', 'FPix6_neg+FPix7_neg+FPix8_neg'
]
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
trackingPhase2PU140.toModify(lowPtTripletStepSeedLayers, layerList = _layerListForPhase2)
# TrackingRegion
from RecoTracker.TkTrackingRegions.globalTrackingRegionFromBeamSpot_cfi import globalTrackingRegionFromBeamSpot as _globalTrackingRegionFromBeamSpot
lowPtTripletStepTrackingRegions = _globalTrackingRegionFromBeamSpot.clone(RegionPSet = dict(
ptMin = 0.2,
originRadius = 0.02,
nSigmaZ = 4.0
))
trackingPhase1.toModify(lowPtTripletStepTrackingRegions, RegionPSet = dict(ptMin = 0.2))
trackingPhase2PU140.toModify(lowPtTripletStepTrackingRegions, RegionPSet = dict(ptMin = 0.40))
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
(pp_on_XeXe_2017 | pp_on_AA_2018).toReplaceWith(lowPtTripletStepTrackingRegions,
_globalTrackingRegionWithVertices.clone(RegionPSet=dict(
useFixedError = False,
ptMin = 0.49,
originRadius = 0.02
)
)
)
from Configuration.Eras.Modifier_highBetaStar_2018_cff import highBetaStar_2018
highBetaStar_2018.toModify(lowPtTripletStepTrackingRegions,RegionPSet = dict(
ptMin = 0.05,
originRadius = 0.2,
))
# seeding
from RecoTracker.TkHitPairs.hitPairEDProducer_cfi import hitPairEDProducer as _hitPairEDProducer
lowPtTripletStepHitDoublets = _hitPairEDProducer.clone(
seedingLayers = "lowPtTripletStepSeedLayers",
trackingRegions = "lowPtTripletStepTrackingRegions",
maxElement = 50000000,
produceIntermediateHitDoublets = True,
)
from RecoPixelVertexing.PixelTriplets.pixelTripletHLTEDProducer_cfi import pixelTripletHLTEDProducer as _pixelTripletHLTEDProducer
from RecoPixelVertexing.PixelLowPtUtilities.ClusterShapeHitFilterESProducer_cfi import *
import RecoPixelVertexing.PixelLowPtUtilities.LowPtClusterShapeSeedComparitor_cfi
lowPtTripletStepHitTriplets = _pixelTripletHLTEDProducer.clone(
doublets = "lowPtTripletStepHitDoublets",
produceSeedingHitSets = True,
SeedComparitorPSet = RecoPixelVertexing.PixelLowPtUtilities.LowPtClusterShapeSeedComparitor_cfi.LowPtClusterShapeSeedComparitor.clone()
)
from RecoTracker.TkSeedGenerator.seedCreatorFromRegionConsecutiveHitsEDProducer_cff import seedCreatorFromRegionConsecutiveHitsEDProducer as _seedCreatorFromRegionConsecutiveHitsEDProducer
lowPtTripletStepSeeds = _seedCreatorFromRegionConsecutiveHitsEDProducer.clone(
seedingHitSets = "lowPtTripletStepHitTriplets",
)
from RecoPixelVertexing.PixelTriplets.caHitTripletEDProducer_cfi import caHitTripletEDProducer as _caHitTripletEDProducer
trackingPhase1.toModify(lowPtTripletStepHitDoublets, layerPairs = [0,1]) # layer pairs (0,1), (1,2)
trackingPhase1.toReplaceWith(lowPtTripletStepHitTriplets, _caHitTripletEDProducer.clone(
doublets = "lowPtTripletStepHitDoublets",
extraHitRPhitolerance = lowPtTripletStepHitTriplets.extraHitRPhitolerance,
SeedComparitorPSet = lowPtTripletStepHitTriplets.SeedComparitorPSet,
maxChi2 = dict(
pt1 = 0.8, pt2 = 2,
value1 = 70 , value2 = 8,
),
useBendingCorrection = True,
CAThetaCut = 0.002,
CAPhiCut = 0.05,
))
trackingPhase2PU140.toModify(lowPtTripletStepHitDoublets, layerPairs = [0,1]) # layer pairs (0,1), (1,2)
trackingPhase2PU140.toReplaceWith(lowPtTripletStepHitTriplets, _caHitTripletEDProducer.clone(
doublets = "lowPtTripletStepHitDoublets",
extraHitRPhitolerance = lowPtTripletStepHitTriplets.extraHitRPhitolerance,
SeedComparitorPSet = lowPtTripletStepHitTriplets.SeedComparitorPSet,
maxChi2 = dict(
pt1 = 0.8, pt2 = 2,
value1 = 70 , value2 = 8,
),
useBendingCorrection = True,
CAThetaCut = 0.002,
CAPhiCut = 0.05,
))
highBetaStar_2018.toModify(lowPtTripletStepHitTriplets,CAThetaCut = 0.004,CAPhiCut = 0.1)
import FastSimulation.Tracking.TrajectorySeedProducer_cfi
_fastSim_lowPtTripletStepSeeds = FastSimulation.Tracking.TrajectorySeedProducer_cfi.trajectorySeedProducer.clone(
trackingRegions = "lowPtTripletStepTrackingRegions",
hitMasks = cms.InputTag("lowPtTripletStepMasks"),
)
from FastSimulation.Tracking.SeedingMigration import _hitSetProducerToFactoryPSet
_fastSim_lowPtTripletStepSeeds.seedFinderSelector.pixelTripletGeneratorFactory = _hitSetProducerToFactoryPSet(lowPtTripletStepHitTriplets)
_fastSim_lowPtTripletStepSeeds.seedFinderSelector.pixelTripletGeneratorFactory.SeedComparitorPSet.ComponentName = "none"
_fastSim_lowPtTripletStepSeeds.seedFinderSelector.layerList = lowPtTripletStepSeedLayers.layerList.value()
#new for phase1
trackingPhase1.toModify(_fastSim_lowPtTripletStepSeeds, seedFinderSelector = dict(
pixelTripletGeneratorFactory = None,
CAHitTripletGeneratorFactory = _hitSetProducerToFactoryPSet(lowPtTripletStepHitTriplets).clone(SeedComparitorPSet = dict(ComponentName = "none")),
#new parameters required for phase1 seeding
BPix = dict(
TTRHBuilder = 'WithoutRefit',
HitProducer = 'TrackingRecHitProducer',
),
FPix = dict(
TTRHBuilder = 'WithoutRefit',
HitProducer = 'TrackingRecHitProducer',
),
layerPairs = lowPtTripletStepHitDoublets.layerPairs.value()
)
)
fastSim.toReplaceWith(lowPtTripletStepSeeds,_fastSim_lowPtTripletStepSeeds)
# QUALITY CUTS DURING TRACK BUILDING
import TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff as _TrajectoryFilter_cff
_lowPtTripletStepStandardTrajectoryFilterBase = _TrajectoryFilter_cff.CkfBaseTrajectoryFilter_block.clone(
minimumNumberOfHits = 3,
minPt = 0.075,
)
lowPtTripletStepStandardTrajectoryFilter = _lowPtTripletStepStandardTrajectoryFilterBase.clone(
maxCCCLostHits = 0,
minGoodStripCharge = cms.PSet(refToPSet_ = cms.string('SiStripClusterChargeCutLoose'))
)
from Configuration.Eras.Modifier_tracker_apv_vfp30_2016_cff import tracker_apv_vfp30_2016
_tracker_apv_vfp30_2016.toModify(lowPtTripletStepStandardTrajectoryFilter, maxCCCLostHits = 1)
from Configuration.Eras.Modifier_trackingLowPU_cff import trackingLowPU
trackingLowPU.toReplaceWith(lowPtTripletStepStandardTrajectoryFilter, _lowPtTripletStepStandardTrajectoryFilterBase)
trackingPhase2PU140.toReplaceWith(lowPtTripletStepStandardTrajectoryFilter, _lowPtTripletStepStandardTrajectoryFilterBase)
for e in [pp_on_XeXe_2017, pp_on_AA_2018]:
e.toModify(lowPtTripletStepStandardTrajectoryFilter, minPt=0.49)
from RecoPixelVertexing.PixelLowPtUtilities.ClusterShapeTrajectoryFilter_cfi import *
# Composite filter
lowPtTripletStepTrajectoryFilter = _TrajectoryFilter_cff.CompositeTrajectoryFilter_block.clone(
filters = [cms.PSet(refToPSet_ = cms.string('lowPtTripletStepStandardTrajectoryFilter')),
# cms.PSet(refToPSet_ = cms.string('ClusterShapeTrajectoryFilter'))
]
)
trackingPhase2PU140.toModify(lowPtTripletStepTrajectoryFilter,
filters = lowPtTripletStepTrajectoryFilter.filters + [cms.PSet(refToPSet_ = cms.string('ClusterShapeTrajectoryFilter'))]
)
lowPtTripletStepTrajectoryFilterInOut = lowPtTripletStepStandardTrajectoryFilter.clone(
minimumNumberOfHits = 4,
seedExtension = 1,
strictSeedExtension = False, # allow inactive
pixelSeedExtension = False,
)
import RecoTracker.MeasurementDet.Chi2ChargeMeasurementEstimator_cfi
lowPtTripletStepChi2Est = RecoTracker.MeasurementDet.Chi2ChargeMeasurementEstimator_cfi.Chi2ChargeMeasurementEstimator.clone(
ComponentName = cms.string('lowPtTripletStepChi2Est'),
nSigma = cms.double(3.0),
MaxChi2 = cms.double(9.0),
clusterChargeCut = cms.PSet(refToPSet_ = cms.string('SiStripClusterChargeCutTight')),
)
_tracker_apv_vfp30_2016.toModify(lowPtTripletStepChi2Est,
clusterChargeCut = dict(refToPSet_ = "SiStripClusterChargeCutTiny")
)
# TRACK BUILDING
import RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi
lowPtTripletStepTrajectoryBuilder = RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi.GroupedCkfTrajectoryBuilder.clone(
MeasurementTrackerName = '',
trajectoryFilter = cms.PSet(refToPSet_ = cms.string('lowPtTripletStepTrajectoryFilter')),
maxCand = 4,
estimator = cms.string('lowPtTripletStepChi2Est'),
maxDPhiForLooperReconstruction = cms.double(2.0),
# 0.63 GeV is the maximum pT for a charged particle to loop within the 1.1m radius
# of the outermost Tracker barrel layer (with B=3.8T)
maxPtForLooperReconstruction = cms.double(0.7)
)
trackingLowPU.toModify(lowPtTripletStepTrajectoryBuilder, maxCand = 3)
trackingPhase2PU140.toModify(lowPtTripletStepTrajectoryBuilder,
inOutTrajectoryFilter = dict(refToPSet_ = "lowPtTripletStepTrajectoryFilterInOut"),
useSameTrajFilter = False,
maxCand = 3,
)
# MAKING OF TRACK CANDIDATES
import RecoTracker.CkfPattern.CkfTrackCandidates_cfi
lowPtTripletStepTrackCandidates = RecoTracker.CkfPattern.CkfTrackCandidates_cfi.ckfTrackCandidates.clone(
src = cms.InputTag('lowPtTripletStepSeeds'),
### these two parameters are relevant only for the CachingSeedCleanerBySharedInput
numHitsForSeedCleaner = cms.int32(50),
onlyPixelHitsForSeedCleaner = cms.bool(True),
TrajectoryBuilderPSet = cms.PSet(refToPSet_ = cms.string('lowPtTripletStepTrajectoryBuilder')),
clustersToSkip = cms.InputTag('lowPtTripletStepClusters'),
doSeedingRegionRebuilding = True,
useHitsSplitting = True,
TrajectoryCleaner = 'lowPtTripletStepTrajectoryCleanerBySharedHits'
)
trackingPhase2PU140.toModify(lowPtTripletStepTrackCandidates,
clustersToSkip = None,
phase2clustersToSkip = cms.InputTag("lowPtTripletStepClusters")
)
import FastSimulation.Tracking.TrackCandidateProducer_cfi
fastSim.toReplaceWith(lowPtTripletStepTrackCandidates,
FastSimulation.Tracking.TrackCandidateProducer_cfi.trackCandidateProducer.clone(
src = cms.InputTag("lowPtTripletStepSeeds"),
MinNumberOfCrossedLayers = 3,
hitMasks = cms.InputTag("lowPtTripletStepMasks"))
)
# TRACK FITTING
import RecoTracker.TrackProducer.TrackProducer_cfi
lowPtTripletStepTracks = RecoTracker.TrackProducer.TrackProducer_cfi.TrackProducer.clone(
src = 'lowPtTripletStepTrackCandidates',
AlgorithmName = cms.string('lowPtTripletStep'),
Fitter = cms.string('FlexibleKFFittingSmoother')
)
fastSim.toModify(lowPtTripletStepTracks, TTRHBuilder = 'WithoutRefit')
from TrackingTools.TrajectoryCleaning.TrajectoryCleanerBySharedHits_cfi import trajectoryCleanerBySharedHits
lowPtTripletStepTrajectoryCleanerBySharedHits = trajectoryCleanerBySharedHits.clone(
ComponentName = cms.string('lowPtTripletStepTrajectoryCleanerBySharedHits'),
fractionShared = cms.double(0.16),
allowSharedFirstHit = cms.bool(True)
)
trackingLowPU.toModify(lowPtTripletStepTrajectoryCleanerBySharedHits, fractionShared = 0.19)
trackingPhase2PU140.toModify(lowPtTripletStepTrajectoryCleanerBySharedHits, fractionShared = 0.09)
# Final selection
from RecoTracker.FinalTrackSelectors.TrackMVAClassifierPrompt_cfi import *
lowPtTripletStep = TrackMVAClassifierPrompt.clone()
lowPtTripletStep.src = 'lowPtTripletStepTracks'
lowPtTripletStep.mva.GBRForestLabel = 'MVASelectorIter1_13TeV'
lowPtTripletStep.qualityCuts = [-0.6,-0.3,-0.1]
#LWTNN selector
from RecoTracker.FinalTrackSelectors.TrackLwtnnClassifier_cfi import *
from RecoTracker.FinalTrackSelectors.trackSelectionLwtnn_cfi import *
trackingPhase1.toReplaceWith(lowPtTripletStep, TrackLwtnnClassifier.clone(
src = 'lowPtTripletStepTracks',
qualityCuts = [0.2, 0.5, 0.8],
))
fastSim.toModify(lowPtTripletStep, vertices = "firstStepPrimaryVerticesBeforeMixing")
highBetaStar_2018.toReplaceWith(lowPtTripletStep, TrackMVAClassifierPrompt.clone(
src = 'lowPtTripletStepTracks',
qualityCuts = [-0.7,-0.3,-0.1],
mva = dict(GBRForestLabel = 'MVASelectorLowPtTripletStep_Phase1')
))
pp_on_AA_2018.toReplaceWith(lowPtTripletStep, TrackMVAClassifierPrompt.clone(
src = 'lowPtTripletStepTracks',
qualityCuts = [-0.8, -0.4, 0.5],
mva = dict(GBRForestLabel = 'HIMVASelectorLowPtTripletStep_Phase1')
))
# For LowPU and Phase2PU140
import RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi
lowPtTripletStepSelector = RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.multiTrackSelector.clone(
src = 'lowPtTripletStepTracks',
useAnyMVA = cms.bool(False),
GBRForestLabel = cms.string('MVASelectorIter1'),
trackSelectors= [
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.looseMTS.clone(
name = 'lowPtTripletStepLoose',
), #end of pset
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.tightMTS.clone(
name = 'lowPtTripletStepTight',
preFilterName = 'lowPtTripletStepLoose',
),
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.highpurityMTS.clone(
name = 'QualityMasks',
preFilterName = 'lowPtTripletStepTight',
),
] #end of vpset
) #end of clone
trackingPhase2PU140.toModify(lowPtTripletStepSelector,
useAnyMVA = None,
GBRForestLabel = None,
trackSelectors= cms.VPSet(
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.looseMTS.clone(
name = 'lowPtTripletStepLoose',
chi2n_par = 1.2,
res_par = ( 0.003, 0.002 ),
minNumberLayers = 3,
maxNumberLostLayers = 2,
minNumber3DLayers = 3,
d0_par1 = ( 0.7, 4.0 ),
dz_par1 = ( 0.7, 4.0 ),
d0_par2 = ( 0.6, 4.0 ),
dz_par2 = ( 0.6, 4.0 )
), #end of pset
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.tightMTS.clone(
name = 'lowPtTripletStepTight',
preFilterName = 'lowPtTripletStepLoose',
chi2n_par = 0.7,
res_par = ( 0.003, 0.002 ),
minNumberLayers = 3,
maxNumberLostLayers = 2,
minNumber3DLayers = 3,
d0_par1 = ( 0.6, 4.0 ),
dz_par1 = ( 0.6, 4.0 ),
d0_par2 = ( 0.5, 4.0 ),
dz_par2 = ( 0.5, 4.0 )
),
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.highpurityMTS.clone(
name = 'lowPtTripletStep',
preFilterName = 'lowPtTripletStepTight',
chi2n_par = 0.4,
res_par = ( 0.003, 0.001 ),
min_nhits = 3,
minNumberLayers = 4,
maxNumberLostLayers = 2,
minNumber3DLayers = 4,
d0_par1 = ( 0.5, 4.0 ),
dz_par1 = ( 0.5, 4.0 ),
d0_par2 = ( 0.45, 4.0 ),
dz_par2 = ( 0.45, 4.0 )
),
), #end of vpset
) #end of clone
# Final sequence
LowPtTripletStepTask = cms.Task(lowPtTripletStepClusters,
lowPtTripletStepSeedLayers,
lowPtTripletStepTrackingRegions,
lowPtTripletStepHitDoublets,
lowPtTripletStepHitTriplets,
lowPtTripletStepSeeds,
lowPtTripletStepTrackCandidates,
lowPtTripletStepTracks,
lowPtTripletStep)
LowPtTripletStep = cms.Sequence(LowPtTripletStepTask)
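
# Illustrative use (hedged: the enclosing reconstruction chain is defined
# elsewhere in the release, so this is only a sketch of how the task is
# typically consumed by the iterative-tracking configuration):
#   iterTrackingTask.add(LowPtTripletStepTask)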
_LowPtTripletStepTask_LowPU_Phase2PU140 = LowPtTripletStepTask.copy()
_LowPtTripletStepTask_LowPU_Phase2PU140.replace(lowPtTripletStep, lowPtTripletStepSelector)
trackingLowPU.toReplaceWith(LowPtTripletStepTask, _LowPtTripletStepTask_LowPU_Phase2PU140)
trackingPhase2PU140.toReplaceWith(LowPtTripletStepTask, _LowPtTripletStepTask_LowPU_Phase2PU140)
#fastsim
from FastSimulation.Tracking.FastTrackerRecHitMaskProducer_cfi import maskProducerFromClusterRemover
lowPtTripletStepMasks = maskProducerFromClusterRemover(lowPtTripletStepClusters)
fastSim.toReplaceWith(LowPtTripletStepTask,
cms.Task(lowPtTripletStepMasks
,lowPtTripletStepTrackingRegions
,lowPtTripletStepSeeds
,lowPtTripletStepTrackCandidates
,lowPtTripletStepTracks
,lowPtTripletStep
))
|
the-stack_106_24754 | #!/usr/bin/env python3
"""
Author : fpjrh <[email protected]>
Date : 2021-11-29
Purpose: Welcome the world to this wonder
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Say hello in some kind fashion!')
parser.add_argument('positional',
metavar='str',
help='A positional argument')
parser.add_argument('-a',
'--arg',
help='A named string argument',
metavar='str',
type=str,
default='')
parser.add_argument('-i',
'--int',
help='A named integer argument',
metavar='int',
type=int,
default=0)
parser.add_argument('-f',
'--file',
help='A readable file',
metavar='FILE',
type=argparse.FileType('rt'),
default=None)
parser.add_argument('-o',
'--on',
help='A boolean flag',
action='store_true')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
str_arg = args.arg
int_arg = args.int
file_arg = args.file
flag_arg = args.on
pos_arg = args.positional
print(f'str_arg = "{str_arg}"')
print(f'int_arg = "{int_arg}"')
print('file_arg = "{}"'.format(file_arg.name if file_arg else ''))
print(f'flag_arg = "{flag_arg}"')
print(f'positional = "{pos_arg}"')
# --------------------------------------------------
if __name__ == '__main__':
main()
|
the-stack_106_24757 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import discord
import asyncio
import re
import inspect
from .errors import BadArgument, NoPrivateMessage
__all__ = [ 'Converter', 'MemberConverter', 'UserConverter',
'ChannelConverter', 'InviteConverter', 'RoleConverter',
'GameConverter', 'ColourConverter' ]
def _get_from_servers(bot, getter, argument):
result = None
for server in bot.servers:
result = getattr(server, getter)(argument)
if result:
return result
return result
class Converter:
"""The base class of custom converters that require the :class:`Context`
to be passed to be useful.
This allows you to implement converters that function similar to the
special cased ``discord`` classes.
Classes that derive from this should override the :meth:`convert` method
to do its conversion logic. This method could be a coroutine or a regular
function.
Attributes
-----------
ctx: :class:`Context`
The invocation context that the argument is being used in.
argument: str
The argument that is being converted.
"""
def __init__(self, ctx, argument):
self.ctx = ctx
self.argument = argument
def convert(self):
raise NotImplementedError('Derived classes need to implement this.')
class IDConverter(Converter):
def __init__(self, ctx, argument):
super().__init__(ctx, argument)
self._id_regex = re.compile(r'([0-9]{15,21})$')
def _get_id_match(self):
return self._id_regex.match(self.argument)
class MemberConverter(IDConverter):
def convert(self):
message = self.ctx.message
bot = self.ctx.bot
match = self._get_id_match() or re.match(r'<@!?([0-9]+)>$', self.argument)
server = message.server
result = None
if match is None:
# not a mention...
if server:
result = server.get_member_named(self.argument)
else:
result = _get_from_servers(bot, 'get_member_named', self.argument)
else:
user_id = match.group(1)
if server:
result = server.get_member(user_id)
else:
result = _get_from_servers(bot, 'get_member', user_id)
if result is None:
raise BadArgument('Member "{}" not found'.format(self.argument))
return result
UserConverter = MemberConverter
class ChannelConverter(IDConverter):
def convert(self):
message = self.ctx.message
bot = self.ctx.bot
match = self._get_id_match() or re.match(r'<#([0-9]+)>$', self.argument)
result = None
server = message.server
if match is None:
# not a mention
if server:
result = discord.utils.get(server.channels, name=self.argument)
else:
result = discord.utils.get(bot.get_all_channels(), name=self.argument)
else:
channel_id = match.group(1)
if server:
result = server.get_channel(channel_id)
else:
result = _get_from_servers(bot, 'get_channel', channel_id)
if result is None:
raise BadArgument('Channel "{}" not found.'.format(self.argument))
return result
class ColourConverter(Converter):
def convert(self):
arg = self.argument.replace('0x', '').lower()
if arg[0] == '#':
arg = arg[1:]
try:
value = int(arg, base=16)
return discord.Colour(value=value)
except ValueError:
method = getattr(discord.Colour, arg, None)
if method is None or not inspect.ismethod(method):
raise BadArgument('Colour "{}" is invalid.'.format(arg))
return method()
class RoleConverter(IDConverter):
def convert(self):
server = self.ctx.message.server
if not server:
raise NoPrivateMessage()
match = self._get_id_match() or re.match(r'<@&([0-9]+)>$', self.argument)
params = dict(id=match.group(1)) if match else dict(name=self.argument)
result = discord.utils.get(server.roles, **params)
if result is None:
raise BadArgument('Role "{}" not found.'.format(self.argument))
return result
class GameConverter(Converter):
def convert(self):
return discord.Game(name=self.argument)
class InviteConverter(Converter):
@asyncio.coroutine
def convert(self):
try:
invite = yield from self.ctx.bot.get_invite(self.argument)
return invite
except Exception as e:
raise BadArgument('Invite is invalid or expired') from e
class EmojiConverter(IDConverter):
@asyncio.coroutine
def convert(self):
message = self.ctx.message
bot = self.ctx.bot
match = self._get_id_match() or re.match(r'<:[a-zA-Z0-9]+:([0-9]+)>$', self.argument)
result = None
server = message.server
if match is None:
# Try to get the emoji by name. Try local server first.
if server:
result = discord.utils.get(server.emojis, name=self.argument)
if result is None:
result = discord.utils.get(bot.get_all_emojis(), name=self.argument)
else:
emoji_id = match.group(1)
# Try to look up emoji by id.
if server:
result = discord.utils.get(server.emojis, id=emoji_id)
if result is None:
result = discord.utils.get(bot.get_all_emojis(), id=emoji_id)
if result is None:
raise BadArgument('Emoji "{}" not found.'.format(self.argument))
return result
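# --- Usage sketch (not part of the original module) --------------------------
# A hedged example of how these converter classes are typically consumed by the
# commands extension of this discord.py generation: annotate a command argument
# with a converter class and the framework instantiates it with (ctx, argument)
# and calls convert(). The bot, prefix and command name below are assumptions.
#
# from discord.ext import commands
#
# bot = commands.Bot(command_prefix='!')
#
# @bot.command(pass_context=True)
# async def pick(ctx, member: MemberConverter, colour: ColourConverter):
#     await bot.say('{} picked {}'.format(member.display_name, colour))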
|
the-stack_106_24758 | from selenium import webdriver
from bs4 import BeautifulSoup
import ipdb
import pandas as pd
import string
from time import sleep
def driver_init():
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--incognito')
options.add_argument('--headless')
driver = webdriver.Chrome("./chromedriver", options=options)
return driver
def get_soup(driver, url):
#add error checking
driver.get(url)
return BeautifulSoup(driver.page_source, "lxml")
def get_teams(driver):
teams_soup = get_soup(driver, 'https://www.basketball-reference.com/teams/')
teams_table = teams_soup.find('table', id='teams_active')
for tags in teams_table.find_all('tr', 'partial_table'):
tags.decompose()
    for tags in teams_table.find_all('tr', 'thead'):  # header rows repeated inside tbody
tags.decompose()
teams_df = pd.read_html(str(teams_table))[0]
franch_tags = teams_table.find_all(attrs={'data-stat': 'franch_name', 'scope' : 'row'})
franch_ids = []
for t in franch_tags:
franch_ids.append(t.a['href'].split('/')[2])
teams_df = teams_df[~teams_df['Franchise'].str.contains('Franchise')]
teams_df['franch_id'] = franch_ids
teams_df = teams_df.set_index('franch_id', drop=False)
cols_as_series = teams_df.columns.to_series()
int_cols = list(cols_as_series.loc['Yrs':'L']) +\
list(cols_as_series.loc['Plyfs':'Champ'])
teams_df = teams_df.astype({col:'int64' for col in int_cols})
teams_df = teams_df.astype({'W/L%':'float'})
return teams_df
def correct_team_url(franch_id, season): #works for 1975 and on
if franch_id == 'ATL':
if season <= 1950:
return 'TRI'
if season <= 1954:
return 'MLH'
if season <= 1967:
return 'STL'
return 'ATL'
if franch_id == 'NJN':
if season <= 1967:
return 'NJA'
if season <= 1975:
return 'NYA'
if season == 1976:
return 'NYN'
if season <= 2011:
return 'NJN'
return 'BRK'
if franch_id == 'CHA':
if season <= 2001:
return 'CHH'
if season <= 2013:
return 'CHA'
return 'CHO'
if franch_id == 'DEN':
if season <= 1973:
return 'DNR'
if season <= 1975:
return 'DNA'
return 'DEN'
if franch_id == 'DET' and season <= 1956:
return 'FTW'
if franch_id == 'GSW':
if season <= 1961:
return 'PHW'
if season <= 1970:
return 'SFW'
return 'GSW'
if franch_id == 'HOU' and season <= 1970:
return 'SDR'
if franch_id == 'IND' and season <= 1975:
return 'INA'
if franch_id == 'LAC':
if season <= 1977:
return 'BUF'
if season <= 1983:
return 'SDC'
return 'LAC'
if franch_id == 'LAL' and season <= 1959:
return 'MNL'
if franch_id == 'MEM' and season <= 2000:
return 'VAN'
if franch_id == 'NOH':
if season <= 2004:
return 'NOH'
if season <= 2006:
return 'NOK'
if season <= 2012:
return 'NOH'
return 'NOP'
if franch_id == 'OKC' and season <= 2007:
return 'SEA'
if franch_id == 'PHI' and season <= 1962:
return 'SYR'
if franch_id == 'SAC':
if season <= 1956:
return 'ROC'
if season <= 1971:
return 'CIN'
if season <= 1974:
return 'KCO'
if season <= 1985:
return 'KCK'
return 'SAC'
if franch_id == 'SAS':
if season <= 1969:
return 'DLC'
if season == 1970:
return 'TEX'
if season <= 1972:
return 'DLC'
if season <= 1975:
return 'SAA'
return 'SAS'
if franch_id == 'UTA':
if season <= 1978:
return 'NOJ'
return 'UTA'
if franch_id == 'WAS':
if season == 1961:
return 'CHP'
if season == 1962:
return 'CHZ'
if season <= 1972:
return 'BAL'
if season == 1973:
return 'CAP'
if season <= 1996:
return 'WSB'
return 'WAS'
return franch_id
def get_correct_franch_id(other_id):
if other_id == 'BRK':
return 'NJN'
elif other_id == 'SEA':
return 'OKC'
elif other_id in ['CHO', 'CHH']:
return 'CHA'
elif other_id == 'VAN':
return 'MEM'
elif other_id in ['NOK', 'NOP']:
return 'NOH'
elif other_id == 'KCK':
return 'SAC'
elif other_id == 'WSB':
return 'WAS'
else:
return other_id
def process_team_table(driver):
soup = get_soup(driver, 'https://www.basketball-reference.com/teams/')
soup = soup.tbody
for tag in soup.find_all('tr', attrs={'class': 'thead'}):
tag.decompose()
franch_dict = dict()
for row in soup.find_all('tr'):
if row['class'] == ['full_table']:
last_team = row.th.a['href'].split('/')[2]
year_min = int(row.find('td', attrs={'data-stat': 'year_min'}).text[0:4])
year_max = int(row.find('td', attrs={'data-stat': 'year_max'}).text[0:4])
franch_dict[row.th.text] = (last_team, year_min, year_max)
return pd.DataFrame.from_dict(franch_dict, orient = 'index',
columns = ['franch_id', 'From', 'To'])
def get_franch_id(team_name):
pass
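# Usage sketch: pulls the live franchise table from basketball-reference, so it
# needs network access and a ./chromedriver binary next to this script. The
# column names used below come from the table handling in get_teams(); treat
# the whole block as an illustrative assumption rather than part of the module.
if __name__ == '__main__':
    driver = driver_init()
    try:
        teams = get_teams(driver)
        print(teams[['Franchise', 'Yrs', 'W/L%']].head())
        # Map a franchise id to the team code it used in a given season,
        # e.g. the OKC franchise played as SEA through 2007.
        print(correct_team_url('OKC', 2005))
    finally:
        driver.quit()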
|
the-stack_106_24760 | import _plotly_utils.basevalidators
class DashValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="dash", parent_name="scattersmith.line", **kwargs):
super(DashValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
values=kwargs.pop(
"values", ["solid", "dot", "dash", "longdash", "dashdot", "longdashdot"]
),
**kwargs
)
|
the-stack_106_24761 | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import numpy as np
import pandas as pd
from .... import opcodes
from ....core import recursive_tile
from ....dataframe.utils import parse_index
from ....serialization.serializables import BoolField, BytesField, DictField, KeyField
from ....tensor.core import TENSOR_TYPE, TensorOrder
from ...operands import LearnOperand, LearnOperandMixin, OutputType
class LGBMPredict(LearnOperand, LearnOperandMixin):
_op_type_ = opcodes.LGBM_PREDICT
_data = KeyField('data')
_model = BytesField('model', on_serialize=pickle.dumps, on_deserialize=pickle.loads)
_proba = BoolField('proba')
_kwds = DictField('kwds')
def __init__(self, data=None, model=None, proba=None, kwds=None,
output_types=None, **kw):
super().__init__(_data=data, _model=model, _proba=proba, _kwds=kwds,
_output_types=output_types, **kw)
@property
def data(self):
return self._data
@property
def model(self):
return self._model
@property
def proba(self) -> bool:
return self._proba
@property
def kwds(self) -> dict:
return self._kwds
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
it = iter(inputs)
self._data = next(it)
def __call__(self):
num_class = int(getattr(self.model, 'n_classes_', 2))
if self.proba:
shape = (self.data.shape[0], num_class)
else:
shape = (self.data.shape[0],)
if self._proba:
dtype = np.dtype(np.float_)
elif hasattr(self.model, 'classes_'):
dtype = np.array(self.model.classes_).dtype
else:
dtype = getattr(self.model, 'out_dtype_', np.dtype('float'))
if self.output_types[0] == OutputType.tensor:
# tensor
return self.new_tileable([self.data], shape=shape, dtype=dtype,
order=TensorOrder.C_ORDER)
elif self.output_types[0] == OutputType.dataframe:
# dataframe
dtypes = pd.Series([dtype] * num_class)
columns_value = parse_index(pd.Index(self.model.classes_), store_data=True)
return self.new_tileable([self.data], shape=shape, dtypes=dtypes,
columns_value=columns_value,
index_value=self.data.index_value)
else:
return self.new_tileable([self.data], shape=shape, index_value=self.data.index_value,
name='predictions', dtype=dtype)
@classmethod
def tile(cls, op: "LGBMPredict"):
out = op.outputs[0]
out_chunks = []
data = op.data
if data.chunk_shape[1] > 1:
data = yield from recursive_tile(data.rechunk({1: op.data.shape[1]}))
for in_chunk in data.chunks:
chunk_op = op.copy().reset_key()
chunk_index = (in_chunk.index[0],)
if len(out.shape) > 1:
chunk_shape = (in_chunk.shape[0], out.shape[1])
chunk_index += (0,)
else:
chunk_shape = (in_chunk.shape[0],)
if op.output_types[0] == OutputType.tensor:
out_chunk = chunk_op.new_chunk([in_chunk], shape=chunk_shape,
dtype=out.dtype,
order=out.order, index=chunk_index)
elif op.output_types[0] == OutputType.dataframe:
# dataframe chunk
out_chunk = chunk_op.new_chunk([in_chunk], shape=chunk_shape,
dtypes=out.dtypes,
columns_value=out.columns_value,
index_value=in_chunk.index_value,
index=chunk_index)
else:
# series chunk
out_chunk = chunk_op.new_chunk([in_chunk], shape=chunk_shape,
dtype=out.dtype,
index_value=in_chunk.index_value,
name=out.name, index=chunk_index)
out_chunks.append(out_chunk)
new_op = op.copy()
params = out.params
params['chunks'] = out_chunks
nsplits = (data.nsplits[0],)
if out.ndim > 1:
nsplits += ((out.shape[1],),)
params['nsplits'] = nsplits
return new_op.new_tileables(op.inputs, kws=[params])
@classmethod
def execute(cls, ctx, op: "LGBMPredict"):
in_data = ctx[op.data.key]
in_data = in_data.spmatrix if hasattr(in_data, 'spmatrix') else in_data
out = op.outputs[0]
if op.data.shape[0] == 0:
result = np.array([])
elif op.proba:
result = op.model.predict_proba(in_data, **op.kwds)
else:
result = op.model.predict(in_data, **op.kwds)
if op.output_types[0] == OutputType.dataframe:
result = pd.DataFrame(result, index=in_data.index, columns=out.columns_value.to_pandas())
elif op.output_types[0] == OutputType.series:
result = pd.Series(result, index=in_data.index, name='predictions')
ctx[out.key] = result
def predict_base(model, data, session=None, run_kwargs=None, run=True, **kwargs):
from lightgbm import LGBMModel
if not isinstance(model, LGBMModel):
raise TypeError(f'model has to be a lightgbm.LGBMModel, got {type(model)} instead')
model = model.to_local() if hasattr(model, 'to_local') else model
proba = kwargs.pop('proba', hasattr(model, 'classes_'))
if isinstance(data, TENSOR_TYPE):
output_types = [OutputType.tensor]
elif proba:
output_types = [OutputType.dataframe]
else:
output_types = [OutputType.series]
op = LGBMPredict(data=data, model=model, gpu=data.op.gpu, output_types=output_types,
proba=proba, kwds=kwargs)
result = op()
if run:
result.execute(session=session, **(run_kwargs or dict()))
return result
def predict(model, data, session=None, run_kwargs=None, run=True, **kw):
if hasattr(model, 'classes_'):
return predict_base(model, data, session=session, run_kwargs=run_kwargs,
proba=False, run=run, **kw)
else:
return predict_base(model, data, session=session, run_kwargs=run_kwargs,
run=run, **kw)
def predict_proba(model, data, session=None, run_kwargs=None, run=True, **kw):
return predict_base(model, data, session=session, run_kwargs=run_kwargs,
run=run, proba=True, **kw)
|
the-stack_106_24766 | from pathlib import (
Path
)
from tempfile import (
gettempdir,
)
import pytest
from web3.providers.ipc import (
IPCProvider,
)
from web3.providers.rpc import (
HTTPProvider,
)
from populus.config.web3 import Web3Config
def test_provider_property_when_not_set():
web3_config = Web3Config()
with pytest.raises(KeyError):
web3_config.provider
def test_provider_property_without_settings():
web3_config = Web3Config({'provider': {'class': 'web3.providers.ipc.IPCProvider'}})
assert isinstance(web3_config.provider, IPCProvider)
def test_provider_property_with_settings():
tmp_path = Path(gettempdir()).resolve()
web3_config = Web3Config({
'provider': {
'class': 'web3.providers.ipc.IPCProvider',
'settings': {
'ipc_path': tmp_path
},
},
})
assert isinstance(web3_config.provider, IPCProvider)
assert web3_config.provider.ipc_path == str(tmp_path)
@pytest.mark.parametrize(
'value,expected',
(
('ipc', 'web3.providers.ipc.IPCProvider'),
('rpc', 'web3.providers.rpc.HTTPProvider'),
('web3.providers.ipc.IPCProvider', 'web3.providers.ipc.IPCProvider'),
('web3.providers.rpc.HTTPProvider', 'web3.providers.rpc.HTTPProvider'),
(
'web3.providers.websocket.WebsocketProvider',
'web3.providers.websocket.WebsocketProvider'
),
(IPCProvider, 'web3.providers.ipc.IPCProvider'),
(HTTPProvider, 'web3.providers.rpc.HTTPProvider'),
),
ids=(
'ipc-shorthand',
'rpc-shorthand',
'ipc-pythonpath',
'http-pythonpath',
'rpc-pythonpath',
'ipc-classobj',
'http-classobj',
)
)
def test_set_provider_class_api(value, expected):
web3_config = Web3Config()
web3_config.set_provider_class(value)
assert web3_config['provider.class'] == expected
def test_provider_kwargs_property():
web3_config = Web3Config({
'provider': {
'class': 'web3.providers.ipc.IPCProvider',
},
})
assert web3_config.provider_kwargs == {}
web3_config.provider_kwargs = {'ipc_path': '/not/a/real-path'}
assert web3_config.provider_kwargs == {'ipc_path': '/not/a/real-path'}
web3_config.provider_kwargs['some-key'] = 32
assert web3_config.provider_kwargs == {'ipc_path': '/not/a/real-path', 'some-key': 32}
def test_getting_web3_instance():
web3_config = Web3Config({'provider': {'class': 'web3.providers.ipc.IPCProvider'}})
web3 = web3_config.get_web3()
assert isinstance(web3.provider, IPCProvider)
def test_default_account_property():
web3_config = Web3Config()
with pytest.raises(KeyError):
web3_config.default_account
web3_config.default_account = '0x0000000000000000000000000000000000000001'
assert web3_config.default_account == '0x0000000000000000000000000000000000000001'
|
the-stack_106_24768 | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to describe the specified channel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.eventarc import channels
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.eventarc import flags
_DETAILED_HELP = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
""" \
To describe the channel ``my-channel'' in location ``us-central1'', run:
$ {command} my-channel --location us-central1
""",
}
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Describe(base.DescribeCommand):
"""Describes an Eventarc channel."""
detailed_help = _DETAILED_HELP
@classmethod
def Args(cls, parser):
flags.AddChannelResourceArg(parser, 'Channel to describe.', required=True)
def Run(self, args):
client = channels.ChannelClientV1()
channel_ref = args.CONCEPTS.channel.Parse()
return client.Get(channel_ref)
|
the-stack_106_24770 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from dateutil.relativedelta import relativedelta
from mycroft.util.lang.parse_common import is_numeric, look_for_fractions
de_numbers = {
'null': 0,
'ein': 1,
'eins': 1,
'eine': 1,
'einer': 1,
'einem': 1,
'einen': 1,
'eines': 1,
'zwei': 2,
'drei': 3,
'vier': 4,
u'fünf': 5,
'sechs': 6,
'sieben': 7,
'acht': 8,
'neun': 9,
'zehn': 10,
'elf': 11,
u'zwölf': 12,
'dreizehn': 13,
'vierzehn': 14,
u'fünfzehn': 15,
'sechzehn': 16,
'siebzehn': 17,
'achtzehn': 18,
'neunzehn': 19,
'zwanzig': 20,
'einundzwanzig': 21,
'zweiundzwanzig': 22,
'dreiundzwanzig': 23,
'vierundzwanzig': 24,
u'fünfundzwanzig': 25,
'sechsundzwanzig': 26,
'siebenundzwanzig': 27,
'achtundzwanzig': 28,
'neunundzwanzig': 29,
u'dreißig': 30,
u'einunddreißig': 31,
'vierzig': 40,
u'fünfzig': 50,
'sechzig': 60,
'siebzig': 70,
'achtzig': 80,
'neunzig': 90,
'hundert': 100,
'zweihundert': 200,
'dreihundert': 300,
'vierhundert': 400,
u'fünfhundert': 500,
'sechshundert': 600,
'siebenhundert': 700,
'achthundert': 800,
'neunhundert': 900,
'tausend': 1000,
'million': 1000000
}
def extractnumber_de(text):
"""
This function prepares the given text for parsing by making
numbers consistent, getting rid of contractions, etc.
Args:
text (str): the string to normalize
Returns:
(int) or (float): The value of extracted number
undefined articles cannot be suppressed in German:
'ein Pferd' means 'one horse' and 'a horse'
"""
aWords = text.split()
aWords = [word for word in aWords if
word not in ["der", "die", "das", "des", "den", "dem"]]
and_pass = False
valPreAnd = False
val = False
count = 0
while count < len(aWords):
word = aWords[count]
if is_numeric(word):
# if word.isdigit(): # doesn't work with decimals
val = float(word)
elif isFractional_de(word):
val = isFractional_de(word)
elif isOrdinal_de(word):
val = isOrdinal_de(word)
else:
if word in de_numbers:
val = de_numbers[word]
if count < (len(aWords) - 1):
wordNext = aWords[count + 1]
else:
wordNext = ""
valNext = isFractional_de(wordNext)
if valNext:
val = val * valNext
aWords[count + 1] = ""
if not val:
# look for fractions like "2/3"
aPieces = word.split('/')
# if (len(aPieces) == 2 and is_numeric(aPieces[0])
# and is_numeric(aPieces[1])):
if look_for_fractions(aPieces):
val = float(aPieces[0]) / float(aPieces[1])
elif and_pass:
# added to value, quit here
val = valPreAnd
break
else:
count += 1
continue
aWords[count] = ""
if and_pass:
aWords[count - 1] = '' # remove "and"
val += valPreAnd
elif count + 1 < len(aWords) and aWords[count + 1] == 'und':
and_pass = True
valPreAnd = val
val = False
count += 2
continue
elif count + 2 < len(aWords) and aWords[count + 2] == 'und':
and_pass = True
valPreAnd = val
val = False
count += 3
continue
break
if not val:
return False
return val
def extract_datetime_de(string, currentDate):
def clean_string(s):
"""
cleans the input string of unneeded punctuation
and capitalization among other things.
'am' is a preposition, so cannot currently be used
for 12 hour date format
"""
s = s.lower().replace('?', '').replace('.', '').replace(',', '') \
.replace(' der ', ' ').replace(' den ', ' ').replace(' an ',
' ').replace(
' am ', ' ') \
.replace(' auf ', ' ').replace(' um ', ' ')
wordList = s.split()
for idx, word in enumerate(wordList):
if isOrdinal_de(word) is not False:
word = str(isOrdinal_de(word))
wordList[idx] = word
return wordList
def date_found():
return found or \
(
datestr != "" or timeStr != "" or
yearOffset != 0 or monthOffset != 0 or
dayOffset is True or hrOffset != 0 or
hrAbs != 0 or minOffset != 0 or
minAbs != 0 or secOffset != 0
)
if string == "" or not currentDate:
return None
found = False
daySpecified = False
dayOffset = False
monthOffset = 0
yearOffset = 0
dateNow = currentDate
today = dateNow.strftime("%w")
currentYear = dateNow.strftime("%Y")
fromFlag = False
datestr = ""
hasYear = False
timeQualifier = ""
timeQualifiersList = [u'früh', 'morgens', 'vormittag', 'vormittags',
'nachmittag', 'nachmittags', 'abend', 'abends',
'nachts']
markers = ['in', 'am', 'gegen', 'bis', u'für']
days = ['montag', 'dienstag', 'mittwoch',
'donnerstag', 'freitag', 'samstag', 'sonntag']
months = ['januar', 'februar', u'märz', 'april', 'mai', 'juni',
'juli', 'august', 'september', 'october', 'november',
'dezember']
monthsShort = ['jan', 'feb', u'mär', 'apr', 'mai', 'juni', 'juli', 'aug',
'sept', 'oct', 'nov', 'dez']
validFollowups = days + months + monthsShort
validFollowups.append("heute")
validFollowups.append("morgen")
validFollowups.append(u"nächste")
validFollowups.append(u"nächster")
validFollowups.append(u"nächstes")
validFollowups.append(u"nächsten")
validFollowups.append(u"nächstem")
validFollowups.append("letzte")
validFollowups.append("letzter")
validFollowups.append("letztes")
validFollowups.append("letzten")
validFollowups.append("letztem")
validFollowups.append("jetzt")
words = clean_string(string)
for idx, word in enumerate(words):
if word == "":
continue
wordPrevPrev = words[idx - 2] if idx > 1 else ""
wordPrev = words[idx - 1] if idx > 0 else ""
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
# this isn't in clean string because I don't want to save back to words
if word != 'morgen' and word != u'übermorgen':
if word[-2:] == "en":
word = word[:-2] # remove en
if word != 'heute':
if word[-1:] == "e":
word = word[:-1] # remove plural for most nouns
start = idx
used = 0
# save timequalifier for later
if word in timeQualifiersList:
timeQualifier = word
# parse today, tomorrow, day after tomorrow
elif word == "heute" and not fromFlag:
dayOffset = 0
used += 1
elif word == "morgen" and not fromFlag and wordPrev != "am" and \
wordPrev not in days: # morgen means tomorrow if not "am
# Morgen" and not [day of the week] morgen
dayOffset = 1
used += 1
elif word == u"übermorgen" and not fromFlag:
dayOffset = 2
used += 1
# parse 5 days, 10 weeks, last week, next week
elif word == "tag" or word == "tage":
if wordPrev[0].isdigit():
dayOffset += int(wordPrev)
start -= 1
used = 2
elif word == "woch" and not fromFlag:
if wordPrev[0].isdigit():
dayOffset += int(wordPrev) * 7
start -= 1
used = 2
elif wordPrev[:6] == u"nächst":
dayOffset = 7
start -= 1
used = 2
elif wordPrev[:5] == "letzt":
dayOffset = -7
start -= 1
used = 2
# parse 10 months, next month, last month
elif word == "monat" and not fromFlag:
if wordPrev[0].isdigit():
monthOffset = int(wordPrev)
start -= 1
used = 2
elif wordPrev[:6] == u"nächst":
monthOffset = 1
start -= 1
used = 2
elif wordPrev[:5] == "letzt":
monthOffset = -1
start -= 1
used = 2
# parse 5 years, next year, last year
elif word == "jahr" and not fromFlag:
if wordPrev[0].isdigit():
yearOffset = int(wordPrev)
start -= 1
used = 2
elif wordPrev[:6] == u"nächst":
yearOffset = 1
start -= 1
used = 2
elif wordPrev[:6] == u"nächst":
yearOffset = -1
start -= 1
used = 2
# parse Monday, Tuesday, etc., and next Monday,
# last Tuesday, etc.
elif word in days and not fromFlag:
d = days.index(word)
dayOffset = (d + 1) - int(today)
used = 1
if dayOffset < 0:
dayOffset += 7
if wordNext == "morgen": # morgen means morning if preceded by
# the day of the week
words[idx + 1] = u"früh"
if wordPrev[:6] == u"nächst":
dayOffset += 7
used += 1
start -= 1
elif wordPrev[:5] == "letzt":
dayOffset -= 7
used += 1
start -= 1
# parse 15 of July, June 20th, Feb 18, 19 of February
elif word in months or word in monthsShort and not fromFlag:
try:
m = months.index(word)
except ValueError:
m = monthsShort.index(word)
used += 1
datestr = months[m]
if wordPrev and (wordPrev[0].isdigit() or
(wordPrev == "of" and wordPrevPrev[0].isdigit())):
if wordPrev == "of" and wordPrevPrev[0].isdigit():
datestr += " " + words[idx - 2]
used += 1
start -= 1
else:
datestr += " " + wordPrev
start -= 1
used += 1
if wordNext and wordNext[0].isdigit():
datestr += " " + wordNext
used += 1
hasYear = True
else:
hasYear = False
elif wordNext and wordNext[0].isdigit():
datestr += " " + wordNext
used += 1
if wordNextNext and wordNextNext[0].isdigit():
datestr += " " + wordNextNext
used += 1
hasYear = True
else:
hasYear = False
# parse 5 days from tomorrow, 10 weeks from next thursday,
# 2 months from July
if (
word == "von" or word == "nach" or word == "ab") and wordNext \
in validFollowups:
used = 2
fromFlag = True
if wordNext == "morgen" and wordPrev != "am" and \
wordPrev not in days: # morgen means tomorrow if not "am
# Morgen" and not [day of the week] morgen:
dayOffset += 1
elif wordNext in days:
d = days.index(wordNext)
tmpOffset = (d + 1) - int(today)
used = 2
if tmpOffset < 0:
tmpOffset += 7
dayOffset += tmpOffset
elif wordNextNext and wordNextNext in days:
d = days.index(wordNextNext)
tmpOffset = (d + 1) - int(today)
used = 3
if wordNext[:6] == u"nächst":
tmpOffset += 7
used += 1
start -= 1
elif wordNext[:5] == "letzt":
tmpOffset -= 7
used += 1
start -= 1
dayOffset += tmpOffset
if used > 0:
if start - 1 > 0 and words[start - 1].startswith("diese"):
start -= 1
used += 1
for i in range(0, used):
words[i + start] = ""
if start - 1 >= 0 and words[start - 1] in markers:
words[start - 1] = ""
found = True
daySpecified = True
# parse time
timeStr = ""
hrOffset = 0
minOffset = 0
secOffset = 0
hrAbs = 0
minAbs = 0
for idx, word in enumerate(words):
if word == "":
continue
wordPrevPrev = words[idx - 2] if idx > 1 else ""
wordPrev = words[idx - 1] if idx > 0 else ""
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
wordNextNextNext = words[idx + 3] if idx + 3 < len(words) else ""
wordNextNextNextNext = words[idx + 4] if idx + 4 < len(words) else ""
# parse noon, midnight, morning, afternoon, evening
used = 0
if word[:6] == "mittag":
hrAbs = 12
used += 1
elif word[:11] == "mitternacht":
hrAbs = 0
used += 1
elif word == "morgens" or (
wordPrev == "am" and word == "morgen") or word == u"früh":
if hrAbs == 0:
hrAbs = 8
used += 1
elif word[:10] == "nachmittag":
if hrAbs == 0:
hrAbs = 15
used += 1
elif word[:5] == "abend":
if hrAbs == 0:
hrAbs = 19
used += 1
# parse half an hour, quarter hour
elif word == "stunde" and \
(wordPrev in markers or wordPrevPrev in markers):
if wordPrev[:4] == "halb":
minOffset = 30
elif wordPrev == "viertel":
minOffset = 15
elif wordPrev == "dreiviertel":
minOffset = 45
else:
hrOffset = 1
if wordPrevPrev in markers:
words[idx - 2] = ""
words[idx - 1] = ""
used += 1
hrAbs = -1
minAbs = -1
# parse 5:00 am, 12:00 p.m., etc
elif word[0].isdigit():
isTime = True
strHH = ""
strMM = ""
remainder = ""
if ':' in word:
# parse colons
# "3:00 in the morning"
stage = 0
length = len(word)
for i in range(length):
if stage == 0:
if word[i].isdigit():
strHH += word[i]
elif word[i] == ":":
stage = 1
else:
stage = 2
i -= 1
elif stage == 1:
if word[i].isdigit():
strMM += word[i]
else:
stage = 2
i -= 1
elif stage == 2:
remainder = word[i:].replace(".", "")
break
if remainder == "":
nextWord = wordNext.replace(".", "")
if nextWord == "am" or nextWord == "pm":
remainder = nextWord
used += 1
elif nextWord == "abends":
remainder = "pm"
used += 1
elif wordNext == "am" and wordNextNext == "morgen":
remainder = "am"
used += 2
elif wordNext == "am" and wordNextNext == "nachmittag":
remainder = "pm"
used += 2
elif wordNext == "am" and wordNextNext == "abend":
remainder = "pm"
used += 2
elif wordNext == "morgens":
remainder = "am"
used += 1
elif wordNext == "nachmittags":
remainder = "pm"
used += 1
elif wordNext == "abends":
remainder = "pm"
used += 1
elif wordNext == "heute" and wordNextNext == "morgen":
remainder = "am"
used = 2
elif wordNext == "heute" and wordNextNext == "nachmittag":
remainder = "pm"
used = 2
elif wordNext == "heute" and wordNextNext == "abend":
remainder = "pm"
used = 2
elif wordNext == "nachts":
                        if int(strHH) > 4:
remainder = "pm"
else:
remainder = "am"
used += 1
else:
if timeQualifier != "":
                            if int(strHH) <= 12 and \
                                    (timeQualifier == "abends" or
                                     timeQualifier == "nachmittags"):
                                strHH = str(int(strHH) + 12)  # what happens when strHH is 24?
else:
# try to parse # s without colons
# 5 hours, 10 minutes etc.
length = len(word)
strNum = ""
remainder = ""
for i in range(length):
if word[i].isdigit():
strNum += word[i]
else:
remainder += word[i]
if remainder == "":
remainder = wordNext.replace(".", "").lstrip().rstrip()
if (
remainder == "pm" or
wordNext == "pm" or
remainder == "p.m." or
wordNext == "p.m."):
strHH = strNum
remainder = "pm"
used = 1
elif (
remainder == "am" or
wordNext == "am" or
remainder == "a.m." or
wordNext == "a.m."):
strHH = strNum
remainder = "am"
used = 1
else:
if wordNext == "stund" and int(word) < 100:
# "in 3 hours"
hrOffset = int(word)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif wordNext == "minut":
# "in 10 minutes"
minOffset = int(word)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif wordNext == "sekund":
# in 5 seconds
secOffset = int(word)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif wordNext == "uhr":
strHH = word
used += 1
isTime = True
if wordNextNext == timeQualifier:
strMM = ""
if wordNextNext[:10] == "nachmittag":
used += 1
remainder = "pm"
elif wordNextNext == "am" and wordNextNextNext == \
"nachmittag":
used += 2
remainder = "pm"
elif wordNextNext[:5] == "abend":
used += 1
remainder = "pm"
elif wordNextNext == "am" and wordNextNextNext == \
"abend":
used += 2
remainder = "pm"
elif wordNextNext[:7] == "morgens":
used += 1
remainder = "am"
elif wordNextNext == "am" and wordNextNextNext == \
"morgen":
used += 2
remainder = "am"
elif wordNextNext == "nachts":
used += 1
if 8 <= int(word) <= 12:
remainder = "pm"
else:
remainder = "am"
elif is_numeric(wordNextNext):
strMM = wordNextNext
used += 1
if wordNextNextNext == timeQualifier:
if wordNextNextNext[:10] == "nachmittag":
used += 1
remainder = "pm"
elif wordNextNextNext == "am" and \
wordNextNextNextNext == "nachmittag":
used += 2
remainder = "pm"
elif wordNextNextNext[:5] == "abend":
used += 1
remainder = "pm"
elif wordNextNextNext == "am" and \
wordNextNextNextNext == "abend":
used += 2
remainder = "pm"
elif wordNextNextNext[:7] == "morgens":
used += 1
remainder = "am"
elif wordNextNextNext == "am" and \
wordNextNextNextNext == "morgen":
used += 2
remainder = "am"
elif wordNextNextNext == "nachts":
used += 1
if 8 <= int(word) <= 12:
remainder = "pm"
else:
remainder = "am"
elif wordNext == timeQualifier:
strHH = word
strMM = 00
isTime = True
if wordNext[:10] == "nachmittag":
used += 1
remainder = "pm"
elif wordNext == "am" and wordNextNext == "nachmittag":
used += 2
remainder = "pm"
elif wordNext[:5] == "abend":
used += 1
remainder = "pm"
elif wordNext == "am" and wordNextNext == "abend":
used += 2
remainder = "pm"
elif wordNext[:7] == "morgens":
used += 1
remainder = "am"
elif wordNext == "am" and wordNextNext == "morgen":
used += 2
remainder = "am"
elif wordNext == "nachts":
used += 1
if 8 <= int(word) <= 12:
remainder = "pm"
else:
remainder = "am"
# if timeQualifier != "":
# military = True
# else:
# isTime = False
strHH = int(strHH) if strHH else 0
strMM = int(strMM) if strMM else 0
strHH = strHH + 12 if remainder == "pm" and strHH < 12 else strHH
strHH = strHH - 12 if remainder == "am" and strHH >= 12 else strHH
if strHH > 24 or strMM > 59:
isTime = False
used = 0
if isTime:
hrAbs = strHH * 1
minAbs = strMM * 1
used += 1
if used > 0:
# removed parsed words from the sentence
for i in range(used):
words[idx + i] = ""
if wordPrev == "Uhr":
words[words.index(wordPrev)] = ""
if wordPrev == u"früh":
hrOffset = -1
words[idx - 1] = ""
idx -= 1
elif wordPrev == u"spät":
hrOffset = 1
words[idx - 1] = ""
idx -= 1
if idx > 0 and wordPrev in markers:
words[idx - 1] = ""
if idx > 1 and wordPrevPrev in markers:
words[idx - 2] = ""
idx += used - 1
found = True
# check that we found a date
    if not date_found():
return None
if dayOffset is False:
dayOffset = 0
# perform date manipulation
extractedDate = dateNow
extractedDate = extractedDate.replace(microsecond=0,
second=0,
minute=0,
hour=0)
if datestr != "":
en_months = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
en_monthsShort = ['jan', 'feb', 'mar', 'apr', 'may', 'june', 'july',
'aug',
'sept', 'oct', 'nov', 'dec']
for idx, en_month in enumerate(en_months):
datestr = datestr.replace(months[idx], en_month)
for idx, en_month in enumerate(en_monthsShort):
datestr = datestr.replace(monthsShort[idx], en_month)
temp = datetime.strptime(datestr, "%B %d")
if not hasYear:
temp = temp.replace(year=extractedDate.year)
if extractedDate < temp:
extractedDate = extractedDate.replace(year=int(currentYear),
month=int(
temp.strftime(
"%m")),
day=int(temp.strftime(
"%d")))
else:
extractedDate = extractedDate.replace(
year=int(currentYear) + 1,
month=int(temp.strftime("%m")),
day=int(temp.strftime("%d")))
else:
extractedDate = extractedDate.replace(
year=int(temp.strftime("%Y")),
month=int(temp.strftime("%m")),
day=int(temp.strftime("%d")))
if timeStr != "":
temp = datetime(timeStr)
extractedDate = extractedDate.replace(hour=temp.strftime("%H"),
minute=temp.strftime("%M"),
second=temp.strftime("%S"))
if yearOffset != 0:
extractedDate = extractedDate + relativedelta(years=yearOffset)
if monthOffset != 0:
extractedDate = extractedDate + relativedelta(months=monthOffset)
if dayOffset != 0:
extractedDate = extractedDate + relativedelta(days=dayOffset)
if hrAbs != -1 and minAbs != -1:
extractedDate = extractedDate + relativedelta(hours=hrAbs,
minutes=minAbs)
if (hrAbs != 0 or minAbs != 0) and datestr == "":
if not daySpecified and dateNow > extractedDate:
extractedDate = extractedDate + relativedelta(days=1)
if hrOffset != 0:
extractedDate = extractedDate + relativedelta(hours=hrOffset)
if minOffset != 0:
extractedDate = extractedDate + relativedelta(minutes=minOffset)
if secOffset != 0:
extractedDate = extractedDate + relativedelta(seconds=secOffset)
for idx, word in enumerate(words):
if words[idx] == "und" and words[idx - 1] == "" \
and words[idx + 1] == "":
words[idx] = ""
resultStr = " ".join(words)
resultStr = ' '.join(resultStr.split())
return [extractedDate, resultStr]
def isFractional_de(input_str):
"""
This function takes the given text and checks if it is a fraction.
Args:
input_str (str): the string to check if fractional
Returns:
(bool) or (float): False if not a fraction, otherwise the fraction
"""
if input_str.lower().startswith("halb"):
return 0.5
if input_str.lower() == "drittel":
return 1.0 / 3
elif input_str.endswith('tel'):
if input_str.endswith('stel'):
input_str = input_str[:len(input_str) - 4] # e.g. "hundertstel"
else:
input_str = input_str[:len(input_str) - 3] # e.g. "fünftel"
if input_str.lower() in de_numbers:
return 1.0 / (de_numbers[input_str.lower()])
return False
def isOrdinal_de(input_str):
"""
This function takes the given text and checks if it is an ordinal number.
Args:
input_str (str): the string to check if ordinal
Returns:
(bool) or (float): False if not an ordinal, otherwise the number
corresponding to the ordinal
ordinals for 1, 3, 7 and 8 are irregular
only works for ordinals corresponding to the numbers in de_numbers
"""
lowerstr = input_str.lower()
if lowerstr.startswith("erste"):
return 1
if lowerstr.startswith("dritte"):
return 3
if lowerstr.startswith("siebte"):
return 7
if lowerstr.startswith("achte"):
return 8
if lowerstr[-3:] == "ste": # from 20 suffix is -ste*
lowerstr = lowerstr[:-3]
if lowerstr in de_numbers:
return de_numbers[lowerstr]
if lowerstr[-4:] in ["ster", "stes", "sten", "stem"]:
lowerstr = lowerstr[:-4]
if lowerstr in de_numbers:
return de_numbers[lowerstr]
if lowerstr[-2:] == "te": # below 20 suffix is -te*
lowerstr = lowerstr[:-2]
if lowerstr in de_numbers:
return de_numbers[lowerstr]
if lowerstr[-3:] in ["ter", "tes", "ten", "tem"]:
lowerstr = lowerstr[:-3]
if lowerstr in de_numbers:
return de_numbers[lowerstr]
return False
def normalize_de(text, remove_articles):
""" German string normalization """
words = text.split() # this also removed extra spaces
normalized = ""
for word in words:
if remove_articles and word in ["der", "die", "das", "des", "den",
"dem"]:
continue
# Expand common contractions, e.g. "isn't" -> "is not"
contraction = ["net", "nett"]
if word in contraction:
expansion = ["nicht", "nicht"]
word = expansion[contraction.index(word)]
# Convert numbers into digits, e.g. "two" -> "2"
if word in de_numbers:
word = str(de_numbers[word])
normalized += " " + word
return normalized[1:] # strip the initial space
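# Usage sketch. The expected values are inferred from the lookup tables above
# and should be read as assumptions, not guaranteed behaviour of this module:
#
# extractnumber_de("zwei Pferde")        # -> 2
# extractnumber_de("ein halbes Glas")    # -> 0.5
# isFractional_de("drittel")             # -> 0.333...
# isOrdinal_de("zweiter")                # -> 2
# normalize_de("das ist ein Test", remove_articles=True)   # -> "ist 1 Test"
# extract_datetime_de("freitag abend", datetime.now())
#     # -> [datetime of the coming Friday evening, leftover string]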
|
the-stack_106_24771 | from __future__ import absolute_import, print_function
import collections
import logging
import six
from django.conf import settings
from django.db import transaction
from django.utils.encoding import force_text
from sentry.utils import json
from sentry.utils.strings import truncatechars
def safe_execute(func, *args, **kwargs):
# TODO: we should make smart savepoints (only executing the savepoint server
# side if we execute a query)
_with_transaction = kwargs.pop('_with_transaction', True)
expected_errors = kwargs.pop('expected_errors', None)
_passthrough_errors = kwargs.pop('_passthrough_errors', None)
try:
if _with_transaction:
with transaction.atomic():
result = func(*args, **kwargs)
else:
result = func(*args, **kwargs)
except Exception as e:
if _passthrough_errors and isinstance(e, _passthrough_errors):
raise
if hasattr(func, 'im_class'):
cls = func.im_class
else:
cls = func.__class__
func_name = getattr(func, '__name__', six.text_type(func))
cls_name = cls.__name__
logger = logging.getLogger('sentry.safe.%s' % (cls_name.lower(), ))
if expected_errors and isinstance(e, expected_errors):
logger.info('%s.process_error_ignored', func_name, extra={'exception': e})
return
logger.error('%s.process_error', func_name, exc_info=True, extra={'exception': e})
else:
return result
def trim(
value,
max_size=settings.SENTRY_MAX_VARIABLE_SIZE,
max_depth=6,
object_hook=None,
_depth=0,
_size=0,
**kwargs
):
"""
Truncates a value to ```MAX_VARIABLE_SIZE```.
The method of truncation depends on the type of value.
"""
options = {
'max_depth': max_depth,
'max_size': max_size,
'object_hook': object_hook,
'_depth': _depth + 1,
}
if _depth > max_depth:
if not isinstance(value, six.string_types):
value = json.dumps(value)
return trim(value, _size=_size, max_size=max_size)
elif isinstance(value, dict):
result = {}
_size += 2
for k in sorted(value.keys()):
v = value[k]
trim_v = trim(v, _size=_size, **options)
result[k] = trim_v
_size += len(force_text(trim_v)) + 1
if _size >= max_size:
break
elif isinstance(value, (list, tuple)):
result = []
_size += 2
for v in value:
trim_v = trim(v, _size=_size, **options)
result.append(trim_v)
_size += len(force_text(trim_v))
if _size >= max_size:
break
if isinstance(value, tuple):
result = tuple(result)
elif isinstance(value, six.string_types):
result = truncatechars(value, max_size - _size)
else:
result = value
if object_hook is None:
return result
return object_hook(result)
def trim_pairs(iterable, max_items=settings.SENTRY_MAX_DICTIONARY_ITEMS, **kwargs):
max_items -= 1
result = []
for idx, item in enumerate(iterable):
key, value = item
result.append((key, trim(value, **kwargs)))
if idx > max_items:
return result
return result
def trim_dict(value, max_items=settings.SENTRY_MAX_DICTIONARY_ITEMS, **kwargs):
max_items -= 1
for idx, key in enumerate(list(iter(value))):
value[key] = trim(value[key], **kwargs)
if idx > max_items:
del value[key]
return value
def get_path(data, *path, **kwargs):
"""
Safely resolves data from a recursive data structure. A value is only
returned if the full path exists, otherwise ``None`` is returned.
If the ``default`` argument is specified, it is returned instead of ``None``.
If the ``filter`` argument is specified and the value is a list, it is
filtered with the given callback. Alternatively, pass ``True`` as filter to
only filter ``None`` values.
"""
default = kwargs.pop('default', None)
f = kwargs.pop('filter', None)
for k in kwargs:
raise TypeError("set_path() got an undefined keyword argument '%s'" % k)
for p in path:
if isinstance(data, collections.Mapping) and p in data:
data = data[p]
elif isinstance(data, (list, tuple)) and -len(data) <= p < len(data):
data = data[p]
else:
return default
if f and data and isinstance(data, (list, tuple)):
data = list(filter((lambda x: x is not None) if f is True else f, data))
return data if data is not None else default
def set_path(data, *path, **kwargs):
"""
Recursively traverses or creates the specified path and sets the given value
argument. `None` is treated like a missing value. If a non-mapping item is
encountered while traversing, the value is not set.
This function is equivalent to a recursive dict.__setitem__. Returns True if
the value was set, otherwise False.
If the ``overwrite` kwarg is set to False, the value is only set if there is
no existing value or it is None. See ``setdefault_path``.
"""
try:
value = kwargs.pop('value')
except KeyError:
raise TypeError("set_path() requires a 'value' keyword argument")
overwrite = kwargs.pop('overwrite', True)
for k in kwargs:
raise TypeError("set_path() got an undefined keyword argument '%s'" % k)
for p in path[:-1]:
if not isinstance(data, collections.Mapping):
return False
if data.get(p) is None:
data[p] = {}
data = data[p]
if not isinstance(data, collections.Mapping):
return False
p = path[-1]
if overwrite or data.get(p) is None:
data[p] = value
return True
return False
def setdefault_path(data, *path, **kwargs):
"""
Recursively traverses or creates the specified path and sets the given value
argument if it does not exist. `None` is treated like a missing value. If a
non-mapping item is encountered while traversing, the value is not set.
This function is equivalent to a recursive dict.setdefault, except for None
values. Returns True if the value was set, otherwise False.
"""
kwargs['overwrite'] = False
return set_path(data, *path, **kwargs)
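# Usage sketch (illustrative values; trim()'s defaults read Django settings, so
# a configured settings module is assumed):
#
# event = {'exception': {'values': [None, {'type': 'ValueError'}]}}
# get_path(event, 'exception', 'values', filter=True)
#     # -> [{'type': 'ValueError'}]
# set_path(event, 'tags', 'handled', value='no')           # -> True, creates event['tags']
# setdefault_path(event, 'tags', 'handled', value='yes')   # -> False, 'no' is kept
# trim('x' * 10000, max_size=512)   # -> string truncated to roughly 512 chars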
|
the-stack_106_24772 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Ipv6ExpressRouteCircuitPeeringConfig(Model):
"""Contains IPv6 peering config.
:param primary_peer_address_prefix: The primary address prefix.
:type primary_peer_address_prefix: str
:param secondary_peer_address_prefix: The secondary address prefix.
:type secondary_peer_address_prefix: str
:param microsoft_peering_config: The Microsoft peering configuration.
:type microsoft_peering_config:
~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeeringConfig
:param route_filter: The reference of the RouteFilter resource.
:type route_filter: ~azure.mgmt.network.v2018_06_01.models.RouteFilter
:param state: The state of peering. Possible values are: 'Disabled' and
'Enabled'. Possible values include: 'Disabled', 'Enabled'
:type state: str or
~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeeringState
"""
_attribute_map = {
'primary_peer_address_prefix': {'key': 'primaryPeerAddressPrefix', 'type': 'str'},
'secondary_peer_address_prefix': {'key': 'secondaryPeerAddressPrefix', 'type': 'str'},
'microsoft_peering_config': {'key': 'microsoftPeeringConfig', 'type': 'ExpressRouteCircuitPeeringConfig'},
'route_filter': {'key': 'routeFilter', 'type': 'RouteFilter'},
'state': {'key': 'state', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Ipv6ExpressRouteCircuitPeeringConfig, self).__init__(**kwargs)
self.primary_peer_address_prefix = kwargs.get('primary_peer_address_prefix', None)
self.secondary_peer_address_prefix = kwargs.get('secondary_peer_address_prefix', None)
self.microsoft_peering_config = kwargs.get('microsoft_peering_config', None)
self.route_filter = kwargs.get('route_filter', None)
self.state = kwargs.get('state', None)
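# Illustrative construction (the prefixes below are placeholder documentation
# addresses, not real peering values):
#
# config = Ipv6ExpressRouteCircuitPeeringConfig(
#     primary_peer_address_prefix='3FFE:FFFF:0:CD30::/126',
#     secondary_peer_address_prefix='3FFE:FFFF:0:CD31::/126',
#     state='Enabled',
# )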
|
the-stack_106_24773 | # coding: utf-8
from django import forms
from django.forms.models import modelform_factory
from wagtail.images.edit_handlers import AdminImageChooser
def get_embed_video_form(model):
if hasattr(model, 'admin_form_fields'):
fields = model.admin_form_fields
else:
fields = '__all__'
return modelform_factory(
model,
fields=fields,
widgets={
'thumbnail': AdminImageChooser,
})
class EmbedVideoInsertionForm(forms.Form):
alt_text = forms.CharField()
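# Usage sketch (``EmbedVideo`` stands in for a concrete model that defines
# ``admin_form_fields``, mirroring how wagtail image/document models do it;
# the import path is an assumption):
#
# from myapp.models import EmbedVideo
#
# EmbedVideoForm = get_embed_video_form(EmbedVideo)
# form = EmbedVideoForm(instance=video)   # thumbnail rendered with AdminImageChooser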
|